#!/usr/bin/env python
"""
Create and install a Let's Encrypt cert for an API Gateway.
This file is a descendant of @diafygi's 'acme-tiny',
with http-01 replaced with dns-01 via AWS Route 53.
You must generate your own account.key:
openssl genrsa 2048 > account.key # Keep it secret, keep it safe!
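A hedged usage sketch (the Zappa instance, Lambda name, stage and domain below
are illustrative, not fixed values):
    get_cert_and_update_domain(zappa_instance, 'my-lambda', 'production',
                               domain='api.example.com')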
"""
import base64
import copy
import json
import hashlib
import logging
import re
import subprocess
import os
import time
import binascii
import textwrap
import requests
try:
from urllib.request import urlopen # Python 3
except ImportError:
from urllib2 import urlopen # Python 2
# Staging
# Amazon doesn't accept these though.
# DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
# Production
DEFAULT_CA = "https://acme-v01.api.letsencrypt.org"
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
def get_cert_and_update_domain(
zappa_instance,
lambda_name,
api_stage,
domain=None,
clean_up=True,
manual=False,
):
"""
Main cert installer path.
"""
try:
create_domain_key()
create_domain_csr(domain)
get_cert(zappa_instance)
create_chained_certificate()
with open('/tmp/signed.crt') as f:
certificate_body = f.read()
with open('/tmp/domain.key') as f:
certificate_private_key = f.read()
with open('/tmp/intermediate.pem') as f:
certificate_chain = f.read()
if not manual:
if domain:
if not zappa_instance.get_domain_name(domain):
zappa_instance.create_domain_name(
domain_name=domain,
certificate_name=domain + "-Zappa-LE-Cert",
certificate_body=certificate_body,
certificate_private_key=certificate_private_key,
certificate_chain=certificate_chain,
certificate_arn=None,
lambda_name=lambda_name,
stage=api_stage
)
print("Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part.")
else:
zappa_instance.update_domain_name(
domain_name=domain,
certificate_name=domain + "-Zappa-LE-Cert",
certificate_body=certificate_body,
certificate_private_key=certificate_private_key,
certificate_chain=certificate_chain,
certificate_arn=None,
lambda_name=lambda_name,
stage=api_stage
)
else:
print("Cerificate body:\n")
print(certificate_body)
print("\nCerificate private key:\n")
print(certificate_private_key)
print("\nCerificate chain:\n")
print(certificate_chain)
except Exception as e:
print(e)
return False
if clean_up:
cleanup()
return True
def create_domain_key():
    """
    Generate a 2048-bit RSA private key for the domain at /tmp/domain.key.
    """
proc = subprocess.Popen(
["openssl genrsa 2048 > /tmp/domain.key"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
out, err = proc.communicate()
if proc.returncode != 0: # pragma: no cover
raise IOError("OpenSSL Error: {0}".format(err))
return True
def create_domain_csr(domain):
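    """
    Build a CSR for the given domain using /tmp/domain.key and write it to
    /tmp/domain.csr.
    """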
subj = "/CN=" + domain
cmd = 'openssl req -new -sha256 -key /tmp/domain.key -subj "{}" > /tmp/domain.csr'.format(subj)
proc = subprocess.Popen(
[cmd],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
out, err = proc.communicate()
if proc.returncode != 0: # pragma: no cover
raise IOError("OpenSSL Error: {0}".format(err))
return True
def create_chained_certificate():
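    """
    Download the Let's Encrypt intermediate certificate and concatenate it with
    the signed certificate to produce the full chain.
    """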
cross_cert_url = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
cert = requests.get(cross_cert_url)
with open('/tmp/intermediate.pem', 'wb') as intermediate_pem:
intermediate_pem.write(cert.content)
proc = subprocess.Popen(
["cat /tmp/signed.crt /tmp/intermediate.pem > /tmp/chained.pem"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
out, err = proc.communicate()
if proc.returncode != 0: # pragma: no cover
raise IOError("Error: {0}".format(err))
return True
def parse_account_key():
"""Parse account key to get public key"""
LOGGER.info("Parsing account key...")
proc = subprocess.Popen(
["openssl rsa -in /tmp/account.key -noout -text"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
out, err = proc.communicate()
if proc.returncode != 0: # pragma: no cover
raise IOError("OpenSSL Error: {0}".format(err))
return out
def parse_csr():
"""
Parse certificate signing request for domains
"""
LOGGER.info("Parsing CSR...")
csr_filename = '/tmp/domain.csr'
proc = subprocess.Popen(
["openssl req -in {} -noout -text".format(csr_filename)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
out, err = proc.communicate()
if proc.returncode != 0: # pragma: no cover
raise IOError("Error loading {0}: {1}".format(csr_filename, err))
domains = set([])
common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
return domains
def get_boulder_header(key_bytes):
"""
Use regular expressions to find crypto values from parsed account key,
and return a header we can send to our Boulder instance.
"""
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
key_bytes.decode('utf8'), re.MULTILINE | re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
header = {
"alg": "RS256",
"jwk": {
"e": _b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
"kty": "RSA",
"n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
},
}
return header
def register_account():
"""
Agree to LE TOS
"""
LOGGER.info("Registering account...")
code, result = _send_signed_request(DEFAULT_CA + "/acme/new-reg", {
"resource": "new-reg",
"agreement": "https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf",
})
if code == 201: # pragma: no cover
LOGGER.info("Registered!")
elif code == 409: # pragma: no cover
LOGGER.info("Already registered!")
else: # pragma: no cover
raise ValueError("Error registering: {0} {1}".format(code, result))
def get_cert(zappa_instance, log=LOGGER, CA=DEFAULT_CA):
"""
    Call Let's Encrypt to get a new signed certificate.
"""
out = parse_account_key()
header = get_boulder_header(out)
accountkey_json = json.dumps(header['jwk'], sort_keys=True, separators=(',', ':'))
thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
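    # The account key thumbprint is later combined with each challenge token
    # to form the key authorization string.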
# find domains
domains = parse_csr()
# get the certificate domains and expiration
register_account()
# verify each domain
for domain in domains:
log.info("Verifying {0}...".format(domain))
# get new challenge
code, result = _send_signed_request(CA + "/acme/new-authz", {
"resource": "new-authz",
"identifier": {"type": "dns", "value": domain},
})
if code != 201:
raise ValueError("Error requesting challenges: {0} {1}".format(code, result))
challenge = [ch for ch in json.loads(result.decode('utf8'))['challenges'] if ch['type'] == "dns-01"][0]
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = "{0}.{1}".format(token, thumbprint).encode('utf-8')
        # The dns-01 TXT record value is the base64url-encoded SHA-256 digest
        # of the key authorization.
digest = _b64(hashlib.sha256(keyauthorization).digest())
zone_id = zappa_instance.get_hosted_zone_id_for_domain(domain)
if not zone_id:
raise ValueError("Could not find Zone ID for: " + domain)
zappa_instance.set_dns_challenge_txt(zone_id, domain, digest) # resp is unused
print("Waiting for DNS to propagate..")
        # Wait for the TXT record to propagate before asking the CA to validate.
time.sleep(45)
        # Notify the CA that the challenge is ready for validation.
code, result = _send_signed_request(challenge['uri'], {
"resource": "challenge",
"keyAuthorization": keyauthorization.decode('utf-8'),
})
if code != 202:
raise ValueError("Error triggering challenge: {0} {1}".format(code, result))
# wait for challenge to be verified
verify_challenge(challenge['uri'])
# Challenge verified, clean up R53
zappa_instance.remove_dns_challenge_txt(zone_id, domain, digest)
# Sign
result = sign_certificate()
    # Encode to PEM format
encode_certificate(result)
return True
def verify_challenge(uri):
"""
Loop until our challenge is verified, else fail.
"""
while True:
try:
resp = urlopen(uri)
challenge_status = json.loads(resp.read().decode('utf8'))
except IOError as e:
raise ValueError("Error checking challenge: {0} {1}".format(
e.code, json.loads(e.read().decode('utf8'))))
if challenge_status['status'] == "pending":
time.sleep(2)
elif challenge_status['status'] == "valid":
LOGGER.info("Domain verified!")
break
else:
raise ValueError("Domain challenge did not pass: {0}".format(
challenge_status))
def sign_certificate():
"""
Get the new certificate.
Returns the signed bytes.
"""
LOGGER.info("Signing certificate...")
proc = subprocess.Popen(
["openssl req -in /tmp/domain.csr -outform DER"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
csr_der, err = proc.communicate()
code, result = _send_signed_request(DEFAULT_CA + "/acme/new-cert", {
"resource": "new-cert",
"csr": _b64(csr_der),
})
if code != 201:
raise ValueError("Error signing certificate: {0} {1}".format(code, result))
LOGGER.info("Certificate signed!")
return result
def encode_certificate(result):
"""
Encode cert bytes to PEM encoded cert file.
"""
cert_body = """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
"\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64)))
    with open("/tmp/signed.crt", "w") as signed_crt:
        signed_crt.write(cert_body)
return True
##
# Request Utility
##
def _b64(b):
"""
Helper function base64 encode for jose spec
"""
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
def _send_signed_request(url, payload):
"""
Helper function to make signed requests to Boulder
"""
payload64 = _b64(json.dumps(payload).encode('utf8'))
out = parse_account_key()
header = get_boulder_header(out)
protected = copy.deepcopy(header)
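    # Every signed request must carry a fresh anti-replay nonce from the CA.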
protected["nonce"] = urlopen(DEFAULT_CA + "/directory").headers['Replay-Nonce']
protected64 = _b64(json.dumps(protected).encode('utf8'))
proc = subprocess.Popen(
["openssl dgst -sha256 -sign /tmp/account.key"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode('utf8'))
if proc.returncode != 0: # pragma: no cover
raise IOError("OpenSSL Error: {0}".format(err))
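    # Assemble the JWS object Boulder expects: unprotected header, protected
    # header, payload, and the base64url-encoded RSA signature.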
data = json.dumps({
"header": header, "protected": protected64,
"payload": payload64, "signature": _b64(out),
})
try:
resp = urlopen(url, data.encode('utf8'))
return resp.getcode(), resp.read()
except IOError as e:
return getattr(e, "code", None), getattr(e, "read", e.__str__)()
##
# File Utility
##
def cleanup():
"""
Delete any temporary files.
"""
filenames = [
'/tmp/account.key',
'/tmp/domain.key',
'/tmp/key.key',
'/tmp/domain.csr',
'/tmp/signed.crt',
'/tmp/intermediate.pem',
'/tmp/chained.pem',
'/tmp/lets-encrypt-x3-cross-signed.pem'
]
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
return True
#!/usr/bin/env python
from __future__ import print_function
import json
import os
import re
import sys
import logging
import getpass
import random
import requests
import string
import traceback
import inspect
import pickle
from time import sleep
import py
import pytest
from six import print_ as print
from six import integer_types, string_types
from requests.exceptions import ConnectionError
# import sys
# from imp import reload
# reload(sys) # Reload does the trick!
# sys.setdefaultencoding('UTF8')
if sys.version_info < (2, 7, 0):
try:
import unittest2 as unittest
except ImportError:
import pip
if hasattr(sys, 'real_prefix'):
pip.main(['install', '--upgrade', 'unittest2'])
else:
pip.main(['install', '--upgrade', '--user', 'unittest2'])
import unittest2 as unittest
else:
import unittest
cmd_folder = os.path.abspath(os.path.join(os.path.split(inspect.getfile(
inspect.currentframe()))[0], ".."))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import jira
from jira import User, Role, Issue, JIRA, JIRAError, Project
from jira.resources import Resource, cls_for_resource
TEST_ROOT = os.path.dirname(__file__)
TEST_ICON_PATH = os.path.join(TEST_ROOT, 'icon.png')
TEST_ATTACH_PATH = os.path.join(TEST_ROOT, 'tests.py')
OAUTH = False
CONSUMER_KEY = 'oauth-consumer'
KEY_CERT_FILE = '/home/bspeakmon/src/atlassian-oauth-examples/rsa.pem'
KEY_CERT_DATA = None
try:
with open(KEY_CERT_FILE, 'r') as cert:
KEY_CERT_DATA = cert.read()
OAUTH = True
except:
pass
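# OAuth is only exercised when the example key cert file above is readable;
# otherwise the tests fall back to basic auth.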
def rndstr():
return ''.join(random.sample(string.ascii_letters, 6))
def rndpassword():
    # generates a password of length 14
s = ''.join(random.sample(string.ascii_uppercase, 5)) +\
''.join(random.sample(string.ascii_lowercase, 5)) +\
''.join(random.sample(string.digits, 2)) +\
''.join(random.sample('~`!@#$%^&*()_+-=[]\\{}|;\':<>?,./', 2))
return ''.join(random.sample(s, len(s)))
class Singleton(type):
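    # Metaclass implementing a simple singleton: the first instance is cached
    # on the class and returned for every later call. (JiraTestManager below
    # uses the Borg shared-state approach instead.)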
def __init__(cls, name, bases, dict):
super(Singleton, cls).__init__(name, bases, dict)
cls.instance = None
def __call__(cls, *args, **kw):
if cls.instance is None:
cls.instance = super(Singleton, cls).__call__(*args, **kw)
return cls.instance
class JiraTestManager(object):
"""
Used to instantiate and populate the JIRA instance with data used by the unit tests.
"""
# __metaclass__ = Singleton
# __instance = None
#
# Singleton implementation
# def __new__(cls, *args, **kwargs):
# if not cls.__instance:
# cls.__instance = super(JiraTestManager, cls).__new__(
# cls, *args, **kwargs)
# return cls.__instance
# Implementing some kind of Singleton, to prevent test initialization
# http://stackoverflow.com/questions/31875/is-there-a-simple-elegant-way-to-define-singletons-in-python/33201#33201
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
if not self.__dict__:
self.initialized = 0
try:
if 'CI_JIRA_URL' in os.environ:
self.CI_JIRA_URL = os.environ['CI_JIRA_URL']
self.max_retries = 5
else:
self.CI_JIRA_URL = "https://pycontribs.atlassian.net"
self.max_retries = 5
if 'CI_JIRA_ADMIN' in os.environ:
self.CI_JIRA_ADMIN = os.environ['CI_JIRA_ADMIN']
else:
self.CI_JIRA_ADMIN = 'ci-admin'
if 'CI_JIRA_ADMIN_PASSWORD' in os.environ:
self.CI_JIRA_ADMIN_PASSWORD = os.environ[
'CI_JIRA_ADMIN_PASSWORD']
else:
self.CI_JIRA_ADMIN_PASSWORD = 'sd4s3dgec5fhg4tfsds3434'
if 'CI_JIRA_USER' in os.environ:
self.CI_JIRA_USER = os.environ['CI_JIRA_USER']
else:
self.CI_JIRA_USER = 'ci-user'
if 'CI_JIRA_USER_PASSWORD' in os.environ:
self.CI_JIRA_USER_PASSWORD = os.environ[
'CI_JIRA_USER_PASSWORD']
else:
self.CI_JIRA_USER_PASSWORD = 'sd4s3dgec5fhg4tfsds3434'
if OAUTH:
self.jira_admin = JIRA(oauth={
'access_token': 'hTxcwsbUQiFuFALf7KZHDaeAJIo3tLUK',
'access_token_secret': 'aNCLQFP3ORNU6WY7HQISbqbhf0UudDAf',
'consumer_key': CONSUMER_KEY,
'key_cert': KEY_CERT_DATA,
})
else:
if self.CI_JIRA_ADMIN:
self.jira_admin = JIRA(self.CI_JIRA_URL, basic_auth=(self.CI_JIRA_ADMIN,
self.CI_JIRA_ADMIN_PASSWORD),
logging=False, validate=True, max_retries=self.max_retries)
else:
self.jira_admin = JIRA(self.CI_JIRA_URL, validate=True,
logging=False, max_retries=self.max_retries)
if self.jira_admin.current_user() != self.CI_JIRA_ADMIN:
# self.jira_admin.
self.initialized = 1
sys.exit(3)
if OAUTH:
self.jira_sysadmin = JIRA(oauth={
'access_token': '4ul1ETSFo7ybbIxAxzyRal39cTrwEGFv',
'access_token_secret':
'K83jBZnjnuVRcfjBflrKyThJa0KSjSs2',
'consumer_key': CONSUMER_KEY,
'key_cert': KEY_CERT_DATA,
}, logging=False, max_retries=self.max_retries)
else:
if self.CI_JIRA_ADMIN:
self.jira_sysadmin = JIRA(self.CI_JIRA_URL,
basic_auth=(self.CI_JIRA_ADMIN,
self.CI_JIRA_ADMIN_PASSWORD),
logging=False, validate=True, max_retries=self.max_retries)
else:
self.jira_sysadmin = JIRA(self.CI_JIRA_URL,
logging=False, max_retries=self.max_retries)
if OAUTH:
self.jira_normal = JIRA(oauth={
'access_token': 'ZVDgYDyIQqJY8IFlQ446jZaURIz5ECiB',
'access_token_secret':
'5WbLBybPDg1lqqyFjyXSCsCtAWTwz1eD',
'consumer_key': CONSUMER_KEY,
'key_cert': KEY_CERT_DATA,
})
else:
if self.CI_JIRA_ADMIN:
self.jira_normal = JIRA(self.CI_JIRA_URL,
basic_auth=(self.CI_JIRA_USER,
self.CI_JIRA_USER_PASSWORD),
validate=True, logging=False, max_retries=self.max_retries)
else:
self.jira_normal = JIRA(self.CI_JIRA_URL,
validate=True, logging=False, max_retries=self.max_retries)
# now we need some data to start with for the tests
                # jira project key is max 10 chars; ours is built from uppercase letters only.
# [0] always "Z"
# [1-6] username running the tests (hope we will not collide)
# [7-8] python version A=0, B=1,..
# [9] A,B -- we may need more than one project
prefix = 'Z' + (re.sub("[^A-Z]", "",
getpass.getuser().upper()))[0:6] + \
chr(ord('A') + sys.version_info[0]) + \
chr(ord('A') + sys.version_info[1])
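                # e.g. user 'johndoe' on Python 2.7 -> prefix 'ZJOHNDOCH' (illustrative)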
self.project_a = prefix + 'A' # old XSS
self.project_a_name = "Test user=%s python=%s.%s A" \
% (getpass.getuser(), sys.version_info[0],
sys.version_info[1])
self.project_b_name = "Test user=%s python=%s.%s B" \
% (getpass.getuser(), sys.version_info[0],
sys.version_info[1])
self.project_b = prefix + 'B' # old BULK
                # TODO: find a way to prevent SecurityTokenMissing for On Demand
# https://jira.atlassian.com/browse/JRA-39153
try:
self.jira_admin.project(self.project_a)
except Exception as e:
logging.warning(e)
pass
else:
self.jira_admin.delete_project(self.project_a)
try:
self.jira_admin.project(self.project_b)
except Exception as e:
logging.warning(e)
pass
else:
self.jira_admin.delete_project(self.project_b)
# try:
self.jira_admin.create_project(self.project_a,
self.project_a_name)
# except Exception as e:
# logging.warning("Got %s" % e)
# try:
# assert self.jira_admin.create_project(self.project_b,
# self.project_b_name) is True, "Failed to create %s" %
# self.project_b
self.jira_admin.create_project(self.project_b,
self.project_b_name)
# except Exception as e:
# logging.warning("Got %s" % e)
self.project_b_issue1_obj = self.jira_admin.create_issue(project=self.project_b,
summary='issue 1 from %s'
% self.project_b,
issuetype={'name': 'Bug'})
self.project_b_issue1 = self.project_b_issue1_obj.key
self.project_b_issue2_obj = self.jira_admin.create_issue(project=self.project_b,
summary='issue 2 from %s'
% self.project_b,
issuetype={'name': 'Bug'})
self.project_b_issue2 = self.project_b_issue2_obj.key
self.project_b_issue3_obj = self.jira_admin.create_issue(project=self.project_b,
summary='issue 3 from %s'
% self.project_b,
issuetype={'name': 'Bug'})
self.project_b_issue3 = self.project_b_issue3_obj.key
except Exception as e:
# exc_type, exc_value, exc_traceback = sys.exc_info()
formatted_lines = traceback.format_exc().splitlines()
msg = "Basic test setup failed: %s\n\t%s" % (
e, "\n\t".join(formatted_lines))
logging.fatal(msg)
self.initialized = 1
py.test.exit("FATAL")
self.initialized = 1
else:
# already exist but we need to be sure it was initialized
counter = 0
while not self.initialized:
sleep(1)
counter += 1
if counter > 60:
                    logging.fatal("Something is clearly not right with "
                                  "initialization, killing the tests to "
                                  "prevent a deadlock.")
sys.exit(3)
def find_by_key(seq, key):
for seq_item in seq:
if seq_item['key'] == key:
return seq_item
def find_by_key_value(seq, key):
for seq_item in seq:
if seq_item.key == key:
return seq_item
def find_by_id(seq, id):
for seq_item in seq:
if seq_item.id == id:
return seq_item
def find_by_name(seq, name):
for seq_item in seq:
if seq_item['name'] == name:
return seq_item
class UniversalResourceTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
self.test_manager = JiraTestManager()
def test_universal_find_existing_resource(self):
resource = self.jira.find('issue/{0}',
self.test_manager.project_b_issue1)
issue = self.jira.issue(self.test_manager.project_b_issue1)
self.assertEqual(resource.self, issue.self)
self.assertEqual(resource.key, issue.key)
def test_find_invalid_resource_raises_exception(self):
with self.assertRaises(JIRAError) as cm:
self.jira.find('woopsydoodle/{0}', '666')
ex = cm.exception
# py26,27,34 gets 404 but on py33 gets 400
assert ex.status_code in [400, 404]
self.assertIsNotNone(ex.text)
self.assertEqual(ex.url,
'https://pycontribs.atlassian.net/rest/api/2/woopsydoodle/666')
@pytest.mark.xfail
# see https://github.com/pycontribs/jira/pull/30
def test_pickling_resource(self):
resource = self.jira.find('issue/{0}',
self.test_manager.project_b_issue1)
pickled = pickle.dumps(resource)
unpickled = pickle.loads(pickled)
self.assertEqual(resource.key, unpickled.key)
class ResourceTests(unittest.TestCase):
def setUp(self):
pass
def test_cls_for_resource(self):
self.assertEqual(cls_for_resource('https://jira.atlassian.com/rest/\
api/2/issue/JRA-1330'), Issue)
self.assertEqual(cls_for_resource('http://localhost:2990/jira/rest/\
api/2/project/BULK'), Project)
self.assertEqual(cls_for_resource('http://imaginary-jira.com/rest/\
api/2/project/IMG/role/10002'), Role)
self.assertEqual(cls_for_resource('http://customized-jira.com/rest/\
plugin-resource/4.5/json/getMyObject'), Resource)
class ApplicationPropertiesTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_application_properties(self):
props = self.jira.application_properties()
self.assertEqual(len(props), 30)
def test_application_property(self):
clone_prefix = self.jira.application_properties(
key='jira.lf.text.headingcolour')
self.assertEqual(clone_prefix['value'], '#292929')
def test_set_application_property(self):
prop = 'jira.lf.favicon.hires.url'
valid_value = '/jira-favicon-hires.png'
        invalid_value = '/Tjira-favicon-hires.png'
        counter = 0
        while self.jira.application_properties(key=prop)['value'] != valid_value and counter < 3:
if counter:
sleep(10)
self.jira.set_application_property(prop, invalid_value)
self.assertEqual(self.jira.application_properties(key=prop)['value'],
invalid_value)
self.jira.set_application_property(prop, valid_value)
self.assertEqual(self.jira.application_properties(key=prop)['value'],
valid_value)
counter += 1
def test_setting_bad_property_raises(self):
prop = 'random.nonexistent.property'
self.assertRaises(JIRAError, self.jira.set_application_property, prop,
'666')
class AttachmentTests(unittest.TestCase):
def setUp(self):
self.test_manager = JiraTestManager()
self.jira = JiraTestManager().jira_admin
self.project_b = self.test_manager.project_b
self.issue_1 = self.test_manager.project_b_issue1
self.attachment = None
def test_0_attachment_meta(self):
meta = self.jira.attachment_meta()
self.assertTrue(meta['enabled'])
self.assertEqual(meta['uploadLimit'], 10485760)
def test_1_add_remove_attachment(self):
issue = self.jira.issue(self.issue_1)
self.attachment = self.jira.add_attachment(issue, open(TEST_ATTACH_PATH, 'rb'),
"new test attachment")
new_attachment = self.jira.attachment(self.attachment.id)
msg = "attachment %s of issue %s" % (new_attachment.__dict__, issue)
self.assertEqual(
new_attachment.filename, 'new test attachment', msg=msg)
self.assertEqual(
new_attachment.size, os.path.getsize(TEST_ATTACH_PATH), msg=msg)
assert self.attachment.delete() is None
class ComponentTests(unittest.TestCase):
def setUp(self):
self.test_manager = JiraTestManager()
self.jira = JiraTestManager().jira_admin
self.project_b = self.test_manager.project_b
self.issue_1 = self.test_manager.project_b_issue1
self.issue_2 = self.test_manager.project_b_issue2
def test_2_create_component(self):
proj = self.jira.project(self.project_b)
name = "project-%s-component-%s" % (proj, rndstr())
component = self.jira.create_component(name,
proj, description='test!!', assigneeType='COMPONENT_LEAD',
isAssigneeTypeValid=False)
self.assertEqual(component.name, name)
self.assertEqual(component.description, 'test!!')
self.assertEqual(component.assigneeType, 'COMPONENT_LEAD')
self.assertFalse(component.isAssigneeTypeValid)
component.delete()
for c in self.jira.project_components(self.project_b):
            print(c)
    # Components field can't be modified from issue.update
# def test_component_count_related_issues(self):
# component = self.jira.create_component('PROJECT_B_TEST',self.project_b, description='test!!',
# assigneeType='COMPONENT_LEAD', isAssigneeTypeValid=False)
# issue1 = self.jira.issue(self.issue_1)
# issue2 = self.jira.issue(self.issue_2)
# (issue1.update ({'components': ['PROJECT_B_TEST']}))
# (issue2.update (components = ['PROJECT_B_TEST']))
# issue_count = self.jira.component_count_related_issues(component.id)
# self.assertEqual(issue_count, 2)
# component.delete()
def test_3_update(self):
try:
components = self.jira.project_components(self.project_b)
for component in components:
if component.name == 'To be updated':
component.delete()
break
except Exception as e:
# We ignore errors as this code intends only to prepare for
# component creation
pass
name = 'component-' + rndstr()
component = self.jira.create_component(name,
self.project_b, description='stand by!',
leadUserName='ci-admin')
name = 'renamed-' + name
component.update(name=name, description='It is done.',
leadUserName='ci-admin')
self.assertEqual(component.name, name)
self.assertEqual(component.description, 'It is done.')
self.assertEqual(component.lead.name, 'ci-admin')
component.delete()
def test_4_delete(self):
component = self.jira.create_component('To be deleted',
self.project_b, description='not long for this world')
myid = component.id
component.delete()
self.assertRaises(JIRAError, self.jira.component, myid)
class CustomFieldOptionTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_custom_field_option(self):
option = self.jira.custom_field_option('10001')
self.assertEqual(option.value, 'To Do')
class DashboardTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_dashboards(self):
dashboards = self.jira.dashboards()
self.assertEqual(len(dashboards), 3)
def test_dashboards_filter(self):
dashboards = self.jira.dashboards(filter='my')
self.assertEqual(len(dashboards), 2)
self.assertEqual(dashboards[0].id, '10101')
def test_dashboards_startat(self):
dashboards = self.jira.dashboards(startAt=1, maxResults=1)
self.assertEqual(len(dashboards), 1)
def test_dashboards_maxresults(self):
dashboards = self.jira.dashboards(maxResults=1)
self.assertEqual(len(dashboards), 1)
def test_dashboard(self):
dashboard = self.jira.dashboard('10101')
self.assertEqual(dashboard.id, '10101')
self.assertEqual(dashboard.name, 'Another test dashboard')
class FieldsTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_fields(self):
fields = self.jira.fields()
self.assertGreater(len(fields), 10)
class FilterTests(unittest.TestCase):
def setUp(self):
self.test_manager = JiraTestManager()
self.jira = JiraTestManager().jira_admin
self.project_b = self.test_manager.project_b
self.issue_1 = self.test_manager.project_b_issue1
self.issue_2 = self.test_manager.project_b_issue2
def test_filter(self):
jql = "project = %s and component is not empty" % self.project_b
name = 'same filter ' + rndstr()
myfilter = self.jira.create_filter(name=name,
description="just some new test filter", jql=jql,
favourite=False)
self.assertEqual(myfilter.name, name)
self.assertEqual(myfilter.owner.name, 'ci-admin')
myfilter.delete()
def test_favourite_filters(self):
# filters = self.jira.favourite_filters()
jql = "project = %s and component is not empty" % self.project_b
name = "filter-to-fav-" + rndstr()
myfilter = self.jira.create_filter(name=name,
description="just some new test filter", jql=jql,
favourite=True)
new_filters = self.jira.favourite_filters()
assert name in [f.name for f in new_filters]
myfilter.delete()
class GroupsTest(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_groups(self):
groups = self.jira.groups()
self.assertGreaterEqual(groups['total'], 0)
def test_groups_for_users(self):
groups = self.jira.groups('users')
self.assertIsNotNone(find_by_name(groups['groups'], 'users'))
def test_groups_with_exclude(self):
groups = self.jira.groups('users')
new_groups = self.jira.groups('users', exclude='users')
self.assertEqual(groups['total'] - 1, new_groups['total'])
def test_groups_for_jira(self):
groups = self.jira.groups('jira')
self.assertIsNotNone(find_by_name(groups['groups'], 'jira-users'))
class IssueTests(unittest.TestCase):
def setUp(self):
self.test_manager = JiraTestManager()
self.jira = JiraTestManager().jira_admin
self.jira_normal = self.test_manager.jira_normal
self.project_b = self.test_manager.project_b
self.project_a = self.test_manager.project_a
self.issue_1 = self.test_manager.project_b_issue1
self.issue_2 = self.test_manager.project_b_issue2
self.issue_3 = self.test_manager.project_b_issue3
def test_issue(self):
issue = self.jira.issue(self.issue_1)
self.assertEqual(issue.key, self.issue_1)
self.assertEqual(issue.fields.summary,
'issue 1 from %s' % self.project_b)
@unittest.skip("disabled as it seems to be ignored by jira, returning all")
def test_issue_field_limiting(self):
issue = self.jira.issue(self.issue_2, fields='summary,comment')
self.assertEqual(issue.fields.summary,
'issue 2 from %s' % self.project_b)
comment1 = self.jira.add_comment(issue, 'First comment')
comment2 = self.jira.add_comment(issue, 'Second comment')
comment3 = self.jira.add_comment(issue, 'Third comment')
self.jira.issue(self.issue_2, fields='summary,comment')
logging.warning(issue.raw['fields'])
self.assertFalse(hasattr(issue.fields, 'reporter'))
self.assertFalse(hasattr(issue.fields, 'progress'))
comment1.delete()
comment2.delete()
comment3.delete()
def test_issue_equal(self):
issue1 = self.jira.issue(self.issue_1)
issue2 = self.jira.issue(self.issue_2)
issues = self.jira.search_issues('key=%s' % self.issue_1)
self.assertTrue(issue1 == issues[0])
self.assertFalse(issue2 == issues[0])
def test_issue_expandos(self):
issue = self.jira.issue(self.issue_1, expand='editmeta,schema')
self.assertTrue(hasattr(issue, 'editmeta'))
self.assertTrue(hasattr(issue, 'schema'))
# testing for changelog is not reliable because it may exist or not based on test order
#self.assertFalse(hasattr(issue, 'changelog'))
def test_create_issue_with_fieldargs(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue created', description='blahery',
issuetype={'name': 'Bug'}, customfield_10022='XSS')
self.assertEqual(issue.fields.summary, 'Test issue created')
self.assertEqual(issue.fields.description, 'blahery')
self.assertEqual(issue.fields.issuetype.name, 'Bug')
self.assertEqual(issue.fields.project.key, self.project_b)
self.assertEqual(issue.fields.customfield_10022, 'XSS')
issue.delete()
def test_create_issue_with_fielddict(self):
fields = {
'project': {
'key': self.project_b
},
'summary': 'Issue created from field dict',
'description': "Some new issue for test",
'issuetype': {
'name': 'Bug'
},
'customfield_10022': 'XSS',
'priority': {
'name': 'Major'
}
}
issue = self.jira.create_issue(fields=fields)
self.assertEqual(issue.fields.summary,
'Issue created from field dict')
self.assertEqual(issue.fields.description, "Some new issue for test")
self.assertEqual(issue.fields.issuetype.name, 'Bug')
self.assertEqual(issue.fields.project.key, self.project_b)
self.assertEqual(issue.fields.customfield_10022, 'XSS')
self.assertEqual(issue.fields.priority.name, 'Major')
issue.delete()
def test_create_issue_without_prefetch(self):
issue = self.jira.create_issue(prefetch=False,
project=self.project_b,
summary='Test issue created',
description='blahery', issuetype={'name': 'Bug'},
customfield_10022='XSS')
assert hasattr(issue, 'self')
assert hasattr(issue, 'raw')
assert 'fields' not in issue.raw
issue.delete()
def test_update_with_fieldargs(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for updating',
description='Will be updated shortly',
issuetype={'name': 'Bug'},
customfield_10022='XSS')
issue.update(summary='Updated summary', description='Now updated',
issuetype={'name': 'Improvement'})
self.assertEqual(issue.fields.summary, 'Updated summary')
self.assertEqual(issue.fields.description, 'Now updated')
self.assertEqual(issue.fields.issuetype.name, 'Improvement')
self.assertEqual(issue.fields.customfield_10022, 'XSS')
self.assertEqual(issue.fields.project.key, self.project_b)
issue.delete()
def test_update_with_fielddict(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for updating', description='Will be updated shortly',
issuetype={'name': 'Bug'},
customfield_10022='XSS')
fields = {
'summary': 'Issue is updated',
'description': "it sure is",
'issuetype': {
'name': 'Improvement'
},
'customfield_10022': 'DOC',
'priority': {
'name': 'Major'
}
}
issue.update(fields=fields)
self.assertEqual(issue.fields.summary, 'Issue is updated')
self.assertEqual(issue.fields.description, 'it sure is')
self.assertEqual(issue.fields.issuetype.name, 'Improvement')
self.assertEqual(issue.fields.customfield_10022, 'DOC')
self.assertEqual(issue.fields.priority.name, 'Major')
issue.delete()
def test_update_with_label(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for updating labels', description='Label testing',
issuetype={'name': 'Bug'})
labelarray = ['testLabel']
fields = {
'labels': labelarray
}
issue.update(fields=fields)
self.assertEqual(issue.fields.labels, ['testLabel'])
def test_update_with_bad_label(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for updating labels', description='Label testing',
issuetype={'name': 'Bug'})
issue.fields.labels.append('this should not work')
fields = {
'labels': issue.fields.labels
}
self.assertRaises(JIRAError, issue.update, fields=fields)
def test_delete(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue created',
description='Not long for this world',
issuetype={'name': 'Bug'},
customfield_10022='XSS')
key = issue.key
issue.delete()
self.assertRaises(JIRAError, self.jira.issue, key)
def test_createmeta(self):
meta = self.jira.createmeta()
ztravisdeb_proj = find_by_key(meta['projects'], self.project_b)
self.assertEqual(len(ztravisdeb_proj['issuetypes']), 8)
def test_createmeta_filter_by_projectkey_and_name(self):
meta = self.jira.createmeta(projectKeys=self.project_b,
issuetypeNames='Bug')
self.assertEqual(len(meta['projects']), 1)
self.assertEqual(len(meta['projects'][0]['issuetypes']), 1)
def test_createmeta_filter_by_projectkeys_and_name(self):
meta = self.jira.createmeta(projectKeys=(self.project_a,
self.project_b), issuetypeNames='Improvement')
self.assertEqual(len(meta['projects']), 2)
for project in meta['projects']:
self.assertEqual(len(project['issuetypes']), 1)
def test_createmeta_filter_by_id(self):
projects = self.jira.projects()
proja = find_by_key_value(projects, self.project_a)
projb = find_by_key_value(projects, self.project_b)
meta = self.jira.createmeta(projectIds=(proja.id, projb.id),
issuetypeIds=('3', '4', '5'))
self.assertEqual(len(meta['projects']), 2)
for project in meta['projects']:
self.assertEqual(len(project['issuetypes']), 3)
def test_createmeta_expando(self):
# limit to SCR project so the call returns promptly
meta = self.jira.createmeta(projectKeys=self.project_b,
expand='projects.issuetypes.fields')
self.assertTrue('fields' in meta['projects'][0]['issuetypes'][0])
def test_assign_issue(self):
self.assertIsNone(self.jira.assign_issue(self.issue_1, 'ci-admin'))
self.assertEqual(self.jira.issue(self.issue_1).fields.assignee.name,
'ci-admin')
def test_assign_issue_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
self.assertIsNone(self.jira.assign_issue(issue, 'ci-admin'))
self.assertEqual(self.jira.issue(self.issue_1).fields.assignee.name,
'ci-admin')
def test_assign_to_bad_issue_raises(self):
self.assertRaises(JIRAError, self.jira.assign_issue, 'NOPE-1',
'notauser')
def test_comments(self):
for issue in [self.issue_1, self.jira.issue(self.issue_2)]:
self.jira.issue(issue)
comment1 = self.jira.add_comment(issue, 'First comment')
comment2 = self.jira.add_comment(issue, 'Second comment')
comments = self.jira.comments(issue)
assert comments[0].body == 'First comment'
assert comments[1].body == 'Second comment'
comment1.delete()
comment2.delete()
comments = self.jira.comments(issue)
assert len(comments) == 0
def test_add_comment(self):
comment = self.jira.add_comment(self.issue_3, 'a test comment!',
visibility={'type': 'role', 'value': 'Administrators'})
self.assertEqual(comment.body, 'a test comment!')
self.assertEqual(comment.visibility.type, 'role')
self.assertEqual(comment.visibility.value, 'Administrators')
comment.delete()
def test_add_comment_with_issue_obj(self):
issue = self.jira.issue(self.issue_3)
comment = self.jira.add_comment(issue, 'a new test comment!',
visibility={'type': 'role', 'value': 'Administrators'})
self.assertEqual(comment.body, 'a new test comment!')
self.assertEqual(comment.visibility.type, 'role')
self.assertEqual(comment.visibility.value, 'Administrators')
comment.delete()
def test_update_comment(self):
comment = self.jira.add_comment(self.issue_3, 'updating soon!')
comment.update(body='updated!')
self.assertEqual(comment.body, 'updated!')
# self.assertEqual(comment.visibility.type, 'role')
# self.assertEqual(comment.visibility.value, 'Administrators')
comment.delete()
def test_editmeta(self):
meta = self.jira.editmeta(self.issue_1)
self.assertEqual(len(meta['fields']), 18)
self.assertTrue('customfield_10007' in meta['fields'])
self.assertTrue('customfield_10022' in meta['fields'])
def test_editmeta_with_issue_obj(self):
issue = self.jira.issue(self.issue_2)
meta = self.jira.editmeta(issue)
self.assertEqual(len(meta['fields']), 18)
self.assertTrue('customfield_10022' in meta['fields'])
self.assertTrue('customfield_10007' in meta['fields'])
# Nothing from remote link works
# def test_remote_links(self):
# self.jira.add_remote_link ('ZTRAVISDEB-3', globalId='python-test:story.of.horse.riding',
# links = self.jira.remote_links('QA-44')
# self.assertEqual(len(links), 1)
# links = self.jira.remote_links('BULK-1')
# self.assertEqual(len(links), 0)
#
# @unittest.skip("temporary disabled")
# def test_remote_links_with_issue_obj(self):
# issue = self.jira.issue('QA-44')
# links = self.jira.remote_links(issue)
# self.assertEqual(len(links), 1)
# issue = self.jira.issue('BULK-1')
# links = self.jira.remote_links(issue)
# self.assertEqual(len(links), 0)
#
# @unittest.skip("temporary disabled")
# def test_remote_link(self):
# link = self.jira.remote_link('QA-44', '10000')
# self.assertEqual(link.id, 10000)
# self.assertTrue(hasattr(link, 'globalId'))
# self.assertTrue(hasattr(link, 'relationship'))
#
# @unittest.skip("temporary disabled")
# def test_remote_link_with_issue_obj(self):
# issue = self.jira.issue('QA-44')
# link = self.jira.remote_link(issue, '10000')
# self.assertEqual(link.id, 10000)
# self.assertTrue(hasattr(link, 'globalId'))
# self.assertTrue(hasattr(link, 'relationship'))
#
# @unittest.skip("temporary disabled")
# def test_add_remote_link(self):
# link = self.jira.add_remote_link('BULK-3', globalId='python-test:story.of.horse.riding',
# object={'url': 'http://google.com', 'title': 'googlicious!'},
# application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# creation response doesn't include full remote link info, so we fetch it again using the new internal ID
# link = self.jira.remote_link('BULK-3', link.id)
# self.assertEqual(link.application.name, 'far too silly')
# self.assertEqual(link.application.type, 'sketch')
# self.assertEqual(link.object.url, 'http://google.com')
# self.assertEqual(link.object.title, 'googlicious!')
# self.assertEqual(link.relationship, 'mousebending')
# self.assertEqual(link.globalId, 'python-test:story.of.horse.riding')
#
# @unittest.skip("temporary disabled")
# def test_add_remote_link_with_issue_obj(self):
# issue = self.jira.issue('BULK-3')
# link = self.jira.add_remote_link(issue, globalId='python-test:story.of.horse.riding',
# object={'url': 'http://google.com', 'title': 'googlicious!'},
# application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# creation response doesn't include full remote link info, so we fetch it again using the new internal ID
# link = self.jira.remote_link(issue, link.id)
# self.assertEqual(link.application.name, 'far too silly')
# self.assertEqual(link.application.type, 'sketch')
# self.assertEqual(link.object.url, 'http://google.com')
# self.assertEqual(link.object.title, 'googlicious!')
# self.assertEqual(link.relationship, 'mousebending')
# self.assertEqual(link.globalId, 'python-test:story.of.horse.riding')
#
# @unittest.skip("temporary disabled")
# def test_update_remote_link(self):
# link = self.jira.add_remote_link('BULK-3', globalId='python-test:story.of.horse.riding',
# object={'url': 'http://google.com', 'title': 'googlicious!'},
# application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# creation response doesn't include full remote link info, so we fetch it again using the new internal ID
# link = self.jira.remote_link('BULK-3', link.id)
# link.update(object={'url': 'http://yahoo.com', 'title': 'yahooery'}, globalId='python-test:updated.id',
# relationship='cheesing')
# self.assertEqual(link.globalId, 'python-test:updated.id')
# self.assertEqual(link.relationship, 'cheesing')
# self.assertEqual(link.object.url, 'http://yahoo.com')
# self.assertEqual(link.object.title, 'yahooery')
# link.delete()
#
# @unittest.skip("temporary disabled")
# def test_delete_remove_link(self):
# link = self.jira.add_remote_link('BULK-3', globalId='python-test:story.of.horse.riding',
# object={'url': 'http://google.com', 'title': 'googlicious!'},
# application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# _id = link.id
# link.delete()
# self.assertRaises(JIRAError, self.jira.remote_link, 'BULK-3', _id)
def test_transitions(self):
transitions = self.jira.transitions(self.issue_2)
self.assertEqual(len(transitions), 3)
def test_transitions_with_issue_obj(self):
issue = self.jira.issue(self.issue_2)
transitions = self.jira.transitions(issue)
self.assertEqual(len(transitions), 3)
def test_transition(self):
transition = self.jira.transitions(self.issue_2, '5')
self.assertEqual(transition[0]['name'], 'Resolve Issue')
def test_transition_expand(self):
transition = self.jira.transitions(self.issue_2, '5',
expand='transitions.fields')
self.assertTrue('fields' in transition[0])
def test_transition_issue_with_fieldargs(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for transition created',
description='blahery', issuetype={'name': 'Bug'},
customfield_10022='XSS')
self.jira.transition_issue(issue.key, '2',
assignee={'name': 'ci-admin'})
issue = self.jira.issue(issue.key)
self.assertEqual(issue.fields.assignee.name, 'ci-admin')
self.assertEqual(issue.fields.status.id, '6') # issue 'Closed'
def test_transition_issue_obj_with_fieldargs(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for transition created',
description='blahery',
issuetype={'name': 'Bug'},
customfield_10022='XSS')
self.jira.transition_issue(issue, '2', assignee={'name': 'ci-admin'})
issue = self.jira.issue(issue.key)
self.assertEqual(issue.fields.assignee.name, 'ci-admin')
self.assertEqual(issue.fields.status.id, '6')
def test_transition_issue_with_fielddict(self):
issue = self.jira.create_issue(project=self.project_b,
summary='Test issue for transition created',
description='blahery',
issuetype={'name': 'Bug'},
customfield_10022='XSS')
fields = {
'assignee': {
'name': 'ci-admin'
}
}
self.jira.transition_issue(issue.key, '5', fields=fields)
issue = self.jira.issue(issue.key)
self.assertEqual(issue.fields.assignee.name, 'ci-admin')
self.assertEqual(issue.fields.status.id, '5')
def test_votes(self):
self.jira_normal.remove_vote(self.issue_1)
# not checking the result on this
votes = self.jira.votes(self.issue_1)
self.assertEqual(votes.votes, 0)
self.jira_normal.add_vote(self.issue_1)
new_votes = self.jira.votes(self.issue_1)
assert votes.votes + 1 == new_votes.votes
self.jira_normal.remove_vote(self.issue_1)
new_votes = self.jira.votes(self.issue_1)
assert votes.votes == new_votes.votes
def test_votes_with_issue_obj(self):
issue = self.jira_normal.issue(self.issue_1)
self.jira_normal.remove_vote(issue)
# not checking the result on this
votes = self.jira.votes(issue)
self.assertEqual(votes.votes, 0)
self.jira_normal.add_vote(issue)
new_votes = self.jira.votes(issue)
assert votes.votes + 1 == new_votes.votes
self.jira_normal.remove_vote(issue)
new_votes = self.jira.votes(issue)
assert votes.votes == new_votes.votes
def test_watchers(self):
watchers = self.jira.watchers(self.issue_1)
self.assertEqual(watchers.watchCount, 1)
def test_watchers_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
watchers = self.jira.watchers(issue)
self.assertEqual(watchers.watchCount, 1)
def test_add_watcher(self):
init_watchers = self.jira.watchers(self.issue_1).watchCount
self.jira.add_watcher(self.issue_1, 'ci-user')
self.assertEqual(self.jira.watchers(self.issue_1).watchCount,
init_watchers + 1)
self.jira.remove_watcher(self.issue_1, 'ci-user')
def test_remove_watcher(self):
self.jira.add_watcher(self.issue_1, 'ci-user')
init_watchers = self.jira.watchers(self.issue_1).watchCount
self.jira.remove_watcher(self.issue_1, 'ci-user')
self.assertEqual(self.jira.watchers(self.issue_1).watchCount,
init_watchers - 1)
def test_add_watcher_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
init_watchers = self.jira.watchers(issue).watchCount
self.jira.add_watcher(issue, 'ci-user')
self.assertEqual(self.jira.watchers(issue).watchCount,
init_watchers + 1)
self.jira.remove_watcher(issue, 'ci-user')
def test_remove_watcher_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
self.jira.add_watcher(issue, 'ci-user')
init_watchers = self.jira.watchers(issue).watchCount
        self.jira.remove_watcher(issue, 'ci-user')
self.assertEqual(self.jira.watchers(issue).watchCount,
init_watchers - 1)
def test_agile(self):
uniq = rndstr()
board_name = 'board-' + uniq
sprint_name = 'sprint-' + uniq
b = self.jira.create_board(board_name, self.project_a)
assert isinstance(b.id, integer_types)
s = self.jira.create_sprint(sprint_name, b.id)
assert isinstance(s.id, integer_types)
assert s.name == sprint_name
assert s.state == 'FUTURE'
#self.jira.add_issues_to_sprint(s.id, self.issue_1)
#self.jira.add_issues_to_sprint(s.id, self.issue_2)
#self.jira.rank(self.issue_2, self.issue_1)
s.delete()
b.delete()
# self.jira.delete_board(b.id)
def test_worklogs(self):
worklog = self.jira.add_worklog(self.issue_1, '2h')
worklogs = self.jira.worklogs(self.issue_1)
self.assertEqual(len(worklogs), 1)
worklog.delete()
def test_worklogs_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
worklog = self.jira.add_worklog(issue, '2h')
worklogs = self.jira.worklogs(issue)
self.assertEqual(len(worklogs), 1)
worklog.delete()
def test_worklog(self):
worklog = self.jira.add_worklog(self.issue_1, '1d 2h')
new_worklog = self.jira.worklog(self.issue_1, str(worklog))
self.assertEqual(new_worklog.author.name, 'ci-admin')
self.assertEqual(new_worklog.timeSpent, '1d 2h')
worklog.delete()
def test_worklog_with_issue_obj(self):
issue = self.jira.issue(self.issue_1)
worklog = self.jira.add_worklog(issue, '1d 2h')
new_worklog = self.jira.worklog(issue, str(worklog))
self.assertEqual(new_worklog.author.name, 'ci-admin')
self.assertEqual(new_worklog.timeSpent, '1d 2h')
worklog.delete()
def test_add_worklog(self):
worklog_count = len(self.jira.worklogs(self.issue_2))
worklog = self.jira.add_worklog(self.issue_2, '2h')
self.assertIsNotNone(worklog)
self.assertEqual(len(self.jira.worklogs(self.issue_2)), worklog_count + 1)
worklog.delete()
def test_add_worklog_with_issue_obj(self):
issue = self.jira.issue(self.issue_2)
worklog_count = len(self.jira.worklogs(issue))
worklog = self.jira.add_worklog(issue, '2h')
self.assertIsNotNone(worklog)
self.assertEqual(len(self.jira.worklogs(issue)), worklog_count + 1)
worklog.delete()
def test_update_and_delete_worklog(self):
worklog = self.jira.add_worklog(self.issue_3, '3h')
issue = self.jira.issue(self.issue_3, fields='worklog,timetracking')
worklog.update(comment='Updated!', timeSpent='2h')
self.assertEqual(worklog.comment, 'Updated!')
rem_estimate = issue.fields.timetracking.remainingEstimate
self.assertEqual(worklog.timeSpent, '2h')
issue = self.jira.issue(self.issue_3, fields='worklog,timetracking')
self.assertEqual(issue.fields.timetracking.remainingEstimate, "1h")
worklog.delete()
issue = self.jira.issue(self.issue_3, fields='worklog,timetracking')
self.assertEqual(issue.fields.timetracking.remainingEstimate, "3h")
class IssueLinkTests(unittest.TestCase):
def setUp(self):
self.manager = JiraTestManager()
@unittest.skip("broken")
def test_issue_link(self):
self.link = self.manager.jira_admin.issue_link('10002')
link = self.link # Duplicate outward
self.assertEqual(link.id, '10002')
self.assertEqual(link.inwardIssue.id, '10018') # Duplicate inward
def test_create_issue_link(self):
self.manager.jira_admin.create_issue_link('Duplicate',
JiraTestManager().project_b_issue1,
JiraTestManager().project_b_issue2)
def test_create_issue_link_with_issue_objs(self):
inwardissue = self.manager.jira_admin.issue(
JiraTestManager().project_b_issue1)
self.assertIsNotNone(inwardissue)
outwardissue = self.manager.jira_admin.issue(
JiraTestManager().project_b_issue2)
self.assertIsNotNone(outwardissue)
self.manager.jira_admin.create_issue_link('Duplicate',
inwardissue, outwardissue)
#@unittest.skip("Creating an issue link doesn't return its ID, so can't easily test delete")
# def test_delete_issue_link(self):
# pass
class IssueLinkTypeTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_issue_link_types(self):
link_types = self.jira.issue_link_types()
self.assertEqual(len(link_types), 4)
duplicate = find_by_id(link_types, '10001')
self.assertEqual(duplicate.name, 'Cloners')
def test_issue_link_type(self):
link_type = self.jira.issue_link_type('10002')
self.assertEqual(link_type.id, '10002')
self.assertEqual(link_type.name, 'Duplicate')
class IssueTypesTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_issue_types(self):
types = self.jira.issue_types()
self.assertEqual(len(types), 8)
unq_issues = find_by_id(types, '10002')
self.assertEqual(unq_issues.name, 'Technical task')
def test_issue_type(self):
mytype = self.jira.issue_type('4')
self.assertEqual(mytype.id, '4')
self.assertEqual(mytype.name, 'Improvement')
class MyPermissionsTests(unittest.TestCase):
def setUp(self):
self.test_manager = JiraTestManager()
self.jira = JiraTestManager().jira_normal
self.issue_1 = self.test_manager.project_b_issue1
def test_my_permissions(self):
perms = self.jira.my_permissions()
self.assertGreaterEqual(len(perms['permissions']), 40)
@unittest.skip("broken")
def test_my_permissions_by_project(self):
perms = self.jira.my_permissions(projectKey='ZTRAVISDEB')
self.assertGreaterEqual(len(perms['permissions']), 40)
perms = self.jira.my_permissions(projectId='10012')
self.assertGreaterEqual(len(perms['permissions']), 40)
@unittest.skip("broken")
def test_my_permissions_by_issue(self):
perms = self.jira.my_permissions(issueKey='ZTRAVISDEB-7')
self.assertGreaterEqual(len(perms['permissions']), 40)
perms = self.jira.my_permissions(issueId='11021')
self.assertGreaterEqual(len(perms['permissions']), 40)
class PrioritiesTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_priorities(self):
priorities = self.jira.priorities()
self.assertEqual(len(priorities), 5)
def test_priority(self):
priority = self.jira.priority('2')
self.assertEqual(priority.id, '2')
self.assertEqual(priority.name, 'Critical')
class ProjectTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
self.project_b = JiraTestManager().project_b
def test_projects(self):
projects = self.jira.projects()
self.assertGreaterEqual(len(projects), 2)
def test_project(self):
project = self.jira.project(self.project_b)
self.assertEqual(project.key, self.project_b)
# I have no idea what avatars['custom'] is and I get different results every time
# def test_project_avatars(self):
# avatars = self.jira.project_avatars(self.project_b)
# self.assertEqual(len(avatars['custom']), 3)
# self.assertEqual(len(avatars['system']), 16)
#
# def test_project_avatars_with_project_obj(self):
# project = self.jira.project(self.project_b)
# avatars = self.jira.project_avatars(project)
# self.assertEqual(len(avatars['custom']), 3)
# self.assertEqual(len(avatars['system']), 16)
# def test_create_project_avatar(self):
# Tests the end-to-end project avatar creation process: upload as temporary, confirm after cropping,
# and selection.
# project = self.jira.project(self.project_b)
# size = os.path.getsize(TEST_ICON_PATH)
# filename = os.path.basename(TEST_ICON_PATH)
# with open(TEST_ICON_PATH, "rb") as icon:
# props = self.jira.create_temp_project_avatar(project, filename, size, icon.read())
# self.assertIn('cropperOffsetX', props)
# self.assertIn('cropperOffsetY', props)
# self.assertIn('cropperWidth', props)
# self.assertTrue(props['needsCropping'])
#
# props['needsCropping'] = False
# avatar_props = self.jira.confirm_project_avatar(project, props)
# self.assertIn('id', avatar_props)
#
# self.jira.set_project_avatar(self.project_b, avatar_props['id'])
#
# def test_delete_project_avatar(self):
# size = os.path.getsize(TEST_ICON_PATH)
# filename = os.path.basename(TEST_ICON_PATH)
# with open(TEST_ICON_PATH, "rb") as icon:
# props = self.jira.create_temp_project_avatar(self.project_b, filename, size, icon.read(), auto_confirm=True)
# self.jira.delete_project_avatar(self.project_b, props['id'])
#
# def test_delete_project_avatar_with_project_obj(self):
# project = self.jira.project(self.project_b)
# size = os.path.getsize(TEST_ICON_PATH)
# filename = os.path.basename(TEST_ICON_PATH)
# with open(TEST_ICON_PATH, "rb") as icon:
# props = self.jira.create_temp_project_avatar(project, filename, size, icon.read(), auto_confirm=True)
# self.jira.delete_project_avatar(project, props['id'])
def test_set_project_avatar(self):
def find_selected_avatar(avatars):
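            # for/else: the else branch raises only when the loop finds no
            # selected avatar.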
for avatar in avatars['system']:
if avatar['isSelected']:
return avatar
else:
raise Exception
self.jira.set_project_avatar(self.project_b, '10001')
avatars = self.jira.project_avatars(self.project_b)
self.assertEqual(find_selected_avatar(avatars)['id'], '10001')
project = self.jira.project(self.project_b)
self.jira.set_project_avatar(project, '10208')
avatars = self.jira.project_avatars(project)
self.assertEqual(find_selected_avatar(avatars)['id'], '10208')
def test_project_components(self):
proj = self.jira.project(self.project_b)
name = "component-%s from project %s" % (proj, rndstr())
component = self.jira.create_component(name,
proj, description='test!!', assigneeType='COMPONENT_LEAD',
isAssigneeTypeValid=False)
components = self.jira.project_components(self.project_b)
self.assertGreaterEqual(len(components), 1)
sample = find_by_id(components, component.id)
self.assertEqual(sample.id, component.id)
self.assertEqual(sample.name, name)
component.delete()
def test_project_versions(self):
name = "version-%s" % rndstr()
version = self.jira.create_version(name,
self.project_b, "will be deleted soon")
versions = self.jira.project_versions(self.project_b)
self.assertGreaterEqual(len(versions), 1)
test = find_by_id(versions, version.id)
self.assertEqual(test.id, version.id)
self.assertEqual(test.name, name)
i = self.jira.issue(JiraTestManager().project_b_issue1)
i.update(fixVersions=[{'id': version.id}])
version.delete()
def test_project_versions_with_project_obj(self):
name = "version-%s" % rndstr()
version = self.jira.create_version(name,
self.project_b, "will be deleted soon")
project = self.jira.project(self.project_b)
versions = self.jira.project_versions(project)
self.assertGreaterEqual(len(versions), 1)
test = find_by_id(versions, version.id)
self.assertEqual(test.id, version.id)
self.assertEqual(test.name, name)
version.delete()
def test_project_roles(self):
roles = self.jira.project_roles(self.project_b)
self.assertEqual(len(roles), 7)
self.assertIn('Users', roles)
def test_project_roles_with_project_obj(self):
project = self.jira.project(self.project_b)
roles = self.jira.project_roles(project)
self.assertEqual(len(roles), 7)
self.assertIn('Users', roles)
def test_project_role(self):
role = self.jira.project_role(self.project_b, '10103')
self.assertEqual(role.id, 10103)
self.assertEqual(role.name, 'atlassian-addons-project-access')
def test_project_role_with_project_obj(self):
project = self.jira.project(self.project_b)
role = self.jira.project_role(project, '10103')
self.assertEqual(role.id, 10103)
self.assertEqual(role.name, 'atlassian-addons-project-access')
def test_update_project_role(self):
role = self.jira.project_role(self.project_b, '10103')
role.update(users='ci-admin', groups=['jira-developers',
'jira-users'])
self.assertEqual(role.actors[0].name, 'ci-admin')
class ResolutionTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_resolutions(self):
resolutions = self.jira.resolutions()
self.assertEqual(len(resolutions), 5)
def test_resolution(self):
resolution = self.jira.resolution('2')
self.assertEqual(resolution.id, '2')
self.assertEqual(resolution.name, 'Won\'t Fix')
class SearchTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
self.project_b = JiraTestManager().project_b
self.test_manager = JiraTestManager()
self.issue = self.test_manager.project_b_issue1
def test_search_issues(self):
issues = self.jira.search_issues('project=%s' % self.project_b)
self.assertLessEqual(len(issues), 50) # default maxResults
for issue in issues:
self.assertTrue(issue.key.startswith(self.project_b))
def test_search_issues_maxresults(self):
issues = self.jira.search_issues('project=%s' % self.project_b,
maxResults=10)
self.assertLessEqual(len(issues), 10)
def test_search_issues_startat(self):
issues = self.jira.search_issues('project=%s' % self.project_b,
startAt=5770, maxResults=500)
self.assertLessEqual(len(issues), 500)
def test_search_issues_field_limiting(self):
issues = self.jira.search_issues('key=%s' % self.issue,
fields='summary,comment')
self.assertTrue(hasattr(issues[0].fields, 'summary'))
self.assertTrue(hasattr(issues[0].fields, 'comment'))
self.assertFalse(hasattr(issues[0].fields, 'reporter'))
self.assertFalse(hasattr(issues[0].fields, 'progress'))
# sort of working
@unittest.skip("broken")
def test_search_issues_expandos(self):
issues = self.jira.search_issues('key=%s' % self.issue,
expand='names')
# self.assertTrue(hasattr(issues[0], 'names'))
self.assertFalse(hasattr(issues[0], 'schema'))
@unittest.skip("temporary disabled")
class SecurityLevelTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_security_level(self):
sec_level = self.jira.security_level('10001')
self.assertEqual(sec_level.id, '10001')
self.assertEqual(sec_level.name, 'eee')
class ServerInfoTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_server_info(self):
server_info = self.jira.server_info()
self.assertIn('baseUrl', server_info)
self.assertIn('version', server_info)
class StatusTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_statuses(self):
found = False
for status in self.jira.statuses():
if status.id == '1' and status.name == 'Open':
found = True
break
self.assertTrue(found, "Status Open with id=1 not found.")
def test_status(self):
status = self.jira.status('1')
self.assertEqual(status.id, '1')
self.assertEqual(status.name, 'Open')
class UserTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
self.project_a = JiraTestManager().project_a
self.project_b = JiraTestManager().project_b
self.test_manager = JiraTestManager()
self.issue = self.test_manager.project_b_issue3
def test_user(self):
user = self.jira.user('ci-admin')
self.assertEqual(user.name, 'ci-admin')
self.assertEqual(user.emailAddress, 'noreply@example.com')
def test_search_assignable_users_for_projects(self):
users = self.jira.search_assignable_users_for_projects('ci-admin',
'%s,%s' % (self.project_a, self.project_b))
self.assertGreaterEqual(len(users), 1)
usernames = map(lambda user: user.name, users)
self.assertIn('ci-admin', usernames)
def test_search_assignable_users_for_projects_maxresults(self):
users = self.jira.search_assignable_users_for_projects('ci-admin',
'%s,%s' % (self.project_a, self.project_b), maxResults=1)
self.assertLessEqual(len(users), 1)
def test_search_assignable_users_for_projects_startat(self):
users = self.jira.search_assignable_users_for_projects('ci-admin',
'%s,%s' % (self.project_a, self.project_b), startAt=1)
self.assertGreaterEqual(len(users), 0)
def test_search_assignable_users_for_issues_by_project(self):
users = self.jira.search_assignable_users_for_issues('ci-admin',
project=self.project_b)
self.assertEqual(len(users), 1)
usernames = map(lambda user: user.name, users)
self.assertIn('ci-admin', usernames)
def test_search_assignable_users_for_issues_by_project_maxresults(self):
users = self.jira.search_assignable_users_for_issues('ci-user',
project=self.project_b, maxResults=1)
self.assertLessEqual(len(users), 1)
def test_search_assignable_users_for_issues_by_project_startat(self):
users = self.jira.search_assignable_users_for_issues('ci-user',
project=self.project_a, startAt=1)
self.assertGreaterEqual(len(users), 0)
def test_search_assignable_users_for_issues_by_issue(self):
users = self.jira.search_assignable_users_for_issues('ci-admin',
issueKey=self.issue)
self.assertEqual(len(users), 1)
usernames = map(lambda user: user.name, users)
self.assertIn('ci-admin', usernames)
def test_search_assignable_users_for_issues_by_issue_maxresults(self):
users = self.jira.search_assignable_users_for_issues('ci-admin',
issueKey=self.issue, maxResults=2)
self.assertLessEqual(len(users), 2)
def test_search_assignable_users_for_issues_by_issue_startat(self):
users = self.jira.search_assignable_users_for_issues('ci-admin',
issueKey=self.issue, startAt=2)
self.assertGreaterEqual(len(users), 0)
def test_user_avatars(self):
avatars = self.jira.user_avatars('ci-admin')
self.assertEqual(len(avatars['system']), 24)
self.assertEqual(len(avatars['custom']), 0)
@unittest.skip("disable until I have permissions to write/modify")
# WRONG
def test_create_user_avatar(self):
# Tests the end-to-end user avatar creation process: upload as temporary, confirm after cropping,
# and selection.
size = os.path.getsize(TEST_ICON_PATH)
filename = os.path.basename(TEST_ICON_PATH)
with open(TEST_ICON_PATH, "rb") as icon:
props = self.jira.create_temp_user_avatar('admin', filename,
size, icon.read())
self.assertIn('cropperOffsetX', props)
self.assertIn('cropperOffsetY', props)
self.assertIn('cropperWidth', props)
self.assertTrue(props['needsCropping'])
props['needsCropping'] = False
avatar_props = self.jira.confirm_user_avatar('admin', props)
self.assertIn('id', avatar_props)
self.assertEqual(avatar_props['owner'], 'admin')
self.jira.set_user_avatar('admin', avatar_props['id'])
@unittest.skip("broken")
def test_set_user_avatar(self):
def find_selected_avatar(avatars):
for avatar in avatars['system']:
if avatar['isSelected']:
return avatar
else:
raise Exception
self.jira.set_user_avatar('ci-admin', '10104')
avatars = self.jira.user_avatars('ci-admin')
self.assertEqual(find_selected_avatar(avatars)['id'], '10104')
self.jira.set_user_avatar('ci-admin', '10105')
avatars = self.jira.user_avatars('ci-admin')
self.assertEqual(find_selected_avatar(avatars)['id'], '10105')
@unittest.skip("disable until I have permissions to write/modify")
# WRONG
def test_delete_user_avatar(self):
size = os.path.getsize(TEST_ICON_PATH)
filename = os.path.basename(TEST_ICON_PATH)
with open(TEST_ICON_PATH, "rb") as icon:
props = self.jira.create_temp_user_avatar('ci-admin', filename,
size, icon.read())
# print(props)
self.jira.delete_user_avatar('ci-admin', props['id'])
@unittest.skip("disabled as is not Travis friendly, probably due to parrallel execution")
def test_search_users(self):
users = self.jira.search_users('c')
self.assertEqual(len(users), 2)
usernames = map(lambda user: user.name, users)
self.assertIn('ci-admin', usernames)
self.assertIn('ci-user', usernames)
@unittest.skip("disabled as is not Travis friendly, probably due to parrallel execution")
def test_search_users_maxresults(self):
users = self.jira.search_users('c', maxResults=1)
self.assertGreaterEqual(len(users), 1)
@unittest.skip("disabled as is not Travis friendly, probably due to parrallel execution")
def test_search_users_startat(self):
users = self.jira.search_users('c', startAt=1)
self.assertGreaterEqual(len(users), 1)
def test_search_allowed_users_for_issue_by_project(self):
users = self.jira.search_allowed_users_for_issue('a',
projectKey=self.project_a)
self.assertGreaterEqual(len(users), 1)
def test_search_allowed_users_for_issue_by_issue(self):
users = self.jira.search_allowed_users_for_issue('a',
issueKey=self.issue)
self.assertGreaterEqual(len(users), 1)
def test_search_allowed_users_for_issue_maxresults(self):
users = self.jira.search_allowed_users_for_issue('a',
projectKey=self.project_b, maxResults=2)
self.assertLessEqual(len(users), 2)
def test_search_allowed_users_for_issue_startat(self):
users = self.jira.search_allowed_users_for_issue('c',
projectKey=self.project_b, startAt=1)
self.assertGreaterEqual(len(users), 0)
class VersionTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
self.project_b = JiraTestManager().project_b
def test_create_version(self):
version = self.jira.create_version('new version 1', self.project_b,
releaseDate='2015-03-11', description='test version!')
self.assertEqual(version.name, 'new version 1')
self.assertEqual(version.description, 'test version!')
self.assertEqual(version.releaseDate, '2015-03-11')
version.delete()
def test_create_version_with_project_obj(self):
project = self.jira.project(self.project_b)
version = self.jira.create_version('new version 1', project,
releaseDate='2015-03-11', description='test version!')
self.assertEqual(version.name, 'new version 1')
self.assertEqual(version.description, 'test version!')
self.assertEqual(version.releaseDate, '2015-03-11')
version.delete()
def test_update(self):
version = self.jira.create_version('new updated version 1',
self.project_b, releaseDate='2015-03-11',
description='new to be updated!')
version.update(name='new updated version name 1',
description='new updated!')
self.assertEqual(version.name, 'new updated version name 1')
self.assertEqual(version.description, 'new updated!')
version.delete()
def test_delete(self):
version = self.jira.create_version('To be deleted', self.project_b,
releaseDate='2015-03-11',
description='not long for this world')
myid = version.id
version.delete()
self.assertRaises(JIRAError, self.jira.version, myid)
@unittest.skip("broken")
def test_version(self):
version = self.jira.version('10006')
self.assertEqual(version.id, '10006')
self.assertEqual(version.name, 'updated version name')
def test_version_expandos(self):
pass
class OtherTests(unittest.TestCase):
def test_session_invalid_login(self):
try:
JIRA('https://support.atlassian.com',
basic_auth=("xxx", "xxx"),
validate=True,
logging=False)
except Exception as e:
self.assertIsInstance(e, JIRAError)
assert e.status_code == 401
str(JIRAError) # to see that this does not raise an exception
return
assert False
class SessionTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_session(self):
user = self.jira.session()
self.assertIsNotNone(user.raw['session'])
def test_session_with_no_logged_in_user_raises(self):
anon_jira = JIRA('https://support.atlassian.com', logging=False)
self.assertRaises(JIRAError, anon_jira.session)
def test_session_server_offline(self):
try:
JIRA('https://127.0.0.1:1', logging=False)
except Exception as e:
logging.error(e)
self.assertEqual(type(e), JIRAError)
class WebsudoTests(unittest.TestCase):
def setUp(self):
self.jira = JiraTestManager().jira_admin
def test_kill_websudo(self):
self.jira.kill_websudo()
# def test_kill_websudo_without_login_raises(self):
# self.assertRaises(ConnectionError, JIRA)
class UserAdministrationTests(unittest.TestCase):
jira = None
def setUp(self):
self.jira = JiraTestManager().jira_admin
self.test_username = "test_%s" % JiraTestManager().project_a
self.test_email = "%s@example.com" % self.test_username
self.test_password = rndpassword()
self.test_groupname = 'testGroupFor_%s' % JiraTestManager().project_a
def test_add_user(self):
try:
self.jira.delete_user(self.test_username)
except JIRAError:
pass
result = self.jira.add_user(
self.test_username, self.test_email, password=self.test_password)
        self.assertTrue(result)
x = self.jira.search_users(self.test_username)[0]
assert isinstance(x, jira.User)
x = self.jira.delete_user(self.test_username)
        self.assertTrue(x)
def test_delete_user(self):
try:
# Make sure user exists before attempting test to delete.
self.jira.add_user(
self.test_username, self.test_email, password=self.test_password)
except JIRAError:
pass
result = self.jira.delete_user(self.test_username)
        self.assertTrue(result)
x = self.jira.search_users(self.test_username)
self.assertEqual(
len(x), 0, "Found test user when it should have been deleted. Test Fails.")
def test_add_group(self):
try:
self.jira.remove_group(self.test_groupname)
except JIRAError:
pass
result = self.jira.add_group(self.test_groupname)
        self.assertTrue(result)
x = self.jira.groups(query=self.test_groupname)
self.assertEqual(self.test_groupname, x['groups'][0]['name'], "Did not find expected group after trying to add"
" it. Test Fails.")
self.jira.remove_group(self.test_groupname)
def test_remove_group(self):
try:
self.jira.add_group(self.test_groupname)
except JIRAError:
pass
result = self.jira.remove_group(self.test_groupname)
        self.assertTrue(result)
x = self.jira.groups(query=self.test_groupname)
self.assertEqual(len(
x['groups']), 0, 'Found group with name when it should have been deleted. Test Fails.')
def test_add_user_to_group(self):
try:
self.jira.add_user(
self.test_username, self.test_email, password=self.test_password)
self.jira.add_group(self.test_groupname)
# Just in case user is already there.
self.jira.remove_user_from_group(
self.test_username, self.test_groupname)
except JIRAError:
pass
result = self.jira.add_user_to_group(
self.test_username, self.test_groupname)
        self.assertTrue(result)
x = self.jira.group_members(self.test_groupname)
self.assertIn(self.test_username, x.keys(),
'Username not returned in group member list. Test Fails.')
self.jira.remove_group(self.test_groupname)
self.jira.delete_user(self.test_username)
def test_remove_user_from_group(self):
try:
self.jira.add_user(
self.test_username, self.test_email, password=self.test_password)
self.jira.add_group(self.test_groupname)
self.jira.add_user_to_group(
self.test_username, self.test_groupname)
except JIRAError:
pass
result = self.jira.remove_user_from_group(
self.test_username, self.test_groupname)
        self.assertTrue(result)
x = self.jira.group_members(self.test_groupname)
self.assertNotIn(self.test_username, x.keys(), 'Username found in group when it should have been removed. '
'Test Fails.')
self.jira.remove_group(self.test_groupname)
self.jira.delete_user(self.test_username)
if __name__ == '__main__':
# j = JIRA("https://issues.citrite.net")
# print(j.session())
dirname = "test-reports-%s%s" % (sys.version_info[0], sys.version_info[1])
unittest.main()
# pass
|
|
import datetime
from decimal import Decimal
from unittest import mock
from django.test import TestCase, modify_settings, override_settings
from django.urls import reverse
from django.utils import timezone
from django_webtest import WebTest
from mymoney.apps.bankaccounts.factories import BankAccountFactory
from mymoney.apps.banktransactions.factories import BankTransactionFactory
from mymoney.apps.banktransactionschedulers.models import (
BankTransactionScheduler,
)
from mymoney.core.factories import UserFactory
from ..factories import BankTransactionSchedulerFactory
@modify_settings(MIDDLEWARE={
'remove': ['mymoney.core.middleware.AnonymousRedirectMiddleware'],
})
class AccessTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.not_owner = UserFactory(username='not_owner', user_permissions='staff')
cls.superowner = UserFactory(username='superowner', user_permissions='admin')
cls.bankaccount = BankAccountFactory(owners=[cls.owner, cls.superowner])
def test_access_list(self):
url = reverse('banktransactionschedulers:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
# Anonymous denied
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non owner.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Owner.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_access_create(self):
url = reverse('banktransactionschedulers:create', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
# Missing permission.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
self.client.logout()
# Having permission but not owner.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
self.client.logout()
# Owner with permission.
self.client.force_login(self.superowner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.client.logout()
# Fake bank account.
url = reverse('banktransactionschedulers:create', kwargs={
'bankaccount_pk': 20120918,
})
self.client.force_login(self.superowner)
response = self.client.get(url)
self.assertEqual(404, response.status_code)
self.client.logout()
def test_access_update(self):
banktransactionscheduler = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
)
url = reverse('banktransactionschedulers:update', kwargs={
'pk': banktransactionscheduler.pk
})
# Anonymous
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non-owner with permissions.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
self.client.logout()
# Owner without perm.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
self.client.logout()
# Owner with permissions
self.client.force_login(self.superowner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.client.logout()
# Fake bank transaction.
url = reverse('banktransactionschedulers:update', kwargs={
'pk': 20140923
})
self.client.force_login(self.superowner)
response = self.client.get(url)
self.assertEqual(404, response.status_code)
self.client.logout()
def test_access_delete(self):
banktransactionscheduler = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
)
url = reverse('banktransactionschedulers:delete', kwargs={
'pk': banktransactionscheduler.pk
})
# Anonymous
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non-owner with permissions.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
self.client.logout()
# Owner without perm.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
self.client.logout()
# Owner with permissions
self.client.force_login(self.superowner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.client.logout()
class ViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.superowner = UserFactory(username='superowner', user_permissions='admin')
def setUp(self):
self.bankaccount = BankAccountFactory(owners=[self.owner, self.superowner])
@mock.patch('mymoney.apps.banktransactions.models.timezone')
@mock.patch('mymoney.apps.banktransactionschedulers.models.timezone')
    def test_summary_queryset(self, mock_bts_timezone, mock_bt_timezone):
mock_bt_timezone.now.return_value = datetime.date(2015, 8, 11)
mock_bts_timezone.now.return_value = datetime.date(2015, 8, 11)
url = reverse('banktransactionschedulers:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
self.client.force_login(self.owner)
# Nothing.
response = self.client.get(url)
self.assertFalse(response.context[0]['summary'])
self.assertEqual(response.context[0]['total'], 0)
# Only credit.
bts1 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_MONTHLY,
amount=Decimal('2000'),
date=datetime.date(2015, 8, 10),
)
response = self.client.get(url)
self.assertDictEqual(
response.context[0]['summary'],
{
BankTransactionScheduler.TYPE_MONTHLY: {
'type': BankTransactionScheduler.TYPES[0][1],
'credit': bts1.amount,
'debit': 0,
'used': 0,
'remaining': bts1.amount,
'total': bts1.amount,
},
},
)
self.assertEqual(response.context[0]['total'], bts1.amount)
# Add debit.
bts2 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_MONTHLY,
amount=Decimal('-900'),
date=datetime.date(2015, 8, 9),
)
bts3 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_MONTHLY,
amount=Decimal('-100'),
date=datetime.date(2015, 8, 25),
)
response = self.client.get(url)
self.assertDictEqual(
response.context[0]['summary'],
{
BankTransactionScheduler.TYPE_MONTHLY: {
'type': BankTransactionScheduler.TYPES[0][1],
'credit': bts1.amount,
'debit': bts2.amount + bts3.amount, # -1000
'used': 0,
'remaining': bts1.amount + bts2.amount + bts3.amount, # 1000
'total': bts1.amount + bts2.amount + bts3.amount, # 1000
},
},
)
self.assertEqual(response.context[0]['total'], bts1.amount + bts2.amount + bts3.amount)
# Add weekly schedulers.
bts4 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_WEEKLY,
amount=Decimal('-30'),
date=datetime.date(2015, 8, 11),
)
bts5 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_WEEKLY,
amount=Decimal('-15'),
date=datetime.date(2015, 8, 12),
)
response = self.client.get(url)
self.assertDictEqual(
response.context[0]['summary'][BankTransactionScheduler.TYPE_MONTHLY],
{
'type': BankTransactionScheduler.TYPES[0][1],
'credit': bts1.amount, # 2000
'debit': bts2.amount + bts3.amount, # -1000
'used': 0,
'remaining': bts1.amount + bts2.amount + bts3.amount, # 1000
'total': bts1.amount + bts2.amount + bts3.amount, # 1000
},
)
self.assertDictEqual(
response.context[0]['summary'][BankTransactionScheduler.TYPE_WEEKLY],
{
'type': BankTransactionScheduler.TYPES[1][1],
'credit': 0,
'debit': bts4.amount + bts5.amount, # -45
'used': 0,
'remaining': bts4.amount + bts5.amount, # -45
'total': bts4.amount + bts5.amount, # -45
},
)
self.assertEqual(
response.context[0]['total'],
bts1.amount + bts2.amount + bts3.amount + bts4.amount + bts5.amount
)
# Then add bank transactions.
bt1 = BankTransactionFactory(
bankaccount=self.bankaccount,
date=datetime.date(2015, 8, 10),
amount=Decimal('-150'),
)
bt2 = BankTransactionFactory(
bankaccount=self.bankaccount,
date=datetime.date(2015, 8, 20),
amount=Decimal('-50'),
)
response = self.client.get(url)
self.assertDictEqual(
response.context[0]['summary'][BankTransactionScheduler.TYPE_MONTHLY],
{
'type': BankTransactionScheduler.TYPES[0][1],
'credit': bts1.amount, # 2000
'debit': bts2.amount + bts3.amount, # -1000
'used': bt1.amount + bt2.amount, # -200
'remaining': bts1.amount + bts2.amount + bts3.amount + bt1.amount + bt2.amount, # 800
'total': bts1.amount + bts2.amount + bts3.amount, # 1000
},
)
self.assertDictEqual(
response.context[0]['summary'][BankTransactionScheduler.TYPE_WEEKLY],
{
'type': BankTransactionScheduler.TYPES[1][1],
'credit': 0,
'debit': bts4.amount + bts5.amount, # -45
'used': bt1.amount, # -150
'remaining': bts4.amount + bts5.amount + bt1.amount, # -195
'total': bts4.amount + bts5.amount, # -45
},
)
self.assertEqual(
response.context[0]['total'],
bts1.amount + bts2.amount + bts3.amount + bts4.amount + bts5.amount
)
def test_list_queryset(self):
url = reverse('banktransactionschedulers:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
bts1 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
last_action=timezone.make_aware(datetime.datetime(2015, 7, 10)),
)
bts2 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
last_action=timezone.make_aware(datetime.datetime(2015, 7, 9)),
)
# Scheduler of another bank account.
BankTransactionSchedulerFactory()
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertQuerysetEqual(
response.context['object_list'],
[
repr(bts1),
repr(bts2),
],
)
class WebViewTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.superowner = UserFactory(username='superowner', user_permissions='admin')
def setUp(self):
self.bankaccount = BankAccountFactory(owners=[self.owner, self.superowner])
@override_settings(LANGUAGE_CODE='en-us')
def test_summary_view(self):
url = reverse('banktransactionschedulers:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
# No scheduler yet.
response = self.app.get(url, user='owner')
self.assertContains(response, "No scheduled bank transaction yet.")
# Schedulers of only one type (no global total display).
bts1 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_MONTHLY,
amount=Decimal('2000'),
)
response = self.app.get(url, user='owner')
self.assertNotContains(
response,
'<tr data-summary-total="{total}">'.format(total=bts1.amount),
)
# Schedulers of both types, display a global total.
bts2 = BankTransactionSchedulerFactory(
bankaccount=self.bankaccount,
type=BankTransactionScheduler.TYPE_WEEKLY,
amount=Decimal('-100'),
)
response = self.app.get(url, user='owner')
self.assertContains(
response,
'<td data-summary-total="{total:.2f}">'.format(total=bts1.amount + bts2.amount),
)
def test_list_links_action(self):
bts = BankTransactionSchedulerFactory(bankaccount=self.bankaccount)
url = reverse('banktransactionschedulers:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
edit_url = reverse('banktransactionschedulers:update', kwargs={
'pk': bts.pk
})
delete_url = reverse('banktransactionschedulers:delete', kwargs={
'pk': bts.pk
})
response = self.app.get(url, user='superowner')
response.click(href=edit_url)
response = self.app.get(url, user='superowner')
response.click(href=delete_url)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=edit_url)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=delete_url)
|
|
# utils/SwiftBuildSupport.py - Utilities for Swift build scripts -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
try:
import ConfigParser # Python 2
except ImportError:
import configparser as ConfigParser # Python 3
import os
import pipes
import subprocess
import sys
HOME = os.environ.get("HOME", "/")
def _get_default_source_root():
result = ""
# Are we in a Swift checkout? Start from this file and check its parent
# directories.
#
# $SWIFT_SOURCE_ROOT/swift/utils/SwiftBuildSupport.py
(swift_path, parent_dirname) = os.path.split(os.path.dirname(__file__))
if parent_dirname != "utils":
return result
if not os.path.exists(os.path.join(swift_path, 'CMakeLists.txt')):
return result
result = os.path.dirname(swift_path)
# Are we in an LLVM checkout? Start from the Swift checkout and check /its/
# parent directories.
#
# $SWIFT_SOURCE_ROOT/llvm/tools/swift/utils/SwiftBuildSupport.py
(llvm_path, parent_dirname) = os.path.split(result)
if parent_dirname != "tools":
return result
if not os.path.exists(os.path.join(llvm_path, 'CMakeLists.txt')):
return result
result = os.path.dirname(llvm_path)
return result
# Set SWIFT_SOURCE_ROOT in your environment to control where the sources
# are found.
SWIFT_SOURCE_ROOT = os.environ.get(
"SWIFT_SOURCE_ROOT", _get_default_source_root())
# Set SWIFT_BUILD_ROOT to a directory that will contain a subdirectory
# for each build configuration
SWIFT_BUILD_ROOT = os.environ.get(
"SWIFT_BUILD_ROOT", os.path.join(SWIFT_SOURCE_ROOT, "build"))
def print_with_argv0(message):
print(sys.argv[0] + ": " + message)
def quote_shell_command(args):
return " ".join([ pipes.quote(a) for a in args ])
def check_call(args, print_command=False, verbose=False):
if print_command:
print(os.getcwd() + "$ " + quote_shell_command(args))
try:
return subprocess.check_call(args)
except subprocess.CalledProcessError as e:
if verbose:
            print_with_argv0(str(e))
else:
print_with_argv0(
"command terminated with a non-zero exit status " +
str(e.returncode) + ", aborting")
sys.stdout.flush()
sys.exit(1)
except OSError as e:
print_with_argv0("could not execute '" + quote_shell_command(args) +
"': " + e.strerror)
sys.stdout.flush()
sys.exit(1)
def check_output(args, print_command=False, verbose=False):
if print_command:
print(os.getcwd() + "$ " + quote_shell_command(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
if verbose:
            print_with_argv0(str(e))
else:
print_with_argv0(
"command terminated with a non-zero exit status " +
str(e.returncode) + ", aborting")
sys.stdout.flush()
sys.exit(1)
except OSError as e:
print_with_argv0("could not execute '" + quote_shell_command(args) +
"': " + e.strerror)
sys.stdout.flush()
sys.exit(1)
def _load_preset_files_impl(preset_file_names, substitutions={}):
config = ConfigParser.SafeConfigParser(substitutions, allow_no_value=True)
if config.read(preset_file_names) == []:
print_with_argv0(
"preset file not found (tried " + str(preset_file_names) + ")")
sys.exit(1)
return config
_PRESET_PREFIX = "preset: "
def _get_preset_options_impl(config, substitutions, preset_name):
section_name = _PRESET_PREFIX + preset_name
if section_name not in config.sections():
return (None, None, None)
build_script_opts = []
build_script_impl_opts = []
missing_opts = []
dash_dash_seen = False
for o in config.options(section_name):
try:
a = config.get(section_name, o)
except ConfigParser.InterpolationMissingOptionError as e:
# e.reference contains the correctly formatted option
missing_opts.append(e.reference)
continue
if not a:
a = ""
if o in substitutions:
continue
opt = None
if o == "mixin-preset":
# Split on newlines and filter out empty lines.
mixins = filter(None, [m.strip() for m in a.splitlines()])
for mixin in mixins:
(base_build_script_opts,
base_build_script_impl_opts,
base_missing_opts) = \
_get_preset_options_impl(config, substitutions, mixin)
build_script_opts += base_build_script_opts
build_script_impl_opts += base_build_script_impl_opts
missing_opts += base_missing_opts
elif o == "dash-dash":
dash_dash_seen = True
elif a == "":
opt = "--" + o
else:
opt = "--" + o + "=" + a
if opt:
if not dash_dash_seen:
build_script_opts.append(opt)
else:
build_script_impl_opts.append(opt)
return (build_script_opts, build_script_impl_opts, missing_opts)
def get_preset_options(substitutions, preset_file_names, preset_name):
config = _load_preset_files_impl(preset_file_names, substitutions)
(build_script_opts, build_script_impl_opts, missing_opts) = \
_get_preset_options_impl(config, substitutions, preset_name)
if not build_script_opts:
print_with_argv0("preset '" + preset_name + "' not found")
sys.exit(1)
if missing_opts:
print_with_argv0("missing option(s) for preset '" + preset_name +
"': " + ", ".join(missing_opts))
sys.exit(1)
return build_script_opts + [ "--" ] + build_script_impl_opts
def get_all_preset_names(preset_file_names):
config = _load_preset_files_impl(preset_file_names)
return [ name[len(_PRESET_PREFIX):] for name in config.sections()
if name.startswith(_PRESET_PREFIX) ]
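# An illustrative example of the preset file format parsed above (the file
# path, preset name and options below are hypothetical, not shipped defaults):
#
#   [preset: dev]
#   release
#   assertions
#   dash-dash
#   verbose-build
#
# With such a file, get_preset_options({}, ['/path/to/presets.ini'], 'dev')
# would return ['--release', '--assertions', '--', '--verbose-build']:
# options listed before "dash-dash" go to build-script, the rest are passed
# through to build-script-impl after the "--" separator.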
# A context manager for changing the current working directory.
#
# with WorkingDirectory('/tmp'):
# ... do work in /tmp...
class WorkingDirectory(object):
def __init__(self, new_cwd):
self.new_cwd = new_cwd
def __enter__(self):
self.old_cwd = os.getcwd()
os.chdir(self.new_cwd)
def __exit__(self, type, value, traceback):
os.chdir(self.old_cwd)
|
|
"""
Title: Semantic Similarity with BERT
Author: [Mohamad Merchant](https://twitter.com/mohmadmerchant1)
Date created: 2020/08/15
Last modified: 2020/08/29
Description: Natural Language Inference by fine-tuning BERT model on SNLI Corpus.
"""
"""
## Introduction
Semantic Similarity is the task of determining how similar
two sentences are, in terms of what they mean.
This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpus
to predict sentence semantic similarity with Transformers.
We will fine-tune a BERT model that takes two sentences as inputs
and that outputs a similarity score for these two sentences.
### References
* [BERT](https://arxiv.org/pdf/1810.04805.pdf)
* [SNLI](https://nlp.stanford.edu/projects/snli/)
"""
"""
## Setup
Note: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0).
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
"""
## Configuration
"""
max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset.
labels = ["contradiction", "entailment", "neutral"]
"""
## Load the Data
"""
"""shell
curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz
tar -xvzf data.tar.gz
"""
# There are more than 550k samples in total; we will use 100k for this example.
train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000)
valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv")
test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv")
# Shape of the data
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {valid_df.shape[0]}")
print(f"Total test samples: {valid_df.shape[0]}")
"""
Dataset Overview:
- sentence1: The premise caption that was supplied to the author of the pair.
- sentence2: The hypothesis caption that was written by the author of the pair.
- similarity: This is the label chosen by the majority of annotators.
Where no majority exists, the label "-" is used (we will skip such samples here).
Here are the "similarity" label values in our dataset:
- Contradiction: The sentences have contradictory meanings.
- Entailment: The meaning of one sentence can be inferred (entailed) from the other.
- Neutral: The sentences are neither entailed nor contradictory.
"""
"""
Let's look at one sample from the dataset:
"""
print(f"Sentence1: {train_df.loc[1, 'sentence1']}")
print(f"Sentence2: {train_df.loc[1, 'sentence2']}")
print(f"Similarity: {train_df.loc[1, 'similarity']}")
"""
## Preprocessing
"""
# We have some NaN entries in our train data, so we will simply drop them.
print("Number of missing values")
print(train_df.isnull().sum())
train_df.dropna(axis=0, inplace=True)
"""
Distribution of our training targets.
"""
print("Train Target Distribution")
print(train_df.similarity.value_counts())
"""
Distribution of our validation targets.
"""
print("Validation Target Distribution")
print(valid_df.similarity.value_counts())
"""
The value "-" appears as part of our training and validation targets.
We will skip these samples.
"""
train_df = (
train_df[train_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
valid_df = (
valid_df[valid_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
"""
One-hot encode training, validation, and test labels.
"""
train_df["label"] = train_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3)
valid_df["label"] = valid_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3)
test_df["label"] = test_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3)
"""
## Create a custom data generator
"""
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
"""Generates batches of data.
Args:
sentence_pairs: Array of premise and hypothesis input sentences.
labels: Array of labels.
batch_size: Integer batch size.
shuffle: boolean, whether to shuffle the data.
        include_targets: boolean, whether to include the labels.
Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
        if `include_targets=False`).
"""
def __init__(
self,
sentence_pairs,
labels,
batch_size=batch_size,
shuffle=True,
include_targets=True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.shuffle = shuffle
self.batch_size = batch_size
self.include_targets = include_targets
# Load our BERT Tokenizer to encode the text.
        # We will use the bert-base-uncased pretrained model.
self.tokenizer = transformers.BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.indexes = np.arange(len(self.sentence_pairs))
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch.
return len(self.sentence_pairs) // self.batch_size
def __getitem__(self, idx):
        # Retrieves the batch at the given index.
indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
sentence_pairs = self.sentence_pairs[indexes]
        # With the BERT tokenizer's batch_encode_plus, the two sentences of each
        # pair are encoded together, separated by the [SEP] token.
encoded = self.tokenizer.batch_encode_plus(
sentence_pairs.tolist(),
add_special_tokens=True,
max_length=max_length,
return_attention_mask=True,
return_token_type_ids=True,
pad_to_max_length=True,
return_tensors="tf",
)
# Convert batch of encoded features to numpy array.
input_ids = np.array(encoded["input_ids"], dtype="int32")
attention_masks = np.array(encoded["attention_mask"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
# Set to true if data generator is used for training/validation.
if self.include_targets:
labels = np.array(self.labels[indexes], dtype="int32")
return [input_ids, attention_masks, token_type_ids], labels
else:
return [input_ids, attention_masks, token_type_ids]
def on_epoch_end(self):
# Shuffle indexes after each epoch if shuffle is set to True.
if self.shuffle:
np.random.RandomState(42).shuffle(self.indexes)
"""
## Build the model
"""
# Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Encoded token ids from BERT tokenizer.
input_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="input_ids"
)
    # Attention masks indicate to the model which tokens should be attended to.
attention_masks = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="attention_masks"
)
# Token type ids are binary masks identifying different sequences in the model.
token_type_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="token_type_ids"
)
# Loading pretrained BERT model.
bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
# Freeze the BERT model to reuse the pretrained features without modifying them.
bert_model.trainable = False
bert_output = bert_model(
input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
)
sequence_output = bert_output.last_hidden_state
pooled_output = bert_output.pooler_output
# Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.
bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64, return_sequences=True)
)(sequence_output)
# Applying hybrid pooling approach to bi_lstm sequence output.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3)(concat)
output = tf.keras.layers.Dense(3, activation="softmax")(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_masks, token_type_ids], outputs=output
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["acc"],
)
print(f"Strategy: {strategy}")
model.summary()
"""
Create train and validation data generators
"""
train_data = BertSemanticDataGenerator(
train_df[["sentence1", "sentence2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
valid_data = BertSemanticDataGenerator(
valid_df[["sentence1", "sentence2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)
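"""
As a quick sanity check (an illustrative snippet, not part of the original tutorial),
we can pull a single batch from the training generator to confirm the shapes described
in the `BertSemanticDataGenerator` docstring: three integer arrays of shape
`(batch_size, max_length)` plus one-hot labels of shape `(batch_size, 3)`.
"""
sample_inputs, sample_labels = train_data[0]
print([inp.shape for inp in sample_inputs])  # [(32, 128), (32, 128), (32, 128)]
print(sample_labels.shape)  # (32, 3)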
"""
## Train the Model
Training is done only for the top layers to perform "feature extraction",
which will allow the model to use the representations of the pretrained model.
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Fine-tuning
This step must only be performed after the feature extraction model has
been trained to convergence on the new data.
This is an optional last step where `bert_model` is unfrozen and retrained
with a very low learning rate. This can deliver meaningful improvement by
incrementally adapting the pretrained features to the new data.
"""
# Unfreeze the bert_model.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
"""
## Train the entire model end-to-end
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Evaluate model on the test set
"""
test_data = BertSemanticDataGenerator(
test_df[["sentence1", "sentence2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)
"""
## Inference on custom sentences
"""
def check_similarity(sentence1, sentence2):
sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
test_data = BertSemanticDataGenerator(
sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
)
proba = model.predict(test_data[0])[0]
idx = np.argmax(proba)
proba = f"{proba[idx]: .2f}%"
pred = labels[idx]
return pred, proba
"""
Check results on some example sentence pairs.
"""
sentence1 = "Two women are observing something together."
sentence2 = "Two women are standing with their eyes closed."
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs.
"""
sentence1 = "A smiling costumed woman is holding an umbrella"
sentence2 = "A happy woman in a fairy costume holds an umbrella"
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs.
"""
sentence1 = "A soccer game with multiple males playing"
sentence2 = "Some men are playing a sport"
check_similarity(sentence1, sentence2)
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from oslo.serialization import jsonutils
import requests
from glance.tests import functional
TENANT1 = str(uuid.uuid4())
class TestNamespaceProperties(functional.FunctionalTest):
def setUp(self):
super(TestNamespaceProperties, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
self.start_servers(**self.__dict__.copy())
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'admin',
}
base_headers.update(custom_headers or {})
return base_headers
def test_properties_lifecycle(self):
# Namespace should not exist
path = self._url('/v2/metadefs/namespaces/MyNamespace')
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# Create a namespace
path = self._url('/v2/metadefs/namespaces')
headers = self._headers({'content-type': 'application/json'})
namespace_name = 'MyNamespace'
resource_type_name = 'MyResourceType'
resource_type_prefix = 'MyPrefix'
data = jsonutils.dumps({
"namespace": namespace_name,
"display_name": "My User Friendly Namespace",
"description": "My description",
"visibility": "public",
"protected": False,
"owner": "The Test Owner",
"resource_type_associations": [
{
"name": resource_type_name,
"prefix": resource_type_prefix
}
]
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Property1 should not exist
path = self._url('/v2/metadefs/namespaces/MyNamespace/properties'
'/property1')
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# Create a property
path = self._url('/v2/metadefs/namespaces/MyNamespace/properties')
headers = self._headers({'content-type': 'application/json'})
property_name = "property1"
data = jsonutils.dumps(
{
"name": property_name,
"type": "integer",
"title": "property1",
"description": "property1 description",
"default": 100,
"minimum": 100,
"maximum": 30000369,
"readonly": False,
}
)
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the property created above
path = self._url('/v2/metadefs/namespaces/%s/properties/%s' %
(namespace_name, property_name))
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
property_object = jsonutils.loads(response.text)
self.assertEqual("integer", property_object['type'])
self.assertEqual("property1", property_object['title'])
self.assertEqual("property1 description", property_object[
'description'])
self.assertEqual('100', property_object['default'])
self.assertEqual(100, property_object['minimum'])
self.assertEqual(30000369, property_object['maximum'])
# Get the property with specific resource type association
path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % (
namespace_name, property_name, '='.join(['?resource_type',
resource_type_name])))
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# Get the property with prefix and specific resource type association
property_name_with_prefix = ''.join([resource_type_prefix,
property_name])
path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % (
namespace_name, property_name_with_prefix, '='.join([
'?resource_type', resource_type_name])))
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
property_object = jsonutils.loads(response.text)
self.assertEqual("integer", property_object['type'])
self.assertEqual("property1", property_object['title'])
self.assertEqual("property1 description", property_object[
'description'])
self.assertEqual('100', property_object['default'])
self.assertEqual(100, property_object['minimum'])
self.assertEqual(30000369, property_object['maximum'])
self.assertFalse(property_object['readonly'])
# Returned property should match the created property
property_object = jsonutils.loads(response.text)
checked_keys = set([
u'name',
u'type',
u'title',
u'description',
u'default',
u'minimum',
u'maximum',
u'readonly',
])
self.assertEqual(set(property_object.keys()), checked_keys)
expected_metadata_property = {
"type": "integer",
"title": "property1",
"description": "property1 description",
"default": '100',
"minimum": 100,
"maximum": 30000369,
"readonly": False,
}
for key, value in expected_metadata_property.items():
self.assertEqual(property_object[key], value, key)
# The property should be mutable
path = self._url('/v2/metadefs/namespaces/%s/properties/%s' %
(namespace_name, property_name))
media_type = 'application/json'
headers = self._headers({'content-type': media_type})
property_name = "property1-UPDATED"
data = jsonutils.dumps(
{
"name": property_name,
"type": "string",
"title": "string property",
"description": "desc-UPDATED",
"operators": ["<or>"],
"default": "value-UPDATED",
"minLength": 5,
"maxLength": 10,
"readonly": True,
}
)
response = requests.put(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned property should reflect the changes
property_object = jsonutils.loads(response.text)
self.assertEqual('string', property_object['type'])
self.assertEqual('desc-UPDATED', property_object['description'])
self.assertEqual('value-UPDATED', property_object['default'])
self.assertEqual(["<or>"], property_object['operators'])
self.assertEqual(5, property_object['minLength'])
self.assertEqual(10, property_object['maxLength'])
self.assertTrue(property_object['readonly'])
# Updates should persist across requests
path = self._url('/v2/metadefs/namespaces/%s/properties/%s' %
(namespace_name, property_name))
        response = requests.get(path, headers=self._headers())
        self.assertEqual(200, response.status_code)
        property_object = jsonutils.loads(response.text)
        self.assertEqual('string', property_object['type'])
self.assertEqual('desc-UPDATED', property_object['description'])
self.assertEqual('value-UPDATED', property_object['default'])
self.assertEqual(["<or>"], property_object['operators'])
self.assertEqual(5, property_object['minLength'])
self.assertEqual(10, property_object['maxLength'])
# Deletion of property property1
path = self._url('/v2/metadefs/namespaces/%s/properties/%s' %
(namespace_name, property_name))
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# property1 should not exist
path = self._url('/v2/metadefs/namespaces/%s/properties/%s' %
(namespace_name, property_name))
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
|
|
#!/usr/bin/python
import urllib2, base64, re, struct, time, socket, sys, datetime, os.path
try:
import json
except:
import simplejson as json
zabbix_host = '{{ zabbix_server_ipaddr }}' # Zabbix server IP
zabbix_port = 10051 # Zabbix server port
hostname = '{{ hostname }}' # Name of the monitored host, as it appears in the Zabbix web UI
time_delta = 1 # grep interval in minutes
# URL to nginx stat (http_stub_status_module)
stat_url = 'http://localhost/nginx_stat'
# Nginx log file path
nginx_log_file_path = '/usr/local/nginx/logs/open_search_api_access.log'
# Optional Basic Auth
username = 'user'
password = 'pass'
# Temp file, with log file cursor position
seek_file = '/tmp/nginx_log_stat'
class Metric(object):
def __init__(self, host, key, value, clock=None):
self.host = host
self.key = key
self.value = value
self.clock = clock
def __repr__(self):
if self.clock is None:
return 'Metric(%r, %r, %r)' % (self.host, self.key, self.value)
return 'Metric(%r, %r, %r, %r)' % (self.host, self.key, self.value, self.clock)
def send_to_zabbix(metrics, zabbix_host='127.0.0.1', zabbix_port=10051):
j = json.dumps
metrics_data = []
for m in metrics:
clock = m.clock or ('%d' % time.time())
metrics_data.append(('{"host":%s,"key":%s,"value":%s,"clock":%s}') % (j(m.host), j(m.key), j(m.value), j(clock)))
json_data = ('{"request":"sender data","data":[%s]}') % (','.join(metrics_data))
data_len = struct.pack('<Q', len(json_data))
packet = 'ZBXD\x01'+ data_len + json_data
#print packet
#print ':'.join(x.encode('hex') for x in packet)
try:
zabbix = socket.socket()
zabbix.connect((zabbix_host, zabbix_port))
zabbix.sendall(packet)
resp_hdr = _recv_all(zabbix, 13)
if not resp_hdr.startswith('ZBXD\x01') or len(resp_hdr) != 13:
print 'Wrong zabbix response'
return False
resp_body_len = struct.unpack('<Q', resp_hdr[5:])[0]
resp_body = zabbix.recv(resp_body_len)
zabbix.close()
resp = json.loads(resp_body)
#print resp
if resp.get('response') != 'success':
print 'Got error from Zabbix: %s' % resp
return False
return True
except:
print 'Error while sending data to Zabbix'
return False
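# Illustrative usage of the sender above (kept as a comment so the script's
# behaviour is unchanged; the key 'nginx[rps]' and the value 42 are placeholders):
#
#   send_to_zabbix([Metric(hostname, 'nginx[rps]', 42)], zabbix_host, zabbix_port)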
def _recv_all(sock, count):
buf = ''
while len(buf)<count:
chunk = sock.recv(count-len(buf))
if not chunk:
return buf
buf += chunk
return buf
def get(url, login, passwd):
req = urllib2.Request(url)
if login and passwd:
base64string = base64.encodestring('%s:%s' % (login, passwd)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
q = urllib2.urlopen(req)
res = q.read()
q.close()
return res
def parse_nginx_stat(data):
a = {}
# Active connections
a['active_connections'] = re.match(r'(.*):\s(\d*)', data[0], re.M | re.I).group(2)
# Accepts
a['accepted_connections'] = re.match(r'\s(\d*)\s(\d*)\s(\d*)', data[2], re.M | re.I).group(1)
# Handled
a['handled_connections'] = re.match(r'\s(\d*)\s(\d*)\s(\d*)', data[2], re.M | re.I).group(2)
# Requests
a['handled_requests'] = re.match(r'\s(\d*)\s(\d*)\s(\d*)', data[2], re.M | re.I).group(3)
# Reading
a['header_reading'] = re.match(r'(.*):\s(\d*)(.*):\s(\d*)(.*):\s(\d*)', data[3], re.M | re.I).group(2)
# Writing
a['body_reading'] = re.match(r'(.*):\s(\d*)(.*):\s(\d*)(.*):\s(\d*)', data[3], re.M | re.I).group(4)
# Waiting
a['keepalive_connections'] = re.match(r'(.*):\s(\d*)(.*):\s(\d*)(.*):\s(\d*)', data[3], re.M | re.I).group(6)
return a
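# For reference, the http_stub_status_module output parsed above looks like
# the following (the numbers are illustrative):
#
#   Active connections: 291
#   server accepts handled requests
#    16630948 16630948 31070465
#   Reading: 6 Writing: 179 Waiting: 106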
def read_seek(file):
if os.path.isfile(file):
f = open(file, 'r')
try:
result = int(f.readline())
f.close()
return result
except:
return 0
else:
return 0
def write_seek(file, value):
f = open(file, 'w')
f.write(value)
f.close()
#print '[12/Mar/2014:03:21:13 +0400]'
d = datetime.datetime.now()-datetime.timedelta(minutes=time_delta)
minute = int(time.mktime(d.timetuple()) / 60)*60
d = d.strftime('%d/%b/%Y:%H:%M')
total_rps = 0
rps = [0]*60
tps = [0]*60
res_code = {}
nf = open(nginx_log_file_path, 'r')
new_seek = seek = read_seek(seek_file)
# if new log file, don't do seek
if os.path.getsize(nginx_log_file_path) > seek:
nf.seek(seek)
line = nf.readline()
while line:
if d in line:
new_seek = nf.tell()
total_rps += 1
sec = int(re.match('(.*):(\d+):(\d+):(\d+)\s', line).group(4))
# Added for ltsv log format
ltsv = re.match(r'time:', line)
if ltsv is None:
code = re.match(r'(.*)"\s(\d*)\s', line).group(2)
else:
code = re.match(r'(.*)status:(\d*)\t', line).group(2)
if code in res_code:
res_code[code] += 1
else:
res_code[code] = 1
rps[sec] += 1
line = nf.readline()
if total_rps != 0:
write_seek(seek_file, str(new_seek))
nf.close()
metric = (len(sys.argv) >= 2) and re.match(r'nginx\[(.*)\]', sys.argv[1], re.M | re.I).group(1) or False
data = get(stat_url, username, password).split('\n')
data = parse_nginx_stat(data)
data_to_send = []
# Adding the metrics to response
if not metric:
for i in data:
data_to_send.append(Metric(hostname, ('nginx[%s]' % i), data[i]))
else:
print data[metric]
# Adding the requests-per-second values to the response
for t in range(0,60):
data_to_send.append(Metric(hostname, 'nginx[rps]', rps[t], minute+t))
# Adding the response code stats to the response
for t in res_code:
data_to_send.append(Metric(hostname, ('nginx[%s]' % t), res_code[t]))
send_to_zabbix(data_to_send, zabbix_host, zabbix_port)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
import os
import sys
import compas_rhino
import compas._os
import compas.plugins
__all__ = [
'install',
'installable_rhino_packages',
'after_rhino_install'
]
INSTALLED_VERSION = None
def install(version=None, packages=None, clean=False):
"""Install COMPAS for Rhino.
Parameters
----------
version : {'5.0', '6.0', '7.0', '8.0'}, optional
The version number of Rhino.
Default is ``'7.0'``.
packages : list of str, optional
List of packages to install or None to use default package list.
Default is the result of ``installable_rhino_packages``,
which collects all installable packages in the current environment.
clean : bool, optional
If True, this will clean up the entire scripts folder and remove
also existing symlinks that are not importable in the current environment.
Examples
--------
.. code-block:: python
import compas_rhino.install
compas_rhino.install.install()
.. code-block:: bash
python -m compas_rhino.install
"""
version = compas_rhino._check_rhino_version(version)
# We install COMPAS packages in the scripts folder
# instead of directly as IPy module.
scripts_path = compas_rhino._get_rhino_scripts_path(version)
# This is for old installs
ipylib_path = compas_rhino._get_rhino_ironpython_lib_path(version)
# Filter the provided list of packages
# If no packages are provided
# this first collects all installable packages from the environment.
packages = _filter_installable_packages(version, packages)
results = []
symlinks_to_install = []
symlinks_to_uninstall = []
exit_code = 0
# check all installable packages
# add the packages that can't be imported from the current env to the list of symlinks to uninstall
# and remove the package name from the list of installable packages
# make a copy of the list to avoid problems with removing items
# note: perhaps this should already happen in the filter function...
for name in packages[:]:
try:
importlib.import_module(name)
except ImportError:
path = os.path.join(scripts_path, name)
symlinks_to_uninstall.append(dict(name=name, link=path))
packages.remove(name)
# Also remove all broken symlinks from the scripts folder
# because ... they're broken!
# If it is an actual folder or a file, leave it alone
# because probably someone put it there on purpose.
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if not os.path.exists(path):
symlinks_to_uninstall.append(dict(name=name, link=path))
try:
importlib.import_module(name)
except ImportError:
pass
else:
if name not in packages:
packages.append(name)
# If the scripts folder is supposed to be cleaned
# also remove all existing symlinks that cannot be imported
# and reinstall symlinks that can be imported
if clean:
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if os.path.exists(path):
try:
importlib.import_module(name)
except ImportError:
path = os.path.join(scripts_path, name)
symlinks_to_uninstall.append(dict(name=name, link=path))
else:
if name not in packages:
packages.append(name)
# add all of the packages in the list of installable packages
# to the list of symlinks to uninstall
# and to the list of symlinks to install
for package in packages:
symlink_path = os.path.join(scripts_path, package)
symlinks_to_uninstall.append(dict(name=package, link=symlink_path))
package_path = compas_rhino._get_package_path(importlib.import_module(package))
symlinks_to_install.append(dict(name=package, source_path=package_path, link=symlink_path))
# Handle legacy install location
# This does not always work,
# and especially not in cases where it is not necessary :)
if ipylib_path:
legacy_path = os.path.join(ipylib_path, package)
if os.path.exists(legacy_path):
symlinks_to_uninstall.append(dict(name=package, link=legacy_path))
# -------------------------
# Uninstall first
# -------------------------
symlinks = [link['link'] for link in symlinks_to_uninstall]
uninstall_results = compas._os.remove_symlinks(symlinks)
# Let the user know if some symlinks could not be removed.
for uninstall_data, success in zip(symlinks_to_uninstall, uninstall_results):
if not success:
results.append((uninstall_data['name'], 'ERROR: Cannot remove symlink, try to run as administrator.'))
# Handle legacy bootstrapper
# Again, only if possible...
if ipylib_path:
if not compas_rhino._try_remove_bootstrapper(ipylib_path):
results.append(('compas_bootstrapper', 'ERROR: Cannot remove legacy compas_bootstrapper, try to run as administrator.'))
# -------------------------
# Ready to start installing
# -------------------------
# create new symlinks and register the results
symlinks = [(link['source_path'], link['link']) for link in symlinks_to_install]
install_results = compas._os.create_symlinks(symlinks)
# set the exit code based on the installation results
if not all(install_results):
exit_code = -1
# make a list of installed packages
# based on the installation results
# and update the general results list
installed_packages = []
for install_data, success in zip(symlinks_to_install, install_results):
if success:
installed_packages.append(install_data['name'])
result = 'OK'
else:
result = 'ERROR: Cannot create symlink, try to run as administrator.'
results.append((install_data['name'], result))
# finalize the general results list with info about the bootstrapper
if exit_code == -1:
results.append(('compas_bootstrapper', 'WARNING: One or more packages failed, will not install bootstrapper, try uninstalling first'))
else:
try:
_update_bootstrapper(scripts_path, packages)
results.append(('compas_bootstrapper', 'OK'))
except: # noqa: E722
results.append(('compas_bootstrapper', 'ERROR: Could not create compas_bootstrapper to auto-determine Python environment'))
# output the outcome of the installation process
    # perhaps we should add more info here
print('Installing COMPAS packages to Rhino {0} scripts folder:'.format(version))
print('{}\n'.format(scripts_path))
for package, status in results:
print(' {} {}'.format(package.ljust(20), status))
if status != 'OK':
exit_code = -1
if exit_code == 0 and len(installed_packages):
print('\nRunning post-installation steps...\n')
if not _run_post_execution_steps(after_rhino_install(installed_packages)):
exit_code = -1
print('\nInstall completed.')
if exit_code != 0:
sys.exit(exit_code)
global INSTALLED_VERSION
INSTALLED_VERSION = version
def _run_post_execution_steps(steps_generator):
all_steps_succeeded = True
post_execution_errors = []
for result in steps_generator:
if isinstance(result, Exception):
post_execution_errors.append(result)
continue
for item in result:
try:
package, message, success = item
status = 'OK' if success else 'ERROR'
if not success:
all_steps_succeeded = False
print(' {} {}: {}'.format(package.ljust(20), status, message))
except ValueError:
post_execution_errors.append(ValueError('Step ran without errors but result is wrongly formatted: {}'.format(str(item))))
if post_execution_errors:
print('\nOne or more errors occurred:\n')
for error in post_execution_errors:
print(' - {}'.format(repr(error)))
all_steps_succeeded = False
return all_steps_succeeded
@compas.plugins.plugin(category='install', pluggable_name='installable_rhino_packages', tryfirst=True)
def default_installable_rhino_packages():
# While this list could obviously be hard-coded, I think
# eating our own dogfood and using plugins to define this, just like
    # any other extension/plugin would, is a better way to ensure consistent behavior.
return ['compas', 'compas_rhino']
@compas.plugins.pluggable(category='install', selector='collect_all')
def installable_rhino_packages():
"""Provide a list of packages to make available inside Rhino.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to automatically
have their packages made available inside Rhino when
COMPAS is installed into it.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def installable_rhino_packages():
... return ['compas_fab']
Returns
-------
:obj:`list` of :obj:`str`
List of package names to make available inside Rhino.
"""
pass
@compas.plugins.pluggable(category='install', selector='collect_all')
def after_rhino_install(installed_packages):
"""Allows extensions to execute actions after install to Rhino is done.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to perform
additional steps after an installation to Rhino has
been completed.
Parameters
----------
installed_packages : :obj:`list` of :obj:`str`
List of packages that have been installed successfully.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def after_rhino_install(installed_packages):
... # Do something after package is installed to Rhino, eg, copy components, etc
... return [('compas_ghpython', 'GH Components installed', True)]
Returns
-------
:obj:`list` of 3-tuple (str, str, bool)
List containing a 3-tuple with component name, message and True/False success flag.
"""
pass
def _update_bootstrapper(install_path, packages):
# Take either the CONDA environment directory or the current Python executable's directory
python_directory = os.environ.get('CONDA_PREFIX', None) or os.path.dirname(sys.executable)
environment_name = os.environ.get('CONDA_DEFAULT_ENV', '')
conda_exe = os.environ.get('CONDA_EXE', '')
compas_bootstrapper = compas_rhino._get_bootstrapper_path(install_path)
bootstrapper_data = compas_rhino._get_bootstrapper_data(compas_bootstrapper)
installed_packages = bootstrapper_data.get('INSTALLED_PACKAGES', [])
installed_packages = list(set(installed_packages + list(packages)))
with open(compas_bootstrapper, 'w') as f:
f.write('ENVIRONMENT_NAME = r"{}"\n'.format(environment_name))
f.write('PYTHON_DIRECTORY = r"{}"\n'.format(python_directory))
f.write('CONDA_EXE = r"{}"\n'.format(conda_exe))
f.write('INSTALLED_PACKAGES = {}'.format(repr(installed_packages)))
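# For reference, the compas_bootstrapper file written above ends up looking
# roughly like this (paths and environment name are illustrative placeholders,
# not real values):
#
#   ENVIRONMENT_NAME = r"my-env"
#   PYTHON_DIRECTORY = r"/home/user/miniconda3/envs/my-env"
#   CONDA_EXE = r"/home/user/miniconda3/bin/conda"
#   INSTALLED_PACKAGES = ['compas', 'compas_rhino']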
def _filter_installable_packages(version, packages):
ghpython_incompatible = False
if compas.OSX and version == 5.0:
ghpython_incompatible = True
if not packages:
# Flatten list of results (resulting from collect_all pluggable)
packages = sorted(set(itertools.chain.from_iterable(installable_rhino_packages())))
elif 'compas_ghpython' in packages and ghpython_incompatible:
print('Skipping installation of compas_ghpython since it\'s not supported for Rhino 5 for Mac')
if ghpython_incompatible:
packages.remove('compas_ghpython')
return packages
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--version',
choices=compas_rhino.SUPPORTED_VERSIONS,
default=compas_rhino.DEFAULT_VERSION,
help="The version of Rhino to install the packages in."
)
parser.add_argument('-p', '--packages', nargs='+', help="The packages to install.")
parser.add_argument('--clean', dest='clean', default=False, action='store_true')
args = parser.parse_args()
install(version=args.version, packages=args.packages, clean=args.clean)
|
|
"""Module for sending notifications to an iPhone via Prowl.
Includes a `post` method for one-off messages, a `Prowl` class to assist in
sending multiple messages, and a `LogHandler` for sending log records via
prowl.
See: http://www.prowlapp.com/
"""
__author__ = 'Mike Boers'
__author_email__ = 'github@mikeboers.com'
__version__ = '1.0b'
__license__ = 'New BSD License'
from urllib import urlencode
from urllib2 import urlopen, HTTPError
from xml.etree.ElementTree import XML
import logging
__all__ = ['Error', 'get_remaining', 'get_reset_time', 'verify', 'post',
'Prowl', 'LogHandler']
API_URL_BASE = 'https://api.prowlapp.com/publicapi/'
DEFAULT_PRIORITY = 0
DEFAULT_APP = 'py:%s' % __name__
DEFAULT_EVENT = 'default'
class Error(ValueError):
pass
# This will be continually updated whenever a request is made.
_last_meta_data = {}
def get_remaining():
"""Get the number of posts we are allowed to make before the reset date.
Returns None if no successful requests have been made yet.
"""
return _last_meta_data.get('remaining')
def get_reset_time():
"""Get the time in unix time (UTC) at which our remaining counter resets.
Returns None if no successful requests have been made yet.
"""
return _last_meta_data.get('resetdate')
def _request(method, data=None):
"""Make the raw request to the Prowl API."""
# Catch the errors and treat them just like the normal response.
try:
res = urlopen(API_URL_BASE + method, urlencode(data) if data else None)
except HTTPError as res:
pass
xml = XML(res.read())
if xml.tag != 'prowl':
raise Error('malformed response: unexpected tag %r' % xml.tag)
children = xml.getchildren()
if len(children) != 1:
raise Error('malformed response: too many children')
node = children[0]
status, data, text = node.tag, node.attrib, node.text
if status not in ('success', 'error'):
raise Error('malformed response: unknown status %r' % node.tag)
if 'code' not in node.attrib:
raise Error('malformed response: no response code')
if status == 'error' and not text:
        raise Error('malformed response: no error message with code %s' % data['code'])
data = dict((k, int(v)) for k, v in data.items())
_last_meta_data.update(data)
return status, data, text
def verify(key):
"""Verify an API key is valid.
Params:
key -- The API key to verify
Return:
True if the key is valid, False if not.
Raises prowl.Error if the response is malformed in any way, or for any
error reason besides an invalid key (which is what we are testing for).
From the official docs:
For the sake of adding a notification do not call verify first; it costs
you an API call. You should only use verify to confirm an API key is valid
in situations like a user entering an API key into your program. If it's
not valid while posting the notification, you will get the appropriate
error.
"""
tag, data, text = _request('verify?apikey=' + key)
if tag == 'success':
return True
if data['code'] == 401 and text == 'Invalid API key':
return False
raise Error(text.lower())
def post(key, message, priority=None, url=None, app=None, event=None, providerkey=None):
"""Send a message.
Parameters:
key -- An API key, or a list of API keys to post to.
message -- The message to send.
priority -- Integer from -2 to 2 inclusive.
url -- Requires Prowl 1.2 The URL which should be attached to the
notification.
app -- App identifier to send as.
event -- Event identifier to send as.
providerkey -- Provider API key if you have been whitelisted.
"""
# We are not enforcing the maximum lengths on the application (256),
# event (1024), or message (10000). Nor am I forcing anything to be an
# int or str. I'm going to let the server yell at you.
data = {
'apikey': key if isinstance(key, basestring) else ','.join(key),
'priority': priority or DEFAULT_PRIORITY,
'application': app or DEFAULT_APP,
'event': event or DEFAULT_EVENT,
'description': message
}
if url is not None:
data['url'] = url
if providerkey is not None:
data['providerkey'] = providerkey
status, data, text = _request('add', data)
if status != 'success':
raise Error(text.lower())
class Prowl(object):
"""An object to simplify repeated prowling.
    Parameters for the constructor are the same as for prowl.post, and set the
    defaults which can be overridden by Prowl.post.
"""
def __init__(self, key, **defaults):
self.defaults = defaults
self.defaults['key'] = key
def post(self, message, **kwargs):
"""Post a message.
        Parameters here override the defaults of this object.
"""
meta = self.defaults.copy()
meta.update(kwargs)
return post(
message=message,
**meta
)
class LogHandler(logging.Handler, Prowl):
"""Log handler which sends messages via Prowl.
Constructor takes prowl parameters which will be used when sending logs.
The event and app will be used as format strings with the log record
data. You can use the same keys for log formatters found here:
http://docs.python.org/library/logging.html#formatter-objects
"""
def __init__(self, key, **kwargs):
logging.Handler.__init__(self)
Prowl.__init__(self, key, **kwargs)
def emit(self, record):
data = {}
for key in ('app', 'event'):
if key in self.defaults:
data[key] = self.defaults[key] % record.__dict__
message = self.format(record)
self.post(message=message, **data)
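# A minimal usage sketch of the API defined above. The API key is a placeholder;
# substitute a real Prowl key. Guarded so importing this module stays side-effect free.
if __name__ == '__main__':
    API_KEY = '0123456789abcdef0123456789abcdef01234567'  # placeholder, not a real key
    # One-off message.
    post(API_KEY, 'Build finished', app='ci', event='build')
    # Reusable object with defaults that can be overridden per call.
    prowler = Prowl(API_KEY, app='ci')
    prowler.post('Deploy started', event='deploy', priority=1)
    # Forward warnings and above from the logging module through Prowl.
    log = logging.getLogger('ci')
    handler = LogHandler(API_KEY, app='ci', event='%(levelname)s')
    handler.setLevel(logging.WARNING)
    log.addHandler(handler)
    log.warning('Disk usage is high')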
|
|
import argparse
import os
import librosa
from tensorflow.python.ops import gen_audio_ops as contrib_audio
import tensorflow as tf
import numpy as np
import pickle as pkl
# mel spectrum constants.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def _mel_to_hertz(mel_values, name=None):
"""Converts frequencies in `mel_values` from the mel scale to linear scale.
Args:
mel_values: A `Tensor` of frequencies in the mel scale.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type as `mel_values` containing linear
scale frequencies in Hertz.
"""
with tf.name_scope(name, 'mel_to_hertz', [mel_values]):
mel_values = tf.convert_to_tensor(mel_values)
return _MEL_BREAK_FREQUENCY_HERTZ * (
tf.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0
)
def _hertz_to_mel(frequencies_hertz, name=None):
"""Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.
Args:
frequencies_hertz: A `Tensor` of frequencies in Hertz.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type of `frequencies_hertz` containing
frequencies in the mel scale.
"""
with tf.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
frequencies_hertz = tf.convert_to_tensor(frequencies_hertz)
return _MEL_HIGH_FREQUENCY_Q * tf.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
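# The two helpers above implement the closed-form mel <-> Hertz relations
# (the same 700 Hz break frequency and 1127 scale constant as the HTK-style
# convention referenced later in this file):
#
#   mel(f) = 1127.0 * ln(1 + f / 700.0)
#   f(mel) = 700.0 * (exp(mel / 1127.0) - 1.0)
#
# Quick sanity check: 1000 Hz -> 1127.0 * ln(1 + 1000.0 / 700.0) ~= 1000 mel.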
def _griffin_lim_tensorflow(S, stft, istft, num_iters=50):
with tf.variable_scope('griffinlim'):
# TensorFlow's stft and istft operate on a batch of spectrograms; create batch of size 1
S = tf.expand_dims(S, 0)
S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))
y = istft(S_complex)
for i in range(num_iters):
est = stft(y)
angles = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64)
y = istft(S_complex * angles)
return tf.squeeze(y, 0)
def get_deepspeech_mfccs(samples, sample_rate=16000):
decoded = contrib_audio.decode_wav(samples, desired_channels=1)
spectrogram = contrib_audio.audio_spectrogram(decoded.audio, window_size=512, stride=320, magnitude_squared=True)
return contrib_audio.mfcc(
spectrogram=spectrogram, sample_rate=decoded.sample_rate, dct_coefficient_count=26,
upper_frequency_limit=sample_rate / 2)
def audio2mfccs(pcm, frame_length=512, frame_step=320, sample_rate=16000):
log_mel_spectrograms = audio2logmel(pcm, frame_length, frame_step, sample_rate)
# Compute MFCCs from log_mel_spectrograms and take the first 26.
# dct2 = tf.signal.dct(log_mel_spectrograms, type=2)
# mfccs = dct2 * tf.rsqrt(40 * 2.0)
# mfccs = mfccs[:, :26]
return logmel2mfccs(log_mel_spectrograms)
def audio2spectrograms(pcm, frame_length=512, frame_step=320, sample_rate=16000):
stft = lambda inp: tf.signal.stft(inp, frame_length=frame_length, frame_step=frame_step)
pcm = tf.squeeze(pcm, -1)
stfts = stft(pcm)
return tf.abs(stfts)
def audio2logmel(pcm, frame_length=512, frame_step=320, sample_rate=16000):
spectrograms = audio2spectrograms(pcm, frame_length, frame_step, sample_rate)
# Warp the linear scale spectrograms into the mel-scale.
# linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
# num_mel_bins=40,
# num_spectrogram_bins=stfts.shape[-1].value,
# sample_rate=sample_rate,
# lower_edge_hertz=20.,
# upper_edge_hertz=sample_rate / 2)
num_mel_bins = 40
num_spectrogram_bins = spectrograms.shape[-1].value
lower_edge_hertz = 20.
upper_edge_hertz = sample_rate / 2
zero = 0.0
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = tf.linspace(
zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
spectrogram_bins_mel = tf.expand_dims(
_hertz_to_mel(linear_frequencies), 1)
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = tf.signal.frame(
tf.linspace(_hertz_to_mel(lower_edge_hertz),
_hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2), frame_length=3, frame_step=1)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel, center_mel, upper_edge_mel = tuple(tf.reshape(
t, [1, num_mel_bins]) for t in tf.split(band_edges_mel, 3, axis=1))
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = tf.maximum(zero, tf.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
linear_to_mel_weight_matrix = tf.pad(
mel_weights_matrix, [[bands_to_zero, 0], [0, 0]])
mel_spectrograms = tf.matmul(spectrograms, linear_to_mel_weight_matrix)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
return tf.math.log(mel_spectrograms)
def logmel2mfccs(log_mel_spectrograms):
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrograms)[:, :26]
return mfccs
def mfccs2audio(mfccs, frame_length=512, frame_step=320, sample_rate=16000):
_mfccs = tf.concat([mfccs, tf.zeros([mfccs.shape[0].value, 14])], axis=-1)
dct2 = _mfccs / tf.rsqrt(40 * 2.0)
log_mel_spectrograms = tf.signal.idct(dct2, type=2) * 0.5 / 40
return logmel2audio(log_mel_spectrograms, frame_length, frame_step, sample_rate)
def logmel2audio(log_mel_spectrograms, frame_length=512, frame_step=320, sample_rate=16000):
mel_spectrograms = tf.math.exp(log_mel_spectrograms)
num_spectrogram_bins = 257
num_mel_bins = 40
# HTK excludes the spectrogram DC bin.
nyquist_hertz = sample_rate / 2.0
mel_frequencies = tf.linspace(_hertz_to_mel(20.), _hertz_to_mel(sample_rate / 2), num_mel_bins)
spectrogram_bins_mel = tf.expand_dims(_mel_to_hertz(mel_frequencies), 1)
    # Compute num_spectrogram_bins - 1 triples of (lower_edge, center, upper_edge)
    # over the linear frequency axis. The center of each band is the lower and
    # upper edge of the adjacent bands. Accordingly, we divide [0, nyquist_hertz]
    # into num_spectrogram_bins + 2 points and drop the DC bin.
band_edges_mel = tf.signal.frame(
tf.linspace(0., nyquist_hertz, num_spectrogram_bins + 2)[1:],
frame_length=3, frame_step=1)
    # Split the triples up and reshape them into [1, num_spectrogram_bins - 1] tensors.
lower_edge_mel, center_mel, upper_edge_mel = tuple(
tf.reshape(t, [1, num_spectrogram_bins - 1]) for t in tf.split(band_edges_mel, 3, axis=1))
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = tf.maximum(0., tf.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
mel_to_linear_weight_matrix = tf.pad(mel_weights_matrix, [[0, 0], [1, 0]])
spectrograms = tf.matmul(mel_spectrograms, mel_to_linear_weight_matrix)
return spectrograms2audio(spectrograms, frame_length, frame_step, sample_rate)
def spectrograms2audio(spectrograms, frame_length=512, frame_step=320, sample_rate=16000):
stft = lambda inp: tf.signal.stft(inp, frame_length=frame_length, frame_step=frame_step)
istft = lambda inp: tf.signal.inverse_stft(inp, frame_length=frame_length, frame_step=frame_step)
pcm = _griffin_lim_tensorflow(spectrograms, stft, istft, num_iters=50)
return pcm
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Invert MFCCs to audio.')
parser.add_argument('input_file', help='Path to .pkl / .wav input file')
parser.add_argument('output_file', help='Path to .wav output file')
parser.add_argument('--input_type', default='mfccs', help='Input type: logmel / mfccs')
args = parser.parse_args()
# Load from file
ext = os.path.splitext(args.input_file)[-1]
print("Reading from file...")
if ext == '.wav':
samples = tf.io.read_file(args.input_file)
decoded = contrib_audio.decode_wav(samples, desired_channels=1)
audio = decoded.audio
if args.input_type == 'mfccs':
inp = audio2mfccs(audio)
elif args.input_type == 'logmel':
inp = audio2logmel(audio)
elif args.input_type == 'spectrograms':
inp = audio2spectrograms(audio)
else:
raise ValueError("%s is not supported" % args.input_type)
elif ext == '.pkl':
audio = None
with open(args.input_file, 'rb') as f:
x_r = pkl.load(f)
x_r = tf.squeeze(tf.constant(x_r), 0)
inp = x_r
else:
raise ValueError("%s input is not supported" % ext)
if args.input_type == 'mfccs':
pcm = mfccs2audio(inp)
elif args.input_type == 'logmel':
pcm = logmel2audio(inp)
elif args.input_type == 'spectrograms':
pcm = spectrograms2audio(inp)
elif args.input_type == 'audio':
pcm = inp[:, 0]
encoded = tf.audio.encode_wav(tf.expand_dims(pcm, 1), sample_rate=16000)
if audio is not None:
dist = tf.norm(pcm - audio)
else:
dist = tf.constant(0.)
with tf.Session() as sess:
wav = sess.run(encoded)
with open(args.output_file, 'wb') as f:
f.write(wav)
# print("Distance to original audio: %f" % dist)
print("File is outputted to %s" % args.output_file)
|
|
from __future__ import absolute_import
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import datetime
from lxml.builder import E
from lxml.etree import tostring
try:
import mx.DateTime
HAS_MX_DATETIME = True
except ImportError:
HAS_MX_DATETIME = False
from .schema import solr_date, SolrSchema, SolrError
from .search import SolrSearch, MltSolrSearch, PaginateOptions, SortOptions, FieldLimitOptions, \
FacetOptions, FacetRangeOptions, HighlightOptions, MoreLikeThisOptions, params_from_dict
from .strings import RawString
from .sunburnt import SolrInterface
from .test_sunburnt import MockConnection, MockResponse
from nose.tools import assert_equal
debug = False
schema_string = \
"""<schema name="timetric" version="1.1">
<types>
<fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="text" class="solr.TextField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="int" class="solr.IntField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sint" class="solr.SortableIntField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="long" class="solr.LongField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="slong" class="solr.SortableLongField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="float" class="solr.FloatField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sfloat" class="solr.SortableFloatField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="double" class="solr.DoubleField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="date" class="solr.DateField" sortMissingLast="true" omitNorms="true"/>
</types>
<fields>
<field name="string_field" required="true" type="string" multiValued="true"/>
<field name="text_field" required="true" type="text"/>
<field name="boolean_field" required="false" type="boolean"/>
<field name="int_field" required="true" type="int"/>
<field name="sint_field" type="sint"/>
<field name="long_field" type="long"/>
<field name="slong_field" type="slong"/>
<field name="long_field" type="long"/>
<field name="slong_field" type="slong"/>
<field name="float_field" type="float"/>
<field name="sfloat_field" type="sfloat"/>
<field name="double_field" type="double"/>
<field name="sdouble_field" type="sdouble"/>
<field name="date_field" type="date"/>
</fields>
<defaultSearchField>text_field</defaultSearchField>
<uniqueKey>int_field</uniqueKey>
</schema>"""
schema = SolrSchema(StringIO(schema_string))
class MockInterface(object):
schema = schema
interface = MockInterface()
good_query_data = {
"query_by_term":(
(["hello"], {},
[("q", u"hello")]),
(["hello"], {"int_field":3},
[("q", u"hello AND int_field:3")]),
(["hello", "world"], {},
[("q", u"hello AND world")]),
# NB this next is not really what we want,
# probably this should warn
(["hello world"], {},
[("q", u"hello\\ world")]),
),
"query_by_phrase":(
(["hello"], {},
[("q", u"hello")]),
(["hello"], {"int_field":3},
[("q", u"int_field:3 AND hello")]), # Non-text data is always taken to be a term, and terms come before phrases, so order is reversed
(["hello", "world"], {},
[("q", u"hello AND world")]),
(["hello world"], {},
[("q", u"hello\\ world")]),
([], {'string_field':['hello world', 'goodbye, cruel world']},
[("q", u"string_field:goodbye,\\ cruel\\ world AND string_field:hello\\ world")]),
),
"filter_by_term":(
(["hello"], {},
[("fq", u"hello"), ("q", "*:*")]),
(["hello"], {"int_field":3},
[("fq", u"hello AND int_field:3"), ("q", "*:*")]),
(["hello", "world"], {},
[("fq", u"hello AND world"), ("q", "*:*")]),
# NB this next is not really what we want,
# probably this should warn
(["hello world"], {},
[("fq", u"hello\\ world"), ("q", "*:*")]),
),
"filter_by_phrase":(
(["hello"], {},
[("fq", u"hello"), ("q", "*:*")]),
(["hello"], {"int_field":3},
[("fq", u"int_field:3 AND hello"), ("q", "*:*")]),
(["hello", "world"], {},
[("fq", u"hello AND world"), ("q", "*:*")]),
(["hello world"], {},
[("fq", u"hello\\ world"), ("q", "*:*")]),
),
"filter":(
(["hello"], {},
[("fq", u"hello"), ("q", "*:*")]),
(["hello"], {"int_field":3},
[("fq", u"hello AND int_field:3"), ("q", "*:*")]),
(["hello", "world"], {},
[("fq", u"hello AND world"), ("q", "*:*")]),
(["hello world"], {},
[("fq", u"hello\\ world"), ("q", "*:*")]),
),
"query":(
#Basic queries
(["hello"], {},
[("q", u"hello")]),
(["hello"], {"int_field":3},
[("q", u"hello AND int_field:3")]),
(["hello", "world"], {},
[("q", u"hello AND world")]),
(["hello world"], {},
[("q", u"hello\\ world")]),
#Test fields
# Boolean fields take any truth-y value
([], {"boolean_field":True},
[("q", u"boolean_field:true")]),
([], {"boolean_field":'true'},
[("q", u"boolean_field:true")]),
([], {"boolean_field":1},
[("q", u"boolean_field:true")]),
([], {"boolean_field":"false"},
[("q", u"boolean_field:false")]),
([], {"boolean_field":0},
[("q", u"boolean_field:false")]),
([], {"boolean_field":False},
[("q", u"boolean_field:false")]),
([], {"int_field":3},
[("q", u"int_field:3")]),
([], {"int_field":3.1}, # casting from float should work
[("q", u"int_field:3")]),
([], {"sint_field":3},
[("q", u"sint_field:3")]),
([], {"sint_field":3.1}, # casting from float should work
[("q", u"sint_field:3")]),
([], {"long_field":2**31},
[("q", u"long_field:2147483648")]),
([], {"slong_field":2**31},
[("q", u"slong_field:2147483648")]),
([], {"float_field":3.0},
[("q", u"float_field:3.0")]),
([], {"float_field":3}, # casting from int should work
[("q", u"float_field:3.0")]),
([], {"sfloat_field":3.0},
[("q", u"sfloat_field:3.0")]),
([], {"sfloat_field":3}, # casting from int should work
[("q", u"sfloat_field:3.0")]),
([], {"double_field":3.0},
[("q", u"double_field:3.0")]),
([], {"double_field":3}, # casting from int should work
[("q", u"double_field:3.0")]),
([], {"sdouble_field":3.0},
[("q", u"sdouble_field:3.0")]),
([], {"sdouble_field":3}, # casting from int should work
[("q", u"sdouble_field:3.0")]),
([], {"date_field":datetime.datetime(2009, 1, 1)},
[("q", u"date_field:2009\\-01\\-01T00\\:00\\:00Z")]),
#Test ranges
([], {"int_field__any":True},
[("q", u"int_field:[* TO *]")]),
([], {"int_field__lt":3},
[("q", u"int_field:{* TO 3}")]),
([], {"int_field__gt":3},
[("q", u"int_field:{3 TO *}")]),
([], {"int_field__rangeexc":(-3, 3)},
[("q", u"int_field:{\-3 TO 3}")]),
([], {"int_field__rangeexc":(3, -3)},
[("q", u"int_field:{\-3 TO 3}")]),
([], {"int_field__lte":3},
[("q", u"int_field:[* TO 3]")]),
([], {"int_field__gte":3},
[("q", u"int_field:[3 TO *]")]),
([], {"int_field__range":(-3, 3)},
[("q", u"int_field:[\-3 TO 3]")]),
([], {"int_field__range":(3, -3)},
[("q", u"int_field:[\-3 TO 3]")]),
([], {"date_field__lt":datetime.datetime(2009, 1, 1)},
[("q", u"date_field:{* TO 2009\\-01\\-01T00\\:00\\:00Z}")]),
([], {"date_field__gt":datetime.datetime(2009, 1, 1)},
[("q", u"date_field:{2009\\-01\\-01T00\\:00\\:00Z TO *}")]),
([], {"date_field__rangeexc":(datetime.datetime(2009, 1, 1), datetime.datetime(2009, 1, 2))},
[("q", "date_field:{2009\\-01\\-01T00\\:00\\:00Z TO 2009\\-01\\-02T00\\:00\\:00Z}")]),
([], {"date_field__lte":datetime.datetime(2009, 1, 1)},
[("q", u"date_field:[* TO 2009\\-01\\-01T00\\:00\\:00Z]")]),
([], {"date_field__gte":datetime.datetime(2009, 1, 1)},
[("q", u"date_field:[2009\\-01\\-01T00\\:00\\:00Z TO *]")]),
([], {"date_field__range":(datetime.datetime(2009, 1, 1), datetime.datetime(2009, 1, 2))},
[("q", u"date_field:[2009\\-01\\-01T00\\:00\\:00Z TO 2009\\-01\\-02T00\\:00\\:00Z]")]),
([], {'string_field':['hello world', 'goodbye, cruel world']},
[("q", u"string_field:goodbye,\\ cruel\\ world AND string_field:hello\\ world")]),
# Raw strings
([], {'string_field':RawString("abc*???")},
[("q", "string_field:abc\\*\\?\\?\\?")]),
),
}
if HAS_MX_DATETIME:
good_query_data['query'] += \
(([], {"date_field":mx.DateTime.DateTime(2009, 1, 1)},
[("q", u"date_field:2009\\-01\\-01T00\\:00\\:00Z")]),)
def check_query_data(method, args, kwargs, output):
solr_search = SolrSearch(interface)
p = getattr(solr_search, method)(*args, **kwargs).params()
try:
assert p == output, "Unequal: %r, %r" % (p, output)
except AssertionError:
if debug:
print p
print output
import pdb;pdb.set_trace()
raise
else:
raise
def test_query_data():
for method, data in good_query_data.items():
for args, kwargs, output in data:
yield check_query_data, method, args, kwargs, output
bad_query_data = (
{"int_field":"a"},
{"int_field":2**31},
{"int_field":-(2**31)-1},
{"long_field":"a"},
{"long_field":2**63},
{"long_field":-(2**63)-1},
{"float_field":"a"},
{"float_field":2**1000},
{"float_field":-(2**1000)},
{"double_field":"a"},
{"double_field":2**2000},
{"double_field":-(2**2000)},
{"date_field":"a"},
{"int_field__gt":"a"},
{"date_field__gt":"a"},
{"int_field__range":1},
{"date_field__range":1},
)
def check_bad_query_data(kwargs):
solr_search = SolrSearch(interface)
try:
solr_search.query(**kwargs).params()
except SolrError:
pass
else:
assert False
def test_bad_query_data():
for kwargs in bad_query_data:
yield check_bad_query_data, kwargs
good_option_data = {
PaginateOptions:(
({"start":5, "rows":10},
{"start":5, "rows":10}),
({"start":5, "rows":None},
{"start":5}),
({"start":None, "rows":10},
{"rows":10}),
),
FacetOptions:(
({"fields":"int_field"},
{"facet":True, "facet.field":["int_field"]}),
({"fields":["int_field", "text_field"]},
{"facet":True, "facet.field":["int_field","text_field"]}),
({"prefix":"abc"},
{"facet":True, "facet.prefix":"abc"}),
({"prefix":"abc", "sort":True, "limit":3, "offset":25, "mincount":1, "missing":False, "method":"enum"},
{"facet":True, "facet.prefix":"abc", "facet.sort":True, "facet.limit":3, "facet.offset":25, "facet.mincount":1, "facet.missing":False, "facet.method":"enum"}),
({"fields":"int_field", "prefix":"abc"},
{"facet":True, "facet.field":["int_field"], "f.int_field.facet.prefix":"abc"}),
({"fields":"int_field", "prefix":"abc", "limit":3},
{"facet":True, "facet.field":["int_field"], "f.int_field.facet.prefix":"abc", "f.int_field.facet.limit":3}),
({"fields":["int_field", "text_field"], "prefix":"abc", "limit":3},
{"facet":True, "facet.field":["int_field", "text_field"], "f.int_field.facet.prefix":"abc", "f.int_field.facet.limit":3, "f.text_field.facet.prefix":"abc", "f.text_field.facet.limit":3, }),
),
FacetRangeOptions:(
({"fields": {"int_field": {"start": 1, "end": 10, "gap": 2}}},
{'facet': True, 'facet.range': ['int_field'], 'f.int_field.facet.range.start': 1, 'f.int_field.facet.range.end': 10, 'f.int_field.facet.range.gap': 2}),
({"fields": {"float_field": {"start": 2.5, "end": 11.5, "gap": 1.5}}},
{'facet': True, 'facet.range': ['float_field'], 'f.float_field.facet.range.start': 2.5, 'f.float_field.facet.range.end': 11.5, 'f.float_field.facet.range.gap': 1.5}),
({"fields": {"date_field": {"start": solr_date(datetime.datetime(2000,1,1)), "end": solr_date(datetime.datetime(2010,12,1)), "gap": "+1YEAR"}}},
{'facet': True, 'facet.range': ['date_field'], 'f.date_field.facet.range.start': u"2000-01-01T00:00:00Z", 'f.date_field.facet.range.end': u"2010-12-01T00:00:00Z", 'f.date_field.facet.range.gap': '+1YEAR'}),
({"fields": {"int_field": {"start": 1, "end": 10, "gap": 2, "hardend": True, "include": "lower", "other": "none"}}},
{'facet': True, 'facet.range': ['int_field'], 'f.int_field.facet.range.start': 1, 'f.int_field.facet.range.end': 10, 'f.int_field.facet.range.gap': 2, 'f.int_field.facet.range.hardend': True, 'f.int_field.facet.range.include': "lower", 'f.int_field.facet.range.other': "none"}),
),
SortOptions:(
({"field":"int_field"},
{"sort":"int_field asc"}),
({"field":"-int_field"},
{"sort":"int_field desc"}),
),
HighlightOptions:(
({"fields":"int_field"},
{"hl":True, "hl.fl":"int_field"}),
({"fields":["int_field", "text_field"]},
{"hl":True, "hl.fl":"int_field,text_field"}),
({"snippets":3},
{"hl":True, "hl.snippets":3}),
({"snippets":3, "fragsize":5, "mergeContinuous":True, "requireFieldMatch":True, "maxAnalyzedChars":500, "alternateField":"text_field", "maxAlternateFieldLength":50, "formatter":"simple", "simple.pre":"<b>", "simple.post":"</b>", "fragmenter":"regex", "usePhraseHighlighter":True, "highlightMultiTerm":True, "regex.slop":0.2, "regex.pattern":"\w", "regex.maxAnalyzedChars":100},
{"hl":True, "hl.snippets":3, "hl.fragsize":5, "hl.mergeContinuous":True, "hl.requireFieldMatch":True, "hl.maxAnalyzedChars":500, "hl.alternateField":"text_field", "hl.maxAlternateFieldLength":50, "hl.formatter":"simple", "hl.simple.pre":"<b>", "hl.simple.post":"</b>", "hl.fragmenter":"regex", "hl.usePhraseHighlighter":True, "hl.highlightMultiTerm":True, "hl.regex.slop":0.2, "hl.regex.pattern":"\w", "hl.regex.maxAnalyzedChars":100}),
({"fields":"int_field", "snippets":"3"},
{"hl":True, "hl.fl":"int_field", "f.int_field.hl.snippets":3}),
({"fields":"int_field", "snippets":3, "fragsize":5},
{"hl":True, "hl.fl":"int_field", "f.int_field.hl.snippets":3, "f.int_field.hl.fragsize":5}),
({"fields":["int_field", "text_field"], "snippets":3, "fragsize":5},
{"hl":True, "hl.fl":"int_field,text_field", "f.int_field.hl.snippets":3, "f.int_field.hl.fragsize":5, "f.text_field.hl.snippets":3, "f.text_field.hl.fragsize":5}),
),
MoreLikeThisOptions:(
({"fields":"int_field"},
{"mlt":True, "mlt.fl":"int_field"}),
({"fields":["int_field", "text_field"]},
{"mlt":True, "mlt.fl":"int_field,text_field"}),
({"fields":["text_field", "string_field"], "query_fields":{"text_field":0.25, "string_field":0.75}},
{"mlt":True, "mlt.fl":"string_field,text_field", "mlt.qf":"text_field^0.25 string_field^0.75"}),
({"fields":"text_field", "count":1},
{"mlt":True, "mlt.fl":"text_field", "mlt.count":1}),
),
FieldLimitOptions:(
({},
{}),
({"fields":"int_field"},
{"fl":"int_field"}),
({"fields":["int_field", "text_field"]},
{"fl":"int_field,text_field"}),
({"score": True},
{"fl":"score"}),
({"all_fields": True, "score": True},
{"fl":"*,score"}),
({"fields":"int_field", "score": True},
{"fl":"int_field,score"}),
),
}
def check_good_option_data(OptionClass, kwargs, output):
optioner = OptionClass(schema)
optioner.update(**kwargs)
assert_equal(output, optioner.options())
def test_good_option_data():
for OptionClass, option_data in good_option_data.items():
for kwargs, output in option_data:
yield check_good_option_data, OptionClass, kwargs, output
# All these tests should really nominate which exception they're going to throw.
bad_option_data = {
PaginateOptions:(
{"start":-1, "rows":None}, # negative start
{"start":None, "rows":-1}, # negative rows
),
FacetOptions:(
{"fields":"myarse"}, # Undefined field
{"oops":True}, # undefined option
{"limit":"a"}, # invalid type
{"sort":"yes"}, # invalid choice
{"offset":-1}, # invalid value
),
FacetRangeOptions:(
{"fields": {"int_field": {"start": 1, "end": 10}}}, # start, end, & gap are all required
{"fields": {"int_field": {"start": "foo", "end": "bar", "gap": "+1YEAR"}}}, # string is not a valid type for range facet endpoint
{"fields": {"int_field": {"start": 1, "end": 10, "gap": "+1YEAR"}}}, # gap must be an appropriate type
{"fields": {"int_field": {"start": 10, "end": 1, "gap": 2}}}, # start must be less than end
{"fields": {"date_field": {"start": datetime.datetime(2000,1,1), "end": datetime.datetime(2010,1,1), "gap":"+1YEAR"}}}, # datetime is not a valid type for range facet endpoint
{"fields": {"date_field": {"start": datetime.datetime(2000,1,1), "end": datetime.datetime(2010,1,1), "gap": "blah blah"}}}, # if the gap is a string, it must meet solr syntax
{"fields": {"date_field": {"start": datetime.datetime(2000,1,1), "end": datetime.datetime(2010,1,1), "gap": "+1EON"}}}, # if the gap is a string, it must use valid lucene units
{"fields": {"date_field": {"start": 1, "end": 3.5, "gap": 0.5}}}, # incompatible types for start and end
),
SortOptions:(
{"field":"myarse"}, # Undefined field
{"field":"string_field"}, # Multivalued field
),
HighlightOptions:(
{"fields":"myarse"}, # Undefined field
{"oops":True}, # undefined option
{"snippets":"a"}, # invalid type
{"alternateField":"yourarse"}, # another invalid option
),
MoreLikeThisOptions:(
{"fields":"myarse"}, # Undefined field
{"fields":"text_field", "query_fields":{"text_field":0.25, "string_field":0.75}}, # string_field in query_fields, not fields
{"fields":"text_field", "query_fields":{"text_field":"a"}}, # Non-float value for boost
{"fields":"text_field", "oops":True}, # undefined option
{"fields":"text_field", "count":"a"} # Invalid value for option
),
}
def check_bad_option_data(OptionClass, kwargs):
option = OptionClass(schema)
try:
option.update(**kwargs)
except SolrError:
pass
else:
assert False
def test_bad_option_data():
for OptionClass, option_data in bad_option_data.items():
for kwargs in option_data:
yield check_bad_option_data, OptionClass, kwargs
complex_boolean_queries = (
(lambda q: q.query("hello world").filter(q.Q(text_field="tow") | q.Q(boolean_field=False, int_field__gt=3)),
[('fq', u'text_field:tow OR (boolean_field:false AND int_field:{3 TO *})'), ('q', u'hello\\ world')]),
(lambda q: q.query("hello world").filter(q.Q(text_field="tow") & q.Q(boolean_field=False, int_field__gt=3)),
[('fq', u'boolean_field:false AND text_field:tow AND int_field:{3 TO *}'), ('q', u'hello\\ world')]),
# Test various combinations of NOTs at the top level.
# Sometimes we need to do the *:* trick, sometimes not.
(lambda q: q.query(~q.Q("hello world")),
[('q', u'NOT hello\\ world')]),
(lambda q: q.query(~q.Q("hello world") & ~q.Q(int_field=3)),
[('q', u'NOT hello\\ world AND NOT int_field:3')]),
(lambda q: q.query("hello world", ~q.Q(int_field=3)),
[('q', u'hello\\ world AND NOT int_field:3')]),
(lambda q: q.query("abc", q.Q("def"), ~q.Q(int_field=3)),
[('q', u'abc AND def AND NOT int_field:3')]),
(lambda q: q.query("abc", q.Q("def") & ~q.Q(int_field=3)),
[('q', u'abc AND def AND NOT int_field:3')]),
(lambda q: q.query("abc", q.Q("def") | ~q.Q(int_field=3)),
[('q', u'abc AND (def OR (*:* AND NOT int_field:3))')]),
(lambda q: q.query(q.Q("abc") | ~q.Q("def")),
[('q', u'abc OR (*:* AND NOT def)')]),
(lambda q: q.query(q.Q("abc") | q.Q(~q.Q("def"))),
[('q', u'abc OR (*:* AND NOT def)')]),
# Make sure that ANDs are flattened
(lambda q: q.query("def", q.Q("abc"), q.Q(q.Q("xyz"))),
[('q', u'abc AND def AND xyz')]),
# Make sure that ORs are flattened
(lambda q: q.query(q.Q("def") | q.Q(q.Q("xyz"))),
[('q', u'def OR xyz')]),
# Make sure that empty queries are discarded in ANDs
(lambda q: q.query("def", q.Q("abc"), q.Q(), q.Q(q.Q() & q.Q("xyz"))),
[('q', u'abc AND def AND xyz')]),
# Make sure that empty queries are discarded in ORs
(lambda q: q.query(q.Q() | q.Q("def") | q.Q(q.Q() | q.Q("xyz"))),
[('q', u'def OR xyz')]),
# Test cancellation of NOTs.
(lambda q: q.query(~q.Q(~q.Q("def"))),
[('q', u'def')]),
(lambda q: q.query(~q.Q(~q.Q(~q.Q("def")))),
[('q', u'NOT def')]),
# Test it works through sub-sub-queries
(lambda q: q.query(~q.Q(q.Q(q.Q(~q.Q(~q.Q("def")))))),
[('q', u'NOT def')]),
# Even with empty queries in there
(lambda q: q.query(~q.Q(q.Q(q.Q() & q.Q(q.Q() | ~q.Q(~q.Q("def")))))),
[('q', u'NOT def')]),
# Test escaping of AND, OR, NOT
(lambda q: q.query("AND", "OR", "NOT"),
[('q', u'"AND" AND "NOT" AND "OR"')]),
    # Test exclude (rather than explicit NOT)
(lambda q: q.query("blah").exclude(q.Q("abc") | q.Q("def") | q.Q("ghi")),
[('q', u'blah AND NOT (abc OR def OR ghi)')]),
# Try boosts
(lambda q: q.query("blah").query(q.Q("def")**1.5),
[('q', u'blah AND def^1.5')]),
(lambda q: q.query("blah").query((q.Q("def") | q.Q("ghi"))**1.5),
[('q', u'blah AND (def OR ghi)^1.5')]),
(lambda q: q.query("blah").query(q.Q("def", ~q.Q("pqr") | q.Q("mno"))**1.5),
[('q', u'blah AND (def AND ((*:* AND NOT pqr) OR mno))^1.5')]),
# And boost_relevancy
(lambda q: q.query("blah").boost_relevancy(1.5, int_field=3),
[('q', u'blah OR (blah AND int_field:3^1.5)')]),
(lambda q: q.query("blah").boost_relevancy(1.5, int_field=3).boost_relevancy(2, string_field='def'),
[('q', u'blah OR (blah AND (int_field:3^1.5 OR string_field:def^2))')]),
(lambda q: q.query("blah").query("blah2").boost_relevancy(1.5, int_field=3),
[('q', u'(blah AND blah2) OR (blah AND blah2 AND int_field:3^1.5)')]),
(lambda q: q.query(q.Q("blah") | q.Q("blah2")).boost_relevancy(1.5, int_field=3),
[('q', u'blah OR blah2 OR ((blah OR blah2) AND int_field:3^1.5)')]),
# And ranges
(lambda q: q.query(int_field__any=True),
[('q', u'int_field:[* TO *]')]),
(lambda q: q.query("blah", ~q.Q(int_field__any=True)),
[('q', u'blah AND NOT int_field:[* TO *]')]),
)
def check_complex_boolean_query(solr_search, query, output):
p = query(solr_search).params()
try:
assert p == output
except AssertionError:
if debug:
print p
print output
import pdb;pdb.set_trace()
raise
else:
raise
# And check no mutation of the base object
q = query(solr_search).params()
try:
assert p == q
except AssertionError:
if debug:
print p
print q
import pdb;pdb.set_trace()
raise
def test_complex_boolean_queries():
solr_search = SolrSearch(interface)
for query, output in complex_boolean_queries:
yield check_complex_boolean_query, solr_search, query, output
param_encode_data = (
({"int":3, "string":"string", "unicode":u"unicode"},
[("int", "3"), ("string", "string"), ("unicode", "unicode")]),
({"int":3, "string":"string", "unicode":u"\N{UMBRELLA}nicode"},
[("int", "3"), ("string", "string"), ("unicode", "\xe2\x98\x82nicode")]),
({"int":3, "string":"string", u"\N{UMBRELLA}nicode":u"\N{UMBRELLA}nicode"},
[("int", "3"), ("string", "string"), ("\xe2\x98\x82nicode", "\xe2\x98\x82nicode")]),
({"true":True, "false":False},
[("false", "false"), ("true", "true")]),
({"list":["first", "second", "third"]},
[("list", "first"), ("list", "second"), ("list", "third")]),
)
def check_url_encode_data(kwargs, output):
# Convert for pre-2.6.5 python
s_kwargs = dict((k.encode('utf8'), v) for k, v in kwargs.items())
assert params_from_dict(**s_kwargs) == output
def test_url_encode_data():
for kwargs, output in param_encode_data:
yield check_url_encode_data, kwargs, output
mlt_query_options_data = (
('text_field', {}, {},
[('mlt.fl', 'text_field')]),
(['string_field', 'text_field'], {'string_field': 3.0}, {},
[('mlt.fl', 'string_field,text_field'), ('mlt.qf', 'string_field^3.0')]),
('text_field', {}, {'mindf': 3, 'interestingTerms': 'details'},
[('mlt.fl', 'text_field'), ('mlt.interestingTerms', 'details'),
('mlt.mindf', '3')]),
)
def check_mlt_query_options(fields, query_fields, kwargs, output):
q = MltSolrSearch(interface, content="This is the posted content.")
q = q.mlt(fields, query_fields=query_fields, **kwargs)
assert_equal(q.params(), output)
def test_mlt_query_options():
for (fields, query_fields, kwargs, output) in mlt_query_options_data:
yield check_mlt_query_options, fields, query_fields, kwargs, output
class HighlightingMockResponse(MockResponse):
def __init__(self, highlighting, *args, **kwargs):
self.highlighting = highlighting
super(HighlightingMockResponse, self).__init__(*args, **kwargs)
def extra_response_parts(self):
contents = []
if self.highlighting:
contents.append(
E.lst({'name':'highlighting'}, E.lst({'name':'0'}, E.arr({'name':'string_field'}, E.str('zero'))))
)
return contents
class HighlightingMockConnection(MockConnection):
def _handle_request(self, uri_obj, params, method, body, headers):
highlighting = params.get('hl') == ['true']
if method == 'GET' and uri_obj.path.endswith('/select/'):
return self.MockStatus(200), HighlightingMockResponse(highlighting, 0, 1).xml_response()
highlighting_interface = SolrInterface("http://test.example.com/", http_connection=HighlightingMockConnection())
solr_highlights_data = (
(None, dict, None),
(['string_field'], dict, {'string_field': ['zero']}),
)
def check_transform_results(highlighting, constructor, solr_highlights):
q = highlighting_interface.query('zero')
if highlighting:
q = q.highlight(highlighting)
docs = q.execute(constructor=constructor).result.docs
assert_equal(docs[0].get('solr_highlights'), solr_highlights)
assert isinstance(docs[0], constructor)
def test_transform_result():
for highlighting, constructor, solr_highlights in solr_highlights_data:
yield check_transform_results, highlighting, constructor, solr_highlights
#Test More Like This results
class MltMockResponse(MockResponse):
def extra_response_parts(self):
contents = []
create_doc = lambda value: E.doc(E.str({'name':'string_field'}, value))
#Main response result
contents.append(
E.result({'name': 'response'},
create_doc('zero')
)
)
#More like this results
contents.append(
E.lst({'name':'moreLikeThis'},
E.result({'name': 'zero', 'numFound': '3', 'start': '0'},
create_doc('one'),
create_doc('two'),
create_doc('three')
)
)
)
return contents
class MltMockConnection(MockConnection):
def _handle_request(self, uri_obj, params, method, body, headers):
if method == 'GET' and uri_obj.path.endswith('/select/'):
return self.MockStatus(200), MltMockResponse(0, 1).xml_response()
mlt_interface = SolrInterface("http://test.example.com/",
http_connection=MltMockConnection())
class DummyDocument(object):
def __init__(self, **kw):
self.kw = kw
def __repr__(self):
return "DummyDocument<%r>" % self.kw
def get(self, key):
return self.kw.get(key)
def make_dummydoc(**kwargs):
return DummyDocument(**kwargs)
solr_mlt_transform_data = (
(dict, dict),
(DummyDocument, DummyDocument),
(make_dummydoc, DummyDocument),
)
def check_mlt_transform_results(constructor, _type):
q = mlt_interface.query('zero')
query = q.mlt(fields='string_field')
response = q.execute(constructor=constructor)
for doc in response.result.docs:
assert isinstance(doc, _type)
for key in response.more_like_these:
for doc in response.more_like_these[key].docs:
assert isinstance(doc, _type)
def test_mlt_transform_result():
for constructor, _type in solr_mlt_transform_data:
yield check_mlt_transform_results, constructor, _type
|
|
import json
class Event(object):
def __init__(self, sender=None, recipient=None, timestamp=None, **kwargs):
if sender is None:
sender = dict()
if recipient is None:
recipient = dict()
self.sender = sender
self.recipient = recipient
self.timestamp = timestamp
@property
def sender_id(self):
return self.sender.get('id')
@property
def recipient_id(self):
return self.recipient.get('id')
@classmethod
def new_from_json_dict(cls, data):
return cls(**data)
def __str__(self):
return json.dumps(self.__class__.__name__)
class MessageEvent(Event):
def __init__(self, message, **kwargs):
super(MessageEvent, self).__init__(**kwargs)
self.name = 'message'
self.message = message
@property
def mid(self):
return self.message.get('mid')
@property
def text(self):
return self.message.get('text')
@property
def attachments(self):
return self.message.get('attachments', [])
@property
def quick_reply(self):
return self.message.get('quick_reply', {})
@property
def quick_reply_payload(self):
return self.quick_reply.get('payload')
@property
def is_quick_reply(self):
return self.message.get('quick_reply') is not None
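# A minimal sketch of building a MessageEvent from one webhook "messaging"
# entry. The field names follow the constructor arguments above; the payload
# values are illustrative placeholders:
#
#   entry = {
#       'sender': {'id': '12345'},
#       'recipient': {'id': '67890'},
#       'timestamp': 1580000000,
#       'message': {'mid': 'mid.abc', 'text': 'hello'},
#   }
#   event = MessageEvent.new_from_json_dict(entry)
#   event.sender_id  # '12345'
#   event.text       # 'hello'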
class DeliveriesEvent(Event):
def __init__(self, delivery, **kwargs):
super(DeliveriesEvent, self).__init__(**kwargs)
self.name = 'delivery'
self.delivery = delivery
@property
def mids(self):
return self.delivery.get('mids')
@property
def watermark(self):
return self.delivery.get('watermark')
@property
def seq(self):
return self.delivery.get('seq')
class EchoEvent(Event):
def __init__(self, message, **kwargs):
super(EchoEvent, self).__init__(**kwargs)
self.name = 'echo'
self.message = message
@property
def mid(self):
return self.message.get('mid')
@property
def app_id(self):
return self.message.get('app_id')
@property
def metadata(self):
return self.message.get('metadata')
@property
def text(self):
return self.message.get('text')
@property
def attachments(self):
return self.message.get('attachments')
class ReadEvent(Event):
def __init__(self, read, **kwargs):
super(ReadEvent, self).__init__(**kwargs)
self.name = 'read'
self.read = read
@property
def seq(self):
return self.read.get('seq')
@property
def watermark(self):
return self.read.get('watermark')
class AccountLinkingEvent(Event):
def __init__(self, account_linking, **kwargs):
super(AccountLinkingEvent, self).__init__(**kwargs)
self.name = 'account_linking'
self.account_linking = account_linking
@property
def status(self):
return self.account_linking.get('status')
@property
def is_linked(self):
return self.status == 'linked'
@property
def authorization_code(self):
return self.account_linking.get('authorization_code')
class GamePlayEvent(Event):
def __init__(self, game_play, **kwargs):
super(GamePlayEvent, self).__init__(**kwargs)
self.name = 'game_play'
self.game_play = game_play
@property
def game_id(self):
return self.game_play.get('game_id')
@property
def player_id(self):
return self.game_play.get('player_id')
@property
def context_type(self):
return self.game_play.get('context_type')
@property
def context_id(self):
return self.game_play.get('context_id')
@property
def score(self):
return self.game_play.get('score')
@property
def payload(self):
return self.game_play.get('payload')
class PassThreadEvent(Event):
def __init__(self, pass_thread_control, **kwargs):
super(PassThreadEvent, self).__init__(**kwargs)
self.name = 'pass_thread_control'
self.pass_thread_control = pass_thread_control
@property
def new_owner_app_id(self):
return self.pass_thread_control.get('new_owner_app_id')
@property
def metadata(self):
return self.pass_thread_control.get('metadata')
class TakeThreadEvent(Event):
def __init__(self, take_thread_control, **kwargs):
super(TakeThreadEvent, self).__init__(**kwargs)
self.name = 'take_thread_control'
self.take_thread_control = take_thread_control
@property
def previous_owner_app_id(self):
return self.take_thread_control.get('previous_owner_app_id')
@property
def metadata(self):
return self.take_thread_control.get('metadata')
class RequestThreadEvent(Event):
def __init__(self, request_thread_control, **kwargs):
super(RequestThreadEvent, self).__init__(**kwargs)
self.name = 'request_thread_control'
self.request_thread_control = request_thread_control
@property
def requested_owner_app_id(self):
return self.request_thread_control.get('requested_owner_app_id')
@property
def metadata(self):
return self.request_thread_control.get('metadata')
class AppRoleEvent(Event):
def __init__(self, app_roles, **kwargs):
super(AppRoleEvent, self).__init__(**kwargs)
self.name = 'app_roles'
self.app_roles = app_roles
class OptinEvent(Event):
def __init__(self, optin, **kwargs):
super(OptinEvent, self).__init__(**kwargs)
self.name = 'optin'
self.optin = optin
@property
def ref(self):
return self.optin.get('ref')
@property
def user_ref(self):
return self.optin.get('user_ref')
class PolicyEnforcementEvent(Event):
def __init__(self, policy_enforcement, **kwargs):
super(PolicyEnforcementEvent, self).__init__(**kwargs)
self.name = 'policy_enforcement'
self.policy_enforcement = policy_enforcement
@property
def action(self):
return self.policy_enforcement.get('action')
@property
def reason(self):
return self.policy_enforcement.get('reason')
class PostBackEvent(Event):
def __init__(self, postback, **kwargs):
super(PostBackEvent, self).__init__(**kwargs)
self.name = 'postback'
self.postback = postback
@property
def title(self):
return self.postback.get('title')
@property
def payload(self):
return self.postback.get('payload')
@property
def referral(self):
return self.postback.get('referral')
class ReferralEvent(Event):
def __init__(self, referral, **kwargs):
super(ReferralEvent, self).__init__(**kwargs)
self.name = 'referral'
self.referral = referral
@property
def source(self):
return self.referral.get('source')
@property
def type(self):
return self.referral.get('type')
@property
def ref(self):
return self.referral.get('ref')
@property
def referer_uri(self):
return self.referral.get('referer_uri')
class CheckOutUpdateEvent(Event): #beta
def __init__(self, checkout_update, **kwargs):
super(CheckOutUpdateEvent, self).__init__(**kwargs)
self.name = 'checkout_update'
self.checkout_update = checkout_update
@property
def payload(self):
return self.checkout_update.get('payload')
@property
def shipping_address(self):
return self.checkout_update.get('shipping_address')
class PaymentEvent(Event): #beta
def __init__(self, payment, **kwargs):
super(PaymentEvent, self).__init__(**kwargs)
self.name = 'payment'
self.payment = payment
@property
def payload(self):
return self.payment.get('payload')
@property
def requested_user_info(self):
return self.payment.get('requested_user_info')
@property
def payment_credential(self):
return self.payment.get('payment_credential')
@property
def amount(self):
return self.payment.get('amount')
@property
def shipping_option_id(self):
return self.payment.get('shipping_option_id')
class StandByEvent(Event):
    # TODO: decide how standby events should be handled; suggestions welcome.
pass
class PrecheckoutEvent(Event): # beta
pass
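# An illustrative dispatcher for webhook "messaging" entries. It assumes the
# webhook field names match the constructor argument names of the classes
# above; EchoEvent, StandByEvent and PrecheckoutEvent are left out of this
# sketch. This is a convenience example, not an official mapping.
_EVENT_KEY_TO_CLASS = {
    'message': MessageEvent,
    'delivery': DeliveriesEvent,
    'read': ReadEvent,
    'account_linking': AccountLinkingEvent,
    'game_play': GamePlayEvent,
    'pass_thread_control': PassThreadEvent,
    'take_thread_control': TakeThreadEvent,
    'request_thread_control': RequestThreadEvent,
    'app_roles': AppRoleEvent,
    'optin': OptinEvent,
    'policy_enforcement': PolicyEnforcementEvent,
    'postback': PostBackEvent,
    'referral': ReferralEvent,
    'checkout_update': CheckOutUpdateEvent,
    'payment': PaymentEvent,
}
def event_from_messaging_entry(entry):
    """Build the matching Event subclass for a single messaging entry dict."""
    for key, cls in _EVENT_KEY_TO_CLASS.items():
        if key in entry:
            return cls.new_from_json_dict(entry)
    # Fall back to the generic Event if no known payload key is present.
    return Event.new_from_json_dict(entry)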
|
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import datetime
import logging
import signal
import threading
import time
import sys
from typing import DefaultDict, Dict, List, Mapping, Set, Sequence, Tuple
from concurrent import futures
import grpc
from grpc_channelz.v1 import channelz
import grpc_admin
from src.proto.grpc.testing import test_pb2
from src.proto.grpc.testing import test_pb2_grpc
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import empty_pb2
logger = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)-8s %(message)s')
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
_SUPPORTED_METHODS = (
"UnaryCall",
"EmptyCall",
)
_METHOD_CAMEL_TO_CAPS_SNAKE = {
"UnaryCall": "UNARY_CALL",
"EmptyCall": "EMPTY_CALL",
}
_METHOD_STR_TO_ENUM = {
"UnaryCall": messages_pb2.ClientConfigureRequest.UNARY_CALL,
"EmptyCall": messages_pb2.ClientConfigureRequest.EMPTY_CALL,
}
_METHOD_ENUM_TO_STR = {v: k for k, v in _METHOD_STR_TO_ENUM.items()}
PerMethodMetadataType = Mapping[str, Sequence[Tuple[str, str]]]
_CONFIG_CHANGE_TIMEOUT = datetime.timedelta(milliseconds=500)
class _StatsWatcher:
_start: int
_end: int
_rpcs_needed: int
_rpcs_by_peer: DefaultDict[str, int]
_rpcs_by_method: DefaultDict[str, DefaultDict[str, int]]
_no_remote_peer: int
_lock: threading.Lock
_condition: threading.Condition
def __init__(self, start: int, end: int):
self._start = start
self._end = end
self._rpcs_needed = end - start
self._rpcs_by_peer = collections.defaultdict(int)
self._rpcs_by_method = collections.defaultdict(
lambda: collections.defaultdict(int))
self._condition = threading.Condition()
self._no_remote_peer = 0
def on_rpc_complete(self, request_id: int, peer: str, method: str) -> None:
"""Records statistics for a single RPC."""
if self._start <= request_id < self._end:
with self._condition:
if not peer:
self._no_remote_peer += 1
else:
self._rpcs_by_peer[peer] += 1
self._rpcs_by_method[method][peer] += 1
self._rpcs_needed -= 1
self._condition.notify()
def await_rpc_stats_response(
self, timeout_sec: int) -> messages_pb2.LoadBalancerStatsResponse:
"""Blocks until a full response has been collected."""
with self._condition:
self._condition.wait_for(lambda: not self._rpcs_needed,
timeout=float(timeout_sec))
response = messages_pb2.LoadBalancerStatsResponse()
for peer, count in self._rpcs_by_peer.items():
response.rpcs_by_peer[peer] = count
for method, count_by_peer in self._rpcs_by_method.items():
for peer, count in count_by_peer.items():
response.rpcs_by_method[method].rpcs_by_peer[peer] = count
response.num_failures = self._no_remote_peer + self._rpcs_needed
return response
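# Illustrative note (not part of the original client): a _StatsWatcher observes
# the RPCs whose ids fall in [start, end). _on_rpc_done() feeds it through
# on_rpc_complete(), and await_rpc_stats_response() blocks until every expected
# RPC has reported or timeout_sec elapses; RPCs still outstanding at that point
# are reported as num_failures together with any RPCs that had no remote peer.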
_global_lock = threading.Lock()
_stop_event = threading.Event()
_global_rpc_id: int = 0
_watchers: Set[_StatsWatcher] = set()
_global_server = None
_global_rpcs_started: Mapping[str, int] = collections.defaultdict(int)
_global_rpcs_succeeded: Mapping[str, int] = collections.defaultdict(int)
_global_rpcs_failed: Mapping[str, int] = collections.defaultdict(int)
# Mapping[method, Mapping[status_code, count]]
_global_rpc_statuses: Mapping[str, Mapping[int, int]] = collections.defaultdict(
lambda: collections.defaultdict(int))
def _handle_sigint(sig, frame) -> None:
_stop_event.set()
_global_server.stop(None)
class _LoadBalancerStatsServicer(test_pb2_grpc.LoadBalancerStatsServiceServicer
):
def __init__(self):
super(_LoadBalancerStatsServicer, self).__init__()
def GetClientStats(
self, request: messages_pb2.LoadBalancerStatsRequest,
context: grpc.ServicerContext
) -> messages_pb2.LoadBalancerStatsResponse:
logger.info("Received stats request.")
start = None
end = None
watcher = None
with _global_lock:
start = _global_rpc_id + 1
end = start + request.num_rpcs
watcher = _StatsWatcher(start, end)
_watchers.add(watcher)
response = watcher.await_rpc_stats_response(request.timeout_sec)
with _global_lock:
_watchers.remove(watcher)
logger.info("Returning stats response: %s", response)
return response
def GetClientAccumulatedStats(
self, request: messages_pb2.LoadBalancerAccumulatedStatsRequest,
context: grpc.ServicerContext
) -> messages_pb2.LoadBalancerAccumulatedStatsResponse:
logger.info("Received cumulative stats request.")
response = messages_pb2.LoadBalancerAccumulatedStatsResponse()
with _global_lock:
for method in _SUPPORTED_METHODS:
caps_method = _METHOD_CAMEL_TO_CAPS_SNAKE[method]
response.num_rpcs_started_by_method[
caps_method] = _global_rpcs_started[method]
response.num_rpcs_succeeded_by_method[
caps_method] = _global_rpcs_succeeded[method]
response.num_rpcs_failed_by_method[
caps_method] = _global_rpcs_failed[method]
response.stats_per_method[
caps_method].rpcs_started = _global_rpcs_started[method]
for code, count in _global_rpc_statuses[method].items():
response.stats_per_method[caps_method].result[code] = count
logger.info("Returning cumulative stats response.")
return response
def _start_rpc(method: str, metadata: Sequence[Tuple[str, str]],
request_id: int, stub: test_pb2_grpc.TestServiceStub,
timeout: float, futures: Mapping[int, Tuple[grpc.Future,
str]]) -> None:
logger.debug(f"Sending {method} request to backend: {request_id}")
if method == "UnaryCall":
future = stub.UnaryCall.future(messages_pb2.SimpleRequest(),
metadata=metadata,
timeout=timeout)
elif method == "EmptyCall":
future = stub.EmptyCall.future(empty_pb2.Empty(),
metadata=metadata,
timeout=timeout)
else:
raise ValueError(f"Unrecognized method '{method}'.")
futures[request_id] = (future, method)
def _on_rpc_done(rpc_id: int, future: grpc.Future, method: str,
print_response: bool) -> None:
exception = future.exception()
hostname = ""
_global_rpc_statuses[method][future.code().value[0]] += 1
if exception is not None:
with _global_lock:
_global_rpcs_failed[method] += 1
if exception.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
logger.error(f"RPC {rpc_id} timed out")
else:
logger.error(exception)
else:
response = future.result()
hostname = None
for metadatum in future.initial_metadata():
if metadatum[0] == "hostname":
hostname = metadatum[1]
break
else:
hostname = response.hostname
if future.code() == grpc.StatusCode.OK:
with _global_lock:
_global_rpcs_succeeded[method] += 1
else:
with _global_lock:
_global_rpcs_failed[method] += 1
if print_response:
if future.code() == grpc.StatusCode.OK:
logger.debug("Successful response.")
else:
logger.debug(f"RPC failed: {call}")
with _global_lock:
for watcher in _watchers:
watcher.on_rpc_complete(rpc_id, hostname, method)
def _remove_completed_rpcs(futures: Mapping[int, Tuple[grpc.Future, str]],
print_response: bool) -> None:
logger.debug("Removing completed RPCs")
done = []
for future_id, (future, method) in futures.items():
if future.done():
_on_rpc_done(future_id, future, method, print_response)
done.append(future_id)
for rpc_id in done:
del futures[rpc_id]
def _cancel_all_rpcs(futures: Mapping[int, Tuple[grpc.Future, str]]) -> None:
logger.info("Cancelling all remaining RPCs")
for future, _ in futures.values():
future.cancel()
class _ChannelConfiguration:
"""Configuration for a single client channel.
Instances of this class are meant to be dealt with as PODs. That is,
data members should be accessed directly. This class is not thread-safe;
when accessing any of its members, the condition member should be held.
"""
def __init__(self, method: str, metadata: Sequence[Tuple[str, str]],
qps: int, server: str, rpc_timeout_sec: int,
print_response: bool, secure_mode: bool):
# condition is signalled when a change is made to the config.
self.condition = threading.Condition()
self.method = method
self.metadata = metadata
self.qps = qps
self.server = server
self.rpc_timeout_sec = rpc_timeout_sec
self.print_response = print_response
self.secure_mode = secure_mode
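# Illustrative note (not part of the original client): writers such as the
# Configure() RPC below mutate these fields while holding config.condition and
# then call notify_all(); _run_single_channel() re-reads the fields under the
# same condition on every iteration, so setting qps to 0 effectively pauses a
# channel until the next configuration change arrives.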
def _run_single_channel(config: _ChannelConfiguration) -> None:
global _global_rpc_id # pylint: disable=global-statement
with config.condition:
server = config.server
channel = None
if config.secure_mode:
fallback_creds = grpc.experimental.insecure_channel_credentials()
channel_creds = grpc.xds_channel_credentials(fallback_creds)
channel = grpc.secure_channel(server, channel_creds)
else:
channel = grpc.insecure_channel(server)
with channel:
stub = test_pb2_grpc.TestServiceStub(channel)
futures: Dict[int, Tuple[grpc.Future, str]] = {}
while not _stop_event.is_set():
with config.condition:
if config.qps == 0:
config.condition.wait(
timeout=_CONFIG_CHANGE_TIMEOUT.total_seconds())
continue
else:
duration_per_query = 1.0 / float(config.qps)
request_id = None
with _global_lock:
request_id = _global_rpc_id
_global_rpc_id += 1
_global_rpcs_started[config.method] += 1
start = time.time()
end = start + duration_per_query
with config.condition:
_start_rpc(config.method, config.metadata, request_id, stub,
float(config.rpc_timeout_sec), futures)
with config.condition:
_remove_completed_rpcs(futures, config.print_response)
logger.debug(f"Currently {len(futures)} in-flight RPCs")
now = time.time()
while now < end:
time.sleep(end - now)
now = time.time()
_cancel_all_rpcs(futures)
class _XdsUpdateClientConfigureServicer(
test_pb2_grpc.XdsUpdateClientConfigureServiceServicer):
def __init__(self, per_method_configs: Mapping[str, _ChannelConfiguration],
qps: int):
super(_XdsUpdateClientConfigureServicer, self).__init__()
self._per_method_configs = per_method_configs
self._qps = qps
def Configure(
self, request: messages_pb2.ClientConfigureRequest,
context: grpc.ServicerContext
) -> messages_pb2.ClientConfigureResponse:
logger.info("Received Configure RPC: %s", request)
method_strs = [_METHOD_ENUM_TO_STR[t] for t in request.types]
for method in _SUPPORTED_METHODS:
method_enum = _METHOD_STR_TO_ENUM[method]
channel_config = self._per_method_configs[method]
if method in method_strs:
qps = self._qps
metadata = ((md.key, md.value)
for md in request.metadata
if md.type == method_enum)
# For backward compatibility, do not change timeout when we
# receive a default value timeout.
if request.timeout_sec == 0:
timeout_sec = channel_config.rpc_timeout_sec
else:
timeout_sec = request.timeout_sec
else:
qps = 0
metadata = ()
# Leave timeout unchanged for backward compatibility.
timeout_sec = channel_config.rpc_timeout_sec
with channel_config.condition:
channel_config.qps = qps
channel_config.metadata = list(metadata)
channel_config.rpc_timeout_sec = timeout_sec
channel_config.condition.notify_all()
return messages_pb2.ClientConfigureResponse()
class _MethodHandle:
"""An object grouping together threads driving RPCs for a method."""
_channel_threads: List[threading.Thread]
def __init__(self, num_channels: int,
channel_config: _ChannelConfiguration):
"""Creates and starts a group of threads running the indicated method."""
self._channel_threads = []
for i in range(num_channels):
thread = threading.Thread(target=_run_single_channel,
args=(channel_config,))
thread.start()
self._channel_threads.append(thread)
def stop(self) -> None:
"""Joins all threads referenced by the handle."""
for channel_thread in self._channel_threads:
channel_thread.join()
def _run(args: argparse.Namespace, methods: Sequence[str],
per_method_metadata: PerMethodMetadataType) -> None:
logger.info("Starting python xDS Interop Client.")
global _global_server # pylint: disable=global-statement
method_handles = []
channel_configs = {}
for method in _SUPPORTED_METHODS:
if method in methods:
qps = args.qps
else:
qps = 0
channel_config = _ChannelConfiguration(
method, per_method_metadata.get(method, []), qps, args.server,
args.rpc_timeout_sec, args.print_response, args.secure_mode)
channel_configs[method] = channel_config
method_handles.append(_MethodHandle(args.num_channels, channel_config))
_global_server = grpc.server(futures.ThreadPoolExecutor())
_global_server.add_insecure_port(f"0.0.0.0:{args.stats_port}")
test_pb2_grpc.add_LoadBalancerStatsServiceServicer_to_server(
_LoadBalancerStatsServicer(), _global_server)
test_pb2_grpc.add_XdsUpdateClientConfigureServiceServicer_to_server(
_XdsUpdateClientConfigureServicer(channel_configs, args.qps),
_global_server)
channelz.add_channelz_servicer(_global_server)
grpc_admin.add_admin_servicers(_global_server)
_global_server.start()
_global_server.wait_for_termination()
for method_handle in method_handles:
method_handle.stop()
def parse_metadata_arg(metadata_arg: str) -> PerMethodMetadataType:
metadata = metadata_arg.split(",") if metadata_arg else []
per_method_metadata = collections.defaultdict(list)
for metadatum in metadata:
elems = metadatum.split(":")
if len(elems) != 3:
raise ValueError(
f"'{metadatum}' was not in the form 'METHOD:KEY:VALUE'")
if elems[0] not in _SUPPORTED_METHODS:
raise ValueError(f"Unrecognized method '{elems[0]}'")
per_method_metadata[elems[0]].append((elems[1], elems[2]))
return per_method_metadata
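# Minimal sketch (not part of the original client) of the --metadata format
# parsed above; the key and value names are hypothetical:
#
#   parse_metadata_arg("EmptyCall:k1:v1,UnaryCall:k2:v2")
#   # -> {"EmptyCall": [("k1", "v1")], "UnaryCall": [("k2", "v2")]}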
def parse_rpc_arg(rpc_arg: str) -> Sequence[str]:
methods = rpc_arg.split(",")
if set(methods) - set(_SUPPORTED_METHODS):
raise ValueError("--rpc supported methods: {}".format(
", ".join(_SUPPORTED_METHODS)))
return methods
def bool_arg(arg: str) -> bool:
if arg.lower() in ("true", "yes", "y"):
return True
elif arg.lower() in ("false", "no", "n"):
return False
else:
raise argparse.ArgumentTypeError(f"Could not parse '{arg}' as a bool.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Run Python XDS interop client.')
parser.add_argument(
"--num_channels",
default=1,
type=int,
help="The number of channels from which to send requests.")
parser.add_argument("--print_response",
default="False",
type=bool_arg,
help="Write RPC response to STDOUT.")
parser.add_argument(
"--qps",
default=1,
type=int,
help="The number of queries to send from each channel per second.")
parser.add_argument("--rpc_timeout_sec",
default=30,
type=int,
help="The per-RPC timeout in seconds.")
parser.add_argument("--server",
default="localhost:50051",
help="The address of the server.")
parser.add_argument(
"--stats_port",
default=50052,
type=int,
help="The port on which to expose the peer distribution stats service.")
parser.add_argument(
"--secure_mode",
default="False",
type=bool_arg,
help="If specified, uses xDS credentials to connect to the server.")
parser.add_argument('--verbose',
help='verbose log output',
default=False,
action='store_true')
parser.add_argument("--log_file",
default=None,
type=str,
help="A file to log to.")
rpc_help = "A comma-delimited list of RPC methods to run. Must be one of "
rpc_help += ", ".join(_SUPPORTED_METHODS)
rpc_help += "."
parser.add_argument("--rpc", default="UnaryCall", type=str, help=rpc_help)
metadata_help = (
"A comma-delimited list of 3-tuples of the form " +
"METHOD:KEY:VALUE, e.g. " +
"EmptyCall:key1:value1,UnaryCall:key2:value2,EmptyCall:k3:v3")
parser.add_argument("--metadata", default="", type=str, help=metadata_help)
args = parser.parse_args()
signal.signal(signal.SIGINT, _handle_sigint)
if args.verbose:
logger.setLevel(logging.DEBUG)
if args.log_file:
file_handler = logging.FileHandler(args.log_file, mode='a')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
_run(args, parse_rpc_arg(args.rpc), parse_metadata_arg(args.metadata))
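# Example invocation (a sketch based only on the flags defined above; the
# script name and addresses are placeholders):
#
#   python xds_interop_client.py --server=localhost:50051 --stats_port=50052 \
#       --qps=5 --rpc=UnaryCall,EmptyCall --metadata=EmptyCall:k1:v1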
|
|
#!/usr/bin/env python
import tweepy
import time
import sys
import os
import random
import gspread
import logging
from configobj import ConfigObj
from subprocess import call
def find_col_or_none(name, wks):
"""A short function which returns None or the column cell. """
logger = logging.getLogger(__name__)
try:
x = wks.find(name).col
logger.debug('Column named "' + name + '" is number ' + str(x) + ".")
return x
except gspread.exceptions.CellNotFound:
logger.debug('Column named "' + name + '" not found.')
return None
class TwitterBot:
"""A class to make a twitter bot."""
def refresh_google_connection(self):
"""Simple function to refresh the google connection. """
self.gc = gspread.login(self.config['gspread']['username'],
self.config['gspread']['password'])
self.config_ss = self.gc.open(self.config_ss_name)
def __init__(self,
config_file=os.path.expanduser('~/.bslbot'),
config_ss_name="bslbot's brain",
logging_level=logging.DEBUG,
remote_media_dir="nasfarley88@nathanda.co.uk:Dropbox/vimeo_drop/gifs/",
local_media_dir="~/tmp/",
weighted_categories=None):
"Initiate twitter bot with appropriate config and logins."
logging.basicConfig(level=logging_level)
self.logger = logging.getLogger(__name__)
self.logger.debug('Fetching config file')
self.config = ConfigObj(config_file, unrepr=True)
self.logger.debug("Current config:\n" + str(self.config))
self.config_ss_name = config_ss_name
self.refresh_google_connection()
# self.gc = gspread.login(self.config['gspread']['username'],
# self.config['gspread']['password'])
# self.config_ss = self.gc.open(self.config_ss_name)
self._tweepy_auth = tweepy.OAuthHandler(
self.config['authentication']['consumer_key'],
self.config['authentication']['consumer_secret'])
self._tweepy_auth.set_access_token(
self.config['authentication']['access_key'],
self.config['authentication']['access_secret'])
self._tweepy_api = tweepy.API(self._tweepy_auth)
self.remote_media_dir = remote_media_dir
self.local_media_dir = local_media_dir
# TODO: Add some check that the twitter api has connected correctly.
self.weighted_categories = weighted_categories or [
('SelfPromotion', 1.0/100),
('Advice', 10.0/100),
('BSLDictionary', 89.0/100)]
def _print_tweet(self, tweet, media=None):
"""Prints the tweet to stdout. """
self.logger.info('Tweet: ' + str(tweet))
self.logger.info('Media: ' + str(media))
def _tweet_tweet(self, tweet, media=None):
"""Tweets the tweet."""
if media is None:
self.logger.info('Tweeting...')
self._tweepy_api.update_status(status=tweet)
else:
try:
self.logger.info('Attempting to scp ' + media)
scp_return = call('scp ' + self.remote_media_dir + media +
' ' + self.local_media_dir,
shell=True)
assert scp_return == 0, "scp returned non-zero value: " + str(scp_return)
assert os.path.isfile(os.path.expanduser(self.local_media_dir+media)),\
self.local_media_dir+media + " does not exist."
self._tweepy_api.update_with_media(
filename=self.local_media_dir+media,
status=tweet)
self.logger.info('Attempting to rm ' + media)
rm_return = call('rm ' + self.local_media_dir + media, shell=True)
self.logger.info('rm return status: ' + str(rm_return))
except AssertionError as e:
self.logger.warning('Caught an assertion error: ' + str(e))
self.logger.info('Tweeting without media')
self._tweepy_api.update_status(status=tweet)
def print_or_tweet(self, tweet, media=None):
"""Simple function that prints or tweets based on the config file. """
if self.config['misc']['printortweet'] == 'print':
self._print_tweet(tweet, media)
elif self.config['misc']['printortweet'] == 'tweet':
self._tweet_tweet(tweet, media)
def _choose_category(self):
"""Shamelessly stolen from
http://stackoverflow.com/questions/3679694/a-weighted-version-of-random-choice.
"""
total = sum(w for c, w in self.weighted_categories)
r = random.uniform(0, total)
upto = 0
for c, w in self.weighted_categories:
if upto + w > r:
return c
upto += w
assert False, "Shouldn't get here"
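# Worked example (a sketch, not from the original source): with the default
# weights above the total is 1.0, so r is drawn from [0, 1.0). For r = 0.5 the
# loop passes 'SelfPromotion' (upto 0.01) and 'Advice' (upto 0.11) and returns
# 'BSLDictionary', which therefore wins roughly 89% of the time.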
def choose_tweet_from_category(self, category):
"""Fetch tweet and media from spreadsheet. """
# Refresh the connection to google sheets
self.refresh_google_connection()
wks = self.config_ss.worksheet(category)
# TODO: I don't like this, fetching all the values is inefficient.
wks_list = wks.get_all_values()
tweet_cell_col = wks.find("Tweet").col
no_of_times_tweeted_cell_col = wks.find("No of times tweeted").col
link_cell_col = find_col_or_none("Link", wks)
media_cell_col = find_col_or_none("Media", wks)
# Remove the titles
wks_list.pop(0)
lowest_no_of_times_tweeted = 9001
candidates_for_tweeting = []
for i in wks_list:
# -1 in the next line because python counts from 0, spreadsheets count
# from 1
no_of_times_tweeted = int(i[no_of_times_tweeted_cell_col-1])
if no_of_times_tweeted < lowest_no_of_times_tweeted:
# if this tweet has the lowest no_of_times_tweeted so far dump the
# rest of them and start the list with this one
lowest_no_of_times_tweeted = no_of_times_tweeted
logging.debug("lowest_no_of_times_tweeted reduced to "+str(no_of_times_tweeted))
# Start the list again with the current tweet
candidates_for_tweeting = [ i ]
elif no_of_times_tweeted == lowest_no_of_times_tweeted:
# otherwise if it's equally untweeted, carry on and add this one to
# the list
candidates_for_tweeting.append(i)
chosen_tweet = random.choice(candidates_for_tweeting)
cell_for_chosen_tweet = wks.find(chosen_tweet[tweet_cell_col-1])
tweet_to_return = wks.cell(cell_for_chosen_tweet.row, tweet_cell_col).value + \
" " + self.config['misc']['signature']
if link_cell_col is not None:
tweet_to_return += "\n" + wks.cell(cell_for_chosen_tweet.row, link_cell_col).value
self.logger.debug("Cell: " + str(wks.cell(cell_for_chosen_tweet.row, 5)))
self.logger.debug("Cell value: " + str(wks.cell(cell_for_chosen_tweet.row, 5).value))
current_no_of_times_tweeeted = int( wks.cell( cell_for_chosen_tweet.row,
no_of_times_tweeted_cell_col ).value )
# Update the number of times tweeted
wks.update_cell( cell_for_chosen_tweet.row,
no_of_times_tweeted_cell_col,
current_no_of_times_tweeeted + 1)
if media_cell_col is None:
return (tweet_to_return, None)
else:
return (tweet_to_return,
wks.cell(cell_for_chosen_tweet.row, media_cell_col).value)
def tweet_for_self(self, delay=None):
if delay is None:
delay = random.randint(1, self.config['misc']['max_delay'])
time.sleep(delay)
chosen_tweet, chosen_media = self.choose_tweet_from_category(self._choose_category())
self.print_or_tweet(chosen_tweet, media=chosen_media)
def auto_follow_back(self):
"""Follow back people automatically. """
for follower in tweepy.Cursor(self._tweepy_api.followers).items():
if follower._json['protected'] is False:
follower.follow()
if __name__ == '__main__':
bot = TwitterBot()
# bot.auto_follow_back()
bot.tweet_for_self()
|
|
#!/usr/bin/env python
"""
Logistic regression trained with stochastic gradient descent,
based on a tutorial from deeplearning.net.
"""
__docformat__ = 'restructedtext en'
import cPickle
import gzip
import os
import sys
import time
import csv
import numpy as np
import IPython as ipy
import pandas as pd
import theano
import theano.tensor as T
sys.path.append('./deeplearning')
from logistic_regression import LogisticRegression
from load_minst_data import load_data, shared_dataset
def load_my_data():
#############
# LOAD DATA #
#############
print '... loading data'
# Training Data
training_data_path = "../data/train.csv"
train_df = pd.read_csv(training_data_path, header=0)
train_data = train_df.drop(["label"], axis=1).values
train_target = train_df["label"].values
# Validation Data
valid_count = 4000 # Take ~10% for validation
valid_data = train_data[0:valid_count]
valid_target = train_target[0:valid_count]
train_data = train_data[valid_count:]
train_target = train_target[valid_count:]
# Test Data
test_data_path = "../data/test.csv"
test_df = pd.read_csv(test_data_path, header=0)
test_data = test_df.values
test_target = test_df.index + 1
train_set = (train_data, train_target)
valid_set = (valid_data, valid_target)
test_set = (test_data, test_target)
# make the data theano shared datasets
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
batch_size=600):
"""
Demonstrate stochastic gradient descent optimization of a log-linear
model. This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type batch_size: int
:param batch_size: size of each training minibatch; the data itself is
loaded via load_my_data()
"""
datasets = load_my_data()
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
###############
# TRAIN MODEL #
###############
print '... training the model'
# early-stopping parameters
patience = 5000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = np.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(
(
'Optimization complete with best validation score of %f %%'
)
% (best_validation_loss * 100.)
)
print 'The code ran for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time)))
print ('Predicting class of test sets...')
test_model = theano.function(inputs=[], outputs=classifier.y_pred, givens={x:test_set_x})
output = test_model()
prediction_filepath = "../prediction/logistic_regression.csv"
prediction_dir = os.path.dirname(prediction_filepath)
if not os.path.exists(prediction_dir):
os.makedirs(prediction_dir)
predictions_file = open(prediction_filepath, "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["ImageId","Label"])
open_file_object.writerows(zip(test_set_y.eval(), output))
predictions_file.close()
if __name__ == '__main__':
sgd_optimization_mnist()
|
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Utility functions for saving various types of objects state.
"""
import logging
import os
import yaml
from neon.util.compat import pickle
logger = logging.getLogger(__name__)
def ensure_dirs_exist(path):
"""
Simple helper that ensures that any directories specified in the path are
created prior to use.
Arguments:
path (str): the path (may be to a file or directory). Any intermediate
directories will be created.
Returns:
str: The unmodified path value.
"""
outdir = os.path.dirname(path)
if outdir != '' and not os.path.isdir(outdir):
os.makedirs(outdir)
return path
def convert_scalar_node(val):
"""
Helper to extract and return the appropriately typed value of a ScalarNode
object.
Arguments:
val: (yaml.nodes.ScalarNode): object to extract value from
Returns:
float, int, string: the actual value
"""
if not isinstance(val, yaml.nodes.ScalarNode):
return val
if val.tag.endswith("int"):
return int(val.value)
elif val.tag.endswith("float"):
return float(val.value)
else:
# assume a string
return val.value
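# Illustrative sketch (not part of the original module): a ScalarNode whose
# tag ends in "int" and whose value is "3" comes back as the int 3, a "float"
# tag yields a float, and any other tag (including plain strings) is returned
# unchanged as its string value.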
def extract_child_node_vals(node, keys):
"""
Helper to iterate through the immediate children of the yaml node object
passed, looking for the key values specified.
Arguments:
node (yaml.nodes.Node): the parent node upon which to begin the search
keys (list): set of strings indicating the child keys we want to
extract corresponding values for.
Returns:
dict: with one item for each key. value is value found in search for
that key, or None if not found.
"""
res = dict()
for child in node.value:
# child node values are two element tuples, where the first is a scalar
# node, and the second can be other types of nodes.
tag = child[0].value
if isinstance(child[1], yaml.nodes.ScalarNode):
val = convert_scalar_node(child[1])
elif isinstance(child[1], yaml.nodes.SequenceNode):
val = [convert_scalar_node(x) for x in child[1].value]
elif isinstance(child[1], yaml.nodes.MappingNode):
val = dict()
for item in child[1].value:
val[item[0].value] = convert_scalar_node(item[1])
else:
logger.warning("unknown node type: %s, ignoring tag %s",
str(type(child[1])), tag)
val = None
for key in keys:
if tag == key:
res[key] = val
for key in keys:
if key not in res:
res[key] = None
return res
def obj_multi_constructor(loader, tag_suffix, node):
"""
Utility function used to actually import and generate a new class instance
from its name and parameters
Arguments:
loader (yaml.loader.SafeLoader): carries out actual loading
tag_suffix (str): The latter portion of the tag, representing the full
module and class name of the object being
instantiated.
node (yaml.MappingNode): tag/value set specifying the parameters
required for constructing new objects of this
type
"""
# extract class name and import the necessary module.
parts = tag_suffix.split('.')
module = '.'.join(parts[:-1])
try:
cls = __import__(module)
except ImportError as err:
# we allow a shortcut syntax that skips neon. from import path, try
# again with this prepended
if parts[0] != "neon":
parts.insert(0, "neon")
module = '.'.join(parts[:-1])
cls = __import__(module)
if 'datasets' in parts:
# clear any previous datasets loaded with a different backend
cls.datasets.dataset.Dataset.inputs = {
'train': None, 'test': None, 'validation': None}
cls.datasets.dataset.Dataset.targets = {
'train': None, 'test': None, 'validation': None}
else:
raise err
for comp in parts[1:]:
cls = getattr(cls, comp)
# need to create a new object
try:
res = cls(**loader.construct_mapping(node, deep=True))
except TypeError as e:
logger.warning("Unable to construct '%s' instance. Error: %s",
cls.__name__, e.message)
res = None
return res
def initialize_yaml():
yaml.add_multi_constructor('!obj:', obj_multi_constructor,
yaml.loader.SafeLoader)
def deserialize(load_path, verbose=True):
"""
Converts a serialized object into a python data structure. We currently
support reading from the following file formats (expected filename
extension in brackets):
* python pickle (.pkl)
* YAML (.yaml)
Arguments:
load_path (str, File): path and name of the serialized on-disk file to
load (or an already loaded file object).
The format to load is inferred from the filename
extension. If no extension is given, pickle format
is attempted.
Returns:
object: Converted in-memory python data structure.
See Also:
serialize
"""
if isinstance(load_path, str):
load_path = open(os.path.expandvars(os.path.expanduser(load_path)))
fname = load_path.name
if verbose:
logger.warn("deserializing object from: %s", fname)
if (fname.lower().endswith('.yaml') or fname.lower().endswith('.yml')):
initialize_yaml()
return yaml.safe_load(load_path)
else:
try:
return pickle.load(load_path)
except AttributeError:
msg = ("Problems deserializing: %s. Its possible the interface "
"for this object has changed since being serialized. You "
"may need to remove and recreate it." % load_path)
logger.error(msg)
raise AttributeError(msg)
def serialize(obj, save_path, verbose=True):
"""
Dumps a python data structure to a saved on-disk representation. We
currently support writing to the following file formats (expected filename
extension in brackets):
* python pickle (.pkl)
Arguments:
obj (object): the python object to be saved.
save_path (str): Where to write the serialized object (full path and
file name)
See Also:
deserialize
"""
if save_path is None or len(save_path) == 0:
return
save_path = os.path.expandvars(os.path.expanduser(save_path))
if verbose:
logger.warn("serializing object to: %s", save_path)
ensure_dirs_exist(save_path)
pickle.dump(obj, open(save_path, 'wb'), -1)
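# Minimal usage sketch (not part of the original module; the file names are
# hypothetical):
#
#   serialize(model, "snapshots/model.pkl")       # pickled, dirs created
#   model = deserialize("snapshots/model.pkl")    # read back via pickle
#   cfg = deserialize("experiments/net.yaml")     # parsed with the !obj: loader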
class YAMLable(yaml.YAMLObject):
"""
Base class for any objects we'd like to be able to safely parse from yaml
configuration streams (or dump a suitable representation back out to such a
stream).
"""
yaml_loader = yaml.SafeLoader
|
|
import pytest
from conans.model.build_info import CppInfo
from conans.model.new_build_info import NewCppInfo, _DIRS_VAR_NAMES, _FIELD_VAR_NAMES, \
fill_old_cppinfo, from_old_cppinfo
def test_components_order():
cppinfo = NewCppInfo()
cppinfo.components["c1"].requires = ["c4", "OtherPackage::OtherComponent2"]
cppinfo.components["c2"].requires = ["OtherPackage::OtherComponent"]
cppinfo.components["c3"].requires = ["c2"]
cppinfo.components["c4"].requires = ["c3"]
sorted_c = list(cppinfo.get_sorted_components().keys())
assert sorted_c == ["c2", "c3", "c4", "c1"]
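# The expected order reflects the declared requirements: c3 requires c2,
# c4 requires c3 and c1 requires c4 (the external "OtherPackage::..." requires
# are not local components, so they do not appear in the result). The
# least-depended-upon component c2 therefore sorts first and c1, which
# transitively requires all the others, comes last.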
def test_component_aggregation():
cppinfo = NewCppInfo()
cppinfo.includedirs = ["includedir"]
cppinfo.libdirs = ["libdir"]
cppinfo.srcdirs = ["srcdir"]
cppinfo.bindirs = ["bindir"]
cppinfo.builddirs = ["builddir"]
cppinfo.frameworkdirs = ["frameworkdir"]
cppinfo.set_property("foo", "bar")
cppinfo.components["c2"].includedirs = ["includedir_c2"]
cppinfo.components["c2"].libdirs = ["libdir_c2"]
cppinfo.components["c2"].srcdirs = ["srcdir_c2"]
cppinfo.components["c2"].bindirs = ["bindir_c2"]
cppinfo.components["c2"].builddirs = ["builddir_c2"]
cppinfo.components["c2"].frameworkdirs = ["frameworkdir_c2"]
cppinfo.components["c2"].cxxflags = ["cxxflags_c2"]
cppinfo.components["c2"].defines = ["defines_c2"]
cppinfo.components["c2"].set_property("my_foo", ["bar", "bar2"])
cppinfo.components["c2"].set_property("cmake_build_modules", ["build_module_c2",
"build_module_c22"])
cppinfo.components["c1"].requires = ["c2", "LIB_A::C1"]
cppinfo.components["c1"].includedirs = ["includedir_c1"]
cppinfo.components["c1"].libdirs = ["libdir_c1"]
cppinfo.components["c1"].srcdirs = ["srcdir_c1"]
cppinfo.components["c1"].bindirs = ["bindir_c1"]
cppinfo.components["c1"].builddirs = ["builddir_c1"]
cppinfo.components["c1"].frameworkdirs = ["frameworkdir_c1"]
cppinfo.components["c1"].cxxflags = ["cxxflags_c1"]
cppinfo.components["c1"].defines = ["defines_c1"]
cppinfo.components["c1"].set_property("my_foo", "jander")
cppinfo.components["c1"].set_property("my_foo2", "bar2")
ret = cppinfo.aggregated_components()
assert ret.get_property("foo") == "bar"
assert ret.includedirs == ["includedir_c1", "includedir_c2"]
assert ret.libdirs == ["libdir_c1", "libdir_c2"]
assert ret.srcdirs == ["srcdir_c1", "srcdir_c2"]
assert ret.bindirs == ["bindir_c1", "bindir_c2"]
assert ret.builddirs == ["builddir_c1", "builddir_c2"]
assert ret.frameworkdirs == ["frameworkdir_c1", "frameworkdir_c2"]
assert ret.cxxflags == ["cxxflags_c1", "cxxflags_c2"]
assert ret.defines == ["defines_c1", "defines_c2"]
# The properties are not aggregated because we cannot generalize the meaning of a property
# that belongs to a component, it could make sense to aggregate it or not, "cmake_target_name"
# for example, cannot be aggregated. But "cmake_build_modules" is aggregated.
assert ret.get_property("my_foo") is None
assert ret.get_property("my_foo2") is None
assert ret.get_property("cmake_build_modules") is None
# If we change the internal graph the order is different
cppinfo.components["c1"].requires = []
cppinfo.components["c2"].requires = ["c1"]
cppinfo._aggregated = None # Dirty, just to force recomputation
ret = cppinfo.aggregated_components()
assert ret.includedirs == ["includedir_c2", "includedir_c1"]
assert ret.libdirs == ["libdir_c2", "libdir_c1"]
assert ret.srcdirs == ["srcdir_c2", "srcdir_c1"]
assert ret.bindirs == ["bindir_c2", "bindir_c1"]
assert ret.builddirs == ["builddir_c2", "builddir_c1"]
assert ret.frameworkdirs == ["frameworkdir_c2", "frameworkdir_c1"]
def test_cpp_info_sysroot_merge():
# If the value was already set, it is kept in the merge
one = NewCppInfo()
one.sysroot = "sys1"
two = NewCppInfo()
two.sysroot = "sys2"
one.merge(two)
assert one.sysroot == "sys1"
# If the value was not set it is assigned
one = NewCppInfo()
two = NewCppInfo()
two.sysroot = "sys2"
one.merge(two)
assert one.sysroot == "sys2"
@pytest.mark.parametrize("aggregate_first", [True, False])
def test_cpp_info_merge_aggregating_components_first(aggregate_first):
cppinfo = NewCppInfo()
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
setattr(cppinfo.components["foo"], n, ["var_{}_1".format(n), "var_{}_2".format(n)])
setattr(cppinfo.components["foo2"], n, ["var2_{}_1".format(n), "var2_{}_2".format(n)])
cppinfo.components["foo"].requires = ["foo2"] # Deterministic order
other = NewCppInfo()
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
setattr(other.components["boo"], n, ["jar_{}_1".format(n), "jar_{}_2".format(n)])
setattr(other.components["boo2"], n, ["jar2_{}_1".format(n), "jar2_{}_2".format(n)])
other.components["boo"].requires = ["boo2"] # Deterministic order
if aggregate_first:
cppinfo = cppinfo.aggregated_components()
other = other.aggregated_components()
cppinfo.merge(other)
if aggregate_first:
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
assert getattr(cppinfo, n) == ["var_{}_1".format(n), "var_{}_2".format(n),
"var2_{}_1".format(n), "var2_{}_2".format(n),
"jar_{}_1".format(n), "jar_{}_2".format(n),
"jar2_{}_1".format(n), "jar2_{}_2".format(n)]
else:
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
assert getattr(cppinfo.components["foo"], n) == ["var_{}_1".format(n),
"var_{}_2".format(n)]
assert getattr(cppinfo.components["foo2"], n) == ["var2_{}_1".format(n),
"var2_{}_2".format(n)]
assert getattr(cppinfo.components["boo"], n) == ["jar_{}_1".format(n),
"jar_{}_2".format(n)]
assert getattr(cppinfo.components["boo2"], n) == ["jar2_{}_1".format(n),
"jar2_{}_2".format(n)]
assert getattr(cppinfo, n) is None
def test_from_old_cppinfo_components():
oldcppinfo = CppInfo("ref", "/root/")
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
setattr(oldcppinfo.components["foo"], n, ["var_{}_1".format(n), "var_{}_2".format(n)])
setattr(oldcppinfo.components["foo2"], n, ["var2_{}_1".format(n), "var2_{}_2".format(n)])
oldcppinfo.components["foo"].requires = ["my_req::my_component"]
# The names and filenames are not copied to the new model
oldcppinfo.components["foo"].names["Gen"] = ["MyName"]
oldcppinfo.filenames["Gen"] = ["Myfilename"]
oldcppinfo.components["foo"].build_modules = \
{"cmake_find_package_multi": ["foo_my_scripts.cmake"],
"cmake_find_package": ["foo.cmake"]}
oldcppinfo.components["foo2"].build_modules = \
{"cmake_find_package_multi": ["foo2_my_scripts.cmake"]}
cppinfo = from_old_cppinfo(oldcppinfo)
assert isinstance(cppinfo, NewCppInfo)
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
assert getattr(cppinfo.components["foo"], n) == ["var_{}_1".format(n),
"var_{}_2".format(n)]
assert getattr(cppinfo.components["foo2"], n) == ["var2_{}_1".format(n),
"var2_{}_2".format(n)]
# The .build_modules are assigned to the root cppinfo because it is something
# global that makes no sense to set as a component property
assert cppinfo.components["foo"].get_property("cmake_build_modules") is None
assert cppinfo.components["foo"].requires == ["my_req::my_component"]
assert cppinfo.components["foo2"].get_property("cmake_build_modules") is None
assert cppinfo.get_property("cmake_build_modules") is None
def test_from_old_cppinfo_no_components():
oldcppinfo = CppInfo("ref", "/root/")
oldcppinfo.requires = ["my_req::my_component"]
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
setattr(oldcppinfo, n, ["var_{}_1".format(n), "var_{}_2".format(n)])
oldcppinfo.build_modules = {"cmake_find_package": ["my_scripts.cmake", "foo.cmake"],
"cmake_find_package_multi": ["my_scripts.cmake", "foo2.cmake"]}
cppinfo = from_old_cppinfo(oldcppinfo)
assert isinstance(cppinfo, NewCppInfo)
for n in _DIRS_VAR_NAMES + _FIELD_VAR_NAMES:
assert getattr(cppinfo, n) == ["var_{}_1".format(n), "var_{}_2".format(n)]
assert cppinfo.get_property("cmake_build_modules") is None
assert cppinfo.requires == ["my_req::my_component"]
def test_fill_old_cppinfo():
"""The source/build have priority unless it is not declared at all"""
source = NewCppInfo()
source.libdirs = ["source_libdir"]
source.cxxflags = ["source_cxxflags"]
build = NewCppInfo()
build.libdirs = ["build_libdir"]
build.frameworkdirs = []  # An empty list is an explicit declaration with priority too
build.set_property("cmake_build_modules", ["my_cmake.cmake"])
build.builddirs = ["my_build"]
old_cpp = CppInfo("lib/1.0", "/root/folder")
old_cpp.filter_empty = False
old_cpp.libdirs = ["package_libdir"]
old_cpp.cxxflags = ["package_cxxflags"]
old_cpp.cflags = ["package_cflags"]
old_cpp.frameworkdirs = ["package_frameworks"]
full_editables = NewCppInfo()
full_editables.merge(source)
full_editables.merge(build)
fill_old_cppinfo(full_editables, old_cpp)
assert [e.replace("\\", "/") for e in old_cpp.lib_paths] == \
["/root/folder/source_libdir", "/root/folder/build_libdir"]
assert old_cpp.cxxflags == ["source_cxxflags"]
assert old_cpp.cflags == ["package_cflags"]
assert old_cpp.frameworkdirs == []
assert old_cpp.get_property("cmake_build_modules")
assert old_cpp.builddirs == ["my_build"]
def test_fill_old_cppinfo_simple():
""" The previous test but simpler, just with one cppinfo simulating the package layout"""
package_info = NewCppInfo()
package_info.libs = [] # This is explicit declaration too
package_info.includedirs = ["other_include"]
old_cpp = CppInfo("lib/1.0", "/root/folder")
old_cpp.filter_empty = False
old_cpp.libs = ["this_is_discarded"]
old_cpp.libdirs = ["package_libdir"]
old_cpp.cxxflags = ["package_cxxflags"]
old_cpp.cflags = ["package_cflags"]
old_cpp.frameworkdirs = ["package_frameworks"]
fill_old_cppinfo(package_info, old_cpp)
assert [e.replace("\\", "/") for e in old_cpp.lib_paths] == \
["/root/folder/package_libdir"]
assert old_cpp.cxxflags == ["package_cxxflags"]
assert old_cpp.cflags == ["package_cflags"]
assert old_cpp.frameworkdirs == ["package_frameworks"]
assert old_cpp.libs == []
assert old_cpp.includedirs == ["other_include"]
|
|
"""Annotation and rtyping support for the result of os.stat(), os.lstat()
and os.fstat(). In RPython like in plain Python the stat result can be
indexed like a tuple but also exposes the st_xxx attributes.
"""
import os, sys
from rpython.flowspace.model import Constant
from rpython.flowspace.operation import op
from rpython.annotator import model as annmodel
from rpython.rtyper import extregistry
from rpython.tool.pairtype import pairtype
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rtyper.rmodel import Repr
from rpython.rtyper.rint import IntegerRepr
from rpython.rtyper.error import TyperError
from rpython.rlib._os_support import _preferred_traits, string_traits
from rpython.rlib.objectmodel import specialize, we_are_translated, not_rpython
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.rposix import (
replace_os_function, handle_posix_error, _as_bytes0)
from rpython.rlib import rposix
_WIN32 = sys.platform.startswith('win')
_LINUX = sys.platform.startswith('linux')
if _WIN32:
from rpython.rlib import rwin32
from rpython.rlib.rwin32file import make_win32_traits
# Support for float times is here.
# - ALL_STAT_FIELDS contains Float fields if the system can retrieve
# sub-second timestamps.
# - TIMESPEC is defined when the "struct stat" contains st_atim field.
if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'):
from rpython.rlib.rposix import TIMESPEC
else:
TIMESPEC = None
# all possible fields - some of them are not available on all platforms
ALL_STAT_FIELDS = [
("st_mode", lltype.Signed),
("st_ino", lltype.SignedLongLong),
("st_dev", lltype.SignedLongLong),
("st_nlink", lltype.Signed),
("st_uid", lltype.Signed),
("st_gid", lltype.Signed),
("st_size", lltype.SignedLongLong),
("st_atime", lltype.SignedLongLong), # integral number of seconds
("st_mtime", lltype.SignedLongLong), #
("st_ctime", lltype.SignedLongLong), #
("st_blksize", lltype.Signed),
("st_blocks", lltype.Signed),
("st_rdev", lltype.Signed),
("st_flags", lltype.Signed),
#("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented
#("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented
("nsec_atime", lltype.Signed), # number of nanoseconds
("nsec_mtime", lltype.Signed), #
("nsec_ctime", lltype.Signed), #
]
N_INDEXABLE_FIELDS = 10
# For OO backends, expose only the portable fields (the first 10).
PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS]
STATVFS_FIELDS = [
("f_bsize", lltype.Signed),
("f_frsize", lltype.Signed),
("f_blocks", lltype.Signed),
("f_bfree", lltype.Signed),
("f_bavail", lltype.Signed),
("f_files", lltype.Signed),
("f_ffree", lltype.Signed),
("f_favail", lltype.Signed),
("f_flag", lltype.Signed),
("f_namemax", lltype.Signed),
]
@specialize.arg(1)
def get_stat_ns_as_bigint(st, name):
"""'name' is one of the strings "atime", "mtime" or "ctime".
Returns a bigint that represents the number of nanoseconds
stored inside the RPython-level os.stat_result 'st'.
Note that when running untranslated, the os.stat_result type
is from Python 2.7, which doesn't store more precision than
a float anyway. You will only get more after translation.
"""
from rpython.rlib.rbigint import rbigint
if not we_are_translated():
as_float = getattr(st, "st_" + name)
return rbigint.fromfloat(as_float * 1e9)
if name == "atime":
i, j = 7, -3
elif name == "mtime":
i, j = 8, -2
elif name == "ctime":
i, j = 9, -1
else:
raise AssertionError(name)
sec = st[i]
nsec = st[j]
result = rbigint.fromrarith_int(sec).int_mul(1000000000)
result = result.int_add(nsec)
return result
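# Illustrative sketch (not part of the original module): after translation the
# indexed fields give, for "mtime", sec = st[8] and nsec = st[-2], so the
# returned bigint equals sec * 1000000000 + nsec. When running untranslated,
# only float precision is available, hence the rbigint.fromfloat(value * 1e9)
# fallback above.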
# ____________________________________________________________
#
# Annotation support
class SomeStatResult(annmodel.SomeObject):
knowntype = os.stat_result
def rtyper_makerepr(self, rtyper):
return StatResultRepr(rtyper)
def rtyper_makekey(self):
return self.__class__,
def getattr(self, s_attr):
if not s_attr.is_constant():
raise annmodel.AnnotatorError("non-constant attr name in getattr()")
attrname = s_attr.const
if attrname in ('st_atime', 'st_mtime', 'st_ctime'):
# like CPython, in RPython we can read the st_Xtime
# attribute and get a floating-point result. We can also
# get a full-precision bigint with get_stat_ns_as_bigint().
# The floating-point result is computed like a property
# by _ll_get_st_Xtime().
TYPE = lltype.Float
else:
TYPE = STAT_FIELD_TYPES[attrname]
return lltype_to_annotation(TYPE)
def _get_rmarshall_support_(self): # for rlib.rmarshal
# reduce and recreate stat_result objects from 10-tuples
# (we ignore the extra values here for simplicity and portability)
def stat_result_reduce(st):
return (st[0], st[1], st[2], st[3], st[4],
st[5], st[6], st.st_atime, st.st_mtime, st.st_ctime)
def stat_result_recreate(tup):
atime, mtime, ctime = tup[7:]
result = tup[:7]
result += (int(atime), int(mtime), int(ctime))
result += extra_zeroes
result += (int((atime - result[7]) * 1e9),
int((mtime - result[8]) * 1e9),
int((ctime - result[9]) * 1e9))
return make_stat_result(result)
s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE)
for name, TYPE in PORTABLE_STAT_FIELDS[:7]]
+ 3 * [lltype_to_annotation(lltype.Float)])
extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS) - 3)
return s_reduced, stat_result_reduce, stat_result_recreate
class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)):
def getitem((s_sta, s_int)):
assert s_int.is_constant(), "os.stat()[index]: index must be constant"
index = s_int.const
assert -3 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range"
name, TYPE = STAT_FIELDS[index]
return lltype_to_annotation(TYPE)
class StatResultRepr(Repr):
def __init__(self, rtyper):
self.rtyper = rtyper
self.stat_field_indexes = {}
for i, (name, TYPE) in enumerate(STAT_FIELDS):
self.stat_field_indexes[name] = i
self.s_tuple = annmodel.SomeTuple(
[lltype_to_annotation(TYPE) for name, TYPE in STAT_FIELDS])
self.r_tuple = rtyper.getrepr(self.s_tuple)
self.lowleveltype = self.r_tuple.lowleveltype
def redispatch_getfield(self, hop, index):
rtyper = self.rtyper
s_index = rtyper.annotator.bookkeeper.immutablevalue(index)
hop2 = hop.copy()
spaceop = op.getitem(hop.args_v[0], Constant(index))
spaceop.result = hop.spaceop.result
hop2.spaceop = spaceop
hop2.args_v = spaceop.args
hop2.args_s = [self.s_tuple, s_index]
hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)]
return hop2.dispatch()
def rtype_getattr(self, hop):
s_attr = hop.args_s[1]
attr = s_attr.const
if attr in ('st_atime', 'st_mtime', 'st_ctime'):
ll_func = globals()['_ll_get_' + attr]
v_tuple = hop.inputarg(self, arg=0)
return hop.gendirectcall(ll_func, v_tuple)
try:
index = self.stat_field_indexes[attr]
except KeyError:
raise TyperError("os.stat().%s: field not available" % (attr,))
return self.redispatch_getfield(hop, index)
@specialize.memo()
def _stfld(name):
index = STAT_FIELD_NAMES.index(name)
return 'item%d' % index
def _ll_get_st_atime(tup):
return (float(getattr(tup, _stfld("st_atime"))) +
1E-9 * getattr(tup, _stfld("nsec_atime")))
def _ll_get_st_mtime(tup):
return (float(getattr(tup, _stfld("st_mtime"))) +
1E-9 * getattr(tup, _stfld("nsec_mtime")))
def _ll_get_st_ctime(tup):
return (float(getattr(tup, _stfld("st_ctime"))) +
1E-9 * getattr(tup, _stfld("nsec_ctime")))
class __extend__(pairtype(StatResultRepr, IntegerRepr)):
def rtype_getitem((r_sta, r_int), hop):
s_int = hop.args_s[1]
index = s_int.const
if index < 0:
index += len(STAT_FIELDS)
return r_sta.redispatch_getfield(hop, index)
s_StatResult = SomeStatResult()
@not_rpython
def make_stat_result(tup):
"""Turn a tuple into an os.stat_result object."""
assert len(tup) == len(STAT_FIELDS)
assert float not in [type(x) for x in tup]
positional = []
for i in range(N_INDEXABLE_FIELDS):
name, TYPE = STAT_FIELDS[i]
value = lltype.cast_primitive(TYPE, tup[i])
positional.append(value)
kwds = {}
kwds['st_atime'] = tup[7] + 1e-9 * tup[-3]
kwds['st_mtime'] = tup[8] + 1e-9 * tup[-2]
kwds['st_ctime'] = tup[9] + 1e-9 * tup[-1]
for value, (name, TYPE) in zip(tup, STAT_FIELDS)[N_INDEXABLE_FIELDS:]:
if name.startswith('nsec_'):
continue # ignore the nsec_Xtime here
kwds[name] = lltype.cast_primitive(TYPE, value)
return os.stat_result(positional, kwds)
class MakeStatResultEntry(extregistry.ExtRegistryEntry):
_about_ = make_stat_result
def compute_result_annotation(self, s_tup):
return s_StatResult
def specialize_call(self, hop):
r_StatResult = hop.rtyper.getrepr(s_StatResult)
[v_result] = hop.inputargs(r_StatResult.r_tuple)
# no-op conversion from r_StatResult.r_tuple to r_StatResult
hop.exception_cannot_occur()
return v_result
class SomeStatvfsResult(annmodel.SomeObject):
if hasattr(os, 'statvfs_result'):
knowntype = os.statvfs_result
else:
knowntype = None # will not be used
def rtyper_makerepr(self, rtyper):
return StatvfsResultRepr(rtyper)
def rtyper_makekey(self):
return self.__class__,
def getattr(self, s_attr):
assert s_attr.is_constant()
TYPE = STATVFS_FIELD_TYPES[s_attr.const]
return lltype_to_annotation(TYPE)
class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)):
def getitem((s_stat, s_int)):
assert s_int.is_constant()
name, TYPE = STATVFS_FIELDS[s_int.const]
return lltype_to_annotation(TYPE)
s_StatvfsResult = SomeStatvfsResult()
class StatvfsResultRepr(Repr):
def __init__(self, rtyper):
self.rtyper = rtyper
self.statvfs_field_indexes = {}
for i, (name, TYPE) in enumerate(STATVFS_FIELDS):
self.statvfs_field_indexes[name] = i
self.s_tuple = annmodel.SomeTuple(
[lltype_to_annotation(TYPE) for name, TYPE in STATVFS_FIELDS])
self.r_tuple = rtyper.getrepr(self.s_tuple)
self.lowleveltype = self.r_tuple.lowleveltype
def redispatch_getfield(self, hop, index):
rtyper = self.rtyper
s_index = rtyper.annotator.bookkeeper.immutablevalue(index)
hop2 = hop.copy()
spaceop = op.getitem(hop.args_v[0], Constant(index))
spaceop.result = hop.spaceop.result
hop2.spaceop = spaceop
hop2.args_v = spaceop.args
hop2.args_s = [self.s_tuple, s_index]
hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)]
return hop2.dispatch()
def rtype_getattr(self, hop):
s_attr = hop.args_s[1]
attr = s_attr.const
try:
index = self.statvfs_field_indexes[attr]
except KeyError:
raise TyperError("os.statvfs().%s: field not available" % (attr,))
return self.redispatch_getfield(hop, index)
class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)):
def rtype_getitem((r_sta, r_int), hop):
s_int = hop.args_s[1]
index = s_int.const
return r_sta.redispatch_getfield(hop, index)
def make_statvfs_result(tup):
args = tuple(
lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in
zip(tup, STATVFS_FIELDS))
return os.statvfs_result(args)
class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry):
_about_ = make_statvfs_result
def compute_result_annotation(self, s_tup):
return s_StatvfsResult
def specialize_call(self, hop):
r_StatvfsResult = hop.rtyper.getrepr(s_StatvfsResult)
[v_result] = hop.inputargs(r_StatvfsResult.r_tuple)
hop.exception_cannot_occur()
return v_result
# ____________________________________________________________
#
# RFFI support
if sys.platform.startswith('win'):
_name_struct_stat = '_stati64'
INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h']
else:
if _LINUX:
_name_struct_stat = 'stat64'
else:
_name_struct_stat = 'stat'
INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h']
compilation_info = ExternalCompilationInfo(
# This must be set to 64 on some systems to enable large file support.
#pre_include_bits = ['#define _FILE_OFFSET_BITS 64'],
# ^^^ nowadays it's always set in all C files we produce.
includes=INCLUDES
)
def posix_declaration(try_to_add=None):
global STAT_STRUCT, STATVFS_STRUCT
LL_STAT_FIELDS = STAT_FIELDS[:]
if try_to_add:
LL_STAT_FIELDS.append(try_to_add)
if TIMESPEC is not None:
def _expand(lst, originalname, timespecname):
for i, (_name, _TYPE) in enumerate(lst):
if _name == originalname:
# replace the 'st_atime' field of type rffi.DOUBLE
# with a field 'st_atim' of type 'struct timespec'
lst[i] = (timespecname, TIMESPEC)
break
_expand(LL_STAT_FIELDS, 'st_atime', 'st_atim')
_expand(LL_STAT_FIELDS, 'st_mtime', 'st_mtim')
_expand(LL_STAT_FIELDS, 'st_ctime', 'st_ctim')
del _expand
else:
# Replace float fields with integers
for name in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
for i, (_name, _TYPE) in enumerate(LL_STAT_FIELDS):
if _name == name:
LL_STAT_FIELDS[i] = (_name, lltype.Signed)
break
class CConfig:
_compilation_info_ = compilation_info
STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS)
try:
config = platform.configure(CConfig, ignore_errors=try_to_add is not None)
except platform.CompilationError:
if try_to_add:
return # failed to add this field, give up
raise
STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT'])
STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT'])
if try_to_add:
STAT_FIELDS.append(try_to_add)
# This lists only the fields that have been found on the underlying platform.
# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the
# following loop.
STAT_FIELDS = PORTABLE_STAT_FIELDS[:]
if sys.platform != 'win32':
posix_declaration()
for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)):
posix_declaration(ALL_STAT_FIELDS[_i])
del _i
STAT_FIELDS += ALL_STAT_FIELDS[-3:] # nsec_Xtime
# these two global vars only list the fields defined in the underlying platform
STAT_FIELD_TYPES = dict(STAT_FIELDS) # {'st_xxx': TYPE}
STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS]
del _name, _TYPE
STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS)
STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS]
def build_stat_result(st):
# only for LL backends
if TIMESPEC is not None:
atim = st.c_st_atim
mtim = st.c_st_mtim
ctim = st.c_st_ctim
atime, extra_atime = atim.c_tv_sec, int(atim.c_tv_nsec)
mtime, extra_mtime = mtim.c_tv_sec, int(mtim.c_tv_nsec)
ctime, extra_ctime = ctim.c_tv_sec, int(ctim.c_tv_nsec)
else:
atime, extra_atime = st.c_st_atime, 0
mtime, extra_mtime = st.c_st_mtime, 0
ctime, extra_ctime = st.c_st_ctime, 0
result = (st.c_st_mode,
st.c_st_ino,
st.c_st_dev,
st.c_st_nlink,
st.c_st_uid,
st.c_st_gid,
st.c_st_size,
atime,
mtime,
ctime)
if "st_blksize" in STAT_FIELD_TYPES: result += (st.c_st_blksize,)
if "st_blocks" in STAT_FIELD_TYPES: result += (st.c_st_blocks,)
if "st_rdev" in STAT_FIELD_TYPES: result += (st.c_st_rdev,)
if "st_flags" in STAT_FIELD_TYPES: result += (st.c_st_flags,)
result += (extra_atime,
extra_mtime,
extra_ctime)
return make_stat_result(result)
def build_statvfs_result(st):
return make_statvfs_result((
st.c_f_bsize,
st.c_f_frsize,
st.c_f_blocks,
st.c_f_bfree,
st.c_f_bavail,
st.c_f_files,
st.c_f_ffree,
st.c_f_favail,
st.c_f_flag,
st.c_f_namemax
))
# Implement and register os.stat() & variants
if not _WIN32:
c_fstat = rffi.llexternal('fstat64' if _LINUX else 'fstat',
[rffi.INT, STAT_STRUCT], rffi.INT,
compilation_info=compilation_info,
save_err=rffi.RFFI_SAVE_ERRNO,
macro=True)
c_stat = rffi.llexternal('stat64' if _LINUX else 'stat',
[rffi.CCHARP, STAT_STRUCT], rffi.INT,
compilation_info=compilation_info,
save_err=rffi.RFFI_SAVE_ERRNO,
macro=True)
c_lstat = rffi.llexternal('lstat64' if _LINUX else 'lstat',
[rffi.CCHARP, STAT_STRUCT], rffi.INT,
compilation_info=compilation_info,
save_err=rffi.RFFI_SAVE_ERRNO,
macro=True)
c_fstatvfs = rffi.llexternal('fstatvfs',
[rffi.INT, STATVFS_STRUCT], rffi.INT,
compilation_info=compilation_info,
save_err=rffi.RFFI_SAVE_ERRNO)
c_statvfs = rffi.llexternal('statvfs',
[rffi.CCHARP, STATVFS_STRUCT], rffi.INT,
compilation_info=compilation_info,
save_err=rffi.RFFI_SAVE_ERRNO)
@replace_os_function('fstat')
def fstat(fd):
if not _WIN32:
with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult:
handle_posix_error('fstat', c_fstat(fd, stresult))
return build_stat_result(stresult)
else:
handle = rwin32.get_osfhandle(fd)
win32traits = make_win32_traits(string_traits)
filetype = win32traits.GetFileType(handle)
if filetype == win32traits.FILE_TYPE_CHAR:
# console or LPT device
return make_stat_result((win32traits._S_IFCHR,
0, 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0))
elif filetype == win32traits.FILE_TYPE_PIPE:
# socket or named pipe
return make_stat_result((win32traits._S_IFIFO,
0, 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0))
elif filetype == win32traits.FILE_TYPE_UNKNOWN:
error = rwin32.GetLastError_saved()
if error != 0:
raise WindowsError(error, "os_fstat failed")
# else: unknown but valid file
# normal disk file (FILE_TYPE_DISK)
info = lltype.malloc(win32traits.BY_HANDLE_FILE_INFORMATION,
flavor='raw', zero=True)
try:
res = win32traits.GetFileInformationByHandle(handle, info)
if res == 0:
raise WindowsError(rwin32.GetLastError_saved(),
"os_fstat failed")
return win32_by_handle_info_to_stat(win32traits, info)
finally:
lltype.free(info, flavor='raw')
@replace_os_function('stat')
@specialize.argtype(0)
def stat(path):
if not _WIN32:
with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult:
arg = _as_bytes0(path)
handle_posix_error('stat', c_stat(arg, stresult))
return build_stat_result(stresult)
else:
traits = _preferred_traits(path)
path = traits.as_str0(path)
return win32_xstat(traits, path, traverse=True)
@replace_os_function('lstat')
@specialize.argtype(0)
def lstat(path):
if not _WIN32:
with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult:
arg = _as_bytes0(path)
handle_posix_error('lstat', c_lstat(arg, stresult))
return build_stat_result(stresult)
else:
traits = _preferred_traits(path)
path = traits.as_str0(path)
return win32_xstat(traits, path, traverse=False)
if rposix.HAVE_FSTATAT:
from rpython.rlib.rposix import AT_FDCWD, AT_SYMLINK_NOFOLLOW
c_fstatat = rffi.llexternal('fstatat64' if _LINUX else 'fstatat',
[rffi.INT, rffi.CCHARP, STAT_STRUCT, rffi.INT], rffi.INT,
compilation_info=compilation_info,
save_err=rffi.RFFI_SAVE_ERRNO, macro=True)
def fstatat(pathname, dir_fd=AT_FDCWD, follow_symlinks=True):
if follow_symlinks:
flags = 0
else:
flags = AT_SYMLINK_NOFOLLOW
with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult:
error = c_fstatat(dir_fd, pathname, stresult, flags)
handle_posix_error('fstatat', error)
return build_stat_result(stresult)
@replace_os_function('fstatvfs')
def fstatvfs(fd):
with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult:
handle_posix_error('fstatvfs', c_fstatvfs(fd, stresult))
return build_statvfs_result(stresult)
@replace_os_function('statvfs')
@specialize.argtype(0)
def statvfs(path):
with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult:
arg = _as_bytes0(path)
handle_posix_error('statvfs', c_statvfs(arg, stresult))
return build_statvfs_result(stresult)
#__________________________________________________
# Helper functions for win32
if _WIN32:
from rpython.rlib.rwin32file import FILE_TIME_to_time_t_nsec
def make_longlong(high, low):
return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low)
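    # A hedged illustration (plain untranslated Python; the helper name and the
    # sample value are invented): a 64-bit value split into its 32-bit halves is
    # recombined by make_longlong() above.
    def _make_longlong_example(value=0x1234567890):
        high = value >> 32            # upper 32 bits
        low = value & 0xFFFFFFFF      # lower 32 bits
        return make_longlong(high, low) == value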
@specialize.arg(0)
def win32_xstat(traits, path, traverse=False):
# XXX 'traverse' is ignored
win32traits = make_win32_traits(traits)
with lltype.scoped_alloc(
win32traits.WIN32_FILE_ATTRIBUTE_DATA) as data:
res = win32traits.GetFileAttributesEx(
path, win32traits.GetFileExInfoStandard, data)
if res == 0:
errcode = rwin32.GetLastError_saved()
if errcode == win32traits.ERROR_SHARING_VIOLATION:
res = win32_attributes_from_dir(
win32traits, path, data)
if res == 0:
errcode = rwin32.GetLastError_saved()
raise WindowsError(errcode, "os_stat failed")
return win32_attribute_data_to_stat(win32traits, data)
@specialize.arg(0)
def win32_attributes_to_mode(win32traits, attributes):
m = 0
attributes = intmask(attributes)
if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY:
m |= win32traits._S_IFDIR | 0111 # IFEXEC for user,group,other
else:
m |= win32traits._S_IFREG
if attributes & win32traits.FILE_ATTRIBUTE_READONLY:
m |= 0444
else:
m |= 0666
return m
@specialize.arg(0)
def win32_attribute_data_to_stat(win32traits, info):
st_mode = win32_attributes_to_mode(win32traits, info.c_dwFileAttributes)
st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow)
ctime, extra_ctime = FILE_TIME_to_time_t_nsec(info.c_ftCreationTime)
mtime, extra_mtime = FILE_TIME_to_time_t_nsec(info.c_ftLastWriteTime)
atime, extra_atime = FILE_TIME_to_time_t_nsec(info.c_ftLastAccessTime)
result = (st_mode,
0, 0, 0, 0, 0,
st_size,
atime, mtime, ctime,
extra_atime, extra_mtime, extra_ctime)
return make_stat_result(result)
def win32_by_handle_info_to_stat(win32traits, info):
# similar to the one above
st_mode = win32_attributes_to_mode(win32traits, info.c_dwFileAttributes)
st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow)
ctime, extra_ctime = FILE_TIME_to_time_t_nsec(info.c_ftCreationTime)
mtime, extra_mtime = FILE_TIME_to_time_t_nsec(info.c_ftLastWriteTime)
atime, extra_atime = FILE_TIME_to_time_t_nsec(info.c_ftLastAccessTime)
# specific to fstat()
st_ino = make_longlong(info.c_nFileIndexHigh, info.c_nFileIndexLow)
st_nlink = info.c_nNumberOfLinks
result = (st_mode,
st_ino, 0, st_nlink, 0, 0,
st_size,
atime, mtime, ctime,
extra_atime, extra_mtime, extra_ctime)
return make_stat_result(result)
@specialize.arg(0)
def win32_attributes_from_dir(win32traits, path, data):
filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw')
try:
hFindFile = win32traits.FindFirstFile(path, filedata)
if hFindFile == rwin32.INVALID_HANDLE_VALUE:
return 0
win32traits.FindClose(hFindFile)
data.c_dwFileAttributes = filedata.c_dwFileAttributes
rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime)
rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime)
rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime)
data.c_nFileSizeHigh = filedata.c_nFileSizeHigh
data.c_nFileSizeLow = filedata.c_nFileSizeLow
return 1
finally:
lltype.free(filedata, flavor='raw')
|
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Eli Bendersky [http://eli.thegreenplace.net]
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.9'
__tabversion__ = '3.8'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when an invalid token is encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
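# A hedged illustration (not used by PLY itself) of why NullLogger works as a
# drop-in logger: every attribute lookup and every call simply return the
# logger again, so chained logging calls become no-ops. The helper name is
# invented for this sketch.
def _null_logger_sketch():
    nl = NullLogger()
    # Both the attribute access and the call come back as the same object
    return nl.warning('dropped %s', 'message') is nl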
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = False # Optimized mode
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
if isinstance(lextab, types.ModuleType):
raise IOError("Won't overwrite existing lextab module")
basetabmodule = lextab.split('.')[-1]
filename = os.path.join(outputdir, basetabmodule) + '.py'
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
titem.append((retext, _funcs_to_names(func, renames)))
tabre[statename] = titem
tf.write('_lexstatere = %s\n' % repr(tabre))
tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
taberr = {}
for statename, ef in self.lexstateerrorf.items():
taberr[statename] = ef.__name__ if ef else None
tf.write('_lexstateerrorf = %s\n' % repr(taberr))
tabeof = {}
for statename, ef in self.lexstateeoff.items():
tabeof[statename] = ef.__name__ if ef else None
tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
if isinstance(tabfile, types.ModuleType):
lextab = tabfile
else:
exec('import %s' % tabfile)
lextab = sys.modules[tabfile]
if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
raise ImportError('Inconsistent PLY version')
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lextokens_all = self.lextokens | set(self.lexliterals)
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for statename, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for pat, func_name in lre:
titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
self.lexstateerrorf = {}
for statename, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[statename] = fdict[ef]
self.lexstateeoff = {}
for statename, ef in lextab._lexstateeoff.items():
self.lexstateeoff[statename] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError('Undefined state')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
    # token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
            # Short-circuit handling of whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # A rule function normally returns a token; if it returns nothing, we move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if newtok.type not in self.lextokens_all:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.__code__.co_filename, func.__code__.co_firstlineno,
func.__name__, newtok.type), lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
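# A hedged usage sketch of the runtime interface documented above. It assumes
# a lexer has already been built with lex() (defined further below); the
# helper name and its arguments are invented for illustration and nothing here
# runs at import time.
def _runtime_usage_sketch(built_lexer, text):
    built_lexer.input(text)            # feed a string into the lexer
    toks = []
    while True:
        tok = built_lexer.token()      # pull tokens one at a time
        if not tok:
            break                      # token() returns None at end of input
        toks.append((tok.type, tok.value, tok.lineno, tok.lexpos))
    return toks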
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
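# A hedged illustration of the two ways a rule function can carry its pattern:
# in the docstring, or in a .regex attribute (normally attached by the @TOKEN
# decorator defined near the end of this module). The rule names are invented.
def _get_regex_sketch():
    def t_doc(t):
        r'\d+'
        return t
    def t_attr(t):
        return t
    t_attr.regex = r'[a-z]+'
    return _get_regex(t_doc), _get_regex(t_attr)    # ('\\d+', '[a-z]+')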
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
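# A hedged illustration: with levels=2 the returned dictionary describes the
# caller of the function that invokes get_caller_module_dict(), which is how
# lex() can pick up rules defined as locals. The names below are invented.
def _caller_dict_sketch():
    def probe():
        return get_caller_module_dict(2)
    local_rule = 't_EXAMPLE'
    env = probe()
    return 'local_rule' in env          # True: this function's locals are visible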
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
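# A hedged sketch (separate from the PLY machinery) of the named-group
# dispatch that _form_master_re() builds: every rule becomes one named group
# in a single master pattern, and m.lastindex identifies which rule matched.
# The rule names and patterns are invented for the example.
def _master_re_sketch():
    rules = [('t_NUMBER', r'\d+'), ('t_ID', r'[A-Za-z_][A-Za-z0-9_]*')]
    master = re.compile('|'.join('(?P<%s>%s)' % (name, pat) for name, pat in rules))
    index_to_name = dict((index, name) for name, index in master.groupindex.items())
    m = master.match('count42')
    return index_to_name[m.lastindex], m.group()    # ('t_ID', 'count42')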
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_<states>_<TOKENNAME>" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
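# A hedged illustration of the naming convention handled above; the state
# names 'foo' and 'bar' are invented for the example.
def _statetoken_sketch():
    names = {'INITIAL': 'inclusive', 'foo': 'exclusive', 'bar': 'inclusive'}
    return (_statetoken('t_NUMBER', names),        # (('INITIAL',), 'NUMBER')
            _statetoken('t_foo_bar_SPAM', names))  # (('foo', 'bar'), 'SPAM')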
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the lexer specification module. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
    # Get the module dictionary used for the lexer rules
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
    # Determine if the module is part of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
# Collect parser information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator can be used to attach a regular expression to a rule function
# when its docstring cannot conveniently hold the pattern
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
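# A hedged end-to-end sketch (not part of the public module): the token names
# and rules below are invented for illustration. Because lex() merges the
# caller's locals via get_caller_module_dict(), rules defined inside this
# function are picked up when it is called; nothing runs at import time.
def _lex_usage_sketch(text='3 + 42'):
    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' \t'
    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t
    def t_error(t):
        t.lexer.skip(1)
    demo_lexer = lex()
    demo_lexer.input(text)
    # expected: [('NUMBER', 3), ('PLUS', '+'), ('NUMBER', 42)]
    return [(tok.type, tok.value) for tok in demo_lexer]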
|
|
import numpy as np
import pywt
from PIL import Image, ImageOps
import colorsys
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import matplotlib.rcsetup as rcsetup
from haar2d import haar2d, ihaar2d
#print matplotlib.matplotlib_fname()
#print(rcsetup.all_backends)
#http://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_matplotlib_rgb_brg_image_load_display_save.php
#http://stackoverflow.com/questions/7534453/matplotlib-does-not-show-my-drawings-although-i-call-pyplot-show
file_img = '/home/janson/download/z2.jpg'
def rgb2gray(rgb):
    # standard ITU-R BT.601 luma weights for R, G, B
    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def im2array(img, dtype='float'):
im = Image.open(img)
return np.asarray(im, dtype=dtype)
def im2arr(img):
im = Image.open(img)
row,col = im.size
data = np.zeros([row, col])
#im.show()
pixels = im.load()
for i in range(row):
for j in range(col):
r,g,b = pixels[i,j]
data[i,j] = r
return data
def thumbnail(infile='/home/janson/download/z.jpg'):
try:
img = Image.open(infile)
size = (128,128)
#img.thumbnail(size, Image.ANTIALIAS)
thub = ImageOps.fit(img, size, Image.ANTIALIAS)
thub.save(file_img, "JPEG")
except IOError:
print "cannot create thumbnail for '%s'" % infile
return None
def paint_img(rlt, pos=0):
if len(rlt.shape) == 2:
img = Image.new('RGB', rlt.shape, "black")
pixels = img.load()
for i in range(img.size[0]): # for every pixel:
for j in range(img.size[1]):
x = [0,0,0]
x[pos] = int(rlt[i][j])
x[0],x[1],x[2] = yiq2rgb(x[0],x[1],x[2])
pixels[i,j] = (x[0],x[1],x[2])
img.show()
def rgb2yiq(x,y,z):
return colorsys.rgb_to_yiq(float(x)/255, float(y)/255, float(z)/255)
def yiq2rgb(x,y,z):
r = colorsys.yiq_to_rgb(x,y,z)
return (int(r[0]*255), int(r[1]*255), int(r[2]*255))
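# A small sketch (not in the original script) exercising the two helpers
# above: converting an 8-bit RGB triple to YIQ and back should reproduce the
# original values up to rounding; the default triple is arbitrary.
def yiq_roundtrip_demo(rgb=(145, 149, 152)):
    yiq = rgb2yiq(*rgb)
    return rgb, yiq, yiq2rgb(*yiq)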
def wtHighFreq(img, mode='haar', level=1):
'''
Apply Wavelet Transform to an image
Author: Lee Seongjoo seongjoo@csai.yonsei.ac.kr
2009 (c) Lee Seongjoo
'''
imArray = im2arr(img)
# compute coefficients of multiresolution WT
coeffs = pywt.wavedec2(imArray, mode, level=level)
# high frequency coeffs
coeffs_H = list(coeffs)
#print coeffs_H[0].shape
# discarding the low frequency
# Approximation coeffs are from the low-pass filter
coeffs_H[0] = np.zeros(coeffs_H[0].shape)
# multilevel reconstruction
imArray_H = pywt.waverec2(coeffs_H, mode)
#paint_img(imArray_H)
imgplot = plt.imshow(imArray_H.T)
plt.show()
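# A small sketch (not in the original script) of the same idea on synthetic
# data, so it can run without an image file: decompose with pywt, zero the
# approximation band, and reconstruct to keep only the high-frequency detail.
def wt_highfreq_demo(shape=(64, 64), wavelet='haar'):
    data = np.random.random(shape)
    coeffs = list(pywt.wavedec2(data, wavelet, level=1))
    coeffs[0] = np.zeros_like(coeffs[0])   # discard the approximation band
    return pywt.waverec2(coeffs, wavelet)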
def paint(img):
rlt = im2array(img)
(x,y,z) = rlt.shape
for i in range(x):
for j in range(y):
rlt[i][j][1] = 0
rlt[i][j][2] = 0
imgplot = plt.imshow(rlt)
#fname = 'cartoon.png'
#image = Image.open(fname).convert("L")
#arr = np.asarray(image)
#plt.imshow(arr, cmap = cm.Greys_r)
plt.show()
def paint2():
fname = '/home/janson/download/z2.png'
image = Image.open(fname).convert("L")
arr = np.asarray(image)
plt.imshow(arr, cmap = cm.Greys_r)
plt.show()
def new_img():
img = Image.new( 'RGB', (255,255), "black")
pixels = img.load()
for i in range(img.size[0]): # for every pixel:
for j in range(img.size[1]):
pixels[i,j] = (i, j, 100) # set the colour accordingly
img.show()
def im2arr_3(img):
im = Image.open(img)
row,col = im.size
arr_r,arr_g,arr_b = (np.zeros([row, col]), np.zeros([row, col]), np.zeros([row, col]))
pixels = im.load()
for i in range(row):
for j in range(col):
#r,g,b = rgb2yiq(*pixels[i,j])
r,g,b = pixels[i,j]
arr_r[i,j] = r
arr_g[i,j] = g
arr_b[i,j] = b
return arr_r,arr_g,arr_b
def trans(imArray, mode='haar', level=1):
coeffs = pywt.wavedec2(imArray, mode, level=level)
coeffs_H = list(coeffs)
coeffs_H[0] = np.zeros(coeffs_H[0].shape)
imArray_H = pywt.waverec2(coeffs_H, mode)
#return imArray
#print "img1", imArray[0]
#print "img2", imArray_H[0]
return imArray_H
def test_02(img):
ar,ag,ab = im2arr_3(img)
tar,tag,tab = trans(ar), trans(ag), trans(ab)
#paint_img(tar, pos=0)
#paint_img(tag, pos=1)
#paint_img(tab, pos=2)
#img = Image.new('RGB', tar.shape)
#pixels = img.load()
pixels = np.zeros((tar.shape[0], tar.shape[1], 3))
for i in range(tar.shape[0]): # for every pixel:
for j in range(tar.shape[1]):
r,g,b = yiq2rgb(tar[i][j], tag[i][j], tab[i][j])
#pixels[i,j] = (r,g,b)
pixels[i,j] = [r,g,b]
#print pixels[i,j]
#img.show()
imgplot = plt.imshow(pixels)
plt.show()
def test_yiq():
a = [0,0,0,0,0,0]
a[0] = (145, 149, 152)
a[1] = (151, 155, 158)
a[2] = (127, 131, 134)
a[3] =(86, 90, 93)
a[4] = (61, 66, 70)
a[5] = (57, 62, 2)
for nn in a:
n = [float(m)/255 for m in nn]
yiq = colorsys.rgb_to_yiq(*n)
#r = (int(yiq[0]),int(yiq[1]),int(yiq[2]))
rgb = colorsys.yiq_to_rgb(*yiq)
r = (int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255))
print n, yiq, colorsys.yiq_to_rgb(*yiq), r, nn
def test_yiq2():
a = [0,0,0,0,0,0]
a[0] = (145, 149, 152)
a[1] = (151, 155, 158)
a[2] = (127, 131, 134)
a[3] =(86, 90, 93)
a[4] = (61, 66, 70)
a[5] = (57, 62, 2)
for nn in a:
yiq = rgb2yiq(*nn)
r = yiq2rgb(*yiq)
print nn, yiq, r
def test_yiq3():
a = [0,0,0,0,0,0]
a[0] = [145, 149, 152]
a[1] = [151, 155, 158]
a[2] = [127, 131, 134]
a[3] = [86, 90, 93]
a[4] = [61, 66, 70]
a[5] = [57, 62, 2]
#s = np.array([145.0, 149.0, 152.0])/255
s = np.asarray(a, dtype='float')/255
s2 = np.random.random(s.shape)
s3 = np.asarray([s,s2], dtype='float')
print s3.shape
tranform = np.array([[0.299, 0.587, 0.114], [0.596, -0.275, -0.321], [0.212, -0.523, 0.311]])
y = s[0][0]*0.299 + s[0][1]*0.587 + s[0][2]*0.114
z = np.array([tranform[0][0], tranform[1][0], tranform[2][0]])
print y
print colorsys.rgb_to_yiq(*s[-1])
#print tranform[0], np.dot(s, z)
#print s
#print tranform
print np.dot(s3, tranform.T)
def test_03():
db8 = pywt.Wavelet('db8')
scaling, wavelet, x = db8.wavefun()
fig, axes = plt.subplots(1, 2, sharey=True, figsize=(8,6))
ax1, ax2 = axes
ax1.plot(x, scaling);
ax1.set_title('Scaling function, N=8');
ax1.set_ylim(-1.2, 1.2);
ax2.set_title('Wavelet, N=8');
ax2.tick_params(labelleft=False);
ax2.plot(x-x.mean(), wavelet);
fig.tight_layout()
def test_04():
#ar = np.array([[[1,2,3,4,5,6],[11,22,33,44,55,66],[21,22,23,24,25,26]], [[1,2,3,4,5,6],[11,22,33,44,55,66],[21,22,23,24,25,26]]])
#print ar.shape
#print ar[:,:,1:2].shape
image = np.random.random([10,5,3]) * 10
img3 = image[:,:,1:2]
img2 = np.zeros((10,5))
for i in range(10):
for j in range(5):
img2[i,j] = img3[i,j,0]
print img2
print image[:,:,1:2].reshape(image.shape[0], image.shape[1])
def test_haar2d(img):
im = Image.open(img)
#im.show()
arr = np.asarray(im, dtype='float')
#plt.imshow(arr, cmap = cm.Greys_r)
#plt.show()
arr = arr/255
#arr = arr[0:5,0:5]
arr2 = arr.copy()
row, col = arr.shape[0], arr.shape[1]
assert (arr - arr2 < 0.0001).all()
tranform = np.array([[0.299, 0.587, 0.114], [0.596, -0.275, -0.321], [0.212, -0.523, 0.311]])
#print arr[0,0]
#print np.dot(arr[0,0], tranform.T)
#print colorsys.rgb_to_yiq(*arr[0,0])
arr = np.dot(arr, tranform.T)
arr_r,arr_g,arr_b = (np.zeros([row, col]), np.zeros([row, col]), np.zeros([row, col]))
arr3 = arr.copy()
for i in range(row):
for j in range(col):
r,g,b = colorsys.rgb_to_yiq(*arr2[i,j])
arr_r[i,j] = r
arr_g[i,j] = g
arr_b[i,j] = b
arr3[i,j] = [r,g,b]
assert (arr - arr3 < 0.01).all()
images = np.array([arr[:,:,:1].reshape(row, col), arr[:,:,1:2].reshape(row, col), arr[:,:,2:].reshape(row, col)])
assert (images[0] - arr_r < 0.01).all()
colors = images.shape[0]
haars = [haar2d(images[i]) for i in range(colors)]
#print haars[0].shape
#print row, col
#print haars[0] - images[0]
assert not (images[0] - haars[0] < 0.1).all()
haars = [haars[i].reshape(row*col) for i in range(colors)]
lefts = 41
inds = [np.argpartition(np.absolute(haars[i]), 0-lefts)[:((row**2)-lefts)] for i in range(colors)]
print inds[0].shape
#reversed_inds = [list(set(range(row**2)) - set(inds[i])) for i in range(colors)]
for i in range(colors):
haars[i][inds[i]] = np.zeros(inds[i].shape[0])
haars = [haars[i].reshape([row, col]) for i in range(colors)]
ihaars = [ihaar2d(haars[i]) for i in range(colors)]
#assert (images[0] - ihaars[0] < 0.1).all()
for i in range(row):
for j in range(col):
r,g,b = colorsys.yiq_to_rgb(ihaars[0][i,j], ihaars[1][i,j], ihaars[2][i,j])
arr3[i,j] = [r,g,b]
arr3 = arr3*255
#arr3 = arr3.astype(numpy.int32, copy=False)
#plt.imshow(arr3, cmap = cm.Greys_r)
#plt.show()
img = Image.new('RGB', [row,col])
pixels = img.load()
for i in range(row):
for j in range(col):
pixels[j,i] = (int(arr3[i,j][0]), int(arr3[i,j][1]), int(arr3[i,j][2]))
img.show()
def test_haar2d2(img):
im = Image.open(img)
arr = np.asarray(im, dtype='float')
row, col = arr.shape[0], arr.shape[1]
tranform = np.array([[0.299, 0.587, 0.114], [0.596, -0.275, -0.321], [0.212, -0.523, 0.311]])
arr = np.dot(arr, tranform.T)
images = [arr[:,:,:1].reshape(row, col), arr[:,:,1:2].reshape(row, col), arr[:,:,2:].reshape(row, col)]
colors = 3
haars = [haar2d(images[i]).reshape(row*col) for i in range(colors)]
avgl = [0.0, 0.0, 0.0]
avgl = [haars[i][0]/(256*128) for i in range(colors)]
for i in range(colors):
haars[i][0] = 0.0
print 'avgl', avgl
lefts = 40
inds = [np.argpartition(np.absolute(haars[i]), 0-lefts)[-lefts:] for i in range(colors)] #lefts
haars = [haars[i][inds[i]] for i in range(colors)] #value in lefts
big_i = [(haars[i] > 0) for i in range(colors)]
small_i = [(haars[i] < 0) for i in range(colors)]
for i in range(colors):
print inds[i][big_i[i]]
print inds[i][small_i[i]]
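# A small sketch (not in the original script) of the signature step used in
# test_haar2d2 above: Haar-transform one 2-D channel, drop the average term,
# and keep the indices and signs of the k largest-magnitude coefficients.
# It assumes, as the code above does, that haar2d() returns an array with the
# same number of elements as its input and that the channel has more than k
# elements.
def topk_haar_sketch(channel, k=40):
    coeffs = haar2d(channel).reshape(-1)
    coeffs[0] = 0.0                       # discard the overall average term
    top = np.argpartition(np.absolute(coeffs), -k)[-k:]
    return top[coeffs[top] > 0], top[coeffs[top] < 0]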
#thumbnail()
#print im2arr(file_img)
#wtHighFreq(file_img)
#paint(file_img)
#paint2()
#new_img()
#test_02(file_img)
#test_yiq()
#test_03()
#test_yiq2()
#test_yiq3()
#test_04()
file_img = '/home/janson/download/testimg/thumb/6.jpeg'
test_haar2d(file_img)
#test_haar2d2(file_img)
|
|
#!/usr/bin/env python
# coding=utf-8
# Copyright [2017] [B2W Digital]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import httpretty
from httpretty import httpretty as httpretty_object
from marvin_python_toolbox.common.http_client import ApiClient, ListResultSet
from marvin_python_toolbox.common.exceptions import HTTPException
class TestHttpClient:
@httpretty.activate
def test_list_result_set(self):
data = [{'id': str(n)} for n in range(100)]
per_page = 2
total_pages = len(data) / per_page
def fake_items(start=0):
httpretty.register_uri(
httpretty.GET, "http://localhost:8000/service1/",
body=json.dumps({
'objects': data[start:start + per_page],
'total': len(data),
}),
content_type="application/json",
status=200,
)
fake_items(0)
result_set = ListResultSet(path='/service1/', limit=per_page)
assert len(result_set) == len(data)
# force iter all
all_items = list(result_set)
assert len(all_items) == len(data)
assert len(httpretty_object.latest_requests) == total_pages
@httpretty.activate
def test_get_ok(self):
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='[{"id": "1"}]',
content_type="application/json",
status=200)
response = ApiClient().get('/service1/')
assert response.ok
assert response.data is not None
@httpretty.activate
def test_get_not_ok(self):
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='[{"error": "deu merda"}]',
content_type="application/json",
status=500)
response = ApiClient().get('/service1/')
assert not response.ok
@httpretty.activate
def test_get_not_ok_not_json(self):
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='error: "deu merda"',
content_type="text/html",
status=500)
response = ApiClient().get('/service1/')
assert not response.ok
@httpretty.activate
def test_get_all_ok(self):
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='{"objects": [{"id": "3"}], "total": 3}',
content_type="application/json",
status=200)
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='{"objects": [{"id": "1"}, {"id": "2"}], "total": 3}',
content_type="application/json",
status=200)
response = ApiClient().get_all('/service1/', limit=2)
response_list = list(response)
assert len(response) == 3
assert len(response_list) == 3
assert response_list[0]['id'] == '1'
assert response_list[1]['id'] == '2'
assert response_list[2]['id'] == '3'
@httpretty.activate
def test_get_all_not_ok(self):
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='{"error": "deu merda"}',
content_type="application/json",
status=500)
with pytest.raises(HTTPException):
response = ApiClient().get_all('/service1/', limit=2)
@httpretty.activate
def test_get_all_not_ok_second_page(self):
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='{"error": "deu merda"}',
content_type="application/json",
status=500)
httpretty.register_uri(httpretty.GET, "http://localhost:8000/service1/",
body='{"objects": [{"id": "1"}, {"id": "2"}], "total": 3}',
content_type="application/json",
status=200)
response = ApiClient().get_all('/service1/', limit=2)
assert len(response) == 3
with pytest.raises(HTTPException):
response_list = list(response)
@httpretty.activate
def test_post_not_ok(self):
httpretty.register_uri(httpretty.POST, "http://localhost:8000/service1/",
body='[{"error": "name required"}]',
content_type='text/json',
status=500)
response = ApiClient().post('/service1/', {"name": "americanas", "url": "www.americanas.com.br"})
assert not response.ok
@httpretty.activate
def test_post_ok(self):
httpretty.register_uri(httpretty.POST, "http://localhost:8000/service1/",
body='{"success": true}',
content_type='text/json',
status=201)
response = ApiClient().post('/service1/', {"name": "americanas", "url": "www.americanas.com.br"})
assert response.ok
@httpretty.activate
def test_put_not_ok(self):
httpretty.register_uri(httpretty.PUT, "http://localhost:8000/service1/",
body='[{"error": "name required"}]',
content_type="application/json",
status=500)
response = ApiClient().put('/service1/', {"id": "1", "url": "www.americanas.com.br"})
assert not response.ok
@httpretty.activate
def test_put_ok(self):
httpretty.register_uri(httpretty.PUT, "http://localhost:8000/service1/",
body='{"success": true}',
content_type='text/json',
status=200)
response = ApiClient().put('/service1/', {"id": "1", "name": "americanas", "url": "www.americanas.com.br"})
assert response.ok
@httpretty.activate
def test_delete_not_ok(self):
httpretty.register_uri(httpretty.DELETE, "http://localhost:8000/service1/",
body='[{"error": "name required"}]',
content_type="application/json",
status=500)
response = ApiClient().delete('/service1/')
assert not response.ok
@httpretty.activate
def test_delete_ok(self):
httpretty.register_uri(httpretty.DELETE, "http://localhost:8000/service1/",
body='{"success": true}',
content_type='text/json',
status=200)
response = ApiClient().delete('/service1/')
assert response.ok
@httpretty.activate
def test_full_url_path(self):
httpretty.register_uri(httpretty.GET, "http://localhost:9999/service_full/",
body='[{"id": "1"}]',
content_type="application/json",
status=200)
response = ApiClient().get('http://localhost:9999/service_full/')
assert response.ok
assert response.data is not None
|
|
# -*- coding: utf-8 -*-
'''
Interaction with Git repositories
=================================
Important: Before using git over ssh, make sure your remote host fingerprint
exists in "~/.ssh/known_hosts" file. To avoid requiring password
authentication, it is also possible to pass private keys to use explicitly.
.. code-block:: yaml
https://github.com/saltstack/salt.git:
git.latest:
- rev: develop
- target: /tmp/salt
'''
# Import python libs
import logging
import os
import shutil
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if git is available
'''
return 'git' if __salt__['cmd.has_exec']('git') else False
def latest(name,
rev=None,
target=None,
runas=None,
user=None,
force=None,
force_checkout=False,
submodules=False,
mirror=False,
bare=False,
remote_name='origin',
always_fetch=False,
identity=None,
onlyif=False,
unless=False):
'''
Make sure the repository is cloned to the given directory and is up to date
name
Address of the remote repository as passed to "git clone"
rev
The remote branch, tag, or revision ID to checkout after
clone / before update
target
Name of the target directory where repository is about to be cloned
runas
Name of the user performing repository management operations
.. deprecated:: 0.17.0
user
Name of the user performing repository management operations
.. versionadded:: 0.17.0
force
Force git to clone into pre-existing directories (deletes contents)
force_checkout
Force a checkout even if there might be overwritten changes
(Default: False)
submodules
Update submodules on clone or branch change (Default: False)
mirror
True if the repository is to be a mirror of the remote repository.
This implies bare, and thus is incompatible with rev.
bare
True if the repository is to be a bare clone of the remote repository.
This is incompatible with rev, as nothing will be checked out.
remote_name
Defines a different remote name. On the initial clone the given name is
set as the default remote; otherwise it is added as an additional
remote. (Default: 'origin')
always_fetch
If a tag or branch name is used as the rev a fetch will not occur
until the tag or branch name changes. Setting this to true will force
a fetch to occur. Only applies when rev is set. (Default: False)
identity
A path to a private key to use over SSH
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
'''
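# Illustrative only: a hedged sketch of how the options documented above might
# be combined in a state file. The repository URL, target path, and key path
# are hypothetical placeholders, not values used by this module.
#
# .. code-block:: yaml
#
#     https://github.com/example/project.git:
#       git.latest:
#         - rev: master
#         - target: /srv/project
#         - identity: /root/.ssh/id_deploy
#         - submodules: True
#         - always_fetch: True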
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}, 'state_stdout': ''}
# Check to make sure rev and mirror/bare are not both in use
if rev and (mirror or bare):
return _fail(ret, ('"rev" is not compatible with the "mirror" and '
'"bare" arguments'))
if not target:
return _fail(ret, '"target" option is required')
salt.utils.warn_until(
'Hydrogen',
'Please remove \'runas\' support at this stage. \'user\' support was '
'added in 0.17.0',
_dont_call_warnings=True
)
if runas:
# Warn users about the deprecation
ret.setdefault('warnings', []).append(
'The \'runas\' argument is being deprecated in favor of \'user\', '
'please update your state files.'
)
if user is not None and runas is not None:
# user wins over runas, but still warn about the deprecation.
ret.setdefault('warnings', []).append(
'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
'\'runas\' is being ignored in favor of \'user\'.'
)
runas = None
elif runas is not None:
# Support old runas usage
user = runas
runas = None
run_check_cmd_kwargs = {'runas': user}
# check if git.latest should be applied
cret = _run_check(
run_check_cmd_kwargs, onlyif, unless
)
if isinstance(cret, dict):
ret.update(cret)
return ret
bare = bare or mirror
check = 'refs' if bare else '.git'
if os.path.isdir(target) and os.path.isdir('{0}/{1}'.format(target,
check)):
# git pull is probably required
log.debug(('target {0} is found, "git pull" '
'is probably required'.format(target)))
try:
current_rev = __salt__['git.revision'](target, user=user, state_ret=ret)
# handle the case where a branch was provided for rev
remote_rev = None
branch = __salt__['git.current_branch'](target, user=user)
# We're only interested in the remote branch if a branch
# (instead of a hash, for example) was provided for rev.
if len(branch) > 0 and branch == rev:
remote_rev = __salt__['git.ls_remote'](target,
repository=name,
branch=branch, user=user,
identity=identity)
# only do something, if the specified rev differs from the
# current_rev and remote_rev
if current_rev in [rev, remote_rev]:
new_rev = current_rev
else:
if __opts__['test']:
return _neutral_test(
ret,
('Repository {0} update is probably required (current '
'revision is {1})').format(target, current_rev))
# if a non-default remote_name is used, pass it as the fetch option
if remote_name != 'origin':
fetch_opts = remote_name
else:
fetch_opts = ''
# check the remote; if its fetch URL does not match name, (re)set it
remote = __salt__['git.remote_get'](target,
remote=remote_name,
user=user)
if remote is None or remote[0] != name:
__salt__['git.remote_set'](target,
name=remote_name,
url=name,
user=user,
state_ret=ret)
ret['changes']['remote/{0}'.format(remote_name)] = "{0} => {1}".format(str(remote), name)
# check if rev is already present in repo, git-fetch otherwise
if bare:
__salt__['git.fetch'](target,
opts=fetch_opts,
user=user,
identity=identity,
state_ret=ret)
elif rev:
cmd = "git rev-parse " + rev + '^{commit}'
retcode = __salt__['cmd.retcode'](cmd,
cwd=target,
runas=user)
# there is an issue (#3938) addressing this
if 0 != retcode or always_fetch:
__salt__['git.fetch'](target,
opts=fetch_opts,
user=user,
identity=identity,
state_ret=ret)
__salt__['git.checkout'](target,
rev,
force=force_checkout,
user=user,
state_ret=ret)
# check if we are on a branch to merge changes
cmd = "git symbolic-ref -q HEAD > /dev/null"
retcode = __salt__['cmd.retcode'](cmd, cwd=target, runas=user)
if 0 == retcode:
__salt__['git.fetch' if bare else 'git.pull'](target,
opts=fetch_opts,
user=user,
identity=identity, state_ret=ret)
if submodules:
__salt__['git.submodule'](target,
user=user,
identity=identity,
opts='--recursive',
state_ret=ret)
new_rev = __salt__['git.revision'](cwd=target, user=user, state_ret=ret)
except Exception as exc:
return _fail(
ret,
str(exc))
if current_rev != new_rev:
log.info('Repository {0} updated: {1} => {2}'.format(target,
current_rev,
new_rev))
ret['comment'] = 'Repository {0} updated'.format(target)
ret['changes']['revision'] = '{0} => {1}'.format(
current_rev, new_rev)
else:
if os.path.isdir(target):
# git clone is required, target exists but force is turned on
if force:
log.debug(('target {0} found, but not a git repository. Since '
'force option is in use, deleting.').format(target))
if os.path.islink(target):
os.remove(target)
else:
shutil.rmtree(target)
# git clone is required, but target exists and is non-empty
elif os.listdir(target):
return _fail(ret, 'Directory \'{0}\' exists, is non-empty, and '
'force option not in use'.format(target))
# git clone is required
log.debug(
'target {0} is not found, "git clone" is required'.format(
target))
if 'test' in __opts__:
if __opts__['test']:
return _neutral_test(
ret,
'Repository {0} is about to be cloned to {1}'.format(
name, target))
try:
# make the clone
opts = '--mirror' if mirror else '--bare' if bare else ''
# if remote_name is not origin add --origin <name> to opts
if remote_name != 'origin':
opts += ' --origin {0}'.format(remote_name)
# do the clone
__salt__['git.clone'](target,
name,
user=user,
opts=opts,
identity=identity,
state_ret=ret)
if rev and not bare:
__salt__['git.checkout'](target, rev, user=user, state_ret=ret)
if submodules:
__salt__['git.submodule'](target,
user=user,
identity=identity,
opts='--recursive',
state_ret=ret)
new_rev = None if bare else (
__salt__['git.revision'](cwd=target, user=user, state_ret=ret))
except Exception as exc:
return _fail(
ret,
str(exc))
message = 'Repository {0} cloned to {1}'.format(name, target)
log.info(message)
ret['comment'] = message
ret['changes']['new'] = name
ret['changes']['revision'] = new_rev
return ret
def present(name, bare=True, runas=None, user=None, force=False):
'''
Make sure the repository is present in the given directory
name
Name of the directory where the repository is about to be created
bare
Create a bare repository (Default: True)
runas
Name of the user performing repository management operations
.. deprecated:: 0.17.0
user
Name of the user performing repository management operations
.. versionadded:: 0.17.0
force
Force-create a new repository in a pre-existing non-git directory
(deletes contents)
'''
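# Illustrative only: a hedged sketch of calling this state from an SLS file to
# keep a bare repository available. The path is a hypothetical placeholder.
#
# .. code-block:: yaml
#
#     /srv/repos/project.git:
#       git.present:
#         - bare: True
#         - force: True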
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
salt.utils.warn_until(
'Hydrogen',
'Please remove \'runas\' support at this stage. \'user\' support was '
'added in 0.17.0',
_dont_call_warnings=True
)
if runas:
# Warn users about the deprecation
ret.setdefault('warnings', []).append(
'The \'runas\' argument is being deprecated in favor of \'user\', '
'please update your state files.'
)
if user is not None and runas is not None:
# user wins over runas, but still warn about the deprecation.
ret.setdefault('warnings', []).append(
'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
'\'runas\' is being ignored in favor of \'user\'.'
)
runas = None
elif runas is not None:
# Support old runas usage
user = runas
runas = None
# If the named directory is already a git repo, return the unchanged ret
if os.path.isdir(name):
if bare and os.path.isfile('{0}/HEAD'.format(name)):
return ret
elif not bare and os.path.isdir('{0}/.git'.format(name)):
return ret
# Directory exists and is not a git repo, if force is set destroy the
# directory and recreate, otherwise throw an error
elif not force and os.listdir(name):
return _fail(ret,
'Directory which does not contain a git repo '
'is already present at {0}. To delete this '
'directory and create a fresh git repo set '
'force: True'.format(name))
# If the test option is set, just report what would be created
if __opts__['test']:
ret['changes']['new repository'] = name
return _neutral_test(ret, ('New git repo set for'
' creation at {0}').format(name))
if force and os.path.isdir(name):
if os.path.islink(name):
os.remove(name)
else:
shutil.rmtree(name)
opts = '--bare' if bare else ''
__salt__['git.init'](cwd=name, user=user, opts=opts, state_ret=ret)
message = 'Initialized repository {0}'.format(name)
log.info(message)
ret['changes']['new repository'] = name
ret['comment'] = message
return ret
def _fail(ret, comment):
ret['result'] = False
ret['comment'] = comment
return ret
def _neutral_test(ret, comment):
ret['result'] = None
ret['comment'] = comment
return ret
def _run_check(cmd_kwargs, onlyif, unless):
'''
Execute the onlyif and unless logic.
Return a result dict if:
* onlyif failed (onlyif != 0)
* unless succeeded (unless == 0)
else return True
'''
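# Illustrative only: given the semantics described above, a hypothetical state
# such as the one below would run only when git is installed (onlyif exits 0)
# and the clone does not exist yet (unless exits non-zero).
#
# .. code-block:: yaml
#
#     https://github.com/example/project.git:
#       git.latest:
#         - target: /srv/project
#         - onlyif: test -x /usr/bin/git
#         - unless: test -d /srv/project/.git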
if onlyif:
if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
return {'comment': 'onlyif execution failed',
'result': True}
if unless:
if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
return {'comment': 'unless execution succeeded',
'result': True}
# No reason to stop, return True
return True
|
|
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from hyperv.nova import constants
from hyperv.nova import vmutils
from hyperv.tests import test
class VMUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VMUtils class."""
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
_FAKE_VCPUS_NUM = 4
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_RET_VAL_BAD = -1
_FAKE_PATH = "fake_path"
_FAKE_CTRL_PATH = 'fake_ctrl_path'
_FAKE_CTRL_ADDR = 0
_FAKE_DRIVE_ADDR = 0
_FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_VHD_PATH = "fake_vhd_path"
_FAKE_DVD_PATH = "fake_dvd_path"
_FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
_FAKE_VM_UUID = "04e79212-39bc-4065-933c-50f6d48a57f6"
_FAKE_INSTANCE = {"name": _FAKE_VM_NAME,
"uuid": _FAKE_VM_UUID}
_FAKE_SNAPSHOT_PATH = "fake_snapshot_path"
_FAKE_RES_DATA = "fake_res_data"
_FAKE_HOST_RESOURCE = "fake_host_resource"
_FAKE_CLASS = "FakeClass"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_RES_NAME = 'fake_res_name'
_FAKE_ADDRESS = "fake_address"
_FAKE_JOB_STATUS_DONE = 7
_FAKE_JOB_STATUS_BAD = -1
_FAKE_JOB_DESCRIPTION = "fake_job_description"
_FAKE_ERROR = "fake_error"
_FAKE_ELAPSED_TIME = 0
_CONCRETE_JOB = "Msvm_ConcreteJob"
_FAKE_DYNAMIC_MEMORY_RATIO = 1.0
_FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4,
'EnabledState': 2,
'MemoryUsage': 2,
'UpTime': 1}
_DEFINE_SYSTEM = 'DefineVirtualSystem'
_DESTROY_SYSTEM = 'DestroyVirtualSystem'
_DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot'
_ADD_RESOURCE = 'AddVirtualSystemResources'
_REMOVE_RESOURCE = 'RemoveVirtualSystemResources'
_SETTING_TYPE = 'SettingType'
_VM_GEN = constants.VM_GEN_1
_VIRTUAL_SYSTEM_TYPE_REALIZED = 3
def setUp(self):
self._vmutils = vmutils.VMUtils()
self._vmutils._conn = mock.MagicMock()
super(VMUtilsTestCase, self).setUp()
def test_enable_vm_metrics_collection(self):
self.assertRaises(NotImplementedError,
self._vmutils.enable_vm_metrics_collection,
self._FAKE_VM_NAME)
def test_get_vm_summary_info(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_summary = mock.MagicMock()
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
[mock_summary])
for (key, val) in self._FAKE_SUMMARY_INFO.items():
setattr(mock_summary, key, val)
summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME)
self.assertEqual(self._FAKE_SUMMARY_INFO, summary)
def _lookup_vm(self):
mock_vm = mock.MagicMock()
self._vmutils._lookup_vm_check = mock.MagicMock(
return_value=mock_vm)
mock_vm.path_.return_value = self._FAKE_VM_PATH
return mock_vm
def test_lookup_vm_ok(self):
mock_vm = mock.MagicMock()
self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME)
self.assertEqual(mock_vm, vm)
def test_lookup_vm_multiple(self):
mockvm = mock.MagicMock()
self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm]
self.assertRaises(vmutils.HyperVException,
self._vmutils._lookup_vm_check,
self._FAKE_VM_NAME)
def test_lookup_vm_none(self):
self._vmutils._conn.Msvm_ComputerSystem.return_value = []
self.assertRaises(exception.NotFound,
self._vmutils._lookup_vm_check,
self._FAKE_VM_NAME)
def test_set_vm_memory_static(self):
self._test_set_vm_memory_dynamic(1.0)
def test_set_vm_memory_dynamic(self):
self._test_set_vm_memory_dynamic(2.0)
def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
mock_vm = self._lookup_vm()
mock_s = self._vmutils._conn.Msvm_VirtualSystemSettingData()[0]
mock_s.SystemType = 3
mock_vmsetting = mock.MagicMock()
mock_vmsetting.associators.return_value = [mock_s]
self._vmutils._modify_virt_resource = mock.MagicMock()
self._vmutils._set_vm_memory(mock_vm, mock_vmsetting,
self._FAKE_MEMORY_MB,
dynamic_memory_ratio)
self._vmutils._modify_virt_resource.assert_called_with(
mock_s, self._FAKE_VM_PATH)
if dynamic_memory_ratio > 1:
self.assertTrue(mock_s.DynamicMemoryEnabled)
else:
self.assertFalse(mock_s.DynamicMemoryEnabled)
def test_soft_shutdown_vm(self):
mock_vm = self._lookup_vm()
mock_shutdown = mock.MagicMock()
mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, )
mock_vm.associators.return_value = [mock_shutdown]
with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
mock_shutdown.InitiateShutdown.assert_called_once_with(
Force=False, Reason=mock.ANY)
mock_check.assert_called_once_with(self._FAKE_RET_VAL, None)
def test_soft_shutdown_vm_no_component(self):
mock_vm = self._lookup_vm()
mock_vm.associators.return_value = []
with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
self.assertFalse(mock_check.called)
@mock.patch('hyperv.nova.vmutils.VMUtils._get_vm_disks')
def test_get_vm_storage_paths(self, mock_get_vm_disks):
self._lookup_vm()
mock_rasds = self._create_mock_disks()
mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]])
storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
(disk_files, volume_drives) = storage
self.assertEqual([self._FAKE_VHD_PATH], disk_files)
self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
def test_get_vm_disks(self):
mock_vm = self._lookup_vm()
mock_vmsettings = [mock.MagicMock()]
mock_vm.associators.return_value = mock_vmsettings
mock_rasds = self._create_mock_disks()
mock_vmsettings[0].associators.return_value = mock_rasds
(disks, volumes) = self._vmutils._get_vm_disks(mock_vm)
mock_vm.associators.assert_called_with(
wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
mock_vmsettings[0].associators.assert_called_with(
wmi_result_class=self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS)
self.assertEqual([mock_rasds[0]], disks)
self.assertEqual([mock_rasds[1]], volumes)
def _create_mock_disks(self):
mock_rasd1 = mock.MagicMock()
mock_rasd1.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE
mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
mock_rasd1.Connection = [self._FAKE_VHD_PATH]
mock_rasd1.Parent = self._FAKE_CTRL_PATH
mock_rasd1.Address = self._FAKE_ADDRESS
mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
mock_rasd2 = mock.MagicMock()
mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
return [mock_rasd1, mock_rasd2]
@mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus')
@mock.patch.object(vmutils.VMUtils, '_set_vm_memory')
@mock.patch.object(vmutils.VMUtils, '_get_wmi_obj')
def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._DEFINE_SYSTEM).return_value = (
None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_vm = mock_get_wmi_obj.return_value
self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
mock_s = mock.MagicMock()
setattr(mock_s,
self._SETTING_TYPE,
self._VIRTUAL_SYSTEM_TYPE_REALIZED)
mock_vm.associators.return_value = [mock_s]
self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
self._FAKE_VCPUS_NUM, False,
self._FAKE_DYNAMIC_MEMORY_RATIO,
self._VM_GEN,
mock.sentinel.instance_path)
self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called)
mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB,
self._FAKE_DYNAMIC_MEMORY_RATIO)
mock_set_vcpus.assert_called_with(mock_vm, mock_s,
self._FAKE_VCPUS_NUM,
False)
def test_get_vm_scsi_controller(self):
self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
self.assertEqual(self._FAKE_RES_PATH, path)
@mock.patch("hyperv.nova.vmutils.VMUtils.get_attached_disks")
def test_get_free_controller_slot(self, mock_get_attached_disks):
with mock.patch.object(self._vmutils,
'_get_disk_resource_address') as mock_get_addr:
mock_get_addr.return_value = 3
mock_get_attached_disks.return_value = [mock.sentinel.disk]
response = self._vmutils.get_free_controller_slot(
self._FAKE_CTRL_PATH)
mock_get_attached_disks.assert_called_once_with(
self._FAKE_CTRL_PATH)
self.assertEqual(response, 0)
def test_get_free_controller_slot_exception(self):
mock_get_address = mock.Mock()
mock_get_address.side_effect = range(
constants.SCSI_CONTROLLER_SLOTS_NUMBER)
mock_get_attached_disks = mock.Mock()
mock_get_attached_disks.return_value = (
[mock.sentinel.drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
with mock.patch.multiple(self._vmutils,
get_attached_disks=mock_get_attached_disks,
_get_disk_resource_address=mock_get_address):
self.assertRaises(vmutils.HyperVException,
self._vmutils.get_free_controller_slot,
mock.sentinel.scsi_controller_path)
def test_get_vm_ide_controller(self):
self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
self._FAKE_ADDRESS)
self.assertEqual(self._FAKE_RES_PATH, path)
def test_get_vm_ide_controller_none(self):
self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_ide_controller(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_NOT_FOUND_ADDR)
self.assertNotEqual(self._FAKE_RES_PATH, path)
def _prepare_get_vm_controller(self, resource_sub_type):
mock_vm = self._lookup_vm()
mock_vm_settings = mock.MagicMock()
mock_rasds = mock.MagicMock()
mock_rasds.path_.return_value = self._FAKE_RES_PATH
mock_rasds.ResourceSubType = resource_sub_type
mock_rasds.Address = self._FAKE_ADDRESS
mock_vm_settings.associators.return_value = [mock_rasds]
mock_vm.associators.return_value = [mock_vm_settings]
def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings):
mock_rasds = mock_vm_settings.associators.return_value[0]
mock_rasds.path_.return_value = mock_path
mock_rasds.ResourceSubType = mock_subtype
return mock_rasds
@mock.patch("hyperv.nova.vmutils.VMUtils.get_free_controller_slot")
@mock.patch("hyperv.nova.vmutils.VMUtils._get_vm_scsi_controller")
def test_attach_scsi_drive(self, mock_get_vm_scsi_controller,
mock_get_free_controller_slot):
mock_vm = self._lookup_vm()
mock_get_vm_scsi_controller.return_value = self._FAKE_CTRL_PATH
mock_get_free_controller_slot.return_value = self._FAKE_DRIVE_ADDR
with mock.patch.object(self._vmutils,
'attach_drive') as mock_attach_drive:
self._vmutils.attach_scsi_drive(mock_vm, self._FAKE_PATH,
constants.DISK)
mock_get_vm_scsi_controller.assert_called_once_with(mock_vm)
mock_get_free_controller_slot.assert_called_once_with(
self._FAKE_CTRL_PATH)
mock_attach_drive.assert_called_once_with(
mock_vm, self._FAKE_PATH, self._FAKE_CTRL_PATH,
self._FAKE_DRIVE_ADDR, constants.DISK)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
@mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller')
def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd):
mock_vm = self._lookup_vm()
mock_rsd = mock_get_new_rsd.return_value
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_CTRL_ADDR,
self._FAKE_DRIVE_ADDR)
mock_add_virt_res.assert_called_with(mock_rsd,
mock_vm.path_.return_value)
mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR)
self.assertTrue(mock_get_new_rsd.called)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
def test_create_scsi_controller(self, mock_get_new_rsd):
mock_vm = self._lookup_vm()
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
mock_vm.path_.return_value)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
def test_attach_volume_to_controller(self, mock_get_new_rsd):
mock_vm = self._lookup_vm()
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.attach_volume_to_controller(
self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR,
self._FAKE_MOUNTED_DISK_PATH)
mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
mock_vm.path_.return_value)
@mock.patch.object(vmutils.VMUtils, '_modify_virt_resource')
@mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res):
self._lookup_vm()
mock_nic = mock_get_nic_conn.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_get_new_setting_data')
def test_create_nic(self, mock_get_new_virt_res):
self._lookup_vm()
mock_nic = mock_get_new_virt_res.return_value
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.create_nic(
self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS)
mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
def test_destroy_nic(self, mock_get_nic_data_by_name):
self._lookup_vm()
fake_nic_data = mock_get_nic_data_by_name.return_value
with mock.patch.object(self._vmutils,
'_remove_virt_resource') as mock_rem_virt_res:
self._vmutils.destroy_nic(self._FAKE_VM_NAME,
mock.sentinel.FAKE_NIC_NAME)
mock_rem_virt_res.assert_called_once_with(fake_nic_data,
self._FAKE_VM_PATH)
def test_set_vm_state(self):
mock_vm = self._lookup_vm()
mock_vm.RequestStateChange.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.set_vm_state(self._FAKE_VM_NAME,
constants.HYPERV_VM_STATE_ENABLED)
mock_vm.RequestStateChange.assert_called_with(
constants.HYPERV_VM_STATE_ENABLED)
def test_destroy_vm(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._DESTROY_SYSTEM).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.destroy_vm(self._FAKE_VM_NAME)
getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with(
self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_wait_for_job')
def test_check_ret_val_ok(self, mock_wait_for_job):
self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
self._FAKE_JOB_PATH)
mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH)
def test_check_ret_val_exception(self):
self.assertRaises(vmutils.HyperVException,
self._vmutils.check_ret_val,
self._FAKE_RET_VAL_BAD,
self._FAKE_JOB_PATH)
def test_wait_for_job_done(self):
mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED)
job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH)
self.assertEqual(mockjob, job)
def test_wait_for_job_exception_concrete_job(self):
mock_job = self._prepare_wait_for_job()
mock_job.path.return_value.Class = self._CONCRETE_JOB
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_exception_with_error(self):
mock_job = self._prepare_wait_for_job()
mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_exception_no_error(self):
mock_job = self._prepare_wait_for_job()
mock_job.GetError.return_value = (None, None)
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
mock_job = mock.MagicMock()
mock_job.JobState = state
mock_job.Description = self._FAKE_JOB_DESCRIPTION
mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job)
return mock_job
def test_add_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._ADD_RESOURCE).return_value = (
self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._add_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
self._assert_add_resources(mock_svc)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyVirtualSystemResources.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyVirtualSystemResources.assert_called_with(
ResourceSettingData=[self._FAKE_RES_DATA],
ComputerSystem=self._FAKE_VM_PATH)
def test_remove_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._REMOVE_RESOURCE).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
self._vmutils._remove_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
self._assert_remove_resources(mock_svc)
def test_set_disk_host_resource(self):
self._lookup_vm()
mock_rasds = self._create_mock_disks()
self._vmutils._get_vm_disks = mock.MagicMock(
return_value=([mock_rasds[0]], [mock_rasds[1]]))
self._vmutils._modify_virt_resource = mock.MagicMock()
self._vmutils._get_disk_resource_address = mock.MagicMock(
return_value=self._FAKE_ADDRESS)
self._vmutils.set_disk_host_resource(
self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_ADDRESS,
mock.sentinel.fake_new_mounted_disk_path)
self._vmutils._get_disk_resource_address.assert_called_with(
mock_rasds[0])
self._vmutils._modify_virt_resource.assert_called_with(
mock_rasds[0], self._FAKE_VM_PATH)
self.assertEqual(
mock.sentinel.fake_new_mounted_disk_path,
mock_rasds[0].HostResource[0])
@mock.patch.object(vmutils, 'wmi', create=True)
@mock.patch.object(vmutils.VMUtils, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateVirtualSystemSnapshot.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock())
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateVirtualSystemSnapshot.assert_called_with(
self._FAKE_VM_PATH)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
def test_remove_vm_snapshot(self):
mock_svc = self._get_snapshot_service()
getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with(
self._FAKE_SNAPSHOT_PATH)
def test_detach_vm_disk(self):
self._lookup_vm()
mock_disk = self._prepare_mock_disk()
with mock.patch.object(self._vmutils,
'_remove_virt_resource') as mock_rm_virt_res:
self._vmutils.detach_vm_disk(self._FAKE_VM_NAME,
self._FAKE_HOST_RESOURCE)
mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH)
def _test_get_mounted_disk_resource_from_path(self, is_physical):
mock_disk_1 = mock.MagicMock()
mock_disk_2 = mock.MagicMock()
conn_attr = (self._vmutils._PHYS_DISK_CONNECTION_ATTR if is_physical
else self._vmutils._VIRT_DISK_CONNECTION_ATTR)
setattr(mock_disk_2, conn_attr, [self._FAKE_MOUNTED_DISK_PATH])
self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
mounted_disk = self._vmutils._get_mounted_disk_resource_from_path(
self._FAKE_MOUNTED_DISK_PATH, is_physical)
self.assertEqual(mock_disk_2, mounted_disk)
def test_get_physical_mounted_disk_resource_from_path(self):
self._test_get_mounted_disk_resource_from_path(is_physical=True)
def test_get_virtual_mounted_disk_resource_from_path(self):
self._test_get_mounted_disk_resource_from_path(is_physical=False)
def test_get_controller_volume_paths(self):
self._prepare_mock_disk()
mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE}
disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH)
self.assertEqual(mock_disks, disks)
def _prepare_mock_disk(self):
mock_disk = mock.MagicMock()
mock_disk.HostResource = [self._FAKE_HOST_RESOURCE]
mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH
mock_disk.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE
self._vmutils._conn.query.return_value = [mock_disk]
return mock_disk
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
[self._FAKE_RES_DATA], self._FAKE_VM_PATH)
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH], self._FAKE_VM_PATH)
def test_get_active_instances(self):
fake_vm = mock.MagicMock()
type(fake_vm).ElementName = mock.PropertyMock(
side_effect=['active_vm', 'inactive_vm'])
type(fake_vm).EnabledState = mock.PropertyMock(
side_effect=[constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_DISABLED])
self._vmutils.list_instances = mock.MagicMock(
return_value=[mock.sentinel.fake_vm_name] * 2)
self._vmutils._lookup_vm = mock.MagicMock(side_effect=[fake_vm] * 2)
active_instances = self._vmutils.get_active_instances()
self.assertEqual(['active_vm'], active_instances)
def test_get_vm_serial_ports(self):
mock_vm = self._lookup_vm()
mock_vmsettings = [mock.MagicMock()]
mock_vm.associators.return_value = mock_vmsettings
fake_serial_port = mock.MagicMock()
fake_serial_port.ResourceSubType = (
self._vmutils._SERIAL_PORT_RES_SUB_TYPE)
mock_rasds = [fake_serial_port]
mock_vmsettings[0].associators.return_value = mock_rasds
ret_val = self._vmutils._get_vm_serial_ports(mock_vm)
mock_vmsettings[0].associators.assert_called_once_with(
wmi_result_class=self._vmutils._SERIAL_PORT_SETTING_DATA_CLASS)
self.assertEqual(mock_rasds, ret_val)
def test_set_vm_serial_port_conn(self):
mock_vm = self._lookup_vm()
mock_com_1 = mock.Mock()
mock_com_2 = mock.Mock()
self._vmutils._get_vm_serial_ports = mock.Mock(
return_value=[mock_com_1, mock_com_2])
self._vmutils._modify_virt_resource = mock.Mock()
self._vmutils.set_vm_serial_port_connection(
mock.sentinel.vm_name,
port_number=1,
pipe_path=mock.sentinel.pipe_path)
self.assertEqual([mock.sentinel.pipe_path], mock_com_1.Connection)
self._vmutils._modify_virt_resource.assert_called_once_with(
mock_com_1, mock_vm.path_())
def test_get_serial_port_conns(self):
self._lookup_vm()
mock_com_1 = mock.Mock()
mock_com_1.Connection = []
mock_com_2 = mock.Mock()
mock_com_2.Connection = [mock.sentinel.pipe_path]
self._vmutils._get_vm_serial_ports = mock.Mock(
return_value=[mock_com_1, mock_com_2])
ret_val = self._vmutils.get_vm_serial_port_connections(
mock.sentinel.vm_name)
expected_ret_val = [mock.sentinel.pipe_path]
self.assertEqual(expected_ret_val, ret_val)
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'}
vs.configure_mock(**attrs)
vs2 = mock.MagicMock(ElementName='fake_name2', Notes=None)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs,
vs2]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], [attrs['Notes']])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
@mock.patch('hyperv.nova.vmutils.VMUtils.check_ret_val')
def test_modify_virtual_system(self, mock_check_ret_val):
mock_vs_man_svc = mock.MagicMock()
mock_vmsetting = mock.MagicMock()
fake_path = 'fake path'
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
mock_vs_man_svc.ModifyVirtualSystem.return_value = (0, fake_job_path,
fake_ret_val)
self._vmutils._modify_virtual_system(vs_man_svc=mock_vs_man_svc,
vm_path=fake_path,
vmsetting=mock_vmsetting)
mock_vs_man_svc.ModifyVirtualSystem.assert_called_once_with(
ComputerSystem=fake_path,
SystemSettingData=mock_vmsetting.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
@mock.patch('hyperv.nova.vmutils.VMUtils.check_ret_val')
@mock.patch('hyperv.nova.vmutils.VMUtils._get_wmi_obj')
@mock.patch('hyperv.nova.vmutils.VMUtils._modify_virtual_system')
@mock.patch('hyperv.nova.vmutils.VMUtils._get_vm_setting_data')
def test_create_vm_obj(self, mock_get_vm_setting_data,
mock_modify_virtual_system,
mock_get_wmi_obj, mock_check_ret_val):
mock_vs_man_svc = mock.MagicMock()
mock_vs_gs_data = mock.MagicMock()
fake_vm_path = 'fake vm path'
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
_conn = self._vmutils._conn.Msvm_VirtualSystemGlobalSettingData
_conn.new.return_value = mock_vs_gs_data
mock_vs_man_svc.DefineVirtualSystem.return_value = (fake_vm_path,
fake_job_path,
fake_ret_val)
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name='fake vm', vm_gen='fake vm gen',
notes='fake notes', dynamic_memory_ratio=1.0,
instance_path=mock.sentinel.instance_path)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_gs_data.ElementName, 'fake vm')
mock_vs_man_svc.DefineVirtualSystem.assert_called_once_with(
[], None, mock_vs_gs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_gs_data.ExternalDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_gs_data.SnapshotDataRoot)
mock_get_wmi_obj.assert_called_with(fake_vm_path)
mock_get_vm_setting_data.assert_called_once_with(mock_get_wmi_obj())
mock_modify_virtual_system.assert_called_once_with(
mock_vs_man_svc, fake_vm_path, mock_get_vm_setting_data())
self.assertEqual(mock_get_vm_setting_data().Notes,
'\n'.join('fake notes'))
self.assertEqual(response, mock_get_wmi_obj())
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
@mock.patch.object(vmutils.VMUtils, "_clone_wmi_obj")
def _test_check_clone_wmi_obj(self, mock_clone_wmi_obj, clone_objects):
mock_obj = mock.MagicMock()
self._vmutils._clone_wmi_objs = clone_objects
response = self._vmutils._check_clone_wmi_obj(class_name="fakeClass",
obj=mock_obj)
if not clone_objects:
self.assertEqual(mock_obj, response)
else:
mock_clone_wmi_obj.assert_called_once_with("fakeClass", mock_obj)
self.assertEqual(mock_clone_wmi_obj.return_value, response)
def test_check_clone_wmi_obj_true(self):
self._test_check_clone_wmi_obj(clone_objects=True)
def test_check_clone_wmi_obj_false(self):
self._test_check_clone_wmi_obj(clone_objects=False)
def test_clone_wmi_obj(self):
mock_obj = mock.MagicMock()
mock_value = mock.MagicMock()
mock_value.Value = mock.sentinel.fake_value
mock_obj._properties = [mock.sentinel.property]
mock_obj.Properties_.Item.return_value = mock_value
response = self._vmutils._clone_wmi_obj(
class_name="FakeClass", obj=mock_obj)
compare = self._vmutils._conn.FakeClass.new()
self.assertEqual(mock.sentinel.fake_value,
compare.Properties_.Item().Value)
self.assertEqual(compare, response)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s')"
" AND Parent='%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"parent":
mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)
def _get_fake_instance_notes(self):
return self._FAKE_VM_UUID
def test_instance_notes(self):
self._lookup_vm()
mock_vm_settings = mock.Mock()
mock_vm_settings.Notes = self._get_fake_instance_notes()
self._vmutils._get_vm_setting_data = mock.Mock(
return_value=mock_vm_settings)
notes = self._vmutils._get_instance_notes(mock.sentinel.vm_name)
self.assertEqual(notes[0], self._FAKE_VM_UUID)
def test_get_event_wql_query(self):
cls = self._vmutils._COMPUTER_SYSTEM_CLASS
field = self._vmutils._VM_ENABLED_STATE_PROP
timeframe = 10
filtered_states = [constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_DISABLED]
expected_checks = ' OR '.join(
["TargetInstance.%s = '%s'" % (field, state)
for state in filtered_states])
expected_query = (
"SELECT %(field)s, TargetInstance "
"FROM __InstanceModificationEvent "
"WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' "
"AND TargetInstance.%(field)s != "
"PreviousInstance.%(field)s "
"AND (%(checks)s)" %
{'class': cls,
'field': field,
'timeframe': timeframe,
'checks': expected_checks})
query = self._vmutils._get_event_wql_query(
cls=cls, field=field, timeframe=timeframe,
filtered_states=filtered_states)
self.assertEqual(expected_query, query)
def test_get_vm_power_state_change_listener(self):
with mock.patch.object(self._vmutils,
'_get_event_wql_query') as mock_get_query:
listener = self._vmutils.get_vm_power_state_change_listener(
mock.sentinel.timeframe,
mock.sentinel.filtered_states)
mock_get_query.assert_called_once_with(
cls=self._vmutils._COMPUTER_SYSTEM_CLASS,
field=self._vmutils._VM_ENABLED_STATE_PROP,
timeframe=mock.sentinel.timeframe,
filtered_states=mock.sentinel.filtered_states)
watcher = self._vmutils._conn.Msvm_ComputerSystem.watch_for
watcher.assert_called_once_with(
raw_wql=mock_get_query.return_value,
fields=[self._vmutils._VM_ENABLED_STATE_PROP])
self.assertEqual(watcher.return_value, listener)
|
|
import uuid
from datetime import date, datetime
from unittest.mock import MagicMock, patch
from django.test import TestCase
from django.test.testcases import SimpleTestCase
from corehq.apps.case_search.const import IS_RELATED_CASE, RELEVANCE_SCORE
from corehq.apps.case_search.models import (
CaseSearchConfig,
)
from corehq.apps.es import queries
from corehq.apps.es.case_search import (
CaseSearchES,
case_property_missing,
case_property_range_query,
case_property_text_query,
flatten_result,
wrap_case_search_hit,
)
from corehq.apps.es.tests.utils import ElasticTestMixin, es_test
from corehq.elastic import SIZE_LIMIT, get_es_new
from corehq.form_processor.models import CommCareCaseIndex
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.pillows.case_search import CaseSearchReindexerFactory
from corehq.pillows.mappings.case_search_mapping import (
CASE_SEARCH_INDEX,
CASE_SEARCH_INDEX_INFO,
)
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import create_and_save_a_case
from pillowtop.es_utils import initialize_index_and_mapping
@es_test
class TestCaseSearchES(ElasticTestMixin, SimpleTestCase):
def setUp(self):
self.es = CaseSearchES()
def test_simple_case_property_query(self):
json_output = {
"query": {
"bool": {
"filter": [
{
"term": {
"domain.exact": "swashbucklers"
}
},
{
"match_all": {}
}
],
"must": {
"bool": {
"must": [
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "name"
}
},
{
"term": {
"case_properties.value.exact": "redbeard"
}
}
]
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
]
}
}
}
},
"size": SIZE_LIMIT
}
query = self.es.domain('swashbucklers').case_property_query("name", "redbeard")
self.checkQuery(query, json_output, validate_query=False)
def test_multiple_case_search_queries(self):
json_output = {
"query": {
"bool": {
"filter": [
{
"term": {
"domain.exact": "swashbucklers"
}
},
{
"match_all": {}
}
],
"must": {
"bool": {
"must": [
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "name"
}
},
{
"term": {
"case_properties.value.exact": "redbeard"
}
}
]
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
],
"should": [
{
"bool": {
"should": [
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "parrot_name"
}
}
],
"must": {
"match": {
"case_properties.value": {
"query": "polly",
"fuzziness": "AUTO"
}
}
}
}
}
}
},
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "parrot_name"
}
}
],
"must": {
"match": {
"case_properties.value": {
"query": "polly",
"fuzziness": "0"
}
}
}
}
}
}
}
]
}
}
]
}
}
}
},
"size": SIZE_LIMIT
}
query = (self.es.domain('swashbucklers')
.case_property_query("name", "redbeard")
.case_property_query("parrot_name", "polly", clause="should", fuzzy=True))
self.checkQuery(query, json_output, validate_query=False)
def test_flatten_result(self):
expected = {'name': 'blah', 'foo': 'bar', 'baz': 'buzz', RELEVANCE_SCORE: "1.095"}
self.assertEqual(
flatten_result(
{
"_score": "1.095",
"_source": {
'name': 'blah',
'case_properties': [
{'key': '@case_id', 'value': 'should be removed'},
{'key': 'name', 'value': 'should be removed'},
{'key': 'case_name', 'value': 'should be removed'},
{'key': 'last_modified', 'value': 'should be removed'},
{'key': 'foo', 'value': 'bar'},
{'key': 'baz', 'value': 'buzz'}]
}
},
include_score=True
),
expected
)
def test_blacklisted_owner_ids(self):
query = self.es.domain('swashbucklers').blacklist_owner_id('123').owner('234')
expected = {
'query': {
'bool': {
'filter': [
{'term': {'domain.exact': 'swashbucklers'}},
{'bool': {'must_not': {'term': {'owner_id': '123'}}}},
{'term': {'owner_id': '234'}},
{'match_all': {}}
],
'must': {'match_all': {}},
},
},
'size': SIZE_LIMIT,
}
self.checkQuery(query, expected, validate_query=False)
class TestCaseSearchHitConversions(SimpleTestCase):
maxDiff = None
def test_wrap_case_search_hit(self):
case = wrap_case_search_hit(self.make_hit())
self.assertEqual(case.case_id, '2a3341db-0ca4-444b-a44c-3bde3a16954e')
self.assertEqual(case.closed, False)
self.assertEqual(case.closed_by, None)
self.assertEqual(case.closed_on, None)
self.assertEqual(case.doc_type, 'CommCareCase')
self.assertEqual(case.domain, 'healsec')
self.assertEqual(case.external_id, None)
self.assertEqual(case.location_id, None)
self.assertEqual(case.modified_on, datetime(2019, 6, 21, 17, 32, 48))
self.assertEqual(case.name, 'blah')
self.assertEqual(case.opened_by, '29383d6a335847f985aeeeca94031f82')
self.assertEqual(case.opened_on, datetime(2019, 6, 21, 17, 31, 18, 349000))
self.assertEqual(case.owner_id, '29383d6a335847f985aeeeca94031f82')
self.assertEqual(case.server_modified_on, datetime(2019, 6, 21, 17, 32, 48, 437901))
self.assertEqual(case.type, 'mother')
self.assertEqual(case.user_id, '29383d6a335847f985aeeeca94031f82')
self.assertEqual(case.indices, [
CommCareCaseIndex(
case_id=case.case_id,
domain='healsec',
identifier='host',
referenced_type='person',
referenced_id='abc123',
relationship_id=CommCareCaseIndex.EXTENSION,
)
])
self.assertEqual(case.case_json, {
'closed': 'nope',
'doc_type': 'frankle',
'domain': 'batter',
'foo': 'bar',
'baz': 'buzz',
})
def test_wrap_case_search_hit_include_score(self):
case = wrap_case_search_hit(self.make_hit(), include_score=True)
self.assertEqual(case.case_json[RELEVANCE_SCORE], "1.095")
def test_wrap_case_search_hit_is_related_case(self):
case = wrap_case_search_hit(self.make_hit(), is_related_case=True)
self.assertEqual(case.case_json[IS_RELATED_CASE], 'true')
@staticmethod
def make_hit():
return {
"_score": "1.095",
"_source": {
'_id': '2a3341db-0ca4-444b-a44c-3bde3a16954e',
'closed': False,
'closed_by': None,
'closed_on': None,
'doc_type': 'CommCareCase',
'domain': 'healsec',
'external_id': None,
'location_id': None,
'modified_on': '2019-06-21T17:32:48Z',
'name': 'blah',
'opened_by': '29383d6a335847f985aeeeca94031f82',
'opened_on': '2019-06-21T17:31:18.349000Z',
'owner_id': '29383d6a335847f985aeeeca94031f82',
'server_modified_on': '2019-06-21T17:32:48.437901Z',
'type': 'mother',
'user_id': '29383d6a335847f985aeeeca94031f82',
'@indexed_on': '2020-04-18T12:34:56.332000Z',
'indices': [
{
'case_id': '2a3341db-0ca4-444b-a44c-3bde3a16954e',
'domain': 'healsec',
'identifier': 'host',
'referenced_type': 'person',
'referenced_id': 'abc123',
'relationship': 'extension',
},
],
'case_properties': [
{'key': '@case_id', 'value': '2a3341db-0ca4-444b-a44c-3bde3a16954e'},
{'key': '@case_type', 'value': 'mother'},
{'key': '@owner_id', 'value': '29383d6a335847f985aeeeca94031f82'},
{'key': '@status', 'value': 'open'},
{'key': 'name', 'value': 'blah'},
{'key': 'case_name', 'value': 'blah'},
{'key': 'external_id', 'value': None},
{'key': 'date_opened', 'value': '2019-06-21T17:31:18.349000Z'},
{'key': 'closed_on', 'value': None},
{'key': 'last_modified', 'value': '2019-06-21T17:32:48.332000Z'},
{'key': 'closed', 'value': 'nope'},
{'key': 'doc_type', 'value': 'frankle'},
{'key': 'domain', 'value': 'batter'},
{'key': 'foo', 'value': 'bar'},
{'key': 'baz', 'value': 'buzz'},
],
},
}
@es_test
class BaseCaseSearchTest(TestCase):
def setUp(self):
self.domain = 'case_search_es'
self.case_type = 'person'
super(BaseCaseSearchTest, self).setUp()
FormProcessorTestUtils.delete_all_cases()
self.elasticsearch = get_es_new()
ensure_index_deleted(CASE_SEARCH_INDEX)
# Bootstrap ES
initialize_index_and_mapping(get_es_new(), CASE_SEARCH_INDEX_INFO)
def tearDown(self):
ensure_index_deleted(CASE_SEARCH_INDEX)
super(BaseCaseSearchTest, self).tearDown()
def _make_case(self, domain, case_properties, index=None):
# make a case
case_properties = case_properties or {}
case_id = case_properties.pop('_id')
case_type = case_properties.pop('case_type', self.case_type)
case_name = 'case-name-{}'.format(uuid.uuid4().hex)
owner_id = case_properties.pop('owner_id', None)
case = create_and_save_a_case(
domain, case_id, case_name, case_properties, owner_id=owner_id, case_type=case_type, index=index
)
return case
def _bootstrap_cases_in_es_for_domain(self, domain, input_cases):
for case in input_cases:
index = case.pop('index', None)
self._make_case(domain, case, index=index)
with patch('corehq.pillows.case_search.domains_needing_search_index',
MagicMock(return_value=[domain])):
CaseSearchReindexerFactory(domain=domain).build().reindex()
self.elasticsearch.indices.refresh(CASE_SEARCH_INDEX)
def _assert_query_runs_correctly(self, domain, input_cases, query, xpath_query, output):
self._bootstrap_cases_in_es_for_domain(domain, input_cases)
self.assertItemsEqual(
query.get_ids(),
output
)
if xpath_query:
self.assertItemsEqual(
CaseSearchES().xpath_query(self.domain, xpath_query).get_ids(),
output
)
def _create_case_search_config(self):
config, _ = CaseSearchConfig.objects.get_or_create(pk=self.domain, enabled=True)
self.addCleanup(config.delete)
return config
class TestCaseSearchLookups(BaseCaseSearchTest):
def test_simple_case_property_query(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'foo': 'redbeard'},
{'_id': 'c2', 'foo': 'blackbeard'},
],
CaseSearchES().domain(self.domain).case_property_query("foo", "redbeard"),
"foo = 'redbeard'",
['c1']
)
def test_fuzzy_case_property_query(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'foo': 'redbeard'},
{'_id': 'c2', 'foo': 'blackbeard'},
],
CaseSearchES().domain(self.domain).case_property_query("foo", "backbeard", fuzzy=True),
None,
['c2']
)
def test_regex_case_property_query(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'foo': 'redbeard'},
{'_id': 'c2', 'foo': 'blackbeard'},
{'_id': 'c3', 'foo': 'redblack'},
],
CaseSearchES().domain(self.domain).regexp_case_property_query("foo", ".*beard.*"),
None,
['c1', 'c2']
)
def test_multiple_case_search_queries(self):
query = (CaseSearchES().domain(self.domain)
.case_property_query("foo", "redbeard")
.case_property_query("parrot_name", "polly"))
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'foo': 'redbeard', 'parrot_name': 'polly'},
{'_id': 'c2', 'foo': 'blackbeard', 'parrot_name': 'polly'},
{'_id': 'c3', 'foo': 'redbeard', 'parrot_name': 'molly'}
],
query,
"foo = 'redbeard' and parrot_name = 'polly'",
['c1']
)
def test_multiple_case_search_queries_should_clause(self):
query = (CaseSearchES().domain(self.domain)
.case_property_query("foo", "redbeard")
.case_property_query("parrot_name", "polly", clause="should"))
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'foo': 'redbeard', 'parrot_name': 'polly'},
{'_id': 'c2', 'foo': 'blackbeard', 'parrot_name': 'polly'},
{'_id': 'c3', 'foo': 'redbeard', 'parrot_name': 'molly'}
],
query,
None,
['c1', 'c3']
)
def test_blacklisted_owner_ids(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'owner_id': '123'},
{'_id': 'c2', 'owner_id': '234'},
],
CaseSearchES().domain(self.domain).blacklist_owner_id('123'),
None,
['c2']
)
def test_missing_case_property(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c2', 'foo': 'blackbeard'},
{'_id': 'c3', 'foo': ''},
{'_id': 'c4'},
],
CaseSearchES().domain(self.domain).filter(case_property_missing('foo')),
"foo = ''",
['c3', 'c4']
)
def test_full_text_query(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'description': 'redbeards are red'},
{'_id': 'c2', 'description': 'blackbeards are black'},
],
CaseSearchES().domain(self.domain).filter(case_property_text_query('description', 'red')),
None,
['c1']
)
def test_numeric_range_query(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'num': '1'},
{'_id': 'c2', 'num': '2'},
{'_id': 'c3', 'num': '3'},
{'_id': 'c4', 'num': '4'},
],
CaseSearchES().domain(self.domain).numeric_range_case_property_query('num', gte=2, lte=3),
'num <= 3 and num >= 2',
['c2', 'c3']
)
def test_date_range_query(self):
self._assert_query_runs_correctly(
self.domain,
[
{'_id': 'c1', 'dob': date(2020, 3, 1)},
{'_id': 'c2', 'dob': date(2020, 3, 2)},
{'_id': 'c3', 'dob': date(2020, 3, 3)},
{'_id': 'c4', 'dob': date(2020, 3, 4)},
],
CaseSearchES().domain(self.domain).add_query(
case_property_range_query('dob', gte='2020-03-02', lte='2020-03-03'),
clause=queries.MUST
),
"dob >= '2020-03-02' and dob <= '2020-03-03'",
['c2', 'c3']
)
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import os
import typing
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
from typing import Tuple
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.extractors import EntityExtractor
from rasa_nlu.model import Metadata
from rasa_nlu.tokenizers import Token
from rasa_nlu.training_data import Message
from rasa_nlu.training_data import TrainingData
from builtins import str
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from spacy.language import Language
import sklearn_crfsuite
from spacy.tokens import Doc
class CRFEntityExtractor(EntityExtractor):
name = "ner_crf"
provides = ["entities"]
requires = ["spacy_doc", "tokens"]
function_dict = {
'low': lambda doc: doc[0].lower(),
'title': lambda doc: doc[0].istitle(),
'word3': lambda doc: doc[0][-3:],
'word2': lambda doc: doc[0][-2:],
'pos': lambda doc: doc[1],
'pos2': lambda doc: doc[1][:2],
'bias': lambda doc: 'bias',
'upper': lambda doc: doc[0].isupper(),
'digit': lambda doc: doc[0].isdigit(),
'pattern': lambda doc: str(doc[3]) if doc[3] is not None else 'N/A',
}
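# Note: each `doc` passed to the lambdas above appears to be a per-token tuple
# of roughly (token_text, pos_tag, entity_label, pattern): text features read
# doc[0], part-of-speech features read doc[1], and the pattern feature reads
# doc[3]. This layout is inferred from the lambdas themselves and is not
# guaranteed by this excerpt.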
def __init__(self, ent_tagger=None, entity_crf_features=None, entity_crf_BILOU_flag=True):
# type: (sklearn_crfsuite.CRF, List[List[Text]], bool) -> None
self.ent_tagger = ent_tagger
# BILOU_flag determines whether to use BILOU tagging or not.
# It is more rigorous, but requires more examples per entity
# (rule of thumb: use it only with more than 100 examples per entity).
self.BILOU_flag = entity_crf_BILOU_flag
if not entity_crf_features:
# crf_features is a [before, word, after] array, where each element lists the
# feature keys to use for that position. For example, 'title' in the 'before'
# list adds the feature "is the preceding word in title case?".
self.crf_features = [
['low', 'title', 'upper', 'pos', 'pos2'],
['bias', 'low', 'word3', 'word2', 'upper', 'title', 'digit', 'pos', 'pos2', 'pattern'],
['low', 'title', 'upper', 'pos', 'pos2']
]
else:
self.crf_features = entity_crf_features
@classmethod
def required_packages(cls):
return ["sklearn_crfsuite", "sklearn", "spacy"]
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUConfig) -> None
self.BILOU_flag = config["entity_crf_BILOU_flag"]
self.crf_features = config["entity_crf_features"]
if training_data.entity_examples:
# convert the dataset into features
dataset = self._create_dataset(training_data.entity_examples)
# train the model
self._train_model(dataset)
def _create_dataset(self, examples):
# type: (List[Message]) -> List[List[Tuple[Text, Text, Text, Text]]]
dataset = []
for example in examples:
entity_offsets = self._convert_example(example)
dataset.append(self._from_json_to_crf(example, entity_offsets))
return dataset
def test(self, testing_data):
# type: (TrainingData) -> None
if testing_data.num_entity_examples > 0:
dataset = self._create_dataset(testing_data.entity_examples)
self._test_model(dataset)
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
extracted = self.add_extractor_name(self.extract_entities(message))
message.set("entities", message.get("entities", []) + extracted, add_to_output=True)
def _convert_example(self, example):
# type: (Message) -> List[Tuple[int, int, Text]]
def convert_entity(ent):
return ent["start"], ent["end"], ent["entity"]
return [convert_entity(ent) for ent in example.get("entities", [])]
def extract_entities(self, message):
# type: (Message) -> List[Dict[Text, Any]]
"""Take a sentence and return entities in json format"""
if self.ent_tagger is not None:
text_data = self._from_text_to_crf(message)
features = self._sentence_to_features(text_data)
ents = self.ent_tagger.predict_single(features)
return self._from_crf_to_json(message, ents)
else:
return []
def _from_crf_to_json(self, message, entities):
# type: (Message, List[Any]) -> List[Dict[Text, Any]]
sentence_doc = message.get("spacy_doc")
json_ents = []
if len(sentence_doc) != len(entities):
raise Exception('Inconsistency in the number of tokens between crfsuite and spacy')
if self.BILOU_flag:
# using the BILOU tagging scheme
for word_idx in range(len(sentence_doc)):
entity = entities[word_idx]
word = sentence_doc[word_idx]
if entity.startswith('U-'):
ent = {'start': word.idx, 'end': word.idx + len(word),
'value': word.text, 'entity': entity[2:]}
json_ents.append(ent)
elif entity.startswith('B-'):
# start of a multi-word entity, need to represent whole extent
ent_word_idx = word_idx + 1
finished = False
while not finished:
if len(entities) > ent_word_idx and entities[ent_word_idx][2:] != entity[2:]:
# words are not tagged the same entity class
logger.debug(
"Inconsistent BILOU tagging found, B- tag, L- tag pair encloses multiple " +
"entity classes.i.e. ['B-a','I-b','L-a'] instead of ['B-a','I-a','L-a'].\n" +
"Assuming B- class is correct.")
if len(entities) > ent_word_idx and entities[ent_word_idx].startswith('L-'):
# end of the entity
finished = True
elif len(entities) > ent_word_idx and entities[ent_word_idx].startswith('I-'):
# middle part of the entity
ent_word_idx += 1
else:
# entity not closed by an L- tag
finished = True
ent_word_idx -= 1
logger.debug(
"Inconsistent BILOU tagging found, B- tag not closed by L- tag, " +
"i.e ['B-a','I-a','O'] instead of ['B-a','L-a','O'].\nAssuming last tag is L-")
ent = {'start': word.idx,
'end': sentence_doc[word_idx:ent_word_idx + 1].end_char,
'value': sentence_doc[word_idx:ent_word_idx + 1].text,
'entity': entity[2:]}
json_ents.append(ent)
elif not self.BILOU_flag:
# not using BILOU tagging scheme, multi-word entities are split.
for word_idx in range(len(sentence_doc)):
entity = entities[word_idx]
word = sentence_doc[word_idx]
if entity != 'O':
ent = {'start': word.idx,
'end': word.idx + len(word),
'value': word.text,
'entity': entity}
json_ents.append(ent)
return json_ents
@classmethod
def load(cls, model_dir, model_metadata, cached_component, **kwargs):
# type: (Text, Metadata, Optional[CRFEntityExtractor], **Any) -> CRFEntityExtractor
from sklearn.externals import joblib
if model_dir and model_metadata.get("entity_extractor_crf"):
meta = model_metadata.get("entity_extractor_crf")
ent_tagger = joblib.load(os.path.join(model_dir, meta["model_file"]))
return CRFEntityExtractor(ent_tagger=ent_tagger,
entity_crf_features=meta['crf_features'],
entity_crf_BILOU_flag=meta['BILOU_flag'])
else:
return CRFEntityExtractor()
def persist(self, model_dir):
# type: (Text) -> Dict[Text, Any]
"""Persist this model into the passed directory. Returns the metadata necessary to load the model again."""
from sklearn.externals import joblib
if self.ent_tagger:
model_file_name = os.path.join(model_dir, "crf_model.pkl")
joblib.dump(self.ent_tagger, model_file_name)
return {"entity_extractor_crf": {"model_file": "crf_model.pkl",
"crf_features": self.crf_features,
"BILOU_flag": self.BILOU_flag,
"version": 1}}
else:
return {"entity_extractor_crf": None}
def _sentence_to_features(self, sentence):
# type: (List[Tuple[Text, Text, Text, Text]]) -> List[Dict[Text, Any]]
"""Convert a word into discrete features in self.crf_features, including word before and word after."""
sentence_features = []
for word_idx in range(len(sentence)):
# word before(-1), current word(0), next word(+1)
prefixes = ['-1', '0', '+1']
word_features = {}
for i in range(3):
if word_idx == len(sentence) - 1 and i == 2:
word_features['EOS'] = True
# End Of Sentence
elif word_idx == 0 and i == 0:
word_features['BOS'] = True
# Beginning Of Sentence
else:
word = sentence[word_idx - 1 + i]
prefix = prefixes[i]
features = self.crf_features[i]
for feature in features:
# append each feature to a feature vector
# word_features.append(prefix + feature + ':' + self.function_dict[feature](word))
word_features[prefix + ":" + feature] = self.function_dict[feature](word)
sentence_features.append(word_features)
return sentence_features
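# Example of a single word_features entry produced by _sentence_to_features
# above (token values are illustrative):
#   {'BOS': True, '0:low': 'flight', '0:pos': 'NN', '+1:low': 'to', ...}
# Keys are '<prefix>:<feature>' with prefixes '-1', '0' and '+1', plus the
# special 'BOS'/'EOS' markers for sentence boundaries.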
def _sentence_to_labels(self, sentence):
# type: (List[Tuple[Text, Text, Text, Text]]) -> List[Text]
return [label for _, _, label, _ in sentence]
def _from_json_to_crf(self, message, entity_offsets):
# type: (Message, List[Tuple[int, int, Text]]) -> List[Tuple[Text, Text, Text, Text]]
"""Takes the json examples and switches them to a format which crfsuite likes."""
from spacy.gold import GoldParse
doc = message.get("spacy_doc")
gold = GoldParse(doc, entities=entity_offsets)
ents = [l[5] for l in gold.orig_annot]
if '-' in ents:
logger.warn("Misaligned entity annotation in sentence '{}'. ".format(doc.text) +
"Make sure the start and end values of the annotated training " +
"examples end at token boundaries (e.g. don't include trailing whitespaces).")
if not self.BILOU_flag:
for i, entity in enumerate(ents):
if entity.startswith('B-') or \
entity.startswith('I-') or \
entity.startswith('U-') or \
entity.startswith('L-'):
ents[i] = entity[2:] # removes the BILOU tags
return self._from_text_to_crf(message, ents)
def __pattern_of_token(self, message, i):
if message.get("tokens"):
return message.get("tokens")[i].get("pattern")
else:
return None
def _from_text_to_crf(self, message, entities=None):
# type: (Message, List[Text]) -> List[Tuple[Text, Text, Text, Text]]
"""Takes a sentence and switches it to crfsuite format."""
crf_format = []
for i, token in enumerate(message.get("spacy_doc")):
pattern = self.__pattern_of_token(message, i)
entity = entities[i] if entities else "N/A"
crf_format.append((token.text, token.tag_, entity, pattern))
return crf_format
def _train_model(self, df_train):
# type: (List[List[Tuple[Text, Text, Text, Text]]]) -> None
"""Train the crf tagger based on the training data."""
import sklearn_crfsuite
X_train = [self._sentence_to_features(sent) for sent in df_train]
y_train = [self._sentence_to_labels(sent) for sent in df_train]
self.ent_tagger = sklearn_crfsuite.CRF(
algorithm='lbfgs',
c1=1.0, # coefficient for L1 penalty
c2=1e-3, # coefficient for L2 penalty
max_iterations=50, # stop earlier
all_possible_transitions=True # include transitions that are possible, but not observed
)
self.ent_tagger.fit(X_train, y_train)
def _test_model(self, df_test):
# type: (List[List[Tuple[Text, Text, Text, Text]]]) -> None
X_test = [self._sentence_to_features(sent) for sent in df_test]
y_test = [self._sentence_to_labels(sent) for sent in df_test]
y_pred = [self.ent_tagger.predict_single(xseq) for xseq in X_test]
print(bio_classification_report(y_test, y_pred))
def bio_classification_report(y_true, y_pred):
"""Evaluates entity extraction accuracy.
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
Taken from https://github.com/scrapinghub/python-crfsuite/blob/master/examples/CoNLL%202002.ipynb
"""
from sklearn.preprocessing import LabelBinarizer
from itertools import chain
from sklearn.metrics import classification_report
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_) - {'O'}
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(
y_true_combined,
y_pred_combined,
labels=[class_indices[cls] for cls in tagset],
target_names=tagset,
)
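# Illustrative usage sketch (not part of the original module), assuming
# `training_data`, `config` and `message` are prepared by the surrounding
# rasa_nlu pipeline (config must carry the entity_crf_* keys read in train(),
# and message must already hold the spacy_doc and tokens this component
# requires). Only methods defined in this file are called.
def _example_train_persist_and_process(training_data, config, message, model_dir):
    extractor = CRFEntityExtractor()
    extractor.train(training_data, config)    # fits the sklearn-crfsuite tagger
    metadata = extractor.persist(model_dir)   # writes crf_model.pkl, returns load metadata
    extractor.process(message)                # appends extracted "entities" to the message
    return metadata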
|
|
from troposphere import rds, Ref, Sub, ec2, GetAZs, Select, Output, Export, GetAtt
from . import ExportTemplate
class Network(ExportTemplate):
def __init__(self, configuration=None, description="An Export Template", metadata=None):
super(Network, self).__init__(configuration, description, metadata or {})
self.vpc = self.add_resource(ec2.VPC("Vpc",
CidrBlock="10.0.0.0/16",
EnableDnsSupport=True,
EnableDnsHostnames=True
))
self.gateways = {}
self.zones = []
self.db_subnet_groups = {}
self.zone_multiplier = 1
igw = self.add_resource(ec2.InternetGateway(
"Igw",
))
igw_attachment = self.add_resource(ec2.VPCGatewayAttachment(
"IgwAttachment",
VpcId=Ref(self.vpc),
InternetGatewayId=Ref(igw),
))
self.public_route_table = self.add_resource(ec2.RouteTable(
"PublicRouteTable",
VpcId=Ref(self.vpc),
))
public_route = self.add_resource(ec2.Route(
"PublicRoute",
DependsOn=[igw_attachment.title],
DestinationCidrBlock="0.0.0.0/0",
GatewayId=Ref(igw),
RouteTableId=Ref(self.public_route_table),
))
self.add_output(Output(
"Vpc",
Value=Ref(self.vpc),
Export=Export(
Sub("${AWS::StackName}-Vpc")
)
))
def get_zone(self, title):
for z in self.zones:
if title == z.title:
return z
return None
def add_zone(self, zone, azs=3):
azs -= 1
z = zone
if self.get_zone(z.title) is not None:
raise NameError("Zone with the name {} already added".format(z.title))
self.zones.append(zone)
if isinstance(z, PublicZone):
for k, v in [('a', 0), ('b', 1), ('c', 2)]:
if v > azs:
continue
z.subnets.append(self.add_resource(ec2.Subnet(
"{}{}".format(z.title, k.capitalize()),
AvailabilityZone=Select(v, GetAZs()),
CidrBlock="10.0.{}.0/24".format(v * self.zone_multiplier),
MapPublicIpOnLaunch=z.public,
VpcId=Ref(self.vpc),
)))
self.add_output(Output(
"{}{}".format(z.title, k.capitalize()),
Value=Ref(z.subnets[v]),
Export=Export(
Sub("${AWS::StackName}-" + z.title + k.capitalize())
)
))
for s in z.subnets:
self.add_resource(ec2.SubnetRouteTableAssociation(
"Assoc{}".format(s.title),
RouteTableId=Ref(self.public_route_table),
SubnetId=Ref(s)
))
elif isinstance(z, DbZone):
for k, v in [('a', 0), ('b', 1), ('c', 2)]:
if v > azs:
continue
z.subnets.append(self.add_resource(ec2.Subnet(
"{}{}".format(z.title, k.capitalize()),
AvailabilityZone=Select(v, GetAZs()),
CidrBlock="10.0.{}.0/24".format(v + self.zone_multiplier),
MapPublicIpOnLaunch=z.public,
VpcId=Ref(self.vpc),
)))
self.add_output(Output(
"{}{}".format(z.title, k.capitalize()),
Value=Ref(z.subnets[v]),
Export=Export(
Sub("${AWS::StackName}-" + z.title + k.capitalize())
)
))
db_subnet_group = self.add_resource(
z.get_db_subnet_group()
)
self.add_output(Output(
db_subnet_group.title,
Value=Ref(db_subnet_group),
Export=Export(
Sub("${AWS::StackName}-" + db_subnet_group.title)
)
))
self.db_subnet_groups[db_subnet_group.title] = db_subnet_group
elif isinstance(z, Zone):
for k, v in [('a', 0), ('b', 1), ('c', 2)]:
if v > azs:
continue
z.subnets.append(self.add_resource(ec2.Subnet(
"{}{}".format(z.title, k.capitalize()),
AvailabilityZone=Select(v, GetAZs()),
CidrBlock="10.0.{}.0/24".format(v + self.zone_multiplier),
MapPublicIpOnLaunch=z.public,
VpcId=Ref(self.vpc),
)))
self.add_output(Output(
"{}{}".format(z.title, k.capitalize()),
Value=Ref(z.subnets[v]),
Export=Export(
Sub("${AWS::StackName}-" + z.title + k.capitalize())
)
))
self.zone_multiplier += (1 * (azs + 1))
def create_nat_in_zone(self, title):
zone = self.get_zone(title)
self.gateways["nat"] = []
for s in zone.subnets:
self.gateways["nat"].append(self.add_resource(ec2.NatGateway(
"NatGw{}".format(s.title),
SubnetId=Ref(s),
AllocationId=GetAtt(self.add_resource(
ec2.EIP(
"NatIp{}".format(s.title),
Domain="vpc",
DependsOn="IgwAttachment"
)
), "AllocationId")
)))
def create_nat_route(self, zone_title, dest):
zone = self.get_zone(zone_title)
for s in range(len(zone.subnets)):
subnet = zone.subnets[s]
rt = self.add_resource(ec2.RouteTable(
"RouteTable{}".format(subnet.title),
VpcId=Ref(self.vpc),
))
self.add_resource(ec2.Route(
"Route{}".format(subnet.title),
DependsOn=[self.gateways["nat"][s].title],
DestinationCidrBlock=dest,
NatGatewayId=Ref(self.gateways["nat"][s]),
RouteTableId=Ref(rt),
))
self.add_resource(ec2.SubnetRouteTableAssociation(
"RtAssoc{}".format(subnet.title),
RouteTableId=Ref(rt),
SubnetId=Ref(subnet)
))
class Zone(object):
def __init__(self, title):
self.public = False
self.subnets = []
self.efs_mount_targets = []
self.azs = []
self.title = title
class PublicZone(Zone):
def __init__(self, title, nat=0):
super(PublicZone, self).__init__(title)
self.public = True
self.nat = nat
for index in range(nat):
pass
class DbZone(Zone):
def __init__(self, title):
super(DbZone, self).__init__(title)
def get_db_subnet_group(self):
group = rds.DBSubnetGroup(
"DbSubnetGroup{}".format(self.title),
DBSubnetGroupDescription=Sub("Db subnet group created in ${AWS::StackName}"),
SubnetIds=[]
)
for s in self.subnets:
group.SubnetIds.append(Ref(s))
return group
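# Illustrative sketch (not part of the original module): wiring the classes
# above together, assuming the ExportTemplate base class accepts its default
# arguments. Zone titles and AZ counts are placeholders; only methods defined
# in this file are used.
def _example_network():
    net = Network(description="Example network")
    net.add_zone(PublicZone("Public", nat=1), azs=2)
    net.add_zone(DbZone("Db"), azs=2)
    net.add_zone(Zone("App"), azs=2)
    net.create_nat_in_zone("Public")           # one NAT gateway + EIP per public subnet
    net.create_nat_route("App", "0.0.0.0/0")   # route private subnets through the NAT gateways
    return net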
|
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import copy
import os
import re
from threading import Timer
import time
import uuid
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_img_cache_opts
from cinder.volume.drivers.netapp.options import netapp_nfs_extra_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume.drivers import nfs
LOG = logging.getLogger(__name__)
class NetAppNFSDriver(nfs.NfsDriver):
"""Base class for NetApp NFS driver.
Executes commands relating to Volumes.
"""
VERSION = "1.0.0"
def __init__(self, *args, **kwargs):
# NOTE(vish): db is set by Manager
validate_instantiation(**kwargs)
self._execute = None
self._context = None
super(NetAppNFSDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_connection_opts)
self.configuration.append_config_values(netapp_basicauth_opts)
self.configuration.append_config_values(netapp_transport_opts)
self.configuration.append_config_values(netapp_img_cache_opts)
def set_execute(self, execute):
self._execute = execute
def do_setup(self, context):
super(NetAppNFSDriver, self).do_setup(context)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
raise NotImplementedError()
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_size = volume.size
snap_size = snapshot.volume_size
self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
share = self._get_volume_location(snapshot.volume_id)
volume['provider_location'] = share
path = self.local_path(volume)
if self._discover_file_till_timeout(path):
self._set_rw_permissions_for_all(path)
if vol_size != snap_size:
try:
self.extend_volume(volume, vol_size)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_("Resizing %s failed. Cleaning volume."),
volume.name)
self._execute('rm', path, run_as_root=True)
else:
raise exception.CinderException(
_("NFS file %s not discovered.") % volume['name'])
return {'provider_location': volume['provider_location']}
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self._clone_volume(snapshot['volume_name'],
snapshot['name'],
snapshot['volume_id'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
nfs_mount = self._get_provider_location(snapshot.volume_id)
if self._volume_not_present(nfs_mount, snapshot.name):
return True
self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
run_as_root=True)
def _get_client(self):
"""Creates client for server."""
raise NotImplementedError()
def _get_volume_location(self, volume_id):
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
nfs_server_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
return (nfs_server_ip + ':' + export_path)
def _clone_volume(self, volume_name, clone_name, volume_id, share=None):
"""Clones mounted volume using NetApp api."""
raise NotImplementedError()
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _get_host_ip(self, volume_id):
"""Returns IP address for the given volume."""
return self._get_provider_location(volume_id).split(':')[0]
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume."""
return self._get_provider_location(volume_id).split(':')[1]
def _volume_not_present(self, nfs_mount, volume_name):
"""Check if volume exists."""
try:
self._try_execute('ls', self._get_volume_path(nfs_mount,
volume_name))
except processutils.ProcessExecutionError:
# If the volume isn't present
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError:
tries = tries + 1
if tries >= self.configuration.num_shell_tries:
raise
LOG.exception(_("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path (local fs path) for given volume name on given nfs
share.
@param nfs_share string, example 172.18.194.100:/var/nfs
@param volume_name string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_size = volume.size
src_vol_size = src_vref.size
self._clone_volume(src_vref.name, volume.name, src_vref.id)
share = self._get_volume_location(src_vref.id)
volume['provider_location'] = share
path = self.local_path(volume)
if self._discover_file_till_timeout(path):
self._set_rw_permissions_for_all(path)
if vol_size != src_vol_size:
try:
self.extend_volume(volume, vol_size)
except Exception as e:
LOG.error(
_("Resizing %s failed. Cleaning volume."), volume.name)
self._execute('rm', path, run_as_root=True)
raise e
else:
raise exception.CinderException(
_("NFS file %s not discovered.") % volume['name'])
return {'provider_location': volume['provider_location']}
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(NetAppNFSDriver, self)._update_volume_stats()
self._spawn_clean_cache_job()
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
super(NetAppNFSDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info(_('Copied image to volume %s using regular download.'),
volume['name'])
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
"""Stores image in the cache."""
file_name = 'img-cache-%s' % image_id
LOG.info(_("Registering image in cache %s"), file_name)
try:
self._do_clone_rel_img_cache(
volume['name'], file_name,
volume['provider_location'], file_name)
except Exception as e:
LOG.warn(
_('Exception while registering image %(image_id)s'
' in cache. Exception: %(exc)s')
% {'image_id': image_id, 'exc': e.__str__()})
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
result = []
if getattr(self, '_mounted_shares', None):
for share in self._mounted_shares:
dir = self._get_mount_point_for_share(share)
file_name = 'img-cache-%s' % image_id
file_path = '%s/%s' % (dir, file_name)
if os.path.exists(file_path):
LOG.debug('Found cache file for image %(image_id)s'
' on share %(share)s'
% {'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
def _do_clone_rel_img_cache(self, src, dst, share, cache_file):
"""Do clone operation w.r.t image cache file."""
@utils.synchronized(cache_file, external=True)
def _do_clone():
dir = self._get_mount_point_for_share(share)
file_path = '%s/%s' % (dir, dst)
if not os.path.exists(file_path):
LOG.info(_('Cloning from cache to destination %s'), dst)
self._clone_volume(src, dst, volume_id=None, share=share)
_do_clone()
@utils.synchronized('clean_cache')
def _spawn_clean_cache_job(self):
"""Spawns a clean task if not running."""
if getattr(self, 'cleaning', None):
LOG.debug('Image cache cleaning in progress. Returning... ')
return
else:
#set cleaning to True
self.cleaning = True
t = Timer(0, self._clean_image_cache)
t.start()
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
try:
LOG.debug('Image cache cleaning in progress.')
thres_size_perc_start =\
self.configuration.thres_avl_size_perc_start
thres_size_perc_stop =\
self.configuration.thres_avl_size_perc_stop
for share in getattr(self, '_mounted_shares', []):
try:
total_size, total_avl, total_alc =\
self._get_capacity_info(share)
avl_percent = int((total_avl / float(total_size)) * 100)
if avl_percent <= thres_size_perc_start:
LOG.info(_('Cleaning cache for share %s.'), share)
eligible_files = self._find_old_cache_files(share)
threshold_size = int(
(thres_size_perc_stop * total_size) / 100)
bytes_to_free = int(threshold_size - total_avl)
LOG.debug('Files to be queued for deletion %s',
eligible_files)
self._delete_files_till_bytes_free(
eligible_files, share, bytes_to_free)
else:
continue
except Exception as e:
LOG.warn(_(
'Exception during cache cleaning'
' %(share)s. Message - %(ex)s')
% {'share': share, 'ex': e.__str__()})
continue
finally:
LOG.debug('Image cache cleaning done.')
self.cleaning = False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
raise NotImplementedError()
def _find_old_cache_files(self, share):
"""Finds the old files in cache."""
mount_fs = self._get_mount_point_for_share(share)
threshold_minutes = self.configuration.expiry_thres_minutes
cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+%s' % (threshold_minutes)]
res, __ = self._execute(*cmd, run_as_root=True)
if res:
old_file_paths = res.strip('\n').split('\n')
mount_fs_len = len(mount_fs)
old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
eligible_files = self._shortlist_del_eligible_files(
share, old_files)
return eligible_files
return []
def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
"""Delete files from disk till bytes are freed or list exhausted."""
LOG.debug('Bytes to free %s', bytes_to_free)
if file_list and bytes_to_free > 0:
sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
mount_fs = self._get_mount_point_for_share(share)
for f in sorted_files:
if f:
file_path = '%s/%s' % (mount_fs, f[0])
LOG.debug('Delete file path %s', file_path)
@utils.synchronized(f[0], external=True)
def _do_delete():
if self._delete_file(file_path):
return True
return False
if _do_delete():
bytes_to_free = bytes_to_free - int(f[1])
if bytes_to_free <= 0:
return
def _delete_file(self, path):
"""Delete file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=True)
return True
except Exception as ex:
LOG.warning(_('Exception during deleting %s'), ex.__str__())
return False
def clone_image(self, volume, image_location, image_id, image_meta):
"""Create a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
image_id is a string which represents id of the image.
It can be used by the driver to introspect internal
stores or registry to do an efficient image clone.
Returns a dict of volume properties (e.g. provider_location) and a
boolean indicating whether cloning occurred.
"""
cloned = False
post_clone = False
share = None
try:
cache_result = self._find_image_in_cache(image_id)
if cache_result:
cloned = self._clone_from_cache(volume, image_id, cache_result)
else:
cloned = self._direct_nfs_clone(volume, image_location,
image_id)
if cloned:
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e.__str__()
LOG.info(_('Image cloning unsuccessful for image'
' %(image_id)s. Message: %(msg)s')
% {'image_id': image_id, 'msg': msg})
vol_path = self.local_path(volume)
volume['provider_location'] = None
if os.path.exists(vol_path):
self._delete_file(vol_path)
finally:
cloned = cloned and post_clone
share = volume['provider_location'] if cloned else None
bootable = True if cloned else False
return {'provider_location': share, 'bootable': bootable}, cloned
def _clone_from_cache(self, volume, image_id, cache_result):
"""Clones a copy from image cache."""
cloned = False
LOG.info(_('Cloning image %s from cache'), image_id)
for res in cache_result:
# Repeat tries in other shares if failed in some
(share, file_name) = res
LOG.debug('Cache share: %s', share)
if (share and
self._is_share_vol_compatible(volume, share)):
try:
self._do_clone_rel_img_cache(
file_name, volume['name'], share, file_name)
cloned = True
volume['provider_location'] = share
break
except Exception:
LOG.warn(_('Unexpected exception during'
' image cloning in share %s'), share)
return cloned
def _direct_nfs_clone(self, volume, image_location, image_id):
"""Clone directly in nfs share."""
LOG.info(_('Checking image clone %s from glance share.'), image_id)
cloned = False
image_location = self._construct_image_nfs_url(image_location)
share = self._is_cloneable_share(image_location)
if share and self._is_share_vol_compatible(volume, share):
LOG.debug('Share is cloneable %s', share)
volume['provider_location'] = share
(__, ___, img_file) = image_location.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
self._clone_volume(
img_file, volume['name'],
volume_id=None, share=share)
cloned = True
else:
LOG.info(
_('Image will locally be converted to raw %s'),
image_id)
dst = '%s/%s' % (dir_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw')
data = image_utils.qemu_img_info(dst)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but"
" format is now %s") % data.file_format)
else:
cloned = True
self._register_image_in_cache(
volume, image_id)
return cloned
def _post_clone_image(self, volume):
"""Do operations post image cloning."""
LOG.info(_('Performing post clone for %s'), volume['name'])
vol_path = self.local_path(volume)
if self._discover_file_till_timeout(vol_path):
self._set_rw_permissions_for_all(vol_path)
self._resize_image_file(vol_path, volume['size'])
return True
raise exception.InvalidResults(
_("NFS file could not be discovered."))
def _resize_image_file(self, path, new_size):
"""Resize the image file on share to new size."""
LOG.debug('Checking file for resize')
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info(_('Resizing file to %sG'), new_size)
image_utils.resize_image(path, new_size)
if self._is_file_size_equal(path, new_size):
return
else:
raise exception.InvalidResults(
_('Resizing image file failed.'))
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
else:
return False
def _discover_file_till_timeout(self, path, timeout=45):
"""Checks if file size at path is equal to size."""
# Sometimes nfs takes time to discover file
# Retrying in case any unexpected situation occurs
retry_seconds = timeout
sleep_interval = 2
while True:
if os.path.exists(path):
return True
else:
if retry_seconds <= 0:
LOG.warn(_('Discover file retries exhausted.'))
return False
else:
time.sleep(sleep_interval)
retry_seconds = retry_seconds - sleep_interval
def _is_cloneable_share(self, image_location):
"""Finds if the image at location is cloneable."""
conn, dr = self._check_get_nfs_path_segs(image_location)
return self._check_share_in_use(conn, dr)
def _check_get_nfs_path_segs(self, image_location):
"""Checks if the nfs path format is matched.
WebNFS url format with relative-path is supported.
Accepting all characters in path-names and checking
against the mounted shares which will contain only
allowed path segments. Returns connection and dir details.
"""
conn, dr = None, None
if image_location:
nfs_loc_pattern =\
(r'^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
r'*(/[^\/\\]+)$)')
matched = re.match(nfs_loc_pattern, image_location, flags=0)
if not matched:
LOG.debug('Image location not in the'
' expected format %s', image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
return (conn, dr)
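# Example (illustrative) of what _check_get_nfs_path_segs above returns:
# 'nfs://filer-1:2049/exports/images/img-1' yields conn='filer-1:2049' and
# dr='/exports/images'; 'nfs://filer-1/img-1' has no directory part, so dr='/'.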
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
raise NotImplementedError()
def _check_share_in_use(self, conn, dir):
"""Checks if share is cinder mounted and returns it."""
try:
if conn:
host = conn.split(':')[0]
ip = na_utils.resolve_hostname(host)
share_candidates = []
for sh in self._mounted_shares:
sh_exp = sh.split(':')[1]
if sh_exp == dir:
share_candidates.append(sh)
if share_candidates:
LOG.debug('Found possible share matches %s',
share_candidates)
return self._share_match_for_ip(ip, share_candidates)
except Exception:
LOG.warn(_("Unexpected exception while short listing used share."))
return None
def _construct_image_nfs_url(self, image_location):
"""Construct direct url for nfs backend.
It creates direct url from image_location
which is a tuple with direct_url and locations.
Returns url with nfs scheme if nfs store
else returns url. It needs to be verified
by backend before use.
"""
direct_url, locations = image_location
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
# Locations will always be a list of one until
# bp multiple-image-locations is introduced
if not locations:
return direct_url
location = locations[0]
url = location['url']
if not location['metadata']:
return url
location_type = location['metadata'].get('type')
if not location_type or location_type.lower() != "nfs":
return url
share_location = location['metadata'].get('share_location')
mount_point = location['metadata'].get('mount_point')
if not share_location or not mount_point:
return url
url_parse = urlparse.urlparse(url)
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, mount_point)
direct_url = "%s/%s" % (share_location, rel_path)
return direct_url
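# Worked example (illustrative values) for _construct_image_nfs_url above:
# with url 'file:///opt/stack/data/glance/images/img-1', mount_point
# '/opt/stack/data/glance/images' and share_location 'nfs://filer-1/exports/glance',
# rel_path becomes 'img-1' and the returned direct_url is
# 'nfs://filer-1/exports/glance/img-1'.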
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
LOG.info(_('Extending volume %s.'), volume['name'])
path = self.local_path(volume)
self._resize_image_file(path, new_size)
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
raise NotImplementedError()
def _check_share_can_hold_size(self, share, size):
"""Checks if volume can hold image with size."""
tot_size, tot_available, tot_allocated = self._get_capacity_info(share)
if tot_available < size:
msg = _("Container size smaller than required file size.")
raise exception.VolumeDriverException(msg)
def _move_nfs_file(self, source_path, dest_path):
"""Moves source to destination."""
@utils.synchronized(dest_path, external=True)
def _move_file(src, dst):
if os.path.exists(dst):
LOG.warn(_("Destination %s already exists."), dst)
return False
self._execute('mv', src, dst, run_as_root=True)
return True
try:
return _move_file(source_path, dest_path)
except Exception as e:
LOG.warn(_('Exception moving file %(src)s. Message - %(e)s')
% {'src': source_path, 'e': e})
return False
class NetAppDirectNfsDriver (NetAppNFSDriver):
"""Executes commands related to volumes on NetApp filer."""
def __init__(self, *args, **kwargs):
super(NetAppDirectNfsDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(NetAppDirectNfsDriver, self).do_setup(context)
self._context = context
self._client = self._get_client()
self._do_custom_setup(self._client)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self._check_flags()
def _check_flags(self):
"""Raises error if any required configuration flag is missing."""
required_flags = ['netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port',
'netapp_transport_type']
for flag in required_flags:
if not getattr(self.configuration, flag, None):
raise exception.CinderException(_('%s is not set') % flag)
def _get_client(self):
"""Creates NetApp api client."""
client = NaServer(
host=self.configuration.netapp_server_hostname,
server_type=NaServer.SERVER_TYPE_FILER,
transport_type=self.configuration.netapp_transport_type,
style=NaServer.STYLE_LOGIN_PASSWORD,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password)
return client
def _do_custom_setup(self, client):
"""Do the customized set up on client if any for different types."""
raise NotImplementedError()
def _is_naelement(self, elem):
"""Checks if element is NetApp element."""
if not isinstance(elem, NaElement):
raise ValueError('Expects NaElement')
def _get_ontapi_version(self):
"""Gets the supported ontapi version."""
ontapi_version = NaElement('system-get-ontapi-version')
res = self._client.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return (major, minor)
def _get_export_ip_path(self, volume_id=None, share=None):
"""Returns export ip and path.
One of volume id or share is used to return the values.
"""
if volume_id:
host_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
elif share:
host_ip = share.split(':')[0]
export_path = share.split(':')[1]
else:
raise exception.InvalidInput('Neither volume id nor share specified.')
return (host_ip, export_path)
def _create_file_usage_req(self, path):
"""Creates the request element for file_usage_get."""
file_use = NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
return file_use
class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
"""Executes commands related to volumes on c mode."""
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_cluster_opts)
self.configuration.append_config_values(netapp_nfs_extra_opts)
def _do_custom_setup(self, client):
"""Do the customized set up on client for cluster mode."""
# Default values to run first api
client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
client.set_api_version(major, minor)
self.vserver = self.configuration.netapp_vserver
self.ssc_vols = None
self.stale_vols = set()
if self.vserver:
self.ssc_enabled = True
LOG.info(_("Shares on vserver %s will only"
" be used for provisioning.") % (self.vserver))
else:
self.ssc_enabled = False
LOG.warn(_("No vserver set in config. SSC will be disabled."))
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
super(NetAppDirectCmodeNfsDriver, self).check_for_setup_error()
if self.ssc_enabled:
ssc_utils.check_ssc_api_permissions(self._client)
def _invoke_successfully(self, na_element, vserver=None):
"""Invoke the api for successful result.
If vserver is present then invokes vserver api
else Cluster api.
:param vserver: vserver name.
"""
self._is_naelement(na_element)
server = copy.copy(self._client)
if vserver:
server.set_vserver(vserver)
else:
server.set_vserver(None)
result = server.invoke_successfully(na_element, True)
return result
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
self._ensure_shares_mounted()
extra_specs = get_volume_extra_specs(volume)
qos_policy_group = None
if extra_specs:
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None)
eligible = self._find_shares(volume['size'], extra_specs)
if not eligible:
raise exception.NfsNoSuitableShareFound(
volume_size=volume['size'])
for sh in eligible:
try:
volume['provider_location'] = sh
LOG.info(_('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, sh,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
LOG.error(_("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
volume['provider_location'] = None
finally:
if self.ssc_enabled:
self._update_stale_vols(self._get_vol_for_share(sh))
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
target_path = '%s' % (volume['name'])
export_path = share.split(':')[1]
flex_vol_name = self._get_vol_by_junc_vserver(self.vserver,
export_path)
file_assign_qos = NaElement.create_node_with_children(
'file-assign-qos',
**{'volume': flex_vol_name,
'qos-policy-group-name': qos_policy_group,
'file': target_path,
'vserver': self.vserver})
self._invoke_successfully(file_assign_qos)
def _find_shares(self, size, extra_specs):
"""Finds suitable shares for given params."""
shares = []
containers = []
if self.ssc_enabled:
vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
containers = [x.export['path'] for x in vols]
else:
containers = self._mounted_shares
for sh in containers:
if self._is_share_eligible(sh, size):
size, avl, alloc = self._get_capacity_info(sh)
shares.append((sh, avl))
shares = [a for a, b in sorted(
shares, key=lambda x: x[1], reverse=True)]
return shares
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume on NetApp Cluster."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self._clone_file(exp_volume, volume_name, clone_name, vserver)
share = share if share else self._get_provider_location(volume_id)
self._post_prov_deprov_in_ssc(share)
def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
"""Gets the vserver and export volume for share."""
(host_ip, export_path) = self._get_export_ip_path(volume_id, share)
ifs = self._get_if_info_by_ip(host_ip)
vserver = ifs[0].get_child_content('vserver')
exp_volume = self._get_vol_by_junc_vserver(vserver, export_path)
return (vserver, exp_volume)
def _get_if_info_by_ip(self, ip):
"""Gets the network interface info by ip."""
net_if_iter = NaElement('net-interface-get-iter')
net_if_iter.add_new_child('max-records', '10')
query = NaElement('query')
net_if_iter.add_child_elem(query)
query.add_node_with_children(
'net-interface-info', **{'address': na_utils.resolve_hostname(ip)})
result = self._invoke_successfully(net_if_iter)
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
return attr_list.get_children()
raise exception.NotFound(
_('No interface found on cluster for ip %s')
% (ip))
def _get_vserver_ips(self, vserver):
"""Get ips for the vserver."""
result = na_utils.invoke_api(
self._client, api_name='net-interface-get-iter',
is_iter=True, tunnel=vserver)
if_list = []
for res in result:
records = res.get_child_content('num-records')
if records > 0:
attr_list = res['attributes-list']
ifs = attr_list.get_children()
if_list.extend(ifs)
return if_list
def _get_vol_by_junc_vserver(self, vserver, junction):
"""Gets the volume by junction path and vserver."""
vol_iter = NaElement('volume-get-iter')
vol_iter.add_new_child('max-records', '10')
query = NaElement('query')
vol_iter.add_child_elem(query)
vol_attrs = NaElement('volume-attributes')
query.add_child_elem(vol_attrs)
vol_attrs.add_node_with_children(
'volume-id-attributes',
**{'junction-path': junction,
'owning-vserver-name': vserver})
des_attrs = NaElement('desired-attributes')
des_attrs.add_node_with_children('volume-attributes',
**{'volume-id-attributes': None})
vol_iter.add_child_elem(des_attrs)
result = self._invoke_successfully(vol_iter, vserver)
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
vols = attr_list.get_children()
vol_id = vols[0].get_child_by_name('volume-id-attributes')
return vol_id.get_child_content('name')
msg_fmt = {'vserver': vserver, 'junction': junction}
raise exception.NotFound(_("""No volume on cluster with vserver
%(vserver)s and junction path %(junction)s
""") % msg_fmt)
def _clone_file(self, volume, src_path, dest_path, vserver=None,
dest_exists=False):
"""Clones file on vserver."""
msg = _("""Cloning with params volume %(volume)s, src %(src_path)s,
dest %(dest_path)s, vserver %(vserver)s""")
msg_fmt = {'volume': volume, 'src_path': src_path,
'dest_path': dest_path, 'vserver': vserver}
LOG.debug(msg % msg_fmt)
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': src_path,
'destination-path': dest_path})
major, minor = self._client.get_api_version()
if major == 1 and minor >= 20 and dest_exists:
clone_create.add_new_child('destination-exists', 'true')
self._invoke_successfully(clone_create, vserver)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(NetAppDirectCmodeNfsDriver, self)._update_volume_stats()
netapp_backend = 'NetApp_NFS_cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats["volume_backend_name"] = (backend_name or
netapp_backend)
self._stats["vendor_name"] = 'NetApp'
self._stats["driver_version"] = '1.0'
self._update_cluster_vol_stats(self._stats)
provide_ems(self, self._client, self._stats, netapp_backend)
def _update_cluster_vol_stats(self, data):
"""Updates vol stats with cluster config."""
if self.ssc_enabled:
sync = True if self.ssc_vols is None else False
ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver,
synchronous=sync)
else:
LOG.warn(_("No vserver set in config. SSC will be disabled."))
if self.ssc_vols:
data['netapp_mirrored'] = 'true'\
if self.ssc_vols['mirrored'] else 'false'
data['netapp_unmirrored'] = 'true'\
if len(self.ssc_vols['all']) >\
len(self.ssc_vols['mirrored']) else 'false'
data['netapp_dedup'] = 'true'\
if self.ssc_vols['dedup'] else 'false'
data['netapp_nodedup'] = 'true'\
if len(self.ssc_vols['all']) >\
len(self.ssc_vols['dedup']) else 'false'
data['netapp_compression'] = 'true'\
if self.ssc_vols['compression'] else 'false'
data['netapp_nocompression'] = 'true'\
if len(self.ssc_vols['all']) >\
len(self.ssc_vols['compression']) else 'false'
data['netapp_thin_provisioned'] = 'true'\
if self.ssc_vols['thin'] else 'false'
data['netapp_thick_provisioned'] = 'true'\
if len(self.ssc_vols['all']) >\
len(self.ssc_vols['thin']) else 'false'
if self.ssc_vols['all']:
vol_max = max(self.ssc_vols['all'])
data['total_capacity_gb'] =\
int(vol_max.space['size_total_bytes']) / units.Gi
data['free_capacity_gb'] =\
int(vol_max.space['size_avl_bytes']) / units.Gi
else:
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
elif self.ssc_enabled:
LOG.warn(_("No cluster ssc stats found."
" Wait for next volume stats update."))
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
if volume:
self.stale_vols.add(volume)
set_copy = self.stale_vols.copy()
if reset:
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
LOG.warn(_("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
vs_ifs = self._get_vserver_ips(self.vserver)
for vol in vols['all']:
for sh in self._mounted_shares:
host = sh.split(':')[0]
junction = sh.split(':')[1]
ip = na_utils.resolve_hostname(host)
if (self._ip_in_ifs(ip, vs_ifs) and
junction == vol.id['junction_path']):
mnt_share_vols.add(vol)
vol.export['path'] = sh
break
for key in vols.keys():
vols[key] = vols[key] & mnt_share_vols
self.ssc_vols = vols
def _ip_in_ifs(self, ip, api_ifs):
"""Checks if ip is listed for ifs in api format."""
if api_ifs is None:
return False
for ifc in api_ifs:
ifc_ip = ifc.get_child_content("address")
if ifc_ip == ip:
return True
return False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(vserver, exp_volume) = self._get_vserver_and_exp_vol(
volume_id=None, share=share)
for file in old_files:
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self._get_cluster_file_usage(path, vserver)
file_list.append((file, u_bytes))
LOG.debug('Shortlisted del elg files %s', file_list)
return file_list
def _get_cluster_file_usage(self, path, vserver):
"""Gets the file unique bytes."""
LOG.debug('Getting file usage for %s', path)
file_use = NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self._invoke_successfully(file_use, vserver)
bytes = res.get_child_content('unique-bytes')
LOG.debug('file-usage for path %(path)s is %(bytes)s'
% {'path': path, 'bytes': bytes})
return bytes
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
ip_vserver = self._get_vserver_for_ip(ip)
if ip_vserver and shares:
for share in shares:
ip_sh = share.split(':')[0]
sh_vserver = self._get_vserver_for_ip(ip_sh)
if sh_vserver == ip_vserver:
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _get_vserver_for_ip(self, ip):
"""Get vserver for the mentioned ip."""
try:
ifs = self._get_if_info_by_ip(ip)
vserver = ifs[0].get_child_content('vserver')
return vserver
except Exception:
return None
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
if self.ssc_vols:
for vol in self.ssc_vols['all']:
if vol.export['path'] == nfs_share:
return vol
return None
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
compatible = self._is_share_eligible(share, volume['size'])
if compatible and self.ssc_enabled:
matched = self._is_share_vol_type_match(volume, share)
compatible = compatible and matched
return compatible
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
extra_specs = get_volume_extra_specs(volume)
vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
def delete_volume(self, volume):
"""Deletes a logical volume."""
share = volume['provider_location']
super(NetAppDirectCmodeNfsDriver, self).delete_volume(volume)
self._post_prov_deprov_in_ssc(share)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
share = self._get_provider_location(snapshot.volume_id)
super(NetAppDirectCmodeNfsDriver, self).delete_snapshot(snapshot)
self._post_prov_deprov_in_ssc(share)
def _post_prov_deprov_in_ssc(self, share):
if self.ssc_enabled and share:
netapp_vol = self._get_vol_for_share(share)
if netapp_vol:
self._update_stale_vols(volume=netapp_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
copy_success = False
try:
major, minor = self._client.get_api_version()
col_path = self.configuration.netapp_copyoffload_tool_path
if (major == 1 and minor >= 20 and col_path):
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
LOG.info(_('Copied image %(img)s to volume %(vol)s using copy'
' offload workflow.')
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
except Exception as e:
LOG.exception(_('Copy offload workflow unsuccessful. %s'), e)
finally:
if not copy_success:
super(NetAppDirectCmodeNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
if self.ssc_enabled:
sh = self._get_provider_location(volume['id'])
self._update_stale_vols(self._get_vol_for_share(sh))
def _try_copyoffload(self, context, volume, image_service, image_id):
"""Tries server side file copy offload."""
copied = False
cache_result = self._find_image_in_cache(image_id)
if cache_result:
copied = self._copy_from_cache(volume, image_id, cache_result)
if not cache_result or not copied:
self._copy_from_img_service(context, volume, image_service,
image_id)
def _get_ip_verify_on_cluster(self, host):
"""Verifies if host on same cluster and returns ip."""
ip = na_utils.resolve_hostname(host)
vserver = self._get_vserver_for_ip(ip)
if not vserver:
raise exception.NotFound(_("No vserver owning the ip %s.") % ip)
return ip
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file_name."""
LOG.debug("Trying copy from cache using copy offload.")
copied = False
for res in cache_result:
try:
(share, file_name) = res
LOG.debug("Found cache file_name on share %s.", share)
if share != self._get_provider_location(volume['id']):
col_path = self.configuration.netapp_copyoffload_tool_path
src_ip = self._get_ip_verify_on_cluster(
share.split(':')[0])
src_path = os.path.join(share.split(':')[1], file_name)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
dst_path = os.path.join(
self._get_export_path(volume['id']), volume['name'])
self._execute(col_path, src_ip, dst_ip,
src_path, dst_path, run_as_root=False,
check_exit_code=0)
self._register_image_in_cache(volume, image_id)
LOG.debug("Copied image from cache to volume %s using"
" copy offload.", volume['id'])
else:
self._clone_file_dst_exists(share, file_name,
volume['name'],
dest_exists=True)
LOG.debug("Copied image from cache to volume %s using"
" cloning.", volume['id'])
self._post_clone_image(volume)
copied = True
break
except Exception as e:
LOG.exception(_('Error in workflow copy from cache. %s.'), e)
return copied
def _clone_file_dst_exists(self, share, src_name, dst_name,
dest_exists=False):
"""Clone file even if dest exists."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
self._clone_file(exp_volume, src_name, dst_name, vserver,
dest_exists=dest_exists)
def _copy_from_img_service(self, context, volume, image_service,
image_id):
"""Copies from the image service using copy offload."""
LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
image_loc = self._construct_image_nfs_url(image_loc)
conn, dr = self._check_get_nfs_path_segs(image_loc)
if conn:
src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
else:
raise exception.NotFound(_("Source host details not found."))
(__, ___, img_file) = image_loc.rpartition('/')
src_path = os.path.join(dr, img_file)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
# tmp file is required to deal with img formats
tmp_img_file = str(uuid.uuid4())
col_path = self.configuration.netapp_copyoffload_tool_path
img_info = image_service.show(context, image_id)
dst_share = self._get_provider_location(volume['id'])
self._check_share_can_hold_size(dst_share, img_info['size'])
dst_dir = self._get_mount_point_for_share(dst_share)
dst_img_local = os.path.join(dst_dir, tmp_img_file)
try:
# If src and dst share not equal
if (('%s:%s' % (src_ip, dr)) !=
('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
dst_img_serv_path = os.path.join(
self._get_export_path(volume['id']), tmp_img_file)
self._execute(col_path, src_ip, dst_ip, src_path,
dst_img_serv_path, run_as_root=False,
check_exit_code=0)
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
% {'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % (image_id))
if img_info['disk_format'] == 'raw':
LOG.debug('Image is raw %s.', image_id)
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = str(uuid.uuid4())
dst_img_conv_local = os.path.join(dst_dir, img_conv)
# Checking against image size which is approximate check
self._check_share_can_hold_size(dst_share, img_info['size'])
try:
image_utils.convert_image(dst_img_local,
dst_img_conv_local, 'raw')
data = image_utils.qemu_img_info(dst_img_conv_local)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but format is now %s.")
% data.file_format)
else:
self._clone_file_dst_exists(dst_share, img_conv,
volume['name'],
dest_exists=True)
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug('Copied locally converted raw image'
' %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
self._delete_file(dst_img_conv_local)
self._post_clone_image(volume)
finally:
if os.path.exists(dst_img_local):
self._delete_file(dst_img_local)
class NetAppDirect7modeNfsDriver(NetAppDirectNfsDriver):
"""Executes commands related to volumes on 7 mode."""
def __init__(self, *args, **kwargs):
super(NetAppDirect7modeNfsDriver, self).__init__(*args, **kwargs)
def _do_custom_setup(self, client):
"""Do the customized set up on client if any for 7 mode."""
(major, minor) = self._get_ontapi_version()
client.set_api_version(major, minor)
def check_for_setup_error(self):
"""Checks if setup occurred properly."""
api_version = self._client.get_api_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported ONTAP version."
" ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Api version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
super(NetAppDirect7modeNfsDriver, self).check_for_setup_error()
def _invoke_successfully(self, na_element, vfiler=None):
"""Invoke the api for successful result.
If vfiler is present then invokes vfiler api
else filer api.
:param vfiler: vfiler name.
"""
self._is_naelement(na_element)
server = copy.copy(self._client)
if vfiler:
server.set_vfiler(vfiler)
else:
server.set_vfiler(None)
result = server.invoke_successfully(na_element, True)
return result
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume with NetApp filer."""
(host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self._get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
(clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path,
volume_name),
target_path)
if vol_uuid:
try:
self._wait_for_clone_finish(clone_id, vol_uuid)
except NaApiError as e:
if e.code != 'UnknownCloneId':
self._clear_clone(clone_id)
raise e
def _get_actual_path_for_export(self, export_path):
"""Gets the actual path on the filer for export path."""
storage_path = NaElement.create_node_with_children(
'nfs-exportfs-storage-path', **{'pathname': export_path})
result = self._invoke_successfully(storage_path, None)
if result.get_child_content('actual-pathname'):
return result.get_child_content('actual-pathname')
raise exception.NotFound(_('No storage path found for export path %s')
% (export_path))
def _start_clone(self, src_path, dest_path):
"""Starts the clone operation.
:returns: clone-id
"""
msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
LOG.debug("""Cloning with src %(src_path)s, dest %(dest_path)s"""
% msg_fmt)
clone_start = NaElement.create_node_with_children(
'clone-start',
**{'source-path': src_path,
'destination-path': dest_path,
'no-snap': 'true'})
result = self._invoke_successfully(clone_start, None)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
return (clone_id, vol_uuid)
def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
"""Waits till a clone operation is complete or errored out."""
clone_ls_st = NaElement('clone-list-status')
clone_id = NaElement('clone-id')
clone_ls_st.add_child_elem(clone_id)
clone_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_op_id,
'volume-uuid': vol_uuid})
task_running = True
while task_running:
result = self._invoke_successfully(clone_ls_st, None)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
state = ops_info[0].get_child_content('clone-state')
if state == 'completed':
task_running = False
elif state == 'failed':
code = ops_info[0].get_child_content('error')
reason = ops_info[0].get_child_content('reason')
raise NaApiError(code, reason)
else:
time.sleep(1)
else:
                raise NaApiError(
                    'UnknownCloneId',
                    'No clone operation for clone id %s found on the filer'
                    % clone_op_id)
def _clear_clone(self, clone_id):
"""Clear the clone information.
Invoke this in case of failed clone.
"""
clone_clear = NaElement.create_node_with_children(
'clone-clear',
**{'clone-id': clone_id})
retry = 3
while retry:
try:
self._invoke_successfully(clone_clear, None)
break
except Exception:
# Filer might be rebooting
time.sleep(5)
retry = retry - 1
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(NetAppDirect7modeNfsDriver, self)._update_volume_stats()
netapp_backend = 'NetApp_NFS_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats["volume_backend_name"] = (backend_name or
'NetApp_NFS_7mode_direct')
self._stats["vendor_name"] = 'NetApp'
self._stats["driver_version"] = self.VERSION
provide_ems(self, self._client, self._stats, netapp_backend,
server_type="7mode")
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
exp_volume = self._get_actual_path_for_export(share)
for file in old_files:
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self._get_filer_file_usage(path)
file_list.append((file, u_bytes))
        LOG.debug('Shortlisted eligible files for deletion: %s', file_list)
return file_list
def _get_filer_file_usage(self, path):
"""Gets the file unique bytes."""
LOG.debug('Getting file usage for %s', path)
file_use = NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self._invoke_successfully(file_use)
        unique_bytes = res.get_child_content('unique-bytes')
        LOG.debug('file-usage for path %(path)s is %(bytes)s'
                  % {'path': path, 'bytes': unique_bytes})
        return unique_bytes
def _is_filer_ip(self, ip):
"""Checks whether ip is on the same filer."""
try:
ifconfig = NaElement('net-ifconfig-get')
res = self._invoke_successfully(ifconfig, None)
if_info = res.get_child_by_name('interface-config-info')
if if_info:
ifs = if_info.get_children()
for intf in ifs:
v4_addr = intf.get_child_by_name('v4-primary-address')
if v4_addr:
ip_info = v4_addr.get_child_by_name('ip-address-info')
if ip_info:
address = ip_info.get_child_content('address')
if ip == address:
return True
else:
continue
except Exception:
return False
return False
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
if self._is_filer_ip(ip) and shares:
for share in shares:
ip_sh = share.split(':')[0]
if self._is_filer_ip(ip_sh):
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
return self._is_share_eligible(share, volume['size'])
|
|
import bpy
import math
import mathutils
from os.path import join, dirname, abspath
class PBSMaterial(bpy.types.Panel):
""" This is a panel to display the PBS properties of the currently
selected material """
bl_idname = "MATERIAL_PT_pbs_material_props"
bl_label = "Physically Based Shading Properties"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "material"
def draw(self, context):
if not hasattr(context.material, "pbepbs"):
self.layout.label("No PBS datablock")
return
pbepbs = context.material.pbepbs
box = self.layout.box()
box.row().prop(pbepbs, "shading_model")
box.row().prop(context.material, "diffuse_color", "Base Color")
if pbepbs.shading_model not in ["EMISSIVE", "TRANSPARENT_EMISSIVE"]:
if pbepbs.shading_model not in ("SKIN", "FOLIAGE", "CLEARCOAT", "TRANSPARENT_GLASS"):
box.row().prop(pbepbs, "metallic")
if not pbepbs.metallic and pbepbs.shading_model != "CLEARCOAT":
box.row().prop(pbepbs, "ior", "Index of Refraction")
box.row().prop(pbepbs, "roughness")
box.row().prop(pbepbs, "normal_strength")
box.row()
self.layout.separator()
if pbepbs.shading_model not in ("DEFAULT", "FOLIAGE", "CLEARCOAT", "SKIN"):
box = self.layout.box()
box.label("Shading model properties")
if pbepbs.shading_model in ["EMISSIVE", "TRANSPARENT_EMISSIVE"]:
box.row().prop(pbepbs, "emissive_factor")
if pbepbs.shading_model == "TRANSLUCENT":
box.row().prop(pbepbs, "translucency")
if pbepbs.shading_model in ["TRANSPARENT_GLASS", "TRANSPARENT_EMISSIVE"]:
box.row().prop(context.material, "alpha", "Transparency")
box.row()
self.layout.label("Operators:")
self.layout.row().operator("pbepbs.set_default_textures")
self.layout.separator()
def draw_header(self, context):
self.layout.label(text="", icon="MATERIAL")
class PBSMatProps(bpy.types.PropertyGroup):
""" This is the property group which stores the PBS properties of each
material """
def update_roughness(self, context):
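        # Map the PBS roughness onto Blender's specular hardness:
        # hardness = 2 / alpha^2 - 2 with alpha = roughness^2, clamped to 511.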
if self.roughness <= 0.0:
context.material.specular_hardness = 511
else:
a = self.roughness * self.roughness
context.material.specular_hardness = min(2 / (a * a) - 2, 511)
def update_specular(self, context):
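        # Fresnel reflectance at normal incidence: F0 = ((ior - 1) / (ior + 1))^2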
f0 = (self.ior - 1) / (self.ior + 1)
f0 *= f0
context.material.specular_intensity = f0
shading_model = bpy.props.EnumProperty(
name="Shading Model",
description="The shading model to use",
items=(
("DEFAULT", "Default", "Default shading model"),
("EMISSIVE", "Emissive", "Emissive material"),
("CLEARCOAT", "Clear Coat", "Clearcoat shading model e.g. for car paint"),
("TRANSPARENT_GLASS", "Transparent (Glass)", "Transparent glass material"),
("TRANSPARENT_EMISSIVE", "Transparent (Emissive)", "Transparent emissive material"),
("SKIN", "Skin", "Skin material"),
("FOLIAGE", "Foliage (Vegetation)", "Two-Sided foliage"),
),
default="DEFAULT"
)
roughness = bpy.props.FloatProperty(
name="Roughness",
description="The roughness of the material, 0 for perfect flat surfaces, "
"1 for complete rough surfaces.",
subtype="FACTOR",
update=update_roughness,
default=0.3, min=0.0, max=1.0)
ior = bpy.props.FloatProperty(
name="Index of Refraction",
description="Index of refraction, usually 1.5 for most materials.",
subtype="FACTOR",
update=update_specular,
default=1.5, min=1.001, max=2.4)
metallic = bpy.props.BoolProperty(
name="Metallic",
description="Controls whether the material is metallic or not.",
default=False)
emissive_factor = bpy.props.FloatProperty(
name="Emissive Factor",
description="Values > 0.0 make the material emissive, receiving no shading "
"but emitting light with a color of the BaseColor instead",
subtype="FACTOR",
default=0.0, min=0.0, max=1.0)
translucency = bpy.props.FloatProperty(
name="Translucency",
description="Makes the material translucent, e.g. for skin and foliage.",
subtype="FACTOR",
default=0.0, min=0.0, max=1.0)
normal_strength = bpy.props.FloatProperty(
name="Normalmap Strength",
description="Strength of the Normal-Map, a value of 0.0 will cause no "
"normal mapping",
subtype="FACTOR", default=0.0, min=0.0, max=1.0)
class OperatorSetDefaultTextures(bpy.types.Operator):
""" Operator to fill the empty texture slots on a material with default textures """
bl_idname = "pbepbs.set_default_textures"
bl_label = "Fill all materials with default PBS textures"
def execute(self, context):
slot_types = ["basecolor", "normal", "specular", "roughness"]
print("Setting default textures")
for material in bpy.data.materials:
for index, slot_name in enumerate(slot_types):
slot = material.texture_slots[index]
if slot is not None and slot.texture is not None:
search = "empty_" + slot_name
if search in slot.name or search in slot.texture.name:
# Don't skip when we found an empty texture, instead
# reassign it
pass
else:
print("Skipping", slot.name)
continue
slot = material.texture_slots.create(index)
texname = "empty_" + slot_name
default_pth = join(dirname(__file__), "../res/" + texname + ".png")
image = None
for img in bpy.data.images:
if img.filepath == default_pth:
image = img
break
else:
bpy.ops.image.open(filepath=default_pth, relative_path=False)
for key in bpy.data.images.keys():
if (texname + ".png") in key:
image = bpy.data.images[key]
break
else:
raise Exception("Loaded " + texname + " from '" + default_pth +
"' but it was not loaded into bpy.data.images!")
texture = None
for tex in bpy.data.textures:
if tex.name == texname:
texture = tex
break
else:
texture = bpy.data.textures.new(texname, type="IMAGE")
texture.image = image
try:
slot.texture_coords = "UV"
except Exception as msg:
print("FAILED to set texture slot, due to the following error:")
print(msg)
slot.texture_coords = "GLOBAL"
slot.texture = texture
print("Assigned", texture, "to", slot.name)
print("Done.")
return {'FINISHED'}
class OperatorFixLampTypes(bpy.types.Operator):
""" Operator to set use_sphere on all lights """
bl_idname = "pbepbs.fix_lamp_types"
bl_label = "Fix PBS Lamp types"
def execute(self, context):
for lamp in bpy.data.lamps:
if lamp.type == "POINT":
lamp.use_sphere = True
return {'FINISHED'}
class OperatorFixNegativeScale(bpy.types.Operator):
""" Operator to fix any negative scale """
bl_idname = "pbepbs.fix_negative_scale"
bl_label = "Fix negative scale on objects"
def execute(self, context):
for obj in bpy.data.objects:
sx, sy, sz = obj.scale
if sx < 0 or sy < 0 or sz < 0:
print("Invalid scale on", obj.name)
obj.scale = [abs(sx), abs(sy), abs(sz)]
return {'FINISHED'}
class PBSDataPanel(bpy.types.Panel):
""" This is a panel to display the PBS properties of the currently
selected object """
bl_idname = "MATERIAL_PT_pbs_light_props"
bl_label = "Physically Based Shading Properties"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "data"
def draw(self, context):
if not context.object:
self.layout.label("No object selected")
return
obj = context.object
if obj.type == "LAMP":
if not hasattr(obj.data, "pbepbs"):
self.layout.label("Lamp has no PBS datablock")
return
obj_data = obj.data
pbs_data = obj_data.pbepbs
box = self.layout.box()
# Header
row = box.row(align=True)
row.alignment = 'CENTER'
row.label(text="Panda3D PBS Properties")
box.row()
box.row().prop(obj.data, "type", "Light type")
if obj.data.type not in ("POINT", "SPOT", "AREA"):
box.row().label("Type not supported yet!")
return
# Light color
box.row().prop(pbs_data, "use_temperature")
if pbs_data.use_temperature:
row = box.row()
row.prop(pbs_data, "color_temperature")
row.prop(pbs_data, "color_preview", "")
else:
box.row().prop(obj.data, "color", "Color")
if obj.data.type == "POINT":
box.row().prop(pbs_data, "sphere_radius")
# Maximum culling distance
box.row().prop(obj.data, "distance", "Max Cull Distance")
# Light intensity
box.row().prop(obj.data, "energy", "Intensity (Lumens)")
# Spot light size
if obj.data.type == "SPOT":
box.row().prop(obj.data, "spot_size")
# Area light size
if obj.data.type == "AREA":
box.row().prop(obj.data, "shape", "Shape")
if obj.data.shape == "SQUARE":
box.row().prop(obj.data, "size", "Size")
else:
box.row().prop(obj.data, "size", "Width")
box.row().prop(obj.data, "size_y", "Height")
# IES Profile
if obj.data.type in ("SPOT", "POINT"):
box.row().prop(pbs_data, "ies_profile", "IES Profile")
# Shadows
box.row().prop(obj.data, "use_shadow", "Enable Shadows")
if obj.data.use_shadow:
box.row().prop(pbs_data, "shadow_map_res")
if int(pbs_data.shadow_map_res) > 512:
box.row().label("WARNING: Very high shadow map resolution!")
box.row()
# Matrix to convert from xyz to rgb
xyz_to_rgb = mathutils.Matrix((
(3.2406, -0.9689, 0.0557),
(-1.5372, 1.8758, -0.2050),
(-0.4986, 0.0415, 1.0570)
)).transposed()
def get_temperature_color_preview(lamp_props):
""" Returns a preview color for the lamp data when a color temperature is used """
temperature = lamp_props.color_temperature
mm = 1000.0 / temperature
mm2 = mm ** 2
mm3 = mm2 * mm
x, y = 0, 0
if temperature < 4000:
x = -0.2661239 * mm3 - 0.2343580 * mm2 + 0.8776956 * mm + 0.179910
else:
x = -3.0258469 * mm3 + 2.1070379 * mm2 + 0.2226347 * mm + 0.240390
x2 = x**2
x3 = x2 * x
if temperature < 2222:
y = -1.1063814 * x3 - 1.34811020 * x2 + 2.18555832 * x - 0.20219683
elif temperature < 4000:
y = -0.9549476 * x3 - 1.37418593 * x2 + 2.09137015 * x - 0.16748867
else:
y = 3.0817580 * x3 - 5.87338670 * x2 + 3.75112997 * x - 0.37001483
# xyY to XYZ, assuming Y=1.
xyz = mathutils.Vector((x / y, 1, (1 - x - y) / y))
return xyz_to_rgb * xyz
def get_ies_profiles():
""" Returns a list of all available ies profiles """
# XXX: Read dynamically from rp installation
profiles = [
'area_light.ies', 'bollard.ies', 'cylinder_narrow.ies',
'cylinder_wide.ies', 'defined_diffuse.ies', 'defined_diffuse_spot.ies',
'defined_spot.ies', 'display.ies', 'jelly_fish.ies', 'medium_scatter.ies', 'overhead.ies',
'parallel_beam.ies', 'pear.ies', 'scatter_light.ies', 'soft_arrow.ies',
'soft_display.ies', 'star_focused.ies', 'three_lobe_umbrella.ies', 'three_lobe_vee.ies',
'tight_focused.ies', 'top_post.ies', 'trapezoid.ies', 'umbrella.ies', 'vee.ies',
'x_arrow.ies', 'x_arrow_diffuse.ies', 'x_arrow_soft.ies'
]
options = [("none", "None", "None")]
for profile_id in profiles:
name = profile_id.replace(".ies", "").title().replace("_", " ")
options.append((profile_id, name, name))
return options
class PBSLampProps(bpy.types.PropertyGroup):
""" This is the property group which stores the PBS properties of each
lamp """
def update_shadow_resolution(self, context):
if context.object:
context.object.data.shadow_buffer_size = int(context.object.data.pbepbs.shadow_map_res)
def update_color_temperature(self, context):
if context.object:
if context.object.data.pbepbs.use_temperature:
context.object.data.color = get_temperature_color_preview(context.object.data.pbepbs)
shadow_map_res = bpy.props.EnumProperty(
name="Shadow Resolution",
description="Resolution of the shadow map in pixels",
items=(
("128", "128 px", "128 Pixel Resolution"),
("256", "256 px", "256 Pixel Resolution"),
("512", "512 px", "512 Pixel Resolution"),
("1024", "1024 px", "1024 Pixel Resolution"),
("2048", "2048 px", "2048 Pixel Resolution")
),
default="128",
update=update_shadow_resolution
)
use_temperature = bpy.props.BoolProperty(
name="Use Color Temperature",
default=True,
description="Whether to set the lights color via a color temperature",
update=update_color_temperature
)
color_temperature = bpy.props.FloatProperty(
name="Color Temperature",
description="Color teperature of the light in Kelvin",
default=6500.0,
precision=0,
step=500,
min=1400.0, max=25000.0,
update=update_color_temperature
)
color_preview = bpy.props.FloatVectorProperty(
name="Color Preview",
description="Color preview of the temperature",
subtype="COLOR",
size=3,
default=(1, 1, 1),
min=0.0, max=1.0,
set=None,
get=get_temperature_color_preview
)
sphere_radius = bpy.props.FloatProperty(
name="Sphere Radius",
default=1.0,
description="Spherical Area Light Sphere radius",
min=0.05,
max=100.0
)
ies_profile = bpy.props.EnumProperty(
name="IES Profile",
description="IES Lighting profile",
items=get_ies_profiles(),
default="none",
)
def register():
bpy.utils.register_class(OperatorSetDefaultTextures)
bpy.utils.register_class(OperatorFixLampTypes)
bpy.utils.register_class(OperatorFixNegativeScale)
bpy.utils.register_class(PBSMatProps)
bpy.utils.register_class(PBSLampProps)
bpy.utils.register_class(PBSMaterial)
bpy.utils.register_class(PBSDataPanel)
bpy.types.Material.pbepbs = bpy.props.PointerProperty(type=PBSMatProps)
bpy.types.Lamp.pbepbs = bpy.props.PointerProperty(type=PBSLampProps)
def unregister():
del bpy.types.Material.pbepbs
bpy.utils.unregister_class(OperatorSetDefaultTextures)
bpy.utils.unregister_class(OperatorFixLampTypes)
bpy.utils.unregister_class(OperatorFixNegativeScale)
bpy.utils.unregister_class(PBSMatProps)
bpy.utils.unregister_class(PBSLampProps)
bpy.utils.unregister_class(PBSMaterial)
bpy.utils.unregister_class(PBSDataPanel)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from tests.helm_template_generator import render_chart
class GitSyncSchedulerTest(unittest.TestCase):
def test_should_add_dags_volume(self):
docs = render_chart(
values={"dags": {"gitSync": {"enabled": True}}},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
# check that there is a volume and git-sync and scheduler container mount it
assert len(jmespath.search("spec.template.spec.volumes[?name=='dags']", docs[0])) > 0
assert (
len(
jmespath.search(
"(spec.template.spec.containers[?name=='scheduler'].volumeMounts[])[?name=='dags']",
docs[0],
)
)
> 0
)
assert (
len(
jmespath.search(
"(spec.template.spec.containers[?name=='git-sync'].volumeMounts[])[?name=='dags']",
docs[0],
)
)
> 0
)
def test_validate_the_git_sync_container_spec(self):
docs = render_chart(
values={
"images": {
"gitSync": {
"repository": "test-registry/test-repo",
"tag": "test-tag",
"pullPolicy": "Always",
}
},
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"wait": 66,
"maxFailures": 70,
"subPath": "path1/path2",
"rev": "HEAD",
"depth": 1,
"repo": "https://github.com/apache/airflow.git",
"branch": "test-branch",
"sshKeySecret": None,
"credentialsSecret": None,
"knownHosts": None,
},
"persistence": {"enabled": True},
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {
"name": "git-sync-test",
"securityContext": {"runAsUser": 65533},
"image": "test-registry/test-repo:test-tag",
"imagePullPolicy": "Always",
"env": [
{"name": "GIT_SYNC_REV", "value": "HEAD"},
{"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
{"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
{"name": "GIT_SYNC_DEPTH", "value": "1"},
{"name": "GIT_SYNC_ROOT", "value": "/git"},
{"name": "GIT_SYNC_DEST", "value": "repo"},
{"name": "GIT_SYNC_ADD_USER", "value": "true"},
{"name": "GIT_SYNC_WAIT", "value": "66"},
{"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
],
"volumeMounts": [{"mountPath": "/git", "name": "dags"}],
"resources": {},
} == jmespath.search("spec.template.spec.containers[1]", docs[0])
def test_validate_if_ssh_params_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": None,
"branch": "test-branch",
}
}
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {
"name": "git-sync-ssh-key",
"secret": {"secretName": "ssh-secret", "defaultMode": 288},
} in jmespath.search("spec.template.spec.volumes", docs[0])
def test_should_set_username_and_pass_env_variables(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"credentialsSecret": "user-pass-secret",
"sshKeySecret": None,
}
}
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {
"name": "GIT_SYNC_USERNAME",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
} in jmespath.search("spec.template.spec.containers[1].env", docs[0])
assert {
"name": "GIT_SYNC_PASSWORD",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
} in jmespath.search("spec.template.spec.containers[1].env", docs[0])
def test_should_set_the_volume_claim_correctly_when_using_an_existing_claim(self):
docs = render_chart(
values={"dags": {"persistence": {"enabled": True, "existingClaim": "test-claim"}}},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
"spec.template.spec.volumes", docs[0]
)
def test_should_add_extra_volume_and_extra_volume_mount(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"scheduler": {
"extraVolumes": [{"name": "test-volume", "emptyDir": {}}],
"extraVolumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
},
"dags": {
"gitSync": {
"enabled": True,
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "test-volume", "emptyDir": {}} in jmespath.search(
"spec.template.spec.volumes", docs[0]
)
assert {"name": "test-volume", "mountPath": "/opt/test"} in jmespath.search(
"spec.template.spec.containers[0].volumeMounts", docs[0]
)
def test_extra_volume_and_git_sync_extra_volume_mount(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"scheduler": {
"extraVolumes": [{"name": "test-volume", "emptyDir": {}}],
},
"dags": {
"gitSync": {
"enabled": True,
"extraVolumeMounts": [{"mountPath": "/opt/test", "name": "test-volume"}],
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "test-volume", "emptyDir": {}} in jmespath.search(
"spec.template.spec.volumes", docs[0]
)
assert {'mountPath': '/git', 'name': 'dags'} in jmespath.search(
"spec.template.spec.containers[1].volumeMounts", docs[0]
)
assert {"name": "test-volume", "mountPath": "/opt/test"} in jmespath.search(
"spec.template.spec.containers[1].volumeMounts", docs[0]
)
def test_should_add_env(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"env": [{"name": "FOO", "value": "bar"}],
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "FOO", "value": "bar"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
def test_resources_are_configurable(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"resources": {
"limits": {"cpu": "200m", 'memory': "128Mi"},
"requests": {"cpu": "300m", 'memory': "169Mi"},
},
},
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert "128Mi" == jmespath.search("spec.template.spec.containers[1].resources.limits.memory", docs[0])
assert "169Mi" == jmespath.search(
"spec.template.spec.containers[1].resources.requests.memory", docs[0]
)
assert "300m" == jmespath.search("spec.template.spec.containers[1].resources.requests.cpu", docs[0])
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ZerigoDNSDriver'
]
import copy
import base64
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from xml.etree import ElementTree as ET
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.utils.xml import findtext, findall
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
API_HOST = 'ns.zerigo.com'
API_VERSION = '1.1'
API_ROOT = '/api/%s/' % (API_VERSION)
VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']
# Number of items per page (maximum limit is 1000)
ITEMS_PER_PAGE = 100
class ZerigoError(LibcloudError):
def __init__(self, code, errors):
self.code = code
self.errors = errors or []
def __str__(self):
return 'Errors: %s' % (', '.join(self.errors))
def __repr__(self):
return ('<ZerigoError response code=%s, errors count=%s>' % (
self.code, len(self.errors)))
class ZerigoDNSResponse(XmlResponse):
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
status = int(self.status)
if status == 401:
if not self.body:
raise InvalidCredsError(str(self.status) + ': ' + self.error)
else:
raise InvalidCredsError(self.body)
elif status == 404:
context = self.connection.context
if context['resource'] == 'zone':
raise ZoneDoesNotExistError(value='', driver=self,
zone_id=context['id'])
elif context['resource'] == 'record':
raise RecordDoesNotExistError(value='', driver=self,
record_id=context['id'])
elif status != 503:
try:
body = ET.XML(self.body)
            except Exception:
raise MalformedResponseError('Failed to parse XML',
body=self.body)
errors = []
for error in findall(element=body, xpath='error'):
errors.append(error.text)
raise ZerigoError(code=status, errors=errors)
return self.body
class ZerigoDNSConnection(ConnectionUserAndKey):
host = API_HOST
secure = True
responseCls = ZerigoDNSResponse
def add_default_headers(self, headers):
auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8'))
return headers
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
if method in ("POST", "PUT"):
headers = {'Content-Type': 'application/xml; charset=UTF-8'}
return super(ZerigoDNSConnection, self).request(action=action,
params=params,
data=data,
method=method,
headers=headers)
class ZerigoDNSDriver(DNSDriver):
type = Provider.ZERIGO
name = 'Zerigo DNS'
website = 'http://www.zerigo.com/'
connectionCls = ZerigoDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.REDIRECT: 'REDIRECT',
RecordType.TXT: 'TXT',
RecordType.SRV: 'SRV',
RecordType.NAPTR: 'NAPTR',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SPF: 'SPF',
RecordType.GEO: 'GEO',
RecordType.URL: 'URL',
}
def iterate_zones(self):
return self._get_more('zones')
def iterate_records(self, zone):
return self._get_more('records', zone=zone)
def get_zone(self, zone_id):
path = API_ROOT + 'zones/%s.xml' % (zone_id)
self.connection.set_context({'resource': 'zone', 'id': zone_id})
data = self.connection.request(path).object
zone = self._to_zone(elem=data)
return zone
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
self.connection.set_context({'resource': 'record', 'id': record_id})
path = API_ROOT + 'hosts/%s.xml' % (record_id)
data = self.connection.request(path).object
record = self._to_record(elem=data, zone=zone)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/zones/create
@inherits: L{DNSDriver.create_zone}
"""
path = API_ROOT + 'zones.xml'
zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
extra=extra)
data = self.connection.request(action=path,
data=ET.tostring(zone_elem),
method='POST').object
zone = self._to_zone(elem=data)
return zone
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
"""
Update an existing zone.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/zones/update
@inherits: L{DNSDriver.update_zone}
"""
if domain:
raise LibcloudError('Domain cannot be changed', driver=self)
path = API_ROOT + 'zones/%s.xml' % (zone.id)
zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
extra=extra)
response = self.connection.request(action=path,
data=ET.tostring(zone_elem),
method='PUT')
assert response.status == httplib.OK
merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
valid_keys=VALID_ZONE_EXTRA_PARAMS,
extra=extra)
updated_zone = get_new_obj(obj=zone, klass=Zone,
attributes={'type': type,
'ttl': ttl,
'extra': merged})
return updated_zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
Provider API docs:
https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
@inherits: L{DNSDriver.create_record}
"""
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
record_elem = self._to_record_elem(name=name, type=type, data=data,
extra=extra)
response = self.connection.request(action=path,
data=ET.tostring(record_elem),
method='POST')
assert response.status == httplib.CREATED
record = self._to_record(elem=response.object, zone=zone)
return record
def update_record(self, record, name=None, type=None, data=None,
extra=None):
path = API_ROOT + 'hosts/%s.xml' % (record.id)
record_elem = self._to_record_elem(name=name, type=type, data=data,
extra=extra)
response = self.connection.request(action=path,
data=ET.tostring(record_elem),
method='PUT')
assert response.status == httplib.OK
merged = merge_valid_keys(params=copy.deepcopy(record.extra),
valid_keys=VALID_RECORD_EXTRA_PARAMS,
extra=extra)
updated_record = get_new_obj(obj=record, klass=Record,
attributes={'type': type,
'data': data,
'extra': merged})
return updated_record
def delete_zone(self, zone):
path = API_ROOT + 'zones/%s.xml' % (zone.id)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
response = self.connection.request(action=path, method='DELETE')
return response.status == httplib.OK
def delete_record(self, record):
path = API_ROOT + 'hosts/%s.xml' % (record.id)
self.connection.set_context({'resource': 'record', 'id': record.id})
response = self.connection.request(action=path, method='DELETE')
return response.status == httplib.OK
def ex_get_zone_by_domain(self, domain):
"""
Retrieve a zone object by the domain name.
@param domain: The domain which should be used
@type domain: C{str}
@rtype: L{Zone}
"""
path = API_ROOT + 'zones/%s.xml' % (domain)
self.connection.set_context({'resource': 'zone', 'id': domain})
data = self.connection.request(path).object
zone = self._to_zone(elem=data)
return zone
def ex_force_slave_axfr(self, zone):
"""
Force a zone transfer.
@param zone: Zone which should be used.
@type zone: L{Zone}
@rtype: L{Zone}
"""
path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
response = self.connection.request(path, method='POST')
assert response.status == httplib.ACCEPTED
return zone
def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None):
zone_elem = ET.Element('zone', {})
if domain:
domain_elem = ET.SubElement(zone_elem, 'domain')
domain_elem.text = domain
if type:
ns_type_elem = ET.SubElement(zone_elem, 'ns-type')
if type == 'master':
ns_type_elem.text = 'pri_sec'
elif type == 'slave':
if not extra or 'ns1' not in extra:
raise LibcloudError('ns1 extra attribute is required ' +
'when zone type is slave', driver=self)
ns_type_elem.text = 'sec'
ns1_elem = ET.SubElement(zone_elem, 'ns1')
ns1_elem.text = extra['ns1']
elif type == 'std_master':
# TODO: Each driver should provide supported zone types
# Slave name servers are elsewhere
if not extra or 'slave-nameservers' not in extra:
raise LibcloudError('slave-nameservers extra ' +
                                        'attribute is required when zone ' +
'type is std_master', driver=self)
ns_type_elem.text = 'pri'
slave_nameservers_elem = ET.SubElement(zone_elem,
'slave-nameservers')
slave_nameservers_elem.text = extra['slave-nameservers']
if ttl:
default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl')
default_ttl_elem.text = str(ttl)
if extra and 'tag-list' in extra:
tags = extra['tag-list']
tags_elem = ET.SubElement(zone_elem, 'tag-list')
tags_elem.text = ' '.join(tags)
return zone_elem
def _to_record_elem(self, name=None, type=None, data=None, extra=None):
record_elem = ET.Element('host', {})
if name:
name_elem = ET.SubElement(record_elem, 'hostname')
name_elem.text = name
if type is not None:
type_elem = ET.SubElement(record_elem, 'host-type')
type_elem.text = self.RECORD_TYPE_MAP[type]
if data:
data_elem = ET.SubElement(record_elem, 'data')
data_elem.text = data
if extra:
if 'ttl' in extra:
ttl_elem = ET.SubElement(record_elem, 'ttl',
{'type': 'integer'})
ttl_elem.text = str(extra['ttl'])
if 'priority' in extra:
# Only MX and SRV records support priority
priority_elem = ET.SubElement(record_elem, 'priority',
{'type': 'integer'})
priority_elem.text = str(extra['priority'])
if 'notes' in extra:
notes_elem = ET.SubElement(record_elem, 'notes')
notes_elem.text = extra['notes']
return record_elem
def _to_zones(self, elem):
zones = []
for item in findall(element=elem, xpath='zone'):
zone = self._to_zone(elem=item)
zones.append(zone)
return zones
def _to_zone(self, elem):
id = findtext(element=elem, xpath='id')
domain = findtext(element=elem, xpath='domain')
type = findtext(element=elem, xpath='ns-type')
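        # Zerigo reports ns-type values such as 'pri_sec', 'pri' or 'sec';
        # anything starting with 'pri' is treated as a master zone.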
type = 'master' if type.find('pri') == 0 else 'slave'
ttl = findtext(element=elem, xpath='default-ttl')
hostmaster = findtext(element=elem, xpath='hostmaster')
custom_ns = findtext(element=elem, xpath='custom-ns')
custom_nameservers = findtext(element=elem, xpath='custom-nameservers')
notes = findtext(element=elem, xpath='notes')
nx_ttl = findtext(element=elem, xpath='nx-ttl')
slave_nameservers = findtext(element=elem, xpath='slave-nameservers')
tags = findtext(element=elem, xpath='tag-list')
tags = tags.split(' ') if tags else []
extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns,
'custom-nameservers': custom_nameservers, 'notes': notes,
'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers,
'tags': tags}
zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
driver=self, extra=extra)
return zone
def _to_records(self, elem, zone):
records = []
for item in findall(element=elem, xpath='host'):
record = self._to_record(elem=item, zone=zone)
records.append(record)
return records
def _to_record(self, elem, zone):
id = findtext(element=elem, xpath='id')
name = findtext(element=elem, xpath='hostname')
type = findtext(element=elem, xpath='host-type')
type = self._string_to_record_type(type)
data = findtext(element=elem, xpath='data')
notes = findtext(element=elem, xpath='notes', no_text_value=None)
state = findtext(element=elem, xpath='state', no_text_value=None)
fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None)
priority = findtext(element=elem, xpath='priority', no_text_value=None)
ttl = findtext(element=elem, xpath='ttl', no_text_value=None)
if ttl:
ttl = int(ttl)
extra = {'notes': notes, 'state': state, 'fqdn': fqdn,
'priority': priority, 'ttl': ttl}
record = Record(id=id, name=name, type=type, data=data,
zone=zone, driver=self, extra=extra)
return record
def _get_more(self, rtype, **kwargs):
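        # Lazily page through the API, ITEMS_PER_PAGE items at a time, yielding
        # results until the reported result count has been exhausted.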
exhausted = False
last_key = None
while not exhausted:
items, last_key, exhausted = self._get_data(
rtype, last_key, **kwargs)
for item in items:
yield item
def _get_data(self, rtype, last_key, **kwargs):
# Note: last_key in this case really is a "last_page".
# TODO: Update base driver and change last_key to something more
# generic - e.g. marker
params = {}
params['per_page'] = ITEMS_PER_PAGE
params['page'] = last_key + 1 if last_key else 1
if rtype == 'zones':
path = API_ROOT + 'zones.xml'
            response = self.connection.request(path, params=params)
transform_func = self._to_zones
elif rtype == 'records':
zone = kwargs['zone']
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
response = self.connection.request(path, params=params)
transform_func = self._to_records
exhausted = False
result_count = int(response.headers.get('x-query-count', 0))
if (params['page'] * ITEMS_PER_PAGE) >= result_count:
exhausted = True
if response.status == httplib.OK:
items = transform_func(elem=response.object, **kwargs)
return items, params['page'], exhausted
else:
return [], None, True
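# Illustrative usage sketch (not part of the driver; assumes valid Zerigo
# credentials, i.e. an account user id and API key):
#
#   driver = ZerigoDNSDriver('user@example.com', 'api-key')
#   for zone in driver.iterate_zones():
#       print(zone.domain)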
|
|
"""Config flow to configure the AsusWrt integration."""
import logging
import os
import socket
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
)
from homeassistant.const import (
CONF_HOST,
CONF_MODE,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_DNSMASQ,
CONF_INTERFACE,
CONF_REQUIRE_IP,
CONF_SSH_KEY,
CONF_TRACK_UNKNOWN,
DEFAULT_DNSMASQ,
DEFAULT_INTERFACE,
DEFAULT_SSH_PORT,
DEFAULT_TRACK_UNKNOWN,
DOMAIN,
MODE_AP,
MODE_ROUTER,
PROTOCOL_SSH,
PROTOCOL_TELNET,
)
from .router import get_api
RESULT_CONN_ERROR = "cannot_connect"
RESULT_UNKNOWN = "unknown"
RESULT_SUCCESS = "success"
_LOGGER = logging.getLogger(__name__)
def _is_file(value) -> bool:
"""Validate that the value is an existing file."""
file_in = os.path.expanduser(str(value))
return os.path.isfile(file_in) and os.access(file_in, os.R_OK)
def _get_ip(host):
"""Get the ip address from the host name."""
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
class AsusWrtFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize AsusWrt config flow."""
self._host = None
@callback
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
vol.Required(
CONF_USERNAME, default=user_input.get(CONF_USERNAME, "")
): str,
vol.Optional(CONF_PASSWORD): str,
vol.Optional(CONF_SSH_KEY): str,
vol.Required(CONF_PROTOCOL, default=PROTOCOL_SSH): vol.In(
{PROTOCOL_SSH: "SSH", PROTOCOL_TELNET: "Telnet"}
),
vol.Required(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
vol.Required(CONF_MODE, default=MODE_ROUTER): vol.In(
{MODE_ROUTER: "Router", MODE_AP: "Access Point"}
),
}
),
errors=errors or {},
)
async def _async_check_connection(self, user_input):
"""Attempt to connect the AsusWrt router."""
api = get_api(user_input)
try:
await api.connection.async_connect()
except OSError:
_LOGGER.error("Error connecting to the AsusWrt router at %s", self._host)
return RESULT_CONN_ERROR
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Unknown error connecting with AsusWrt router at %s", self._host
)
return RESULT_UNKNOWN
if not api.is_connected:
_LOGGER.error("Error connecting to the AsusWrt router at %s", self._host)
return RESULT_CONN_ERROR
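        # Close the probe connection explicitly when using Telnet.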
conf_protocol = user_input[CONF_PROTOCOL]
if conf_protocol == PROTOCOL_TELNET:
api.connection.disconnect()
return RESULT_SUCCESS
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self._show_setup_form(user_input)
errors = {}
self._host = user_input[CONF_HOST]
pwd = user_input.get(CONF_PASSWORD)
ssh = user_input.get(CONF_SSH_KEY)
if not (pwd or ssh):
errors["base"] = "pwd_or_ssh"
elif ssh:
if pwd:
errors["base"] = "pwd_and_ssh"
else:
isfile = await self.hass.async_add_executor_job(_is_file, ssh)
if not isfile:
errors["base"] = "ssh_not_file"
if not errors:
ip_address = await self.hass.async_add_executor_job(_get_ip, self._host)
if not ip_address:
errors["base"] = "invalid_host"
if not errors:
result = await self._async_check_connection(user_input)
if result != RESULT_SUCCESS:
errors["base"] = result
if errors:
return self._show_setup_form(user_input, errors)
return self.async_create_entry(
title=self._host,
data=user_input,
)
async def async_step_import(self, user_input=None):
"""Import a config entry."""
return await self.async_step_user(user_input)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for AsusWrt."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_CONSIDER_HOME,
default=self.config_entry.options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
),
): vol.All(vol.Coerce(int), vol.Clamp(min=0, max=900)),
vol.Optional(
CONF_TRACK_UNKNOWN,
default=self.config_entry.options.get(
CONF_TRACK_UNKNOWN, DEFAULT_TRACK_UNKNOWN
),
): bool,
vol.Required(
CONF_INTERFACE,
default=self.config_entry.options.get(
CONF_INTERFACE, DEFAULT_INTERFACE
),
): str,
vol.Required(
CONF_DNSMASQ,
default=self.config_entry.options.get(
CONF_DNSMASQ, DEFAULT_DNSMASQ
),
): str,
}
)
if self.config_entry.data[CONF_MODE] == MODE_AP:
data_schema = data_schema.extend(
{
vol.Optional(
CONF_REQUIRE_IP,
default=self.config_entry.options.get(CONF_REQUIRE_IP, True),
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
|
|
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.db.utils import OperationalError, ProgrammingError
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text
from django.utils.encoding import python_2_unicode_compatible
class ContentTypeManager(models.Manager):
# Cache to avoid re-looking up ContentType objects all over the place.
# This cache is shared by all the get_for_* methods.
_cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self.__class__._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
elif model._deferred:
model = model._meta.proxy_for_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self.__class__._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Returns the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
# The ContentType entry was not found in the cache, therefore we
# proceed to load or create it.
try:
# We start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except (OperationalError, ProgrammingError):
# It's possible to migrate a single app before contenttypes,
# as it's not a required initial dependency (it's contrib!)
# Have a nice error for this.
raise RuntimeError(
"Error creating new content types. Please make sure contenttypes "
"is migrated before trying to migrate apps individually."
)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time we
# use get_or_create to take care of any race conditions.
# The smart_text() is needed around opts.verbose_name_raw because
# name_raw might be a django.utils.functional.__proxy__ object.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
defaults={'name': smart_text(opts.verbose_name_raw)},
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, **kwargs):
"""
Given *models, returns a dictionary mapping {model: content_type}.
"""
for_concrete_models = kwargs.pop('for_concrete_models', True)
# Final results
results = {}
# models that aren't already in the cache
needed_app_labels = set()
needed_models = set()
needed_opts = set()
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_app_labels.add(opts.app_label)
needed_models.add(opts.model_name)
needed_opts.add(opts)
else:
results[model] = ct
if needed_opts:
cts = self.filter(
app_label__in=needed_app_labels,
model__in=needed_models
)
for ct in cts:
model = ct.model_class()
if model._meta in needed_opts:
results[model] = ct
needed_opts.remove(model._meta)
self._add_to_cache(self.db, ct)
for opts in needed_opts:
# These weren't in the cache, or the DB, create them.
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
name=smart_text(opts.verbose_name_raw),
)
self._add_to_cache(self.db, ct)
results[ct.model_class()] = ct
return results
def get_for_id(self, id):
"""
Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_for_id).
"""
try:
ct = self.__class__._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache. This needs to happen during database
flushes to prevent caching of "stale" content type IDs (see
django.contrib.contenttypes.management.update_contenttypes for where
this gets called).
"""
self.__class__._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class() will return None.
# Hence, there is no reliance on model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self.__class__._cache.setdefault(using, {})[key] = ct
self.__class__._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
name = models.CharField(max_length=100)
app_label = models.CharField(max_length=100)
model = models.CharField(_('python model class name'), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
db_table = 'django_content_type'
ordering = ('name',)
unique_together = (('app_label', 'model'),)
def __str__(self):
# self.name is deprecated in favor of using model's verbose_name, which
# can be translated. Formal deprecation is delayed until we have DB
# migration to be able to remove the field from the database along with
# the attribute.
#
# We return self.name only when users have changed its value from the
# initial verbose_name_raw and might rely on it.
model = self.model_class()
if not model or self.name != model._meta.verbose_name_raw:
return self.name
else:
return force_text(model._meta.verbose_name)
def model_class(self):
"Returns the Python model class for this type of content."
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Returns all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
|
|
'''
Created on Oct 23, 2015
@author: kelvinguu
'''
import logging
import operator
import os.path
import random
import shutil
import traceback
import types
import json
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import OrderedDict, defaultdict, MutableMapping, Mapping
from contextlib import contextmanager
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
from pyhocon import ConfigTree, HOCONConverter, ConfigFactory
from gtd.io import makedirs
def sorted_by_value(d, ascending=True):
return OrderedDict(sorted(d.items(), key=operator.itemgetter(1), reverse=not ascending))
class FunctionWrapper(object):
"""Turn a function or method into a callable object.
Can be used as a decorator above method definitions, e.g.
class Something(object):
...
@FunctionWrapper
def some_method(self, ...):
...
Or, bound methods of an instance can be directly overriden
obj = Something()
obj.some_method = FunctionWrapper(obj.some_method)
"""
__metaclass__ = ABCMeta
def __init__(self, fxn):
self._orig_fxn = fxn
@property
def orig_fxn(self):
return self._orig_fxn
def __get__(self, instance, objtype=None):
"""Implement descriptor functionality."""
return self.as_method(instance, objtype)
def as_method(self, instance, objtype=None):
"""Make this object a method of the given object instance.
Args:
instance: any object instance
"""
return types.MethodType(self, instance, objtype)
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError
class Memoized(FunctionWrapper):
__metaclass__ = ABCMeta
def __init__(self, fxn):
"""Create memoized version of a function.
Args:
fxn (Callable): function to be memoized
"""
super(Memoized, self).__init__(fxn)
self._cache_hits = 0
self._calls = 0.
    def __call__(self, *args, **kwargs):
        use_cache = kwargs.pop('use_cache', True)
        if not use_cache:
            return self.orig_fxn(*args, **kwargs)
        self._calls += 1  # count cache-eligible calls so that hit_rate is meaningful
        key = self._cache_key(args, kwargs)
# logging.debug('cache key: {}'.format(key))
if self._in_cache(key):
# logging.debug('load from cache')
self._cache_hits += 1 # successfully return from cache
return self._from_cache(key)
# logging.debug('compute and save to cache')
val = self.orig_fxn(*args, **kwargs)
self._to_cache(key, val)
return val
@property
def hit_rate(self):
if self._calls <= 0:
return 0.
return self._cache_hits / self._calls
@abstractmethod
def _cache_key(self, args, kwargs):
raise NotImplementedError
@abstractmethod
def clear_cache(self):
raise NotImplementedError
@abstractmethod
def _in_cache(self, key):
raise NotImplementedError
@abstractmethod
def _from_cache(self, key):
raise NotImplementedError
@abstractmethod
def _to_cache(self, key, val):
raise NotImplementedError
@abstractproperty
def cache_size(self):
pass
class DictMemoized(Memoized):
def __init__(self, fxn, custom_key_fxn=None):
super(DictMemoized, self).__init__(fxn)
self.cache = {}
self._custom_key_fxn = custom_key_fxn
def _cache_key(self, args, kwargs):
if self._custom_key_fxn:
return self._custom_key_fxn(*args, **kwargs)
kwargs_key = tuple(sorted(kwargs.items()))
return (args, kwargs_key)
def clear_cache(self):
self.cache = {}
def _in_cache(self, key):
return key in self.cache
def _from_cache(self, key):
return self.cache[key]
def _to_cache(self, key, val):
self.cache[key] = val
@property
def cache_size(self):
return len(self.cache)
def memoize(fxn):
return DictMemoized(fxn)
def memoize_with_key_fxn(key_fxn):
return lambda fxn: DictMemoized(fxn, custom_key_fxn=key_fxn)
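# Example usage (illustrative only): `memoize` caches results keyed on the
# call's positional and keyword arguments, while `memoize_with_key_fxn` lets
# the caller derive the cache key, e.g.:
#
#   @memoize_with_key_fxn(lambda url, timeout=10: url)
#   def fetch(url, timeout=10):
#       ...
#
# Any memoized call accepts use_cache=False to bypass the cache for that call.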
def args_as_string(args, kwargs):
args_str = '_'.join([str(a) for a in args])
kwargs_str = '_'.join(['{}={}'.format(k, v) for k, v in kwargs.iteritems()])
items = [args_str, kwargs_str]
items = [s for s in items if s] # remove empty elements
key_str = '_'.join(items)
if not key_str:
key_str = 'NO_KEY'
return key_str
class FileMemoized(Memoized):
def __init__(self, fxn, cache_dir, serialize, deserialize):
super(FileMemoized, self).__init__(fxn)
self.cache_dir = cache_dir
self.serialize = serialize
self.deserialize = deserialize
makedirs(cache_dir)
def _cache_key(self, args, kwargs):
"""Compute the name of the file."""
key_str = args_as_string(args, kwargs)
return os.path.join(self.cache_dir, '{}.txt'.format(key_str))
def _in_cache(self, key):
return os.path.exists(key)
def clear_cache(self):
shutil.rmtree(self.cache_dir)
makedirs(self.cache_dir)
def _to_cache(self, key, val):
with open(key, 'w') as f:
self.serialize(f, val)
def _from_cache(self, key):
with open(key, 'r') as f:
return self.deserialize(f)
@property
def cache_size(self):
raise NotImplementedError
def file_memoize(cache_dir, serialize, deserialize):
return lambda fxn: FileMemoized(fxn, cache_dir, serialize, deserialize)
def sample_if_large(arr, max_size, replace=True):
if len(arr) > max_size:
idx = np.random.choice(len(arr), size=max_size, replace=replace)
return [arr[i] for i in idx]
return list(arr)
def flatten(lol):
"""
Flatten a list of lists
"""
return [item for sublist in lol for item in sublist]
def chunks(l, n):
"""
Return a generator of lists, each of size n (the last list may be less than n)
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
def ensure_unicode(s):
assert isinstance(s, basestring)
if not isinstance(s, unicode):
s = unicode(s, 'utf-8')
return s
class UnicodeMixin(object):
__slots__ = []
@abstractmethod
def __unicode__(self):
raise NotImplementedError
def __str__(self):
return repr(self)
def __repr__(self):
return unicode(self).encode('utf-8')
class EqualityMixinSlots(object):
"""Equality mixin for classes using __slots__"""
__slots__ = []
class Missing(object):
pass # just a special object to denote that a value is missing. Is only equal to itself.
__MISSING = Missing()
@property
def _slot_vals(self):
vals = []
for slots in [getattr(cls, '__slots__', tuple()) for cls in type(self).__mro__]:
for slot in slots:
try:
val = getattr(self, slot)
except AttributeError:
val = self.__MISSING
vals.append(val)
return tuple(vals)
def __eq__(self, other):
# must be strictly same type
if type(other) != type(self):
return False
if self._slot_vals != other._slot_vals:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._slot_vals)
class EqualityMixin(object):
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def data_split(items, dev_part=0.1, test_part=0.1):
# don't allow duplicates
assert len(set(items)) == len(items)
# remaining portion is set aside for train
assert dev_part + test_part < 1.0
items_copy = list(items)
random.shuffle(items_copy)
n = len(items_copy)
ndev = int(n * dev_part)
ntest = int(n * test_part)
dev = items_copy[:ndev]
test = items_copy[ndev:ndev + ntest]
train = items_copy[ndev + ntest:]
# verify that there is no overlap
train_set = set(train)
dev_set = set(dev)
test_set = set(test)
assert len(train_set.intersection(dev_set)) == 0
assert len(train_set.intersection(test_set)) == 0
print 'train {}, dev {}, test {}'.format(len(train), len(dev), len(test))
return train, dev, test
def compute_if_absent(d, key, keyfunc):
val = d.get(key)
if val is None:
val = keyfunc(key)
d[key] = val
return val
class Bunch(object):
"""A simple class for holding arbitrary attributes. Recommended by the famous Martelli bot."""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
return repr(self.__dict__)
def best_threshold(scores, labels, debug=False):
# find best threshold in O(nlogn)
# does not handle scores of infinity or -infinity
items = zip(scores, labels)
items.sort()
total = len(items)
total_pos = len([l for l in labels if l])
def accuracy(p, n):
correct_n = n
correct_p = total_pos - p
return float(correct_n + correct_p) / total
# predict True iff score > thresh
pos = 0 # no. pos <= thresh
neg = 0 # no. neg <= thresh
thresh_accs = [(float('-inf'), accuracy(pos, neg))]
for thresh, label in items:
if label:
pos += 1
else:
neg += 1
thresh_accs.append((thresh, accuracy(pos, neg)))
if debug:
import matplotlib.pyplot as plt
from gtd.plot import plot_pdf
x, y = zip(*thresh_accs)
plt.figure()
plt.plot(x, y)
pos_scores = [s for s, l in items if l]
neg_scores = [s for s, l in items if not l]
plot_pdf(pos_scores, 0.1, color='b')
plot_pdf(neg_scores, 0.1, color='r')
plt.show()
return max(thresh_accs, key=operator.itemgetter(1))[0]
def as_batches(l, batch_size):
assert batch_size >= 1
batch = []
for item in l:
if len(batch) == batch_size:
yield batch
batch = []
batch.append(item)
# final batch may be smaller
if len(batch) != 0:
yield batch
# TODO: test
def get_batch(data, batch_size, k):
"""Get the kth batch from a data sequence
If the final batch is less than batch_size, this function loops back to the beginning of data
so that the returned batch is exactly batch_size.
Args:
data: a list of examples
batch_size: the size of the returned batch
k: the batch index you want to get.
"""
return [data[i % len(data)] for i in xrange(k * batch_size, (k + 1) * batch_size)]
# TODO: test
def batch_compute(data, batch_fxn, batch_size):
"""Evaluate the batch function on a list of items.
Args:
data: a list of examples
batch_fxn: a function which only accepts a list of exactly length batch_size,
and returns a list of the same length
batch_size: the batch size
Returns:
a list of length = len(data)
"""
n = len(data)
num_batches = n / batch_size + 1
final_trim_size = n % batch_size
# map
results = []
for k in range(num_batches):
batch = get_batch(data, batch_size, k) # circles around
result = batch_fxn(batch)
results.append(result)
# remove the examples that looped around to the beginning of data
results[-1] = results[-1][:final_trim_size]
return flatten(results)
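# Illustrative sketch (documentation only): `batch_compute` pads the final
# batch by wrapping around to the start of `data`, then trims the wrapped
# results, so `batch_fxn` only ever sees lists of exactly `batch_size`.
def _batch_compute_usage_example():
    double_all = lambda batch: [2 * x for x in batch]
    # 5 items with batch_size 2 -> batches [1, 2], [3, 4], [5, 1]; the result
    # for the wrapped-around 1 is trimmed off, giving [2, 4, 6, 8, 10].
    return batch_compute([1, 2, 3, 4, 5], double_all, batch_size=2)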
def fixed_length(l, length, pad_val):
"""Given a list of arbitrary length, make it fixed length by padding or truncating.
(Makes a shallow copy of l, then modifies this copy.)
Args:
l: a list
length: desired length
pad_val: values padded to the end of l, if l is too short
Returns:
        a list with length exactly as specified.
"""
if len(l) < length:
fixed = list(l) # make shallow copy
fixed += [pad_val] * (length - len(l)) # pad
return fixed
else:
return l[:length] # truncate
class HomogeneousBatchSampler(object):
def __init__(self, data, bucket_fxn):
buckets = defaultdict(list)
for d in data:
buckets[bucket_fxn(d)].append(d)
keys = buckets.keys()
freqs = np.array([len(buckets[k]) for k in keys], dtype=float)
probs = freqs / np.sum(freqs)
self.keys = keys
self.probs = probs
self.buckets = buckets
def sample(self, batch_size):
# WARNING! This sampling scheme is only "correct" if each len(bucket) > batch_size
# sample a bucket according to its frequency
key = np.random.choice(self.keys, p=self.probs)
bucket = self.buckets[key]
# sample a batch from the bucket
batch = np.random.choice(bucket, size=batch_size, replace=True)
return batch
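# Illustrative sketch (documentation only): group examples into buckets (here,
# by string length) and draw each batch from a single bucket, so every batch is
# homogeneous with respect to the bucketing function.
def _homogeneous_batch_sampler_example():
    data = ['a', 'bb', 'cc', 'ddd', 'eee', 'fff']
    sampler = HomogeneousBatchSampler(data, bucket_fxn=len)
    batch = sampler.sample(batch_size=2)
    return set(len(x) for x in batch)  # exactly one length per batch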
class Frozen(object):
"""Objects that inherit from Frozen cannot set or add new attributes unless inside an `unfreeze` context."""
__frozen = True
@staticmethod
@contextmanager
def unfreeze():
prev_state = Frozen.__frozen
Frozen.__frozen = False
yield
Frozen.__frozen = prev_state # set back to previous state
def __setattr__(self, key, value):
if Frozen.__frozen:
raise NotImplementedError('Object is frozen.')
else:
super(Frozen, self).__setattr__(key, value)
def __delattr__(self, item):
if Frozen.__frozen:
raise NotImplementedError('Object is frozen.')
else:
super(Frozen, self).__delattr__(item)
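# Illustrative sketch (documentation only): a subclass of Frozen can only set
# attributes inside the `unfreeze` context; afterwards, assignment raises.
class _FrozenPoint(Frozen):
    def __init__(self, x, y):
        with Frozen.unfreeze():
            self.x = x
            self.y = y
    # later, `point.x = 5` raises NotImplementedError('Object is frozen.')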
def sigmoid(x):
# scipy.special.expit will return NaN if x gets larger than about 700, which is just wrong
# compute using two different approaches
# they are each stable over a different interval of x
with warnings.catch_warnings():
warnings.simplefilter('ignore')
numer = np.exp(x)
s0 = numer / (1.0 + numer)
denom = 1.0 + np.exp(-x)
s1 = 1.0 / denom
# replace nans
if isinstance(x, float):
if np.isnan(s0):
s0 = s1
else:
nans = np.isnan(s0)
s0[nans] = s1[nans]
return s0
class NestedDict(MutableMapping):
def __init__(self, d=None):
"""Create a NestedDict.
Args:
d (dict): a nested Python dictionary. Defaults to an empty dictionary.
NOTE: if d contains empty dicts at its leaves, these will be dropped.
"""
if d is None:
d = {}
self.d = {}
for keys, val in self._flatten(d).iteritems():
self.set_nested(keys, val)
def __iter__(self):
"""Iterate through top-level keys."""
return iter(self.d)
def __delitem__(self, key):
del self.d[key]
def __getitem__(self, key):
return self.d[key]
def __len__(self):
"""Total number of leaf nodes."""
l = 0
for v in self.itervalues():
if isinstance(v, NestedDict):
l += len(v)
else:
l += 1
return l
def __setitem__(self, key, value):
"""Set a key-value pair.
If value is a Mapping, it will be converted into a NestedDict.
"""
if isinstance(value, Mapping):
value = NestedDict(value)
self.d[key] = value
def get_nested(self, keys):
d = self
for k in keys:
if not isinstance(d, NestedDict):
raise KeyError(keys)
d = d[k]
return d
def set_nested(self, keys, val):
first_keys, last_key = keys[:-1], keys[-1]
d = self
for k in first_keys:
if k not in d:
d[k] = NestedDict()
d = d[k]
d[last_key] = val
def __repr__(self):
return repr(self.d)
def as_dict(self):
d = {}
for key, sub in self.iteritems():
if isinstance(sub, NestedDict):
val = sub.as_dict()
else:
val = sub
d[key] = val
return d
@staticmethod
def _flatten(d):
flattened = {}
def helper(key_tuple, d):
if not isinstance(d, Mapping): # leaf node
flattened[key_tuple] = d
return
for key, val in d.iteritems():
helper(key_tuple + (key,), val)
helper(tuple(), d)
return flattened
def flattened(self):
return self._flatten(self)
def leaves(self):
return self.flattened().values()
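# Illustrative sketch (documentation only): NestedDict wraps a nested dict and
# exposes its leaves through tuple-valued keys.
def _nested_dict_usage_example():
    nd = NestedDict({'model': {'lr': 0.1, 'layers': 2}})
    nd.set_nested(('model', 'dropout'), 0.5)
    lr = nd.get_nested(('model', 'lr'))  # 0.1
    n_leaves = len(nd)                   # 3 leaf values
    return lr, n_leaves, nd.as_dict()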
def ranks(scores, ascending=True):
"""Assign a rank to each score.
Args:
scores (list[float]): a list of scores
ascending (bool): if True, then higher scores will have smaller rank
Returns:
list[int]: a list of ranks, where ranks[i] is the rank of the value scores[i]
"""
if isinstance(scores, list):
scores = np.array(scores)
else:
assert len(scores.shape) == 1
flip = 1 if ascending else -1
idx = np.argsort(flip * scores)
ranks = np.empty(scores.shape, dtype=int)
ranks[idx] = np.arange(len(scores))
# ranks should start from 1
ranks += 1
return list(ranks)
def quantiles(vals, ps):
vals = sorted(vals)
max_idx = len(vals) - 1
qs = []
for p in ps:
assert 0 <= p <= 1
i = int(round(max_idx * p))
qs.append(vals[i])
return qs
def sample_excluding(items, exclude):
candidates = list(items) # shallow copy
random.shuffle(candidates)
for cand in candidates:
if cand not in exclude:
return cand
# if everything is excluded, return None
return None
def map_array(fxn, array):
"""Apply fxn to all elements of array.
Args:
fxn: a function
array: a list of lists of lists of ... If it is a numpy array, converts it to a list.
Returns:
a new array, mapped
>>> arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> map_array(lambda x: 10 * x, arr)
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]]
"""
if isinstance(array, np.ndarray):
array = array.tolist()
new_array = []
for val in array:
new_val = map_array(fxn, val) if isinstance(val, list) else fxn(val)
new_array.append(new_val)
return new_array
def group(items, grouper):
d = defaultdict(list)
for item in items:
labels = grouper(item)
for label in labels:
d[label].append(item)
return d
# TODO(kelvin): test this
def generator_ignore_errors(iterator):
"""Loop through iterator, but ignore exceptions.
Logs a warning if there is an exception.
Args:
iterator: any object with a __next__ method
Yields:
the next element of the iterator
"""
i = 0
while True:
try:
try:
yield next(iterator)
except StopIteration:
# stop when we're out of elements
break
except Exception:
# If this generator is closed before it is exhausted (e.g. if we break out of a for-loop)
# it will get garbage collected, and throw a GeneratorExit error
# GeneratorExit does not inherit from Exception in Python >2.6, so we will not catch it here
# Critically, this line should NOT be changed to just "except:", as it would catch GeneratorExit
logging.warn('Error parsing line {}'.format(i))
i += 1
class SimpleExecutor(object):
def __init__(self, fxn, max_workers=120):
self._fxn = fxn
self._executor = ThreadPoolExecutor(max_workers)
self._future_to_key = {} # map from future to a key for later access
def submit(self, key, x):
future = self._executor.submit(self._fxn, x)
self._future_to_key[future] = key
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
def results(self):
for future in as_completed(self._future_to_key):
key = self._future_to_key[future]
try:
result = future.result()
except BaseException:
f = Failure.with_message('SimpleExecutor failed to compute key: {}'.format(key))
logging.error(f.traceback)
result = f
yield key, result
def shutdown(self):
self._executor.shutdown()
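# Illustrative sketch (documentation only): SimpleExecutor fans work out over a
# thread pool and yields (key, result) pairs as they complete; failed calls
# come back as Failure objects instead of raising. The `items` argument and the
# worker function below are made-up examples.
def _simple_executor_usage_example(items):
    results = {}
    with SimpleExecutor(lambda s: len(s), max_workers=4) as executor:
        for name, value in items.items():
            executor.submit(name, value)
        for name, result in executor.results():
            results[name] = result  # a value, or a Failure on error
    return results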
class Failure(object):
"""Represents the result of a failed computation."""
@staticmethod
def with_message(msg):
f = Failure(message=msg)
logging.error(f.message)
return f
@staticmethod
def silent(msg):
return Failure(message=msg)
def __init__(self, uid=None, message='Failure'):
if uid is None:
uid = id(self)
self._uid = uid
self._msg = message
self._traceback = traceback.format_exc()
def __repr__(self):
return self._msg
@property
def uid(self):
return self._uid
@property
def traceback(self):
return self._traceback
@property
def message(self):
return self._msg
def __eq__(self, other):
if not isinstance(other, Failure):
return False
return self.uid == other.uid
def __ne__(self, other):
return not self.__eq__(other)
@contextmanager
def random_seed(seed=None):
"""Execute code inside this with-block using the specified seed.
If no seed is specified, nothing happens.
Does not affect the state of the random number generator outside this block.
Not thread-safe.
Args:
seed (int): random seed
"""
if seed is None:
yield
else:
py_state = random.getstate() # save state
np_state = np.random.get_state()
random.seed(seed) # alter state
np.random.seed(seed)
yield
random.setstate(py_state) # restore state
np.random.set_state(np_state)
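# Illustrative sketch (documentation only): `random_seed` pins both Python's and
# numpy's RNG inside the block, then restores whatever state was active before.
def _random_seed_usage_example():
    with random_seed(0):
        a = np.random.rand(3)
    with random_seed(0):
        b = np.random.rand(3)
    return np.allclose(a, b)  # True: same seed, same draws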
class cached_property(object):
"""Descriptor (non-data) for building an attribute on-demand on first use."""
def __init__(self, factory):
self._attr_name = factory.__name__
self._factory = factory
def __get__(self, instance, owner):
# Build the attribute.
attr = self._factory(instance)
# Cache the value; hide ourselves.
setattr(instance, self._attr_name, attr)
return attr
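# Illustrative sketch (documentation only): on first access the factory runs and
# its return value replaces the descriptor on the instance, so later accesses
# are plain attribute lookups with no recomputation.
class _CachedPropertyExample(object):
    @cached_property
    def expensive(self):
        return sum(range(1000))  # computed once per instance, then cached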
class set_once_attribute(object):
def __init__(self, attr_name):
self._attr_name = attr_name
def __get__(self, instance, owner):
return getattr(instance, self._attr_name)
def __set__(self, instance, value):
if hasattr(instance, self._attr_name):
raise RuntimeError('Cannot set {} more than once.'.format(self._attr_name))
setattr(instance, self._attr_name, value)
class Config(object):
"""A wrapper around the pyhocon ConfigTree object.
Allows you to access values in the ConfigTree as attributes.
"""
def __init__(self, config_tree=None):
"""Create a Config.
Args:
config_tree (ConfigTree)
"""
if config_tree is None:
config_tree = ConfigTree()
self._config_tree = config_tree
def __getattr__(self, item):
val = self._config_tree[item]
if isinstance(val, ConfigTree):
return Config(val)
else:
return val
def get(self, key, default=None):
val = self._config_tree.get(key, default)
if isinstance(val, ConfigTree):
return Config(val)
else:
return val
def put(self, key, value, append=False):
"""Put a value into the Config (dot separated)
Args:
key (str): key to use (dot separated). E.g. `a.b.c`
value (object): value to put
"""
self._config_tree.put(key, value, append=append)
def __repr__(self):
return self.to_str()
def to_str(self):
return HOCONConverter.convert(self._config_tree, 'hocon')
def to_json(self):
return json.loads(HOCONConverter.convert(self._config_tree, 'json'))
def to_file(self, path):
with open(path, 'w') as f:
f.write(self.to_str())
@classmethod
def from_file(cls, path):
config_tree = ConfigFactory.parse_file(path)
return cls(config_tree)
@classmethod
def from_dict(cls, d):
return Config(ConfigFactory.from_dict(d))
@classmethod
def merge(cls, config1, config2):
assert isinstance(config1, Config)
assert isinstance(config2, Config)
return cls(ConfigTree.merge_configs(config1._config_tree, config2._config_tree))
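# Illustrative sketch (documentation only): Config wraps a pyhocon ConfigTree so
# nested values read as attributes and dotted keys can be written with `put`.
# The keys and values below are made up for the example.
def _config_usage_example():
    config = Config.from_dict({'model': {'hidden_dim': 128}})
    config.put('optim.lr', 0.01)
    return config.model.hidden_dim, config.get('optim.lr', 0.001)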
def softmax(logits):
"""Take the softmax over a set of logit scores.
Args:
logits (np.array): a 1D numpy array
Returns:
a 1D numpy array of probabilities, of the same shape.
"""
if not isinstance(logits, np.ndarray):
logits = np.array(logits) # 1D array
logits = logits - np.max(logits) # re-center
exp_logits = np.exp(logits)
probs = exp_logits / np.sum(exp_logits)
return probs
def bleu(reference, predict):
"""Compute sentence-level bleu score.
Args:
reference (list[str])
predict (list[str])
"""
from nltk.translate import bleu_score
if len(predict) == 0:
if len(reference) == 0:
return 1.0
else:
return 0.0
# TODO(kelvin): is this quite right?
# use a maximum of 4-grams. If 4-grams aren't present, use only lower n-grams.
n = min(4, len(reference), len(predict))
weights = tuple([1. / n] * n) # uniform weight on n-gram precisions
return bleu_score.sentence_bleu([reference], predict, weights)
class ComparableMixin(object):
__metaclass__ = ABCMeta
__slots__ = []
@abstractproperty
def _cmpkey(self):
pass
def _compare(self, other, method):
try:
return method(self._cmpkey, other._cmpkey)
except (AttributeError, TypeError):
            # _cmpkey not implemented, or returned a different type,
# so I can't compare with "other".
return NotImplemented
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Beat and tempo
==============
.. autosummary::
:toctree: generated/
beat_track
estimate_tempo
"""
import numpy as np
import scipy
from . import cache
from . import core
from . import onset
from . import util
from .util.exceptions import ParameterError
__all__ = ['beat_track', 'estimate_tempo']
def beat_track(y=None, sr=22050, onset_envelope=None, hop_length=512,
start_bpm=120.0, tightness=100, trim=True, bpm=None):
r'''Dynamic programming beat tracker.
Beats are detected in three stages, following the method of [1]_:
1. Measure onset strength
2. Estimate tempo from onset correlation
3. Pick peaks in onset strength approximately consistent with estimated
tempo
.. [1] Ellis, Daniel PW. "Beat tracking by dynamic programming."
Journal of New Music Research 36.1 (2007): 51-60.
http://labrosa.ee.columbia.edu/projects/beattrack/
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : int > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,)] or None
(optional) pre-computed onset strength envelope.
hop_length : int > 0 [scalar]
number of audio samples between successive `onset_envelope` values
start_bpm : float > 0 [scalar]
initial guess for the tempo estimator (in beats per minute)
tightness : float [scalar]
tightness of beat distribution around tempo
trim : bool [scalar]
trim leading/trailing beats with weak onsets
bpm : float [scalar]
(optional) If provided, use `bpm` as the tempo instead of
estimating it from `onsets`.
Returns
-------
tempo : float [scalar, non-negative]
estimated global tempo (in beats per minute)
beats : np.ndarray [shape=(m,)]
frame numbers of estimated beat events
.. note::
If no onset strength could be detected, beat_tracker estimates 0 BPM
and returns an empty list.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
See Also
--------
librosa.onset.onset_strength
Examples
--------
Track beats using time series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> tempo
129.19921875
Print the first 20 beat frames
>>> beats[:20]
array([ 461, 500, 540, 580, 619, 658, 698, 737, 777,
817, 857, 896, 936, 976, 1016, 1055, 1095, 1135,
1175, 1214])
Or print them as timestamps
>>> librosa.frames_to_time(beats[:20], sr=sr)
array([ 0.093, 0.534, 0.998, 1.463, 1.927, 2.368, 2.833,
3.297, 3.762, 4.203, 4.667, 5.132, 5.596, 6.06 ,
6.525, 6.989, 7.454, 7.918, 8.382, 8.847])
Track beats using a pre-computed onset envelope
>>> onset_env = librosa.onset.onset_strength(y, sr=sr,
... aggregate=np.median)
>>> tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
... sr=sr)
>>> tempo
64.599609375
>>> beats[:20]
array([ 461, 500, 540, 580, 619, 658, 698, 737, 777,
817, 857, 896, 936, 976, 1016, 1055, 1095, 1135,
1175, 1214])
Plot the beat events against the onset strength envelope
>>> import matplotlib.pyplot as plt
>>> hop_length = 512
>>> plt.figure()
>>> plt.plot(librosa.util.normalize(onset_env), label='Onset strength')
>>> plt.vlines(beats, 0, 1, alpha=0.5, color='r',
... linestyle='--', label='Beats')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> # Limit the plot to a 15-second window
>>> plt.xlim([10 * sr / hop_length, 25 * sr / hop_length])
>>> plt.xticks(np.linspace(10, 25, 5) * sr / hop_length,
... np.linspace(10, 25, 5))
>>> plt.xlabel('Time (s)')
>>> plt.tight_layout()
'''
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError('y or onset_envelope must be provided')
onset_envelope = onset.onset_strength(y=y,
sr=sr,
hop_length=hop_length,
aggregate=np.median)
# Do we have any onsets to grab?
if not onset_envelope.any():
return (0, np.array([], dtype=int))
# Estimate BPM if one was not provided
if bpm is None:
bpm = estimate_tempo(onset_envelope,
sr=sr,
hop_length=hop_length,
start_bpm=start_bpm)
# Then, run the tracker
beats = __beat_tracker(onset_envelope,
bpm,
float(sr) / hop_length,
tightness,
trim)
return (bpm, beats)
@cache
def estimate_tempo(onset_envelope, sr=22050, hop_length=512, start_bpm=120,
std_bpm=1.0, ac_size=4.0, duration=90.0, offset=0.0):
"""Estimate the tempo (beats per minute) from an onset envelope
Parameters
----------
onset_envelope : np.ndarray [shape=(n,)]
onset strength envelope
sr : int > 0 [scalar]
sampling rate of the time series
hop_length : int > 0 [scalar]
hop length of the time series
start_bpm : float [scalar]
initial guess of the BPM
std_bpm : float > 0 [scalar]
standard deviation of tempo distribution
ac_size : float > 0 [scalar]
length (in seconds) of the auto-correlation window
duration : float > 0 [scalar]
length of signal (in seconds) to use in estimating tempo
offset : float > 0 [scalar]
offset (in seconds) of signal sample to use in estimating tempo
Returns
-------
tempo : float [scalar]
estimated tempo (beats per minute)
See Also
--------
librosa.onset.onset_strength
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> onset_env = librosa.onset.onset_strength(y, sr=sr)
>>> tempo = librosa.beat.estimate_tempo(onset_env, sr=sr)
>>> tempo
129.19921875
Plot the estimated tempo against the onset autocorrelation
>>> import matplotlib.pyplot as plt
>>> # Compute 2-second windowed autocorrelation
>>> hop_length = 512
>>> ac = librosa.autocorrelate(onset_env, 2 * sr // hop_length)
>>> # Convert tempo estimate from bpm to frames
>>> tempo_frames = (60 * sr / hop_length) / tempo
>>> plt.plot(librosa.util.normalize(ac),
... label='Onset autocorrelation')
>>> plt.vlines([tempo_frames], 0, 1,
... color='r', alpha=0.75, linestyle='--',
... label='Tempo: {:.2f} BPM'.format(tempo))
>>> librosa.display.time_ticks(librosa.frames_to_time(np.arange(len(ac)),
... sr=sr))
>>> plt.xlabel('Lag')
>>> plt.legend()
>>> plt.axis('tight')
"""
if start_bpm <= 0:
raise ParameterError('start_bpm must be strictly positive')
fft_res = float(sr) / hop_length
# Chop onsets to X[(upper_limit - duration):upper_limit]
# or as much as will fit
maxcol = int(min(len(onset_envelope)-1,
np.round((offset + duration) * fft_res)))
mincol = int(max(0, maxcol - np.round(duration * fft_res)))
# Use auto-correlation out of 4 seconds (empirically set??)
ac_window = min(maxcol, np.round(ac_size * fft_res))
# Compute the autocorrelation
x_corr = core.autocorrelate(onset_envelope[mincol:maxcol], ac_window)
# re-weight the autocorrelation by log-normal prior
bpms = 60.0 * fft_res / (np.arange(1, ac_window+1))
# Smooth the autocorrelation by a log-normal distribution
x_corr = x_corr * np.exp(-0.5 * ((np.log2(bpms / start_bpm)) / std_bpm)**2)
# Get the local maximum of weighted correlation
x_peaks = util.localmax(x_corr)
# Zero out all peaks before the first negative
x_peaks[:np.argmax(x_corr < 0)] = False
# Choose the best peak out of .33, .5, 2, 3 * start_period
candidates = np.argmax(x_peaks * x_corr) * np.asarray([1./3, 0.5, 1, 2, 3])
candidates = candidates[candidates < ac_window].astype(int)
best_period = np.argmax(x_corr[candidates])
if candidates[best_period] > 0:
return 60.0 * fft_res / candidates[best_period]
return start_bpm
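# Illustrative note (added for documentation): both the tempo estimator above
# and the tracker below convert between tempo in BPM and lag in onset-envelope
# frames via `bpm = 60 * (sr / hop_length) / lag`. A quick round-trip check:
def _bpm_lag_roundtrip_example(sr=22050, hop_length=512, bpm=120.0):
    fft_res = float(sr) / hop_length  # onset-envelope frames per second
    lag = 60.0 * fft_res / bpm        # beat period, in frames
    return 60.0 * fft_res / lag       # recovers the original bpm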
@cache
def __beat_tracker(onset_envelope, bpm, fft_res, tightness, trim):
"""Internal function that tracks beats in an onset strength envelope.
Parameters
----------
onset_envelope : np.ndarray [shape=(n,)]
onset strength envelope
bpm : float [scalar]
tempo estimate
fft_res : float [scalar]
resolution of the fft (sr / hop_length)
tightness: float [scalar]
how closely do we adhere to bpm?
trim : bool [scalar]
trim leading/trailing beats with weak onsets?
Returns
-------
beats : np.ndarray [shape=(n,)]
frame numbers of beat events
"""
if bpm <= 0:
raise ParameterError('bpm must be strictly positive')
# convert bpm to a sample period for searching
period = round(60.0 * fft_res / bpm)
# localscore is a smoothed version of AGC'd onset envelope
localscore = __beat_local_score(onset_envelope, period)
# run the DP
backlink, cumscore = __beat_track_dp(localscore, period, tightness)
# get the position of the last beat
beats = [__last_beat(cumscore)]
# Reconstruct the beat path from backlinks
while backlink[beats[-1]] >= 0:
beats.append(backlink[beats[-1]])
# Put the beats in ascending order
# Convert into an array of frame numbers
beats = np.array(beats[::-1], dtype=int)
# Discard spurious trailing beats
beats = __trim_beats(localscore, beats, trim)
return beats
# -- Helper functions for beat tracking
def __normalize_onsets(onsets):
'''Maps onset strength function into the range [0, 1]'''
norm = onsets.std(ddof=1)
if norm > 0:
onsets = onsets / norm
return onsets
def __beat_local_score(onset_envelope, period):
    '''Construct the local score for an onset envelope and given period'''
window = np.exp(-0.5 * (np.arange(-period, period+1)*32.0/period)**2)
return scipy.signal.convolve(__normalize_onsets(onset_envelope),
window,
'same')
def __beat_track_dp(localscore, period, tightness):
"""Core dynamic program for beat tracking"""
backlink = np.zeros_like(localscore, dtype=int)
cumscore = np.zeros_like(localscore)
# Search range for previous beat
window = np.arange(-2 * period, -np.round(period / 2) + 1, dtype=int)
# Make a score window, which begins biased toward start_bpm and skewed
if tightness <= 0:
raise ParameterError('tightness must be strictly positive')
txwt = -tightness * (np.log(-window / period) ** 2)
# Are we on the first beat?
first_beat = True
for i, score_i in enumerate(localscore):
# Are we reaching back before time 0?
z_pad = np.maximum(0, min(- window[0], len(window)))
# Search over all possible predecessors
candidates = txwt.copy()
candidates[z_pad:] = candidates[z_pad:] + cumscore[window[z_pad:]]
# Find the best preceding beat
beat_location = np.argmax(candidates)
# Add the local score
cumscore[i] = score_i + candidates[beat_location]
# Special case the first onset. Stop if the localscore is small
if first_beat and score_i < 0.01 * localscore.max():
backlink[i] = -1
else:
backlink[i] = window[beat_location]
first_beat = False
# Update the time range
window = window + 1
return backlink, cumscore
def __last_beat(cumscore):
"""Get the last beat from the cumulative score array"""
maxes = util.localmax(cumscore)
med_score = np.median(cumscore[np.argwhere(maxes)])
# The last of these is the last beat (since score generally increases)
return np.argwhere((cumscore * maxes * 2 > med_score)).max()
def __trim_beats(localscore, beats, trim):
"""Final post-processing: throw out spurious leading/trailing beats"""
smooth_boe = scipy.signal.convolve(localscore[beats],
scipy.signal.hann(5),
'same')
if trim:
threshold = 0.5 * ((smooth_boe**2).mean()**0.5)
else:
threshold = 0.0
valid = np.argwhere(smooth_boe > threshold)
return beats[valid.min():valid.max()]
|
|
"""Module for commonly reused classes and functions."""
import os
import sys
from contextlib import contextmanager
from distutils.util import strtobool
from enum import EnumMeta, IntEnum
from typing import Any, Dict, List, Optional, Union
import requests
from tqdm import tqdm
class CLTKEnumMeta(EnumMeta):
def __repr__(cls):
return cls.__name__
class CLTKEnum(IntEnum, metaclass=CLTKEnumMeta):
def __repr__(self):
return f"{self._name_}"
__str__ = __repr__
def __eq__(self, other):
return False if type(self) != type(other) else IntEnum.__eq__(self, other)
def file_exists(file_path: str, is_dir: bool = False) -> bool:
"""Try to expand `~/` and check if a file or dir exists.
Optionally check if it's a dir.
>>> file_exists('~/fake_file')
False
>>> file_exists('~/', is_dir=True)
True
"""
file_path_expanded = os.path.expanduser(file_path) # type: str
if is_dir:
return os.path.isdir(file_path_expanded)
return os.path.isfile(file_path_expanded)
def reverse_dict(
input_dict: Dict[str, Any], # pylint: disable=bad-continuation
ignore_keys: Optional[List[str]] = None, # pylint: disable=bad-continuation
) -> Dict[str, str]:
"""Take a dict and reverse its keys and values. Optional
parameter to ignore certain keys.
>>> ids_lang = dict(anci1242='Ancient Greek', lati1261='Latin', unlabeled=['Ottoman'])
>>> reverse_dict(ids_lang, ignore_keys=['unlabeled'])
{'Ancient Greek': 'anci1242', 'Latin': 'lati1261'}
>>> reverse_dict(dict(anci1242='Ancient Greek', lati1261='Latin'))
{'Ancient Greek': 'anci1242', 'Latin': 'lati1261'}
>>> reverse_dict(ids_lang)
Traceback (most recent call last):
...
TypeError: This function can only convert type str value to a key. Received value type `<class 'list'>` for key `unlabeled` instead. Consider using `ignore_keys` for this key-value pair to be skipped.
>>> reverse_dict(ids_lang, ignore_keys='unlabeled')
Traceback (most recent call last):
...
TypeError: The `ignore_key` parameter must be either types None or list. Received type `<class 'str'>` instead.
>>> reverse_dict(ids_lang, ignore_keys=['UNUSED-KEY'])
Traceback (most recent call last):
...
TypeError: This function can only convert type str value to a key. Received value type `<class 'list'>` for key `unlabeled` instead. Consider using `ignore_keys` for this key-value pair to be skipped.
"""
if ignore_keys and not isinstance(ignore_keys, list):
raise TypeError(
"The `ignore_key` parameter must be either types None or list. Received type `{}` instead.".format(
type(ignore_keys)
)
)
output_dict = dict() # type: Dict[str, str]
for key, val in input_dict.items():
if ignore_keys and key in ignore_keys:
continue
try:
output_dict[val] = key
except TypeError:
raise TypeError(
"This function can only convert type str value to a key. Received value type `{0}` for key `{1}` instead. Consider using `ignore_keys` for this key-value pair to be skipped.".format(
type(val), key
)
)
return output_dict
@contextmanager
def suppress_stdout():
"""Wrap a function with this to suppress
its printing to screen.
Source: `<https://thesmithfam.org/blog/2012/10/25/temporarily-suppress-console-output-in-python/>`_.
>>> print("You can see this")
You can see this
>>> with suppress_stdout():
... print("YY")
>>> print("And you can see this again")
And you can see this again
"""
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def get_cltk_data_dir() -> str:
"""Defines where to look for the ``cltk_data`` dir.
By default, this is located in a user's home directory
and the directory is created there (``~/cltk_data``).
However a user may customize where this goes with
the OS environment variable ``$CLTK_DATA``. If the
variable is found, then its value is used.
>>> from cltk.utils import CLTK_DATA_DIR
>>> import os
>>> os.environ["CLTK_DATA"] = os.path.expanduser("~/cltk_data")
>>> cltk_data_dir = get_cltk_data_dir()
>>> os.path.split(cltk_data_dir)[1]
'cltk_data'
>>> del os.environ["CLTK_DATA"]
>>> os.environ["CLTK_DATA"] = os.path.expanduser("~/custom_dir")
>>> cltk_data_dir = os.environ.get("CLTK_DATA")
>>> os.path.split(cltk_data_dir)[1]
'custom_dir'
>>> del os.environ["CLTK_DATA"]
"""
import os # pylint: disable=import-outside-toplevel
if "CLTK_DATA" in os.environ:
cltk_data_dir = os.path.expanduser(os.path.normpath(os.environ["CLTK_DATA"]))
if not os.path.isdir(cltk_data_dir):
raise FileNotFoundError(
"Custom data directory `%s` does not exist. "
"Update your OS environment variable `$CLTK_DATA` "
"or remove it." % cltk_data_dir
)
if not os.access(cltk_data_dir, os.W_OK):
raise PermissionError(
"Custom data directory `%s` must have "
"write permission." % cltk_data_dir
)
else:
cltk_data_dir = os.path.expanduser(os.path.normpath("~/cltk_data"))
return cltk_data_dir
def query_yes_no(question: str, default: Union[str, None] = "yes") -> bool:
"""Ask a yes/no question via ``input()` and return ``True``/``False``..
Source: `<https://stackoverflow.com/a/3041990>`_.
Args:
question: Question string presented to the user.
default: Presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no", or None (meaning
an answer is required of the user).
Returns:
``True`` for "yes" or ``False`` for "no".
"""
# 1. Construct prompt
if default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
elif not default:
prompt = " [y/n] "
else:
raise ValueError("Invalid default answer: '%s'" % default)
# 2. Check user input and return correct boolean
while True:
# sys.stdout.write(question + prompt)
print(question + prompt)
choice = input().lower()
if default and choice == "":
return bool(strtobool(default))
try:
return bool(strtobool(choice))
except ValueError:
print("Please respond with 'yes' or 'no' (or 'y' or 'n').")
def mk_dirs_for_file(file_path: str) -> None:
"""Make all dirs specified for final file. If dir already exists,
then silently continue.
Args:
        file_path: Path to the file whose parent dirs will be created (i.e., `mkdir -p`).
Returns:
None
"""
dirs = os.path.split(file_path)[0]
try:
os.makedirs(dirs)
except FileExistsError:
# TODO: Log INFO level; it's OK if dir already exists
return None
def get_file_with_progress_bar(model_url: str, file_path: str) -> None:
"""Download file with a progress bar.
Source: https://stackoverflow.com/a/37573701
Args:
        model_url: URL from which to download the file.
file_path: Location at which to save file.
Raises:
IOError: If size of downloaded file differs from that in remote's ``content-length`` header.
Returns:
None
"""
mk_dirs_for_file(file_path=file_path)
req_obj = requests.get(url=model_url, stream=True)
total_size = int(req_obj.headers.get("content-length", 0))
block_size = 1024 # 1 Kibibyte
progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
with open(file_path, "wb") as file_open:
for data in req_obj.iter_content(block_size):
progress_bar.update(len(data))
file_open.write(data)
progress_bar.close()
if total_size != 0 and progress_bar.n != total_size:
raise IOError(
f"Expected downloaded file to be of size '{total_size}' however it is in fact '{progress_bar.n}'."
)
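# Illustrative sketch (not part of the original module): a typical download
# flow. The URL below is a placeholder; ``example.com`` and the file name are
# hypothetical.
def _download_example() -> bool:
    target = os.path.join(get_cltk_data_dir(), "downloads", "model.tar.gz")
    get_file_with_progress_bar(
        model_url="https://example.com/model.tar.gz", file_path=target
    )
    return file_exists(target)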
CLTK_DATA_DIR = get_cltk_data_dir()
|
|
"""Control tasks execution order"""
import fnmatch
from collections import deque
from collections import OrderedDict
import re
from .exceptions import InvalidTask, InvalidCommand, InvalidDodoFile
from .task import Task, DelayedLoaded
from .loader import generate_tasks
class RegexGroup(object):
'''Helper to keep track of all delayed-tasks which regexp target
matches the target specified from command line.
'''
def __init__(self, target, tasks):
# target name specified in command line
self.target = target
# set of delayed-tasks names (string)
self.tasks = tasks
# keep track if the target was already found
self.found = False
class TaskControl(object):
"""Manages tasks inter-relationship
There are 3 phases
    1) the constructor gets a list of tasks and does initialization
    2) 'process': the command-line options for tasks are processed
    3) 'task_dispatcher': tasks are dispatched to the runner
Process dependencies and targets to find out the order tasks
should be executed. Also apply filter to exclude tasks from
execution. And parse task cmd line options.
@ivar tasks: (dict) Key: task name ([taskgen.]name)
Value: L{Task} instance
@ivar targets: (dict) Key: fileName
Value: task_name
"""
def __init__(self, task_list, auto_delayed_regex=False):
self.tasks = OrderedDict()
self.targets = {}
self.auto_delayed_regex = auto_delayed_regex
# name of task in order to be executed
# this the order as in the dodo file. the real execution
# order might be different if the dependencies require so.
self._def_order = []
# list of tasks selected to be executed
self.selected_tasks = None
# sanity check and create tasks dict
for task in task_list:
# task must be a Task
if not isinstance(task, Task):
msg = "Task must be an instance of Task class. %s"
raise InvalidTask(msg % (task.__class__))
# task name must be unique
if task.name in self.tasks:
msg = "Task names must be unique. %s"
raise InvalidDodoFile(msg % task.name)
self.tasks[task.name] = task
self._def_order.append(task.name)
# expand wild-card task-dependencies
for task in self.tasks.values():
for pattern in task.wild_dep:
task.task_dep.extend(self._get_wild_tasks(pattern))
self._check_dep_names()
self.set_implicit_deps(self.targets, task_list)
def _check_dep_names(self):
"""check if user input task_dep or setup_task that doesnt exist"""
# check task-dependencies exist.
for task in self.tasks.values():
for dep in task.task_dep:
if dep not in self.tasks:
msg = "%s. Task dependency '%s' does not exist."
raise InvalidTask(msg% (task.name, dep))
for setup_task in task.setup_tasks:
if setup_task not in self.tasks:
msg = "Task '%s': invalid setup task '%s'."
raise InvalidTask(msg % (task.name, setup_task))
@staticmethod
def set_implicit_deps(targets, task_list):
"""set/add task_dep based on file_dep on a target from another task
@param targets: (dict) fileName -> task_name
@param task_list: (list - Task) task with newly added file_dep
"""
# 1) create a dictionary associating every target->task. where the task
# builds that target.
for task in task_list:
for target in task.targets:
if target in targets:
msg = ("Two different tasks can't have a common target." +
"'%s' is a target for %s and %s.")
raise InvalidTask(msg % (target, task.name,
targets[target]))
targets[target] = task.name
# 2) now go through all dependencies and check if they are target from
# another task.
# Note: When used with delayed tasks.
# It does NOT check if a delayed-task's target is a file_dep
# from another previously created task.
for task in task_list:
TaskControl.add_implicit_task_dep(targets, task, task.file_dep)
@staticmethod
def add_implicit_task_dep(targets, task, deps_list):
"""add implicit task_dep for `task` for newly added `file_dep`
@param targets: (dict) fileName -> task_name
@param task: (Task) task with newly added file_dep
        @param deps_list: (list - str): list of file_dep for task
"""
for dep in deps_list:
if (dep in targets and targets[dep] not in task.task_dep):
task.task_dep.append(targets[dep])
def _get_wild_tasks(self, pattern):
"""get list of tasks that match pattern"""
wild_list = []
for t_name in self._def_order:
if fnmatch.fnmatch(t_name, pattern):
wild_list.append(t_name)
return wild_list
def _process_filter(self, task_selection):
"""process cmd line task options
[task_name [-task_opt [opt_value]] ...] ...
@param task_selection: list of strings with task names/params or target
        @return list of task names, with glob patterns expanded and params removed
"""
filter_list = []
def add_filtered_task(seq, f_name):
"""add task to list `filter_list` and set task.options from params
@return list - str: of elements not yet
"""
filter_list.append(f_name)
# only tasks specified by name can contain parameters
if f_name in self.tasks:
# parse task_selection
the_task = self.tasks[f_name]
# Initialize options for the task
seq = the_task.init_options(seq)
# if task takes positional parameters set all as pos_arg_val
if the_task.pos_arg is not None:
the_task.pos_arg_val = seq
seq = []
return seq
# process...
seq = task_selection[:]
# process cmd_opts until nothing left
while seq:
f_name = seq.pop(0) # always start with a task/target name
# select tasks by task-name pattern
if '*' in f_name:
for task_name in self._get_wild_tasks(f_name):
add_filtered_task((), task_name)
else:
seq = add_filtered_task(seq, f_name)
return filter_list
def _filter_tasks(self, task_selection):
"""Select tasks specified by filter.
@param task_selection: list of strings with task names/params or target
@return (list) of string. where elements are task name.
"""
selected_task = []
filter_list = self._process_filter(task_selection)
for filter_ in filter_list:
# by task name
if filter_ in self.tasks:
selected_task.append(filter_)
continue
# by target
if filter_ in self.targets:
selected_task.append(self.targets[filter_])
continue
# if can not find name check if it is a sub-task of a delayed
basename = filter_.split(':', 1)[0]
if basename in self.tasks:
loader = self.tasks[basename].loader
if not loader:
raise InvalidCommand(not_found=filter_)
loader.basename = basename
self.tasks[filter_] = Task(filter_, None, loader=loader)
selected_task.append(filter_)
continue
# check if target matches any regex
delayed_matched = [] # list of Task
for task in list(self.tasks.values()):
if not task.loader:
continue
if task.name.startswith('_regex_target'):
continue
if task.loader.target_regex:
if re.match(task.loader.target_regex, filter_):
delayed_matched.append(task)
elif self.auto_delayed_regex:
delayed_matched.append(task)
delayed_matched_names = [t.name for t in delayed_matched]
regex_group = RegexGroup(filter_, set(delayed_matched_names))
# create extra tasks to load delayed tasks matched by regex
for task in delayed_matched:
loader = task.loader
loader.basename = task.name
name = '{}_{}:{}'.format('_regex_target', filter_, task.name)
loader.regex_groups[name] = regex_group
self.tasks[name] = Task(name, None,
loader=loader,
file_dep=[filter_])
selected_task.append(name)
if not delayed_matched:
# not found
raise InvalidCommand(not_found=filter_)
return selected_task
def process(self, task_selection):
"""
@param task_selection: list of strings with task names/params
@return (list - string) each element is the name of a task
"""
# execute only tasks in the filter in the order specified by filter
if task_selection is not None:
self.selected_tasks = self._filter_tasks(task_selection)
else:
# if no filter is defined execute all tasks
# in the order they were defined.
self.selected_tasks = self._def_order
def task_dispatcher(self):
"""return a TaskDispatcher generator
"""
assert self.selected_tasks is not None, \
"must call 'process' before this"
return TaskDispatcher(self.tasks, self.targets, self.selected_tasks)
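# Illustrative sketch (documentation only) of the first two phases described in
# the TaskControl docstring. Task construction follows doit's Task API; the task
# names, targets and dependencies here are made up for the example.
def _task_control_example():
    compile_ = Task('compile', None, targets=['app.o'])
    link = Task('link', None, file_dep=['app.o'])
    control = TaskControl([compile_, link])
    control.process(['link'])
    # 'link' was selected by name; its implicit task_dep on 'compile' (derived
    # from the shared 'app.o' target/file_dep) is resolved later by the
    # TaskDispatcher returned from control.task_dispatcher().
    return control.selected_tasks  # ['link']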
class ExecNode(object):
"""Each task will have an instance of this.
This used to keep track of waiting events and the generator for dep nodes
@ivar run_status (str): contains the result of Dependency.get_status().status
modified by runner, value can be:
- None: not processed yet
- run: task is selected to be executed (it might be running or
waiting for setup)
           - ignore: task won't be executed (user forced deselect)
           - up-to-date: task won't be executed (no need)
- done: task finished its execution
"""
def __init__(self, task, parent):
self.task = task
# list of dependencies not processed by _add_task yet
self.task_dep = task.task_dep[:]
self.calc_dep = task.calc_dep.copy()
# ancestors are used to detect cyclic references.
# it does not contain a list of tasks that depends on this node
# for that check the attribute waiting_me
self.ancestors = []
if parent:
self.ancestors.extend(parent.ancestors)
self.ancestors.append(task.name)
# Wait for a task to be selected to its execution
# checking if it is up-to-date
self.wait_select = False
# Wait for a task to finish its execution
self.wait_run = set() # task names
self.wait_run_calc = set() # task names
self.waiting_me = set() # ExecNode
self.run_status = None
# all ancestors that failed
self.bad_deps = []
self.ignored_deps = []
# generator from TaskDispatcher._add_task
self.generator = None
def reset_task(self, task, generator):
"""reset task & generator after task is created by its own `loader`"""
self.task = task
self.task_dep = task.task_dep[:]
self.calc_dep = task.calc_dep.copy()
self.generator = generator
def parent_status(self, parent_node):
if parent_node.run_status == 'failure':
self.bad_deps.append(parent_node)
elif parent_node.run_status == 'ignore':
self.ignored_deps.append(parent_node)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.task.name)
def step(self):
"""get node's next step"""
try:
return next(self.generator)
except StopIteration:
return None
def no_none(decorated):
"""decorator for a generator to discard/filter-out None values"""
def _func(*args, **kwargs):
"""wrap generator"""
for value in decorated(*args, **kwargs):
if value is not None:
yield value
return _func
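# Illustrative sketch (documentation only): `no_none` filters None values out of
# whatever a generator yields, which is how `_add_task` below silently skips
# steps that produce nothing.
@no_none
def _no_none_example():
    yield 1
    yield None  # dropped by the decorator
    yield 2     # so list(_no_none_example()) == [1, 2]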
class TaskDispatcher(object):
"""Dispatch another task to be selected/executed, mostly handle with MP
Note that a dispatched task might not be ready to be executed.
"""
def __init__(self, tasks, targets, selected_tasks):
self.tasks = tasks
self.targets = targets
self.selected_tasks = selected_tasks
self.nodes = {} # key task-name, value: ExecNode
# queues
self.waiting = set() # of ExecNode
self.ready = deque() # of ExecNode
self.generator = self._dispatcher_generator(selected_tasks)
def _gen_node(self, parent, task_name):
"""return ExecNode for task_name if not created yet"""
node = self.nodes.get(task_name, None)
# first time, create node
if node is None:
node = ExecNode(self.tasks[task_name], parent)
node.generator = self._add_task(node)
self.nodes[task_name] = node
return node
# detect cyclic/recursive dependencies
if parent and task_name in parent.ancestors:
msg = "Cyclic/recursive dependencies for task %s: [%s]"
cycle = " -> ".join(parent.ancestors + [task_name])
raise InvalidDodoFile(msg % (task_name, cycle))
def _node_add_wait_run(self, node, task_list, calc=False):
"""updates node.wait_run
@param node (ExecNode)
@param task_list (list - str) tasks that node should wait for
@param calc (bool) task_list is for calc_dep
"""
# wait_for: contains tasks that `node` needs to wait for and
# were not executed yet.
wait_for = set()
for name in task_list:
dep_node = self.nodes[name]
if (not dep_node) or dep_node.run_status in (None, 'run'):
wait_for.add(name)
else:
# if dep task was already executed:
# a) set parent status
node.parent_status(dep_node)
# b) update dependencies from calc_dep results
if calc:
self._process_calc_dep_results(dep_node, node)
# update ExecNode setting parent/dependent relationship
for name in wait_for:
self.nodes[name].waiting_me.add(node)
if calc:
node.wait_run_calc.update(wait_for)
else:
node.wait_run.update(wait_for)
@no_none
def _add_task(self, node):
"""@return a generator that produces:
- ExecNode for task dependencies
- 'wait' to wait for an event (i.e. a dep task run)
- Task when ready to be dispatched to runner (run or be selected)
- None values are of no interest and are filtered out
by the decorator no_none
note that after a 'wait' is sent it is the responsibility of the
caller to ensure the current ExecNode cleared all its waiting
before calling `next()` again on this generator
"""
this_task = node.task
# skip this task if task belongs to a regex_group that already
# executed the task used to build the given target
if this_task.loader:
regex_group = this_task.loader.regex_groups.get(this_task.name, None)
if regex_group and regex_group.found:
return
# add calc_dep & task_dep until all processed
# calc_dep may add more deps so need to loop until nothing left
while True:
calc_dep_list = list(node.calc_dep)
node.calc_dep.clear()
task_dep_list = node.task_dep[:]
node.task_dep = []
for calc_dep in calc_dep_list:
yield self._gen_node(node, calc_dep)
self._node_add_wait_run(node, calc_dep_list, calc=True)
# add task_dep
for task_dep in task_dep_list:
yield self._gen_node(node, task_dep)
self._node_add_wait_run(node, task_dep_list)
# do not wait until all possible task_dep are created
if (node.calc_dep or node.task_dep):
continue # pragma: no cover # coverage cant catch this #198
elif (node.wait_run or node.wait_run_calc):
yield 'wait'
else:
break
# generate tasks from a DelayedLoader
if this_task.loader:
ref = this_task.loader.creator
to_load = this_task.loader.basename or this_task.name
this_loader = self.tasks[to_load].loader
if this_loader and not this_loader.created:
task_gen = ref(**this_loader.kwargs) if this_loader.kwargs else ref()
new_tasks = generate_tasks(to_load, task_gen, ref.__doc__)
TaskControl.set_implicit_deps(self.targets, new_tasks)
for nt in new_tasks:
if not nt.loader:
nt.loader = DelayedLoaded
self.tasks[nt.name] = nt
# check itself for implicit dep (used by regex_target)
TaskControl.add_implicit_task_dep(
self.targets, this_task, this_task.file_dep)
# remove file_dep since generated tasks are not required
# to really create the target (support multiple matches)
if regex_group:
this_task.file_dep = {}
if regex_group.target in self.targets:
regex_group.found = True
else:
regex_group.tasks.remove(this_task.loader.basename)
if len(regex_group.tasks) == 0:
# In case no task is left, we cannot find a task
# generating this target. Print an error message!
raise InvalidCommand(not_found=regex_group.target)
# mark this loader to not be executed again
this_task.loader.created = True
this_task.loader = DelayedLoaded
# this task was placeholder to execute the loader
# now it needs to be re-processed with the real task
yield "reset generator"
assert False, "This generator can not be used again"
# add itself
yield this_task
# tasks that contain setup-tasks need to be yielded twice
if this_task.setup_tasks:
# run_status None means task is waiting for other tasks
# in order to check if up-to-date. so it needs to wait
# before scheduling its setup-tasks.
if node.run_status is None:
node.wait_select = True
yield "wait"
# if this task should run, so schedule setup-tasks before itself
if node.run_status == 'run':
for setup_task in this_task.setup_tasks:
yield self._gen_node(node, setup_task)
self._node_add_wait_run(node, this_task.setup_tasks)
if node.wait_run:
yield 'wait'
# re-send this task after setup_tasks are sent
yield this_task
def _get_next_node(self, ready, tasks_to_run):
"""get ExecNode from (in order):
.1 ready
.2 tasks_to_run (list in reverse order)
"""
if ready:
return ready.popleft()
# get task group from tasks_to_run
while tasks_to_run:
task_name = tasks_to_run.pop()
node = self._gen_node(None, task_name)
if node:
return node
def _update_waiting(self, processed):
"""updates 'ready' and 'waiting' queues after processed
@param processed (ExecNode) or None
"""
# no task processed, just ignore
if processed is None:
return
node = processed
        # a node that was waiting for selection must only receive the select event
if node.wait_select:
self.ready.append(node)
self.waiting.remove(node)
node.wait_select = False
# status == run means this was not just select completed
if node.run_status == 'run':
return
for waiting_node in node.waiting_me:
waiting_node.parent_status(node)
# is_ready indicates if node.generator can be invoked again
task_name = node.task.name
            # node wait_run will be ready if there is nothing left to wait for
if task_name in waiting_node.wait_run:
waiting_node.wait_run.remove(task_name)
is_ready = not (waiting_node.wait_run or
waiting_node.wait_run_calc)
# node wait_run_calc
else:
assert task_name in waiting_node.wait_run_calc
waiting_node.wait_run_calc.remove(task_name)
# calc_dep might add new deps that can be run without
# waiting for the completion of the remaining deps
is_ready = True
self._process_calc_dep_results(node, waiting_node)
# this node can be further processed
if is_ready and (waiting_node in self.waiting):
self.ready.append(waiting_node)
self.waiting.remove(waiting_node)
def _process_calc_dep_results(self, node, waiting_node):
# refresh this task dependencies with values got from calc_dep
values = node.task.values
len_task_deps = len(waiting_node.task.task_dep)
old_calc_dep = waiting_node.task.calc_dep.copy()
waiting_node.task.update_deps(values)
TaskControl.add_implicit_task_dep(
self.targets, waiting_node.task,
values.get('file_dep', []))
# update node's list of non-processed dependencies
new_task_dep = waiting_node.task.task_dep[len_task_deps:]
waiting_node.task_dep.extend(new_task_dep)
new_calc_dep = waiting_node.task.calc_dep - old_calc_dep
waiting_node.calc_dep.update(new_calc_dep)
def _dispatcher_generator(self, selected_tasks):
"""return generator dispatching tasks"""
# each selected task will create a tree (from dependencies) of
# tasks to be processed
tasks_to_run = list(reversed(selected_tasks))
node = None # current active ExecNode
while True:
# get current node
if not node:
node = self._get_next_node(self.ready, tasks_to_run)
if not node:
if self.waiting:
# all tasks are waiting, hold on
processed = (yield "hold on")
self._update_waiting(processed)
continue
# we are done!
return
# get next step from current node
next_step = node.step()
# got None, nothing left for this generator
if next_step is None:
node = None
continue
# got a task, send ExecNode to runner
if isinstance(next_step, Task):
processed = (yield self.nodes[next_step.name])
self._update_waiting(processed)
# got new ExecNode, add to ready_queue
elif isinstance(next_step, ExecNode):
self.ready.append(next_step)
# node just performed a delayed creation of tasks, restart
elif next_step == "reset generator":
node.reset_task(self.tasks[node.task.name],
self._add_task(node))
# got 'wait', add ExecNode to waiting queue
else:
assert next_step == "wait"
self.waiting.add(node)
node = None
|
|
import json
import common
from objs.gametime import Gametime
from objs.poll import Poll
class WeekendGames(object):
"""
Defines the WeekendGames class
"""
    def __init__(self):
"""
WeekendGames constructor
"""
db = common.db
self.people = []
if 'people' in db:
self.people = db['people']
self.day = 'Sunday'
self.last_shot = "Unknown"
if 'last_shot' in db:
self.last_shot = db['last_shot']
self.consecutive_shot_wins = 1
if 'consecutive_shot_wins' in db:
self.consecutive_shot_wins = db['consecutive_shot_wins']
self.last_reddit_request = 0
# store our games
self.gametimes = []
if 'gametimes' in db:
for gt in db['gametimes']:
self.gametimes.append(Gametime(json_create=gt))
self.c_win = 0
self.c_loss = 0
self.c_draw = 0
if 'c_win' in db:
self.c_win = db['c_win']
if 'c_loss' in db:
self.c_loss = db['c_loss']
if 'c_draw' in db:
self.c_draw = db['c_draw']
# non persistent variables
self.wins = 0
self.draws = 0
self.losses = 0
self.last = None
self.consecutive = 0
self.poll = None
def get_gametimes(self):
"""
Get upcoming gametimes.
:return: string response to print to chat.
"""
upcoming_days = "**Exciting f-ing news, boys:**\n\n"
if len(self.gametimes) == 0:
return "No games coming up, friendship outlook bleak."
game_id = 0
for gt in self.gametimes:
game_id += 1
upcoming_days += "{}: There is a gaming session coming up on " \
"**{}**\n".format(game_id,
pretty_date(gt.get_date()))
if len(gt.players) == 0:
upcoming_days += " Nobody's in for this day.\n"
else:
for player in gt.players:
status = gt.get_player_status(player['name'])
upcoming_days += " - **{}** is *{}*.\n".format(
player['name'], status)
return upcoming_days
def remove_old_gametimes(self):
"""
Removes old gametimes from the gametimes object.
:return: None
"""
to_remove = []
for gt in self.gametimes:
if gt.get_date() < gt.get_now():
to_remove.append(gt)
for gt in to_remove:
self.gametimes.remove(gt)
def gametime_actions(self, message):
"""
Routes a gametime action, specified in the second
argument, ex !gametime <add> Sunday
:param message: Containing Arguments
:return: string response to print to chat.
"""
arguments = argument_parser(message)
gametime_help_string = \
"That's not a valid command for **!gametime**\n\n" \
"Please use:\n" \
"!gametime <add> <day of the week>" \
"<_optional_: military time, HH:MM> to **add a gametime**\n" \
"!gametime <remove> <index> to **delete a gametime**\n" \
"!gametime <list> to **list current gametimes**\n" \
"!gametime <set> <index> <time> to " \
"**set the time of a gametime**"
valid_commands = {
"add": self.create_gametime,
"remove": self.remove_gametime,
"list": self.get_gametimes,
"set": self.set_gametime
}
if arguments[0] in valid_commands:
if len(arguments) == 3:
try:
return valid_commands[arguments[0]](arguments[1],
arguments[2])
except KeyError:
return gametime_help_string
elif len(arguments) == 2:
try:
return valid_commands[arguments[0]](arguments[1])
except KeyError:
return gametime_help_string
elif len(arguments) == 1:
try:
return valid_commands[arguments[0]]()
except KeyError:
return gametime_help_string
return gametime_help_string
def poll_actions(self, message):
"""
Handles Poll creation/deletion
:param message: Containing arguments
:return: string response to print to chat.
"""
arguments = argument_parser(message)
poll_help_string = \
"That's not a valid command for **!poll**\n\n" \
"Please use:\n" \
"!poll start \"option 1\" \"option 2\" etc... to **start a " \
"poll**\n" \
"!poll stop to **delete the current poll**"
valid_commands = {
"start": self.create_poll,
"stop": self.stop_poll,
}
if arguments[0] in valid_commands:
return valid_commands[arguments[0]](" ".join(arguments[1:]))
return poll_help_string
def create_poll(self, options):
"""
Creates a poll if one is not already running
:param options: Options for the poll
"""
if self.poll is not None:
return "Can't start poll, one is running try !poll stop first"
try:
self.poll = Poll(options)
except SyntaxError:
return "You probably want to start the poll correctly. Nice try."
return self.poll.get_current_state()
def stop_poll(self):
"""
Stops a poll
"""
if self.poll is None:
return "No poll running"
out_str = self.poll.get_final_state()
self.poll = None
return out_str
def create_gametime(self, day, start_time=None):
"""
Create a gametime, given a full name of a day of the week.
:param start_time: Time of the game
:param day: string of a proper case day of the week.
:return: string response to send to chat.
"""
day = day.capitalize()
if day in Gametime.DAYS_IN_WEEK:
new_gametime = Gametime(day=Gametime.DAYS_IN_WEEK.index(day),
time=start_time)
for game_session in self.gametimes:
if new_gametime == game_session:
return "There is already a session that time."
self.gametimes.append(new_gametime)
self.gametimes.sort(key=lambda x: x.date)
return "Gametime created for {}.".format(
pretty_date(new_gametime.get_date()))
else:
return "Please use the full name of a day of the week."
def remove_gametime(self, index):
try:
index = int(index)
except ValueError:
return "Your index should be a number, silly."
if 0 < index <= len(self.gametimes):
self.gametimes.pop(index - 1)
return "Gametime removed."
else:
return "There's no gametime with that number."
def set_gametime(self, index, new_time):
try:
index = int(index)
except ValueError:
return "Your index should be a number, silly."
if 0 < index <= len(self.gametimes):
output_string = ""
output_string += self.gametimes[index - 1].set_time(new_time)
return "{}\nGametime {} set to {}." \
.format(output_string, index,
pretty_date(self.gametimes[index - 1].get_date()))
else:
return "There's no gametime with that number."
def whos_in(self):
"""
Deprecated function, now just calls get_gametimes()
:rtype str
:return: str: Formatted string for the list of people currently signed
up for weekend games
"""
return self.get_gametimes()
def add(self, person, game_id, status):
"""
Adds a person to the specified gametime
:param status: Status to mark for player
:param person: person to add
:param game_id: list id of the gametime in gametimes
:return: string to print to chat
"""
try:
game_id = int(game_id) - 1
except ValueError:
return "That's not a valid gametime."
if game_id not in range(len(self.gametimes)):
return "There's no gametime then."
if type(person) is str:
person_to_add = person
else:
person_to_add = str(person.display_name)
game = self.gametimes[game_id]
if game.find_player_by_name(person_to_add) and \
status != game.get_player_status(person_to_add):
game.unregister_player(person_to_add)
if game.find_player_by_name(person_to_add):
self.gametimes[game_id] = game
return "You're already {} for that day.".format(
game.get_player_status(person_to_add))
else:
game.register_player(person_to_add,
status=status)
self.gametimes[game_id] = game
return '{} is {} for {}.'.format(person_to_add,
game.get_player_status(
person_to_add),
pretty_date(game.get_date()))
def remove(self, person, game_id):
"""
Removes a person from the weekend games list
:param person: Person to remove
:param game_id: The id of the game session
:rtype str
:return: str: Formatted string indicating whether a person was removed.
"""
try:
game_id = int(game_id) - 1
except ValueError:
return "That's not a valid gametime."
if game_id not in range(len(self.gametimes)):
return "There's no gametime then."
if type(person) is str:
person_to_remove = person
else:
person_to_remove = str(person.display_name)
if self.gametimes[game_id].find_player_by_name(person_to_remove):
self.gametimes[game_id].unregister_player(person_to_remove)
return '{} is out for {}.' \
.format(person_to_remove, pretty_date(self.gametimes[
game_id].get_date()))
else:
return '{} was never in for {}.' \
.format(person_to_remove, pretty_date(self.gametimes[
game_id].get_date()))
def update_db(self):
"""
Updates the database to disk
:return: None
"""
common.db['people'] = self.people
common.db['last_shot'] = self.last_shot
common.db['consecutive_shot_wins'] = self.consecutive_shot_wins
common.db['gametimes'] = []
for gt in self.gametimes:
common.db['gametimes'].append(gt.to_json())
common.db['users'] = common.users
common.db['c_win'] = self.c_win
common.db['c_loss'] = self.c_loss
common.db['c_draw'] = self.c_draw
with open(common.db_file, 'w') as dbfile:
json.dump(common.db, dbfile, sort_keys=True, indent=4,
ensure_ascii=False)
def add_shot_win(self, name):
"""
Adds a shot lottery win to the weekend games
:param name: str Name of winner
:rtype int
:return: int: Number in a row
"""
if self.last_shot == name:
self.consecutive_shot_wins += 1
else:
self.last_shot = name
self.consecutive_shot_wins = 1
return self.consecutive_shot_wins
def add_win(self):
"""
Adds a win
:return: None
"""
self.wins += 1
self.c_win += 1
if self.last == "win":
self.consecutive += 1
else:
self.last = "win"
self.consecutive = 1
def add_loss(self):
"""
Adds a loss
:return: None
"""
self.losses += 1
self.c_loss += 1
if self.last == "loss":
self.consecutive += 1
else:
self.last = "loss"
self.consecutive = 1
def add_draw(self):
"""
Adds a draw
:return: None
"""
self.draws += 1
self.c_draw += 1
if self.last == "draw":
self.consecutive += 1
else:
self.last = "draw"
self.consecutive = 1
def clear_record(self):
"""
Clears the current (non-persistent) session record
:return: None
"""
self.draws = 0
self.wins = 0
self.losses = 0
self.last = None
self.consecutive = 0
def get_record(self):
"""
Gets the record of a session
:return: str: With record formatting
"""
return "{0} - {1} - {2}".format(self.wins, self.losses, self.draws)
def get_c_record(self):
"""
Gets the cumulative record of a session
:return: str: With record formatting
"""
return "{0} - {1} - {2}".format(self.c_win, self.c_loss, self.c_draw)
def pretty_date(dt):
"""
Takes a datetime and makes it a pretty string.
:param dt:
:return: string
"""
return dt.strftime("%a, %b %d at %H:%M EST")
# a 12-hour clock variant, kept for future use:
# return dt.strftime("%a, %b %d at %I:%M %p")
def argument_parser(input_args):
"""
Returns a list of tokens for a given argument
:param input_args: input string
:return: argument list
"""
arguments = input_args.split(' ')
if len(arguments) > 1:
return arguments[1:]
else:
return arguments
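# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged demo of the module-level helpers above. Running it
# still assumes the project's `common` and `objs` packages are importable
# (they are pulled in at the top of this file); it only exercises the
# pure helper functions, not the WeekendGames class.
if __name__ == '__main__':
    import datetime
    # "!gametime add Sunday 20:00" -> ['add', 'Sunday', '20:00']
    print(argument_parser("!gametime add Sunday 20:00"))
    # e.g. "Sun, Jan 03 at 20:00 EST"
    print(pretty_date(datetime.datetime(2021, 1, 3, 20, 0)))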
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
from oslo.config import cfg
from quantumclient.common import exceptions as q_exc
from quantumclient.quantum import v2_0 as quantumv20
from webob import exc
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova.network import quantumv2
from nova.network.security_group import security_group_base
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
wrap_check_security_groups_policy = compute_api.policy_decorator(
scope='compute:security_groups')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
id_is_uuid = True
def create_security_group(self, context, name, description):
quantum = quantumv2.get_client(context)
body = self._make_quantum_security_group_dict(name, description)
try:
security_group = quantum.create_security_group(
body).get('security_group')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error creating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from quantum here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
raise e
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
nova_group = {}
nova_group['id'] = security_group['id']
nova_group['description'] = security_group['description']
nova_group['name'] = security_group['name']
nova_group['project_id'] = security_group['tenant_id']
nova_group['rules'] = []
for rule in security_group.get('security_group_rules', []):
if rule['direction'] == 'ingress':
nova_group['rules'].append(
self._convert_to_nova_security_group_rule_format(rule))
return nova_group
def _convert_to_nova_security_group_rule_format(self, rule):
nova_rule = {}
nova_rule['id'] = rule['id']
nova_rule['parent_group_id'] = rule['security_group_id']
nova_rule['protocol'] = rule['protocol']
if rule['port_range_min'] is None:
nova_rule['from_port'] = -1
else:
nova_rule['from_port'] = rule['port_range_min']
if rule['port_range_max'] is None:
nova_rule['to_port'] = -1
else:
nova_rule['to_port'] = rule['port_range_max']
nova_rule['group_id'] = rule['source_group_id']
nova_rule['cidr'] = rule['source_ip_prefix']
return nova_rule
def get(self, context, name=None, id=None, map_exception=False):
quantum = quantumv2.get_client(context)
try:
if not id and name:
id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', name)
group = quantum.show_security_group(id).get('security_group')
except q_exc.QuantumClientException as e:
if e.status_code == 404:
LOG.exception(_("Quantum Error getting security group %s"),
name)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
return self._convert_to_nova_security_group_format(group)
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
"""Returns list of security group rules owned by tenant."""
quantum = quantumv2.get_client(context)
search_opts = {}
if names:
search_opts['name'] = names
if ids:
search_opts['id'] = ids
try:
security_groups = quantum.list_security_groups(**search_opts).get(
'security_groups')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error getting security groups"))
raise e
converted_rules = []
for security_group in security_groups:
converted_rules.append(
self._convert_to_nova_security_group_format(security_group))
return converted_rules
def validate_id(self, id):
if not uuidutils.is_uuid_like(id):
msg = _("Security group id should be uuid")
self.raise_invalid_property(msg)
return id
def destroy(self, context, security_group):
"""This function deletes a security group."""
quantum = quantumv2.get_client(context)
try:
quantum.delete_security_group(security_group['id'])
except q_exc.QuantumClientException as e:
if e.status_code == 404:
self.raise_not_found(e.message)
elif e.status_code == 409:
self.raise_invalid_property(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both. Multiple rules are
installed to a security group in quantum using bulk support."""
quantum = quantumv2.get_client(context)
body = self._make_quantum_security_group_rules_list(vals)
try:
rules = quantum.create_security_group_rule(
body).get('security_group_rules')
except q_exc.QuantumClientException as e:
if e.status_code == 409:
LOG.exception(_("Quantum Error getting security group %s"),
name)
self.raise_not_found(e.message)
else:
LOG.exception(_("Quantum Error:"))
raise e
converted_rules = []
for rule in rules:
converted_rules.append(
self._convert_to_nova_security_group_rule_format(rule))
return converted_rules
def _make_quantum_security_group_dict(self, name, description):
return {'security_group': {'name': name,
'description': description}}
def _make_quantum_security_group_rules_list(self, rules):
new_rules = []
for rule in rules:
new_rule = {}
# nova only supports ingress rules so all rules are ingress.
new_rule['direction'] = "ingress"
new_rule['protocol'] = rule.get('protocol')
# FIXME(arosen) Nova does not expose ethertype on security group
# rules. Therefore, in the case of self referential rules we
# should probably assume they want to allow both IPv4 and IPv6.
# Unfortunately, this would require adding two rules in quantum.
# The reason we do not do this is because when the user using the
# nova api wants to remove the rule we'd have to have some way to
# know that we should delete both of these rules in quantum.
# For now, self referential rules only support IPv4.
if not rule.get('cidr'):
new_rule['ethertype'] = 'IPv4'
else:
new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
new_rule['source_ip_prefix'] = rule.get('cidr')
new_rule['security_group_id'] = rule.get('parent_group_id')
new_rule['source_group_id'] = rule.get('group_id')
if rule['from_port'] != -1:
new_rule['port_range_min'] = rule['from_port']
if rule['to_port'] != -1:
new_rule['port_range_max'] = rule['to_port']
new_rules.append(new_rule)
return {'security_group_rules': new_rules}
def remove_rules(self, context, security_group, rule_ids):
quantum = quantumv2.get_client(context)
rule_ids = set(rule_ids)
try:
# The ec2 api allows one to delete multiple security group rules
# at once. Since there is no bulk delete for quantum the best
# thing we can do is delete the rules one by one and hope this
# works.... :/
for rule_id in range(0, len(rule_ids)):
quantum.delete_security_group_rule(rule_ids.pop())
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error unable to delete %s"),
rule_ids)
raise e
def get_rule(self, context, id):
quantum = quantumv2.get_client(context)
try:
rule = quantum.show_security_group_rule(
id).get('security_group_rule')
except q_exc.QuantumClientException as e:
if e.status_code == 404:
LOG.exception(_("Quantum Error getting security group rule "
"%s.") % id)
self.raise_not_found(e.message)
else:
LOG.error(_("Quantum Error: %s"), e)
raise e
return self._convert_to_nova_security_group_rule_format(rule)
def get_instance_security_groups(self, req, instance_id):
dict_security_groups = {}
security_group_name_map = {}
admin_context = context.get_admin_context()
quantum = quantumv2.get_client(admin_context)
params = {'device_id': instance_id}
ports = quantum.list_ports(**params)
security_groups = quantum.list_security_groups().get('security_groups')
for security_group in security_groups:
name = security_group.get('name')
# The name is optional for quantum security groups, so fall back to the id.
if not name:
name = security_group['id']
security_group_name_map[security_group['id']] = name
for port in ports['ports']:
for security_group in port.get('security_groups', []):
try:
dict_security_groups[security_group] = (
security_group_name_map[security_group])
except KeyError:
# This should only happen due to a race condition: the
# security group on a port was deleted after the ports
# were returned. We pass since this security group is
# no longer on the port.
pass
ret = []
for security_group in dict_security_groups.values():
ret.append({'name': security_group})
return ret
def _has_security_group_requirements(self, port):
port_security_enabled = port.get('port_security_enabled')
has_ip = port.get('fixed_ips')
if port_security_enabled and has_ip:
return True
else:
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
quantum = quantumv2.get_client(context)
try:
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
raise e
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error:"))
raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
for port in ports:
if not self._has_security_group_requirements(port):
LOG.warn(_("Cannot add security group %(name)s to %(instance)s"
" since the port %(port_id)s does not meet security"
" requirements"), {'name': security_group_name,
'instance': instance['uuid'], 'port_id': port['id']})
raise exception.SecurityGroupCannotBeApplied()
if 'security_groups' not in port:
port['security_groups'] = []
port['security_groups'].append(security_group_id)
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
quantum.update_port(port['id'], {'port': updated_port})
except Exception:
LOG.exception(_("Quantum Error:"))
raise
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
quantum = quantumv2.get_client(context)
try:
security_group_id = quantumv20.find_resourceid_by_name_or_id(
quantum, 'security_group', security_group_name)
except q_exc.QuantumClientException as e:
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Quantum Error:"))
raise e
params = {'device_id': instance['uuid']}
try:
ports = quantum.list_ports(**params).get('ports')
except q_exc.QuantumClientException as e:
LOG.exception(_("Quantum Error:"))
raise e
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
found_security_group = False
for port in ports:
try:
port.get('security_groups', []).remove(security_group_id)
except ValueError:
# When removing a security group from an instance, the security
# group should be present on all of the instance's ports, since
# that is how it is added through the nova api. If it is not on
# this port we continue; a 404 is only raised if the security
# group is not found on any of the ports on the instance.
continue
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
quantum.update_port(port['id'], {'port': updated_port})
found_security_group = True
except Exception:
LOG.exception(_("Quantum Error:"))
raise
if not found_security_group:
msg = (_("Security group %(security_group_name)s not assocaited "
"with the instance %(instance)s"),
{'security_group_name': security_group_name,
'instance': instance['uuid']})
self.raise_not_found(msg)
def populate_security_groups(self, instance, security_groups):
# Setting to empty list since we do not want to populate this field
# in the nova database if using the quantum driver
instance['security_groups'] = []
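# --- Illustrative note (not part of the original module) ---
# A hedged sketch of the field mapping performed by
# _convert_to_nova_security_group_rule_format(). Given a quantum rule such
# as (values are hypothetical):
#   {'id': 'r-1', 'security_group_id': 'sg-1', 'protocol': 'tcp',
#    'port_range_min': None, 'port_range_max': 443,
#    'source_group_id': None, 'source_ip_prefix': '10.0.0.0/24'}
# the resulting nova-style rule would look like:
#   {'id': 'r-1', 'parent_group_id': 'sg-1', 'protocol': 'tcp',
#    'from_port': -1, 'to_port': 443,
#    'group_id': None, 'cidr': '10.0.0.0/24'}
# i.e. missing port bounds become -1 and the quantum field names are
# renamed to their nova equivalents.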
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from operator import attrgetter
from flask import session
from indico.core import signals
from indico.core.db import db
from indico.core.errors import UserValueError
from indico.modules.events import EventLogKind, EventLogRealm
from indico.modules.events.sessions.operations import update_session_block
from indico.modules.events.timetable import logger
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.modules.events.timetable.util import find_latest_entry_end_dt
from indico.util.date_time import format_datetime
from indico.util.i18n import _
def _get_object_info(entry):
if entry.type == TimetableEntryType.CONTRIBUTION:
object_type = 'contribution'
object_title = entry.contribution.title
elif entry.type == TimetableEntryType.SESSION_BLOCK:
object_type = 'session block'
object_title = entry.session_block.title or entry.session_block.session.title
elif entry.type == TimetableEntryType.BREAK:
object_type = 'break'
object_title = entry.break_.title
else:
raise ValueError('No object associated with timetable entry')
return object_type, object_title
def create_break_entry(event, data, session_block=None):
break_ = Break()
entry_data = {'object': break_,
'start_dt': data.pop('start_dt')}
# XXX: disable change tracking since `location_data` cannot be read back at this point
# due to the break having no valid `location_parent`
break_.populate_from_dict(data, track_changes=False)
parent = session_block.timetable_entry if session_block else None
return create_timetable_entry(event, entry_data, parent=parent, extend_parent=True)
def update_break_entry(break_, data):
start_dt = data.pop('start_dt', None)
if start_dt is not None:
update_timetable_entry(break_.timetable_entry, {'start_dt': start_dt})
break_.populate_from_dict(data)
db.session.flush()
def create_session_block_entry(session_, data):
from indico.modules.events.sessions.operations import create_session_block
start_dt = data.pop('start_dt')
block = create_session_block(session_=session_, data=data)
entry_data = {'object': block, 'start_dt': start_dt}
return create_timetable_entry(session_.event, entry_data, extend_parent=True)
def create_timetable_entry(event, data, parent=None, extend_parent=False):
user = session.user if session else None
entry = TimetableEntry(event=event, parent=parent)
entry.populate_from_dict(data)
object_type, object_title = _get_object_info(entry)
db.session.flush()
signals.event.timetable_entry_created.send(entry)
logger.info('Timetable entry %s created by %s', entry, user)
entry.event.log(EventLogRealm.management, EventLogKind.positive, 'Timetable',
f"Entry for {object_type} '{object_title}' created", user,
data={'Time': format_datetime(entry.start_dt, timezone=event.tzinfo)})
if extend_parent:
entry.extend_parent()
return entry
def schedule_contribution(contribution, start_dt, session_block=None, extend_parent=False):
data = {'object': contribution, 'start_dt': start_dt}
parent = None
if session_block:
contribution.session = session_block.session
contribution.session_block = session_block
parent = session_block.timetable_entry
entry = create_timetable_entry(contribution.event, data, parent=parent, extend_parent=extend_parent)
return entry
def update_timetable_entry(entry, data):
changes = entry.populate_from_dict(data)
object_type, object_title = _get_object_info(entry)
db.session.flush()
if changes:
signals.event.timetable_entry_updated.send(entry, changes=changes)
logger.info('Timetable entry %s updated by %s', entry, session.user)
entry.event.log(EventLogRealm.management, EventLogKind.change, 'Timetable',
f"Entry for {object_type} '{object_title}' modified", session.user,
data={'Time': format_datetime(entry.start_dt)})
def delete_timetable_entry(entry, log=True):
object_type, object_title = _get_object_info(entry)
signals.event.timetable_entry_deleted.send(entry)
entry.object = None
db.session.flush()
if log:
logger.info('Timetable entry %s deleted by %s', entry, session.user)
entry.event.log(EventLogRealm.management, EventLogKind.negative, 'Timetable',
f"Entry for {object_type} '{object_title}' deleted", session.user,
data={'Time': format_datetime(entry.start_dt)})
def fit_session_block_entry(entry, log=True):
assert entry.type == TimetableEntryType.SESSION_BLOCK
children = entry.children
if not children:
return
entry.start_dt = min(x.start_dt for x in children)
end_dt = max(x.end_dt for x in children)
entry.session_block.duration = end_dt - entry.start_dt
db.session.flush()
if log:
entry.event.log(EventLogRealm.management, EventLogKind.change, 'Timetable',
'Session block fitted to contents', session.user,
data={'Session block': entry.session_block.full_title})
def move_timetable_entry(entry, parent=None, day=None):
"""Move the `entry` to another session or top-level timetable.
:param entry: `TimetableEntry` to be moved
:param parent: If specified then the entry will be set as a child
of parent
:param day: If specified then the entry will be moved to the
top-level timetable on this day
"""
if bool(parent) + bool(day) != 1:
raise TypeError('Wrong number of arguments')
from indico.modules.events.contributions.operations import update_contribution
updates = {}
contrib_update_data = {}
if day:
new_start_dt = entry.start_dt.replace(day=day.day, month=day.month)
updates['start_dt'] = new_start_dt
updates['parent'] = None
contrib_update_data = {'session_id': None, 'session_block_id': None}
elif parent:
new_start_dt = find_latest_entry_end_dt(parent.object) or parent.start_dt
tz = entry.event.tzinfo
if (new_start_dt + entry.duration).astimezone(tz).date() != parent.start_dt.astimezone(tz).date():
raise UserValueError(_('Session block cannot span more than one day'))
updates['parent'] = parent
updates['start_dt'] = new_start_dt
contrib_update_data = {'session': parent.session_block.session, 'session_block': parent.session_block}
update_timetable_entry(entry, updates)
if entry.type == TimetableEntryType.CONTRIBUTION:
update_contribution(entry.object, contrib_update_data)
if parent and entry.end_dt > parent.end_dt:
duration = parent.object.duration + (entry.end_dt - parent.end_dt)
update_session_block(parent.object, {'duration': duration})
def update_timetable_entry_object(entry, data):
"""Update the `object` of a timetable entry according to its type."""
from indico.modules.events.contributions.operations import update_contribution
obj = entry.object
if entry.type == TimetableEntryType.CONTRIBUTION:
update_contribution(obj, data)
elif entry.type == TimetableEntryType.SESSION_BLOCK:
update_session_block(obj, data)
elif entry.type == TimetableEntryType.BREAK:
obj.populate_from_dict(data)
db.session.flush()
def swap_timetable_entry(entry, direction, session_=None):
"""Swap entry with closest gap or non-parallel sibling."""
in_session = session_ is not None
sibling = get_sibling_entry(entry, direction=direction, in_session=in_session)
if not sibling:
return
if direction == 'down':
if entry.end_dt != sibling.start_dt:
entry.move_next_to(sibling, position='before')
elif not sibling.is_parallel(in_session=in_session):
sibling.move(entry.start_dt)
entry.move(sibling.end_dt)
elif direction == 'up':
if entry.start_dt != sibling.end_dt:
entry.move_next_to(sibling, position='after')
elif not sibling.is_parallel(in_session=in_session):
entry.move(sibling.start_dt)
sibling.move(entry.end_dt)
def can_swap_entry(entry, direction, in_session=False):
sibling = get_sibling_entry(entry, direction=direction, in_session=in_session)
if not sibling:
return False
if direction == 'down':
return entry.end_dt != sibling.start_dt or not sibling.is_parallel(in_session=in_session)
elif direction == 'up':
return entry.start_dt != sibling.end_dt or not sibling.is_parallel(in_session=in_session)
def get_sibling_entry(entry, direction, in_session=False):
siblings = entry.siblings if not in_session else entry.session_siblings
if direction == 'down':
siblings = [x for x in siblings if x.start_dt >= entry.end_dt]
return min(siblings, key=attrgetter('start_dt')) if siblings else None
elif direction == 'up':
siblings = [x for x in siblings if x.end_dt <= entry.start_dt]
return max(siblings, key=attrgetter('end_dt')) if siblings else None
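# --- Illustrative note (not part of the original module) ---
# A hedged sketch of how move_timetable_entry() is typically driven
# (the names below are hypothetical; it must run inside an Indico app
# context with a loaded TimetableEntry):
#
#   move_timetable_entry(entry, day=date(2021, 5, 3))    # to a top-level day
#   move_timetable_entry(entry, parent=block_entry)      # into a session block
#
# Exactly one of `parent` or `day` must be given, otherwise the function
# raises TypeError.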
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase batch module."""
import datetime
import six
from gcloud_bigtable._helpers import _microseconds_to_timestamp
from gcloud_bigtable.row import TimestampRange
_WAL_SENTINEL = object()
# Assumed granularity of timestamps in Cloud Bigtable.
_ONE_MILLISECOND = datetime.timedelta(microseconds=1000)
def _get_column_pairs(columns, require_qualifier=False):
Turns a list of columns or column families into parsed pairs.
Turns a column family (``fam`` or ``fam:``) into a pair such
as ``['fam', None]`` and turns a column (``fam:col``) into
``['fam', 'col']``.
:type columns: list
:param columns: Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type require_qualifier: bool
:param require_qualifier: Boolean indicating if the columns should
all have a qualifier or not.
:rtype: list
:returns: List of pairs, where the first element in each pair is the
column family and the second is the column qualifier
(or :data:`None`).
:raises: :class:`ValueError <exceptions.ValueError>` if any of the columns
are not of the expected format.
:class:`ValueError <exceptions.ValueError>` if
``require_qualifier`` is :data:`True` and one of the values is
for an entire column family
"""
column_pairs = []
for column in columns:
# Remove trailing colons (i.e. for standalone column family).
column = column.rstrip(':')
num_colons = column.count(':')
if num_colons == 0:
# column is a column family.
if require_qualifier:
raise ValueError('column does not contain a qualifier',
column)
else:
column_pairs.append([column, None])
elif num_colons == 1:
column_pairs.append(column.split(':'))
else:
raise ValueError('Column contains the : separator more than once')
return column_pairs
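# Illustrative examples (not part of the original module, values hedged):
#   _get_column_pairs(['fam1', 'fam2:', 'fam3:col'])
#       -> [['fam1', None], ['fam2', None], ['fam3', 'col']]
#   _get_column_pairs(['fam1'], require_qualifier=True)  -> raises ValueError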
class Batch(object):
"""Batch class for accumulating mutations.
:type table: :class:`Table <gcloud_bigtable.happybase.table.Table>`
:param table: The table where mutations will be applied.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the epoch)
that all mutations will be applied at.
:type batch_size: int
:param batch_size: (Optional) The maximum number of mutations to allow
to accumulate before committing them.
:type transaction: bool
:param transaction: Flag indicating if the mutations should be sent
transactionally or not. If ``transaction=True`` and
an error occurs while a :class:`Batch` is active,
then none of the accumulated mutations will be
committed. If ``batch_size`` is set, the mutation
can't be transactional.
:type wal: object
:param wal: Unused parameter (Boolean for using the HBase Write Ahead Log).
Provided for compatibility with HappyBase, but irrelevant for
Cloud Bigtable since it does not have a Write Ahead Log.
:raises: :class:`TypeError <exceptions.TypeError>` if ``batch_size``
is set and ``transaction=True``.
:class:`ValueError <exceptions.ValueError>` if ``batch_size``
is not positive.
:class:`ValueError <exceptions.ValueError>` if ``wal``
is used.
"""
def __init__(self, table, timestamp=None, batch_size=None,
transaction=False, wal=_WAL_SENTINEL):
if wal is not _WAL_SENTINEL:
raise ValueError('The wal argument cannot be used with '
'Cloud Bigtable.')
if batch_size is not None:
if transaction:
raise TypeError('When batch_size is set, a Batch cannot be '
'transactional')
if batch_size <= 0:
raise ValueError('batch_size must be positive')
self._table = table
self._batch_size = batch_size
# Timestamp is in milliseconds, convert to microseconds.
self._timestamp = self._delete_range = None
if timestamp is not None:
self._timestamp = _microseconds_to_timestamp(1000 * timestamp)
# For deletes, we get the very next timestamp (assuming timestamp
# granularity is milliseconds). This is because HappyBase users
# expect HBase deletes to go **up to** and **including** the
# timestamp while Cloud Bigtable Time Ranges **exclude** the
# final timestamp.
next_timestamp = self._timestamp + _ONE_MILLISECOND
self._delete_range = TimestampRange(end=next_timestamp)
self._transaction = transaction
# Internal state for tracking mutations.
self._row_map = {}
self._mutation_count = 0
def send(self):
"""Send / commit the batch of mutations to the server."""
for row in self._row_map.values():
# commit() does nothing if row hasn't accumulated any mutations.
row.commit()
self._row_map.clear()
self._mutation_count = 0
def _try_send(self):
"""Send / commit the batch if mutations have exceeded batch size."""
if self._batch_size and self._mutation_count >= self._batch_size:
self.send()
def _get_row(self, row_key):
"""Gets a row that will hold mutations.
If the row is not already cached on the current batch, a new row will
be created.
:type row_key: str
:param row_key: The row key for a row stored in the map.
:rtype: :class:`Row <gcloud_bigtable.row.Row>`
:returns: The newly created or stored row that will hold mutations.
"""
if row_key not in self._row_map:
table = self._table._low_level_table
self._row_map[row_key] = table.row(row_key)
return self._row_map[row_key]
def put(self, row, data, wal=_WAL_SENTINEL):
"""Insert data into a row in the table owned by this batch.
:type row: str
:param row: The row key where the mutation will be "put".
:type data: dict
:param data: Dictionary containing the data to be inserted. The keys
are column names (of the form ``fam:col``) and the values
are strings (bytes) to be stored in those columns.
:type wal: object
:param wal: Unused parameter (to override the default on the
instance). Provided for compatibility with HappyBase, but
irrelevant for Cloud Bigtable since it does not have a
Write Ahead Log.
:raises: :class:`ValueError <exceptions.ValueError>` if ``wal``
is used.
"""
if wal is not _WAL_SENTINEL:
raise ValueError('The wal argument cannot be used with '
'Cloud Bigtable.')
row_object = self._get_row(row)
# Make sure all the keys are valid before beginning
# to add mutations.
column_pairs = _get_column_pairs(six.iterkeys(data),
require_qualifier=True)
for column_family_id, column_qualifier in column_pairs:
value = data[column_family_id + ':' + column_qualifier]
row_object.set_cell(column_family_id, column_qualifier,
value, timestamp=self._timestamp)
self._mutation_count += len(data)
self._try_send()
def _delete_columns(self, columns, row_object):
"""Adds delete mutations for a list of columns and column families.
:type columns: list
:param columns: Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type row_object: :class:`Row <gcloud_bigtable.row.Row>`
:param row_object: The row which will hold the delete mutations.
:raises: :class:`ValueError <exceptions.ValueError>` if the delete
timestamp range is set on the current batch, but a
column family delete is attempted.
"""
column_pairs = _get_column_pairs(columns)
for column_family_id, column_qualifier in column_pairs:
if column_qualifier is None:
if self._delete_range is not None:
raise ValueError('The Cloud Bigtable API does not support '
'adding a timestamp to '
'"DeleteFromFamily" ')
row_object.delete_cells(column_family_id,
columns=row_object.ALL_COLUMNS)
else:
row_object.delete_cell(column_family_id,
column_qualifier,
time_range=self._delete_range)
def delete(self, row, columns=None, wal=_WAL_SENTINEL):
"""Delete data from a row in the table owned by this batch.
:type row: str
:param row: The row key where the delete will occur.
:type columns: list
:param columns: (Optional) Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
If not used, will delete the entire row.
:type wal: object
:param wal: Unused parameter (to override the default on the
instance). Provided for compatibility with HappyBase, but
irrelevant for Cloud Bigtable since it does not have a
Write Ahead Log.
:raises: :class:`ValueError <exceptions.ValueError>` if ``wal``
is used, or if the delete timestamp range is set on the
current batch, but a full row delete is attempted.
"""
if wal is not _WAL_SENTINEL:
raise ValueError('The wal argument cannot be used with '
'Cloud Bigtable.')
row_object = self._get_row(row)
if columns is None:
# Delete entire row.
if self._delete_range is not None:
raise ValueError('The Cloud Bigtable API does not support '
'adding a timestamp to "DeleteFromRow" '
'mutations')
row_object.delete()
self._mutation_count += 1
else:
self._delete_columns(columns, row_object)
self._mutation_count += len(columns)
self._try_send()
def __enter__(self):
"""Enter context manager, no set-up required."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit context manager, no set-up required.
:type exc_type: type
:param exc_type: The type of the exception if one occurred while the
context manager was active. Otherwise, :data:`None`.
:type exc_value: :class:`Exception <exceptions.Exception>`
:param exc_value: An instance of ``exc_type`` if an exception occurred
while the context was active.
Otherwise, :data:`None`.
:type traceback: ``traceback`` type
:param traceback: The traceback where the exception occurred (if one
did occur). Otherwise, :data:`None`.
"""
# If the context manager encountered an exception and the batch is
# transactional, we don't commit the mutations.
if self._transaction and exc_type is not None:
return
# NOTE: For non-transactional batches, this will even commit mutations
# if an error occurred during the context manager.
self.send()
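# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of driving Batch as a context manager; `table` is a
# hypothetical happybase-style Table wrapping a Cloud Bigtable table.
#
#   with Batch(table, transaction=True) as batch:
#       batch.put('row-1', {'cf1:greeting': 'hello', 'cf1:lang': 'en'})
#       batch.delete('row-2', columns=['cf1:obsolete'])
#
# On clean exit the accumulated mutations are sent; if an exception is
# raised inside the block, a transactional batch sends nothing.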
|
|
'''
def generatorA_28x28_mnist(self,z_pholder,reuse=False,is_phase_train=True):
"""
Args:
z_pholder: has shape [batch_size,self.seed_size]
reuse: Set to False to generate new variable scopes
(e.g.: a fresh copy of the network)
is_phase_train: Tells batch normalizers to use sampling statistics.
Switch this on for training and off for making sample images
The network architecture of the generator
It should have a similar architecture to the discriminator in reverse
This network does transposed convolution.
Note that in Taehoon Kim's example, this entire network is cut and pasted
into a sampler() method for the sake of setting the "is_phase_train" flags
on all the batch_norm layers. For sampling, set is_phase_train to False.
return tanh_act,logits,gen_nn
"""
print ("\n\nBuilding generatorA network for 28x28 grayscale output\n")
with tf.variable_scope("generator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
#nf1 = 96
#nf2 = 192
nf1=12
nf2=24
reshaped_z = tf.expand_dims(tf.expand_dims(z_pholder,1),1)
#print "reshaped_z dims",reshaped_z.get_shape().as_list()
#Back into GenericInput class to give input a "output" method
z = dtl.GenericInput(reshaped_z)
#Project 10 randomly generated numbers to size nf2
batch_size = self.batch_size
"""
Note: I figured out the dims the output shapes by running the discriminator network
forward, and writing out the resulting dims.
"""
dc1 = dtl.Conv2d_transpose(z,
filter_shape= [1,1,nf2,10],
output_shape= [batch_size,1,1,nf2],
strides= [1,1,1,1],
padding = 'VALID',
name = 'g_deconv1')
#Error right here
r1 = dtl.Relu(dc1)
g1 = dtl.BatchNorm(r1,'g_bnorm1',is_phase_train)
dc2 = dtl.Conv2d_transpose(g1,
filter_shape= [1,1,nf2,nf2],
output_shape= [batch_size,1,1,nf2],
strides= [1,1,1,1],
padding= 'VALID',
name = 'g_deconv2')
r2 = dtl.Relu(dc2)
g2 = dtl.BatchNorm(r2,'g_bnorm2',is_phase_train)
dc3 = dtl.Conv2d_transpose(g2,
filter_shape = [3,3,nf2,nf2],
output_shape = [batch_size,4,4,nf2],
strides = [1,2,2,1],
padding = 'VALID',
name = 'g_deconv3')
r3 = dtl.Relu(dc3)
g3 = dtl.BatchNorm(r3,'g_bnorm3',is_phase_train)
dc4 = dtl.Conv2d_transpose(g3,
filter_shape = [3,3,nf2,nf2],
output_shape = [batch_size,9,9,nf2],
strides = [1,2,2,1],
padding = 'VALID',
name = 'g_deconv4')
r4 = dtl.Relu(dc4)
g4 = dtl.BatchNorm(r4,'g_bnorm4',is_phase_train)
dc5 = dtl.Conv2d_transpose(g4,
filter_shape = [3,3,nf2,nf2],
output_shape = [batch_size,11,11,nf2],
strides = [1,1,1,1],
padding = 'VALID',
name = 'g_deconv5')
r5 = dtl.Relu(dc5)
g5 = dtl.BatchNorm(r5,'g_bnorm5',is_phase_train)
dc6 = dtl.Conv2d_transpose(g5,
filter_shape = [3,3,nf1,nf2],
output_shape = [batch_size,13,13,nf1],
strides = [1,1,1,1],
padding = 'VALID',
name = 'g_deconv6')
r6 = dtl.Relu(dc6)
g6 = dtl.BatchNorm(r6,'g_bnorm6',is_phase_train)
dc7 = dtl.Conv2d_transpose(g6,
filter_shape = [3,3,1,nf1],
output_shape = [batch_size,28,28,1],
strides = [1,2,2,1],
padding = 'VALID',
name = 'g_deconv7')
"""
r7 = dtl.Relu(dc7)
g7 = dtl.BatchNorm(r7,'g_bnorm7')
dc8 = dtl.Conv2d_transpose(g7,
filter_shape = [3,3,nf1,nf1],
output_shape = [batch_size,30,30,nf1],
strides = [1,1,1,1],
padding = 'VALID',
name = 'deconv8')
r8 = dtl.Relu(dc8)
g8 = dtl.BatchNorm(r8,'g_bnorm8')
dc9 = dtl.Conv2d_transpose(g8,
filter_shape = [3,3,nf1,1],
output_shape = [batch_size,32,32,1],
strides = [1,1,1,1],
padding = 'VALID',
name = 'deconv9')
"""
#nn = dtl.Network(reshaped_z,[last_layer],bounds=[0.,1.])
return tf.nn.tanh(dc7.output)
'''
'''
def discriminatorA_28x28_mnist(self,x_pholder,reuse=False):
"""
The discriminator network.
A convolutional neural network for real images
return logits,discr_nn
This version takes [b,28,28,1] BHWC inputs
"""
print ("Building discriminatorA network for 28x28 grayscale input\n\n")
with tf.variable_scope("discriminator") as scope:
if reuse==True:
scope.reuse_variables()
#print "Resusing", str(tf.get_variable_scope().reuse)
#Reshape image input and pad to 32x32
x_image = dtl.ImageInput(x_pholder,image_shape=[28,28,1],pad_size=2)
#x_image = dtl.ImageInput(x_pholder,image_shape=[96,96,4])
#conv filter dimensions are w,h,input_dims,output_dims
#Example replacing pooling layers with strided conv layers
#nf1=96
#nf2 = 192
nf1=12
nf2=24
#Block1
cl1 = dtl.Conv2d(x_image, filter_shape = [3,3,1,nf1],
strides = [1,1,1,1],padding = 'VALID',name='d_conv1')
r1 = dtl.Relu(cl1)
bn1 = dtl.BatchNorm(r1,'d_bnorm1')
cl2 = dtl.Conv2d(r1, filter_shape = [3,3,nf1,nf1],
strides=[1,1,1,1],padding = 'VALID',name='d_conv2')
r2 = dtl.Relu(cl2)
bn2 = dtl.BatchNorm(r2,'d_bnorm2')
cl3s = dtl.Conv2d(r2,filter_shape=[3,3,nf1,nf1],
strides = [1,2,2,1],
padding = 'VALID',
name = 'd_conv3_strided')
r3 = dtl.Relu(cl3s)
bn3 = dtl.BatchNorm(r3,'d_bnorm3')
#Block2
cl4 = dtl.Conv2d(r3, filter_shape = [3,3,nf1,nf2],
strides=[1,1,1,1],
padding='VALID',name='d_conv4')
r4 = dtl.Relu(cl4)
bn4 = dtl.BatchNorm(r4,'d_bnorm4')
cl5 = dtl.Conv2d(r4, filter_shape = [3,3,nf2,nf2],
strides=[1,1,1,1],
padding='VALID',name='d_conv5')
r5 = dtl.Relu(cl5)
bn5 = dtl.BatchNorm(r5,'d_bnorm5')
cl6s = dtl.Conv2d(r5, filter_shape = [3,3,nf2,nf2],
strides=[1,2,2,1],
padding='VALID',name='d_conv6_strided')
r6 = dtl.Relu(cl6s)
bn6 = dtl.BatchNorm(r6,'d_bnorm6')
c7 = dtl.Conv2d(r6,filter_shape=[3,3,nf2,nf2],
strides = [1,2,2,1],
padding = 'VALID',
name = 'd_conv7_strided')
r7 = dtl.Relu(c7)
bn7 = dtl.BatchNorm(r7,'d_bnorm7')
c8 = dtl.Conv2d(r7,filter_shape =[1,1,nf2,nf2],
strides=[1,1,1,1],
padding = 'VALID',
name='d_conv8_1x1')
r8 = dtl.Relu(c8)
dtl.BatchNorm(r8,'d_bnorm8')
c9 = dtl.Conv2d(r8,filter_shape=[1,1,nf2,10],
strides=[1,1,1,1],
padding='VALID',
name='d_conv9_1x1')
r9 = dtl.Relu(c9)
flat = dtl.Flatten(r9)
nn = dtl.Network(x_image,[flat],bounds=[0.,1.])
logits = flat.output
probs = tf.nn.sigmoid(logits)
return probs,logits,nn
'''
'''
def discriminatorA_96x96(self,x_pholder,reuse=False,is_phase_train=True):
"""
Discriminator network for 96x96 images
No labels used to train by category
Notes:
-Batch normalize before rectifying
-Only works on one class
-Uses a fully connected layer at the very end
"""
with tf.variable_scope("discriminator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
#Note: I give the discriminator fewer filters than the generator
nf1 = 24
nf2 = 48
nf3=384 #fc vars
x_image = dtl.ImageInput(x_pholder,image_shape=[96,96,3],pad_size=0)
cl1 = dtl.Conv2d(x_image,
filter_shape=[9,9,3,nf1],
strides=[1,1,1,1],
padding='SAME',
name='d_conv1')
#bn1 =dtl.BatchNorm(cl1,'d_bnorm1',is_phase_train)
r1 = dtl.Relu(cl1)
#Strided Conv
cl2 = dtl.Conv2d(r1,[5,5,nf1,nf1],[1,2,2,1],'SAME',name='d_conv2')
#bn2 =dtl.BatchNorm(cl2,'d_bnorm2',is_phase_train)
r2 =dtl.Relu(cl2)
cl3 = dtl.Conv2d(r2,[5,5,nf1,nf1],[1,2,2,1],'SAME',name='d_conv3')
#bn3 =dtl.BatchNorm(cl3,'d_bnorm3',is_phase_train)
r3 = dtl.Relu(cl3)
#Strided Conv
cl4 = dtl.Conv2d(r3,[5,5,nf1,nf1],[1,2,2,1],'SAME',name='d_conv4')
#bn4 =dtl.BatchNorm(cl4,'d_bnorm4',is_phase_train)
r4 = dtl.Relu(cl4)
cl5 = dtl.Conv2d(r4,[5,5,nf1,nf2],[1,2,2,1],'SAME',name='d_conv5')
#bn5 = dtl.BatchNorm(cl5,'d_bnorm5',is_phase_train)
r5 = dtl.Relu(cl5)
#Strided Conv
cl6 = dtl.Conv2d(r5,[5,5,nf2,nf2],[1,2,2,1],'SAME',name='d_conv6')
#bn6 =dtl.BatchNorm(cl6,'d_bnorm6',is_phase_train)
r6 = dtl.Relu(cl6)
cl7 = dtl.Conv2d(r6,[3,3,nf2,nf2],[1,3,3,1],'SAME',name='d_conv7')
#bn7 = dtl.BatchNorm(cl7,'d_bnorm7',is_phase_train)
r7 = dtl.Relu(cl7)
#Strided FC layers
cl8 = dtl.Conv2d(r7,[1,1,nf2,nf3],[1,1,1,1],'SAME',name='d_conv8')
#bn8 = dtl.BatchNorm(cl8,'d_bnorm8',is_phase_train)
r8 = dtl.Relu(cl8)
cl9 = dtl.Conv2d(r8,[1,1,nf3,self.seed_size],[1,1,1,1],'SAME',name='d_conv9')
#bn9 =dtl.BatchNorm(cl9,'d_bnorm9',is_phase_train)
#r9 = dtl.Relu(bn9)
"""
cl10 = dtl.Conv2d(r9,[1,1,nf1,nf2],[1,1,1,1],'SAME',name='d_conv10')
bn10 = dtl.BatchNorm(cl10,'d_bnorm10',is_phase_train)
r10 = dtl.Relu(bn10)
cl11 = dtl.Conv2d(r10,[1,1,nf2,self.seed_size],[1,1,1,1],'VALID',name='d_conv11')
bn11 = dtl.BatchNorm(cl11,'dbnorm11',is_phase_train)
r11 = dtl.Relu(bn11)
"""
flat = dtl.Flatten(cl9)
logits=flat.output
#logits = cl9.output
#nn = dtl.Network(x_image,[cl9],bounds=[-1.,1.])
return tf.nn.sigmoid(logits), logits, None
'''
'''
def generatorA_96x96(self,z_pholder,reuse=False,is_phase_train=True):
"""
Generator network for 96x96 images
No labels used to train by category
"""
with tf.variable_scope("generator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
batch_size = self.batch_size
"""
I used PaddingCalc.py to work out the dims of everything here.
--Larry
"""
print "\n\n\nBuilding generator network\n"
nf1=48
nf2=96
nf3 = 1024
#z input should be [b,z_size]
#Transform to [b,1,1,z_size]
#z is the seeding vector (the latent space)
z_deflat = tf.expand_dims(tf.expand_dims(z_pholder,1),1)
z_generic = dtl.GenericInput(z_deflat) #Wrap in a layer
dc1 = dtl.Conv2d_transpose(z_generic,
filter_shape= [1,1,nf3,self.seed_size],
output_shape= [batch_size,1,1,nf3],
strides= [1,1,1,1],
padding = 'SAME',
name = 'g_deconv1')
bn1 = dtl.BatchNorm(dc1,'g_bnorm1',is_phase_train)
r1 = dtl.Relu(bn1)
dc2 = dtl.Conv2d_transpose(r1,[1,1,nf2,nf3],
[batch_size,1,1,nf2],
[1,1,1,1],'SAME',name='g_deconv2')
bn2 =dtl.BatchNorm(dc2,'g_bnorm2',is_phase_train)
r2 = dtl.Relu(bn2)
dc3 = dtl.Conv2d_transpose(r2,[3,3,nf2,nf2],
[batch_size,3,3,nf2],
[1,3,3,1],'SAME',name='g_deconv3')
bn3 =dtl.BatchNorm(dc3,'g_bnorm3',is_phase_train)
r3 = dtl.Relu(bn3)
dc4 = dtl.Conv2d_transpose(r3,[5,5,nf2,nf2],
[batch_size,6,6,nf2],
[1,2,2,1],'SAME',name='g_deconv4')
bn4 =dtl.BatchNorm(dc4,'g_bnorm4',is_phase_train)
r4 = dtl.Relu(bn4)
dc5 = dtl.Conv2d_transpose(r4,[5,5,nf2,nf2],
[batch_size,12,12,nf2],
[1,2,2,1],'SAME',name='g_deconv5')
bn5 = dtl.BatchNorm(dc5,'g_bnorm5',is_phase_train)
r5 = dtl.Relu(bn5)
dc6 = dtl.Conv2d_transpose(r5,[5,5,nf1,nf2],
[batch_size,24,24,nf1],
[1,2,2,1],'SAME',name='g_deconv6')
bn6 = dtl.BatchNorm(dc6,'g_bnorm6',is_phase_train)
r6 = dtl.Relu(bn6)
dc7 = dtl.Conv2d_transpose(r6,[5,5,nf1,nf1],
[batch_size,48,48,nf1],
[1,2,2,1],'SAME',name='g_deconv7')
bn7 = dtl.BatchNorm(dc7,'g_bnorm7',is_phase_train)
r7 = dtl.Relu(bn7)
dc8 = dtl.Conv2d_transpose(r7,[5,5,nf1,nf1],
[batch_size,96,96,nf1],
[1,2,2,1],'SAME',name='g_deconv8')
bn8 = dtl.BatchNorm(dc8,'g_bnorm8',is_phase_train)
r8 = dtl.Relu(bn8)
dc9 = dtl.Conv2d_transpose(r8,[9,9,3,nf1],
[batch_size,96,96,3],
[1,1,1,1],'SAME',name='g_deconv9')
#bn9 = dtl.BatchNorm(dc9,'g_bnorm9',is_phase_train)
#r9 = dtl.Relu(bn9)
"""
dc10 = dtl.Conv2d_transpose(r9,[3,3,nf1,nf1],
[batch_size,88,88,nf1],
[1,2,2,1],'VALID',name='g_deconv10')
bn10 = dtl.BatchNorm(dc10,'g_bnorm10',is_phase_train)
r10 = dtl.Relu(bn10)
dc11 = dtl.Conv2d_transpose(r10,[9,9,3,nf1],
[batch_size,96,96,3],
[1,1,1,1],'VALID',name='g_deconv11')
#bn11 = dtl.BatchNorm(dc11,'g_bnorm11',is_phase_train)
#r11 = dtl.Relu(bn11)
"""
return tf.nn.tanh(dc9.output) #Outputs should be 96,96,3
'''
def discriminatorB_96x96(self,x_pholder,reuse=False,is_phase_train=True):
"""
Discriminator network for 96x96 images
No labels used to train by category
This is shallower than the "A" version
Notes:
-Batch normalize before rectifying
-Only works on one class
-Uses a fully connected layer at the very end
"""
with tf.variable_scope("discriminator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
nf1 = 32
nf2 = 64
nf3 = 512
x_image = dtl.ImageInput(x_pholder,image_shape=self.input_shape[1:],pad_size=0)
cl1 = dtl.Conv2d(x_image,
filter_shape=[9,9,3,nf1],
strides=[1,1,1,1],
padding='VALID',
name='d_conv1')
bn1 =dtl.BatchNorm(cl1,'d_bnorm1',is_phase_train)
r1 = dtl.Relu(bn1)
#Strided Conv
cl2 = dtl.Conv2d(r1,[5,5,nf1,nf1],[1,2,2,1],'VALID',name='d_conv2')
bn2 =dtl.BatchNorm(cl2,'d_bnorm2',is_phase_train)
r2 =dtl.Relu(bn2)
cl3 = dtl.Conv2d(r2,[5,5,nf1,nf1],[1,2,2,1],'VALID',name='d_conv3')
bn3 =dtl.BatchNorm(cl3,'d_bnorm3',is_phase_train)
r3 = dtl.Relu(bn3)
#Strided Conv
cl4 = dtl.Conv2d(r3,[5,5,nf1,nf2],[1,2,2,1],'VALID',name='d_conv4')
bn4 =dtl.BatchNorm(cl4,'d_bnorm4',is_phase_train)
r4 = dtl.Relu(bn4)
cl5 = dtl.Conv2d(r4,[5,5,nf2,nf2],[1,2,2,1],'VALID',name='d_conv5')
bn5 =dtl.BatchNorm(cl5,'d_bnorm5',is_phase_train)
r5 = dtl.Relu(bn5)
cl6 = dtl.Conv2d(r5,[2,2,nf2,nf3],[1,1,1,1],'VALID',name='d_conv6')
bn6 =dtl.BatchNorm(cl6,'d_bnorm6',is_phase_train)
r6 = dtl.Relu(bn6)
flat = dtl.Flatten(r6)
#Fully connected layers
l7= dtl.Linear(flat,nf3,"d_linear7")
bn7 = dtl.BatchNorm(l7,"d_bnorm7",is_phase_train)
r7 = dtl.Relu(bn7)
l8 = dtl.Linear(r7,nf3,"d_linear8")
bn8 = dtl.BatchNorm(l8,"d_bnorm8",is_phase_train)
r8 = dtl.Relu(bn8)
l9 = dtl.Linear(r8,self.seed_size,"d_linear9")
bn9 =dtl.BatchNorm(l9,'d_bnorm9',is_phase_train)
r9 = dtl.Relu(bn9)
logits= r9.output
nn = dtl.Network(x_image,[r9],bounds=[0.,1.])
return tf.nn.sigmoid(logits), logits, nn
def generatorB_96x96(self,z_pholder,reuse=False,is_phase_train=True):
"""
Generator network for 96x96 images
No labels used to train by category
This is shallower than generatorA and includes fully connected layers
"""
with tf.variable_scope("generator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
batch_size = self.batch_size
"""
I used PaddingCalc.py to work out the dims of everything here.
--Larry
"""
print "\n\n\nBuilding generator network\n"
nf1=32
nf2=64
nf3=512
#z input should be [b,seed_size]
#Transform to [b,1,1,seed_size]
#z_deflat = tf.expand_dims(tf.expand_dims(z_pholder,1),1)
z_generic = dtl.GenericInput(z_pholder) #Wrap in a layer
#Linear projection
l1= dtl.Linear(z_generic,nf3,"g_linear1")
bn1 = dtl.BatchNorm(l1,'g_bnorm1',is_phase_train)
r1 = dtl.Relu(bn1)
l2= dtl.Linear(r1,nf3,"g_linear2")
bn2 = dtl.BatchNorm(l2,'g_bnorm2',is_phase_train)
r2 = dtl.Relu(bn2)
l3 = dtl.Linear(r2,nf3,"g_linear3")
bn3 =dtl.BatchNorm(l3,'g_bnorm3',is_phase_train)
r3 = dtl.Relu(bn3)
#reshape to [b,1,1,nf3]
r3_raw = tf.expand_dims(tf.expand_dims(r3.output,1),1)
r3_generic = dtl.GenericInput(r3_raw)
dc4 = dtl.Conv2d_transpose(r3_generic,
filter_shape= [2,2,nf2,nf3],
output_shape= [batch_size,2,2,nf2],
strides= [1,1,1,1],
padding = 'VALID',
name = 'g_deconv4')
bn4 = dtl.BatchNorm(dc4,'g_bnorm4',is_phase_train)
r4 = dtl.Relu(bn4)
dc5 = dtl.Conv2d_transpose(r4,[5,5,nf2,nf2],
[batch_size,8,8,nf2],
[1,2,2,1],'VALID',name='g_deconv5')
bn5 =dtl.BatchNorm(dc5,'g_bnorm5',is_phase_train)
r5 = dtl.Relu(bn5)
dc6 = dtl.Conv2d_transpose(r5,[5,5,nf1,nf2],
[batch_size,19,19,nf1],
[1,2,2,1],'VALID',name='g_deconv6')
bn6 =dtl.BatchNorm(dc6,'g_bnorm6',is_phase_train)
r6 = dtl.Relu(bn6)
dc7 = dtl.Conv2d_transpose(r6,[5,5,nf1,nf1],
[batch_size,42,42,nf1],
[1,2,2,1],'VALID',name='g_deconv7')
bn7 =dtl.BatchNorm(dc7,'g_bnorm7',is_phase_train)
r7 = dtl.Relu(bn7)
dc8 = dtl.Conv2d_transpose(r7,[5,5,nf1,nf1],
[batch_size,88,88,nf1],
[1,2,2,1],'VALID',name='g_deconv8')
bn8 = dtl.BatchNorm(dc8,'g_bnorm8',is_phase_train)
r8 = dtl.Relu(bn8)
dc9 = dtl.Conv2d_transpose(r8,[9,9,3,nf1],
[batch_size,96,96,3],
[1,1,1,1],'VALID',name='g_deconv9')
#bn9 = dtl.BatchNorm(dc9,'g_bnorm9',is_phase_train)
#r9 = dtl.Relu(bn9)
return tf.nn.tanh(dc9.output) #Outputs should be 96,96,3
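# --- Illustrative note (not part of the original class) ---
# A hedged sketch of how the B-variant pair above is typically wired in a
# GAN training graph (placeholder names are hypothetical):
#
#   g_sample = self.generatorB_96x96(z_pholder)                      # fake images
#   d_real, d_real_logits, _ = self.discriminatorB_96x96(x_pholder)  # real branch
#   d_fake, d_fake_logits, _ = self.discriminatorB_96x96(g_sample,
#                                                        reuse=True) # shared weights
#
# The reuse=True call is what triggers scope.reuse_variables(), so the two
# discriminator branches share one set of parameters.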
'''
def discriminatorC_96x96(self,x_pholder,reuse=False,is_phase_train=True):
"""
Discriminator network for 96x96 images
No labels used to train by category
No batchnorm
This is shallower than "A" version
Notes:
-Batch normalize before rectifying
-Only works on one class
-Uses a fully connected layer at the very end
"""
with tf.variable_scope("discriminator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
nf1 = 32
nf2 = 64
nf3 = 512
x_image = dtl.ImageInput(x_pholder,image_shape=self.input_shape[1:],pad_size=0)
cl1 = dtl.Conv2d(x_image,
filter_shape=[9,9,3,nf1],
strides=[1,1,1,1],
padding='VALID',
name='d_conv1')
#bn1 =dtl.BatchNorm(cl1,'d_bnorm1',is_phase_train)
r1 = dtl.Relu(cl1)
#Strided Conv
cl2 = dtl.Conv2d(r1,[5,5,nf1,nf1],[1,2,2,1],'VALID',name='d_conv2')
#bn2 =dtl.BatchNorm(cl2,'d_bnorm2',is_phase_train)
r2 =dtl.Relu(cl2)
cl3 = dtl.Conv2d(r2,[5,5,nf1,nf1],[1,2,2,1],'VALID',name='d_conv3')
#bn3 =dtl.BatchNorm(cl3,'d_bnorm3',is_phase_train)
r3 = dtl.Relu(cl3)
#Strided Conv
cl4 = dtl.Conv2d(r3,[5,5,nf1,nf2],[1,2,2,1],'VALID',name='d_conv4')
#bn4 =dtl.BatchNorm(cl4,'d_bnorm4',is_phase_train)
r4 = dtl.Relu(cl4)
cl5 = dtl.Conv2d(r4,[5,5,nf2,nf2],[1,2,2,1],'VALID',name='d_conv5')
#bn5 =dtl.BatchNorm(cl5,'d_bnorm5',is_phase_train)
r5 = dtl.Relu(cl5)
cl6 = dtl.Conv2d(r5,[2,2,nf2,nf3],[1,1,1,1],'VALID',name='d_conv6')
#bn6 =dtl.BatchNorm(cl6,'d_bnorm6',is_phase_train)
r6 = dtl.Relu(cl6)
flat = dtl.Flatten(r6)
#Fully connected layers
l7= dtl.Linear(flat,nf3,"d_linear7")
#bn7 = dtl.BatchNorm(l7,"d_bnorm7",is_phase_train)
r7 = dtl.Relu(l7)
l8 = dtl.Linear(r7,nf3,"d_linear8")
#bn8 = dtl.BatchNorm(l8,"d_bnorm8",is_phase_train)
r8 = dtl.Relu(l8)
l9 = dtl.Linear(r8,self.seed_size,"d_linear9")
#bn9 =dtl.BatchNorm(l9,'d_bnorm9',is_phase_train)
r9 = dtl.Relu(l9)
logits= r9.output
nn = dtl.Network(x_image,[r9],bounds=[0.,1.])
return tf.nn.sigmoid(logits), logits, nn
'''
'''
def generatorC_96x96(self,z_pholder,reuse=False,is_phase_train=True):
"""
Generator network for 96x96 images
No labels used to train by category
#This is shallower than generatorA and includes a fully connected layer
"""
with tf.variable_scope("generator") as scope:
if reuse:
#tf.get_variable_scope().reuse_variables()
scope.reuse_variables()
batch_size = self.batch_size
"""
I used PaddingCalc.py to work out the dims of everything here.
--Larry
"""
print "\n\n\nBuilding generator network\n"
nf1=32
nf2=64
nf3=512
#z input should be [b,seed_size]
#Transform to [b,1,1,seed_size]
#z_deflat = tf.expand_dims(tf.expand_dims(z_pholder,1),1)
z_generic = dtl.GenericInput(z_pholder) #Wrap in a layer
#Linear projection
l1= dtl.Linear(z_generic,nf3,"g_linear1")
#bn1 = dtl.BatchNorm(l1,'g_bnorm1',is_phase_train)
r1 = dtl.Relu(l1)
l2= dtl.Linear(r1,nf3,"g_linear2")
#bn2 = dtl.BatchNorm(l2,'g_bnorm2',is_phase_train)
r2 = dtl.Relu(l2)
l3 = dtl.Linear(r2,nf3,"g_linear3")
#bn3 =dtl.BatchNorm(l3,'g_bnorm3',is_phase_train)
r3 = dtl.Relu(l3)
#reshape to [b,1,1,nf3]
r3_raw = tf.expand_dims(tf.expand_dims(r3.output,1),1)
r3_generic = dtl.GenericInput(r3_raw)
dc4 = dtl.Conv2d_transpose(r3_generic,
filter_shape= [2,2,nf2,nf3],
output_shape= [batch_size,2,2,nf2],
strides= [1,1,1,1],
padding = 'VALID',
name = 'g_deconv4')
#bn4 = dtl.BatchNorm(dc4,'g_bnorm4',is_phase_train)
r4 = dtl.Relu(dc4)
dc5 = dtl.Conv2d_transpose(r4,[5,5,nf2,nf2],
[batch_size,8,8,nf2],
[1,2,2,1],'VALID',name='g_deconv5')
#bn5 =dtl.BatchNorm(dc5,'g_bnorm5',is_phase_train)
r5 = dtl.Relu(dc5)
dc6 = dtl.Conv2d_transpose(r5,[5,5,nf1,nf2],
[batch_size,19,19,nf1],
[1,2,2,1],'VALID',name='g_deconv6')
#bn6 =dtl.BatchNorm(dc6,'g_bnorm6',is_phase_train)
r6 = dtl.Relu(dc6)
dc7 = dtl.Conv2d_transpose(r6,[5,5,nf1,nf1],
[batch_size,42,42,nf1],
[1,2,2,1],'VALID',name='g_deconv7')
#bn7 =dtl.BatchNorm(dc7,'g_bnorm7',is_phase_train)
r7 = dtl.Relu(dc7)
dc8 = dtl.Conv2d_transpose(r7,[5,5,nf1,nf1],
[batch_size,88,88,nf1],
[1,2,2,1],'VALID',name='g_deconv8')
#bn8 = dtl.BatchNorm(dc8,'g_bnorm8',is_phase_train)
r8 = dtl.Relu(dc8)
dc9 = dtl.Conv2d_transpose(r8,[9,9,3,nf1],
[batch_size,96,96,3],
[1,1,1,1],'VALID',name='g_deconv9')
#bn9 = dtl.BatchNorm(dc9,'g_bnorm9',is_phase_train)
#r9 = dtl.Relu(bn9)
return tf.nn.tanh(dc9.output) #Outputs should be 96,96,3
'''
'''
def generatorD(self,z,reuse=False,is_phase_train=True):
##genD
"""Stop gap method from shekkiz for proof of concept only"""
with tf.variable_scope("generator") as scope:
if reuse:
scope.reuse_variables()
gen_dim=16
batch_size,image_size,_,num_channels = self.input_shape
z_dim = self.seed_size
W_0 = utils.weight_variable([z_dim,
64 * gen_dim / 2 * image_size / 16 * image_size / 16],
name="g_weights0")
b_0 = utils.bias_variable([64 * gen_dim / 2 * image_size / 16 * image_size / 16],
name="g_bias0")
z_0 = tf.matmul(z, W_0) + b_0
h_0 = tf.reshape(z_0, [-1, image_size / 16, image_size / 16, 64 * gen_dim / 2])
h_bn0 = utils.batch_norm(h_0, 64 * gen_dim / 2, is_phase_train, scope="g_bnorm0")
h_relu0 = tf.nn.relu(h_bn0, name='g_relu0')
W_2 = utils.weight_variable([5, 5, 64 * gen_dim / 4, 64 * gen_dim / 2],
name="g_weights2")
b_2 = utils.bias_variable([64 * gen_dim / 4], name="g_bias2")
deconv_shape = tf.pack([tf.shape(h_relu0)[0],
image_size / 8, image_size / 8, 64 * gen_dim / 4])
h_conv_t2 = utils.conv2d_transpose_strided(h_relu0, W_2, b_2,
output_shape=deconv_shape)
h_bn2 = utils.batch_norm(h_conv_t2, 64 * gen_dim / 4, is_phase_train,
scope="g_bnorm2")
h_relu2 = tf.nn.relu(h_bn2, name='g_relu2')
W_3 = utils.weight_variable([5, 5, 64 * gen_dim / 8, 64 * gen_dim / 4],
name="g_weights3")
b_3 = utils.bias_variable([64 * gen_dim / 8], name="g_bias3")
deconv_shape = tf.pack([tf.shape(h_relu2)[0], image_size / 4,
image_size / 4, 64 * gen_dim / 8])
h_conv_t3 = utils.conv2d_transpose_strided(h_relu2, W_3, b_3,
output_shape=deconv_shape)
h_bn3 = utils.batch_norm(h_conv_t3, 64 * gen_dim / 8, is_phase_train,
scope="g_bnorm3")
h_relu3 = tf.nn.relu(h_bn3, name='g_relu3')
#utils.add_activation_summary(h_relu3)
W_4 = utils.weight_variable([5, 5, 64 * gen_dim / 16, 64 * gen_dim / 8],
name="g_weights4")
b_4 = utils.bias_variable([64 * gen_dim / 16], name="g_bias4")
deconv_shape = tf.pack([tf.shape(h_relu3)[0], image_size / 2, image_size / 2,
64 * gen_dim / 16])
h_conv_t4 = utils.conv2d_transpose_strided(h_relu3, W_4, b_4,
output_shape=deconv_shape)
h_bn4 = utils.batch_norm(h_conv_t4, 64 * gen_dim / 16, is_phase_train,
scope="g_bnorm4")
h_relu4 = tf.nn.relu(h_bn4, name='g_relu4')
#utils.add_activation_summary(h_relu4)
W_5 = utils.weight_variable([5, 5, num_channels, 64 * gen_dim / 16],
name="g_weights5")
b_5 = utils.bias_variable([num_channels], name="g_bias5")
deconv_shape = tf.pack([tf.shape(h_relu4)[0], image_size, image_size, num_channels])
h_conv_t5 = utils.conv2d_transpose_strided(h_relu4, W_5, b_5,
output_shape=deconv_shape)
generated_image = tf.nn.tanh(h_conv_t5, name='generated_image')
return generated_image
'''
'''
def discriminatorD(self,input_images,reuse=False, is_phase_train=True):
##disD
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
gen_dim=16
batch_size,img_size,_,num_channels = self.input_shape
W_conv0 = utils.weight_variable([5, 5, num_channels, 64 * 1], name="d_weights0")
b_conv0 = utils.bias_variable([64 * 1], name="d_bias0")
h_conv0 = utils.conv2d_strided(input_images, W_conv0, b_conv0)
h_bn0 = h_conv0 # utils.batch_norm(h_conv0, 64 * 1, is_phase_train, scope="d_bnorm0")
h_relu0 = utils.leaky_relu(h_bn0, 0.2, name="d_relu0")
#utils.add_activation_summary(h_relu0)
W_conv1 = utils.weight_variable([5, 5, 64 * 1, 64 * 2], name="d_weights1")
b_conv1 = utils.bias_variable([64 * 2], name="d_bias1")
h_conv1 = utils.conv2d_strided(h_relu0, W_conv1, b_conv1)
h_bn1 = utils.batch_norm(h_conv1, 64 * 2, is_phase_train, scope="d_bnorm1")
h_relu1 = utils.leaky_relu(h_bn1, 0.2, name="d_relu1")
#utils.add_activation_summary(h_relu1)
W_conv2 = utils.weight_variable([5, 5, 64 * 2, 64 * 4], name="d_weights2")
b_conv2 = utils.bias_variable([64 * 4], name="d_bias2")
h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
h_bn2 = utils.batch_norm(h_conv2, 64 * 4, is_phase_train, scope="d_bnorm2")
h_relu2 = utils.leaky_relu(h_bn2, 0.2, name="d_relu2")
#utils.add_activation_summary(h_relu2)
W_conv3 = utils.weight_variable([5, 5, 64 * 4, 64 * 8], name="d_weights3")
b_conv3 = utils.bias_variable([64 * 8], name="d_bias3")
h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
h_bn3 = utils.batch_norm(h_conv3, 64 * 8, is_phase_train, scope="d_bnorm3")
h_relu3 = utils.leaky_relu(h_bn3, 0.2, name="d_relu3")
#utils.add_activation_summary(h_relu3)
shape = h_relu3.get_shape().as_list()
h_3 = tf.reshape(h_relu3, [batch_size, (img_size // 16)*(img_size // 16)*shape[3]])
W_4 = utils.weight_variable([h_3.get_shape().as_list()[1], 1], name="W_4")
b_4 = utils.bias_variable([1], name="d_bias4")
h_4 = tf.matmul(h_3, W_4) + b_4
return tf.nn.sigmoid(h_4), h_4,h_relu3
'''
|
|
"""Provide methods to bootstrap a Home Assistant instance."""
import logging
import logging.handlers
import os
import sys
from time import time
from collections import OrderedDict
from typing import Any, Optional, Dict
import voluptuous as vol
from homeassistant import (
core, config as conf_util, config_entries, components as core_components,
loader)
from homeassistant.components import persistent_notification
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.setup import async_setup_component
from homeassistant.util.logging import AsyncHandler
from homeassistant.util.package import async_get_user_site, is_virtual_env
from homeassistant.util.yaml import clear_secret_cache
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = 'home-assistant.log'
# hass.data key for logging information.
DATA_LOGGING = 'logging'
FIRST_INIT_COMPONENT = {'system_log', 'recorder', 'mqtt', 'mqtt_eventstream',
'logger', 'introduction', 'frontend', 'history'}
def from_config_dict(config: Dict[str, Any],
hass: Optional[core.HomeAssistant] = None,
config_dir: Optional[str] = None,
enable_log: bool = True,
verbose: bool = False,
skip_pip: bool = False,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
    Dynamically loads required components and their dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
if config_dir is not None:
config_dir = os.path.abspath(config_dir)
hass.config.config_dir = config_dir
if not is_virtual_env():
hass.loop.run_until_complete(
async_mount_local_lib_path(config_dir))
# run task
hass = hass.loop.run_until_complete(
async_from_config_dict(
config, hass, config_dir, enable_log, verbose, skip_pip,
log_rotate_days, log_file, log_no_color)
)
return hass
async def async_from_config_dict(config: Dict[str, Any],
hass: core.HomeAssistant,
config_dir: Optional[str] = None,
enable_log: bool = True,
verbose: bool = False,
skip_pip: bool = False,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
    Dynamically loads required components and their dependencies.
This method is a coroutine.
"""
start = time()
if enable_log:
async_enable_logging(hass, verbose, log_rotate_days, log_file,
log_no_color)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning("Skipping pip installation of required modules. "
"This may cause issues")
core_config = config.get(core.DOMAIN, {})
has_api_password = bool(config.get('http', {}).get('api_password'))
trusted_networks = config.get('http', {}).get('trusted_networks')
try:
await conf_util.async_process_ha_core_config(
hass, core_config, has_api_password, trusted_networks)
except vol.Invalid as config_err:
conf_util.async_log_exception(
config_err, 'homeassistant', core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error("Home Assistant core failed to initialize. "
"Further initialization aborted")
return None
await hass.async_add_executor_job(
conf_util.process_ha_config_upgrade, hass)
# Make a copy because we are mutating it.
config = OrderedDict(config)
# Merge packages
conf_util.merge_packages_config(
hass, config, core_config.get(conf_util.CONF_PACKAGES, {}))
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
# Filter out the repeating and common config section [homeassistant]
components = set(key.split(' ')[0] for key in config.keys()
if key != core.DOMAIN)
components.update(hass.config_entries.async_domains())
# Resolve all dependencies of all components.
for component in list(components):
try:
components.update(loader.component_dependencies(hass, component))
except loader.LoaderError:
# Ignore it, or we'll break startup
# It will be properly handled during setup.
pass
# setup components
res = await core_components.async_setup(hass, config)
if not res:
_LOGGER.error("Home Assistant core failed to initialize. "
"Further initialization aborted")
return hass
await persistent_notification.async_setup(hass, config)
_LOGGER.info("Home Assistant core initialized")
# stage 1
for component in components:
if component not in FIRST_INIT_COMPONENT:
continue
hass.async_create_task(async_setup_component(hass, component, config))
await hass.async_block_till_done()
# stage 2
for component in components:
if component in FIRST_INIT_COMPONENT:
continue
hass.async_create_task(async_setup_component(hass, component, config))
await hass.async_block_till_done()
stop = time()
_LOGGER.info("Home Assistant initialized in %.2fs", stop-start)
# TEMP: warn users for invalid slugs
# Remove after 0.94 or 1.0
if cv.INVALID_SLUGS_FOUND or cv.INVALID_ENTITY_IDS_FOUND:
msg = []
if cv.INVALID_ENTITY_IDS_FOUND:
msg.append(
"Your configuration contains invalid entity ID references. "
"Please find and update the following. "
"This will become a breaking change."
)
msg.append('\n'.join('- {} -> {}'.format(*item)
for item
in cv.INVALID_ENTITY_IDS_FOUND.items()))
if cv.INVALID_SLUGS_FOUND:
msg.append(
"Your configuration contains invalid slugs. "
"Please find and update the following. "
"This will become a breaking change."
)
msg.append('\n'.join('- {} -> {}'.format(*item)
for item in cv.INVALID_SLUGS_FOUND.items()))
hass.components.persistent_notification.async_create(
'\n\n'.join(msg), "Config Warning", "config_warning"
)
# TEMP: warn users of invalid extra keys
# Remove after 0.92
if cv.INVALID_EXTRA_KEYS_FOUND:
msg = []
msg.append(
"Your configuration contains extra keys "
"that the platform does not support (but were silently "
"accepted before 0.88). Please find and remove the following."
"This will become a breaking change."
)
msg.append('\n'.join('- {}'.format(it)
for it in cv.INVALID_EXTRA_KEYS_FOUND))
hass.components.persistent_notification.async_create(
'\n\n'.join(msg), "Config Warning", "config_warning"
)
return hass
def from_config_file(config_path: str,
hass: Optional[core.HomeAssistant] = None,
verbose: bool = False,
skip_pip: bool = True,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False)\
-> Optional[core.HomeAssistant]:
"""Read the configuration file and try to start all the functionality.
    Will add functionality to the 'hass' parameter if given;
    otherwise a new Home Assistant object is instantiated.
"""
if hass is None:
hass = core.HomeAssistant()
# run task
hass = hass.loop.run_until_complete(
async_from_config_file(
config_path, hass, verbose, skip_pip,
log_rotate_days, log_file, log_no_color)
)
return hass
async def async_from_config_file(config_path: str,
hass: core.HomeAssistant,
verbose: bool = False,
skip_pip: bool = True,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False)\
-> Optional[core.HomeAssistant]:
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter.
This method is a coroutine.
"""
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
if not is_virtual_env():
await async_mount_local_lib_path(config_dir)
async_enable_logging(hass, verbose, log_rotate_days, log_file,
log_no_color)
try:
config_dict = await hass.async_add_executor_job(
conf_util.load_yaml_config_file, config_path)
except HomeAssistantError as err:
_LOGGER.error("Error loading %s: %s", config_path, err)
return None
finally:
clear_secret_cache()
return await async_from_config_dict(
config_dict, hass, enable_log=False, skip_pip=skip_pip)
@core.callback
def async_enable_logging(hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: Optional[int] = None,
log_file: Optional[str] = None,
log_no_color: bool = False) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s")
datefmt = '%Y-%m-%d %H:%M:%S'
if not log_no_color:
try:
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wraps the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
    # If the above initialization failed for any reason, set up the default
    # formatting. If the above succeeded, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(err_dir, os.W_OK)):
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when='midnight',
backupCount=log_rotate_days) # type: logging.FileHandler
else:
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
async_handler = AsyncHandler(hass.loop, err_handler)
async def async_stop_async_handler(_: Any) -> None:
"""Cleanup async handler."""
logging.getLogger('').removeHandler(async_handler) # type: ignore
await async_handler.async_close(blocking=True)
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_CLOSE, async_stop_async_handler)
logger = logging.getLogger('')
logger.addHandler(async_handler) # type: ignore
logger.setLevel(logging.INFO)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error(
"Unable to set up error log %s (access denied)", err_log_path)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, 'deps')
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
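# Hedged usage sketch (not part of the original module): how a caller might
# bootstrap an instance from a minimal configuration dictionary. The 'demo'
# component and the config values below are illustrative assumptions.
def _example_bootstrap() -> Optional[core.HomeAssistant]:
    """Illustrative only: return a configured (not yet started) instance."""
    example_config = {
        'homeassistant': {'name': 'Example Home', 'time_zone': 'UTC'},
        'demo': {},
    }
    return from_config_dict(example_config)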
|
|
# -*- coding: utf-8 -*-
# Simple Bot (SimpBot)
# Copyright 2016-2017, Ismael Lugo (kwargs)
import re
import time
import logging
from . import requires
from six.moves import queue
from simpbot import modules
from simpbot import envvars
from simpbot import parser
from simpbot import localedata
from simpbot.bottools.dummy import thread
i18n = localedata.get()
logging = logging.getLogger('simpbot')
allow = True
deny = False
locked = 0
ignore = None
class ProccessCommands:
overrest = {
'global': 'over-restriction:deny global',
'module': 'over-restriction:deny module',
'command': 'over-restriction:deny command',
'ig_global': 'over-restriction:ignore global',
'ig_module': 'over-restriction:ignore module',
'ig_command': 'over-restriction:ignore command',
'locked': 'over-restriction:lock user'}
def __init__(self, irc, timeout=60):
self.irc = irc
self.timeout = timeout
self.queue = queue.Queue()
self.loop()
@property
def dbstore(self):
return self.irc.dbstore
@property
def request(self):
return self.irc.request
@staticmethod
def get_command(text, noregex=True, regex=True, first=False, privbot=None):
commands = []
for modname, module in modules.core.items():
for hanname, handler in module.handlers.items():
if handler.regex is None:
if noregex:
commands.append((handler, None))
continue
elif not regex:
continue
elif isinstance(handler.regex, dict):
if 'all' in handler.regex:
result = handler.regex['all'].match(text)
elif privbot is True and 'private' in handler.regex:
result = handler.regex['private'].match(text)
elif privbot is False and 'channel' in handler.regex:
result = handler.regex['channel'].match(text)
else:
continue
else:
result = handler.match(text)
                if result is not None:
commands.append((handler, result))
if first:
return commands
else:
continue
return commands
def put(self, item):
self.queue.put(item)
def get(self):
try:
return self.queue.get(timeout=self.timeout)
except queue.Empty:
return
def check_generic(self, user, instance, level):
res = instance.check(self.irc.servname, user=user.account)
if res == 'allow':
action = allow
elif res == 'deny':
action = deny
elif res == 'ignore':
action = ignore
else:
action = allow
dbuser = self.dbstore.get_user(user.account)
if dbuser is None:
return action
if dbuser.locked():
if not dbuser.isadmin():
return locked
elif dbuser.admin.has_capab(self.overrest['locked']):
return allow
else:
return locked
elif action is True:
return allow
elif action is False:
if not dbuser.isadmin():
return deny
elif dbuser.admin.has_capab(self.overrest[level]):
return allow
else:
return deny
elif action is None:
            if level == 'global':
                capab = 'ig_global'
            elif level == 'module':
                capab = 'ig_module'
            elif level == 'command':
                capab = 'ig_command'
if not dbuser.isadmin():
return ignore
elif dbuser.admin.has_capab(self.overrest[capab]):
return allow
else:
return ignore
def check(self, user, channel, inst, level='global', checkaccount=False):
        # return values (module-level constants):
        #     allow  -> True
        #     deny   -> False
        #     locked -> 0
        #     ignore -> None
res = inst.check(self.irc.servname, user.mask, user.account, channel)
if res == 'allow':
if user.account is not None:
dbuser = self.dbstore.get_user(user.account)
if dbuser is None:
return allow
elif dbuser.locked():
if not dbuser.isadmin():
return locked
elif dbuser.admin.has_capab(self.overrest['locked']):
return allow
else:
return locked
else:
return allow
if not checkaccount:
return allow
self.request.request(user.nick)
if not user.completed:
try:
self.request.user(user.nick, timeout=self.irc.timeout)
except ValueError:
return ignore
if user.account is None:
return allow
return self.check_generic(user, inst, level)
elif res == 'deny':
if user.account is not None:
dbuser = self.dbstore.get_user(user.account)
if dbuser is None:
return deny
elif dbuser.locked():
if not dbuser.isadmin():
return locked
elif dbuser.admin.has_capab(self.overrest['locked']):
return allow
else:
return locked
elif not dbuser.isadmin():
return deny
elif dbuser.admin.has_capab(self.overrest[level]):
return allow
else:
return deny
if not checkaccount:
return deny
self.request.request(user.nick)
if not user.completed:
self.request.user(user.nick)
if user.account is None:
return deny
return self.check_generic(user, inst, level)
elif res == 'ignore':
if user.account is not None:
dbuser = self.dbstore.get_user(user.account)
if dbuser is None:
return ignore
elif dbuser.locked():
if not dbuser.isadmin():
return locked
elif dbuser.admin.has_capab(self.overrest['locked']):
return allow
else:
return locked
                if level == 'global':
                    capab = 'ig_global'
                elif level == 'module':
                    capab = 'ig_module'
                elif level == 'command':
                    capab = 'ig_command'
if not dbuser.isadmin():
return ignore
elif dbuser.admin.has_capab(self.overrest[capab]):
return allow
else:
return ignore
if not checkaccount:
return ignore
self.request.request(user.nick)
if not user.completed:
self.request.user(user.nick)
if user.account is None:
return ignore
return self.check_generic(user, inst, level)
def get_lang(self, user, channel):
if user.account is None:
if channel is not None and self.dbstore.has_chan(channel):
return self.dbstore.get_chan(channel)._lang
else:
return self.irc.default_lang
else:
dbuser = self.dbstore.get_user(user.account)
if dbuser is None:
if channel is not None and self.dbstore.has_chan(channel):
return self.dbstore.get_chan(channel)._lang
else:
return self.irc.default_lang
else:
return dbuser._lang
@thread
def process(self, match, regexonly=False):
self._process(match, regexonly)
def _process(self, match, regexonly=False):
user = self.request.set_user(*match.group('user', 'host', 'nick'))
irc = self.irc
completeline = match.string # lint:ok
channel = None
privbot = False
target = user.nick
sendin = match.group('target')
message = match.group('message')
if user.nick.lower() == irc.nickname.lower():
return
elif sendin.lower() == irc.nickname.lower():
privbot = True
else:
channel = sendin
target = channel
regex = '^({0}([:;, ] ?)|[{1}])(?P<text>.*)'
regex = regex.format(re.escape(irc.nickname), re.escape(irc.prefix))
cmd = message
if privbot:
sre = None
else:
sre = re.match(regex, message, re.IGNORECASE)
if sre:
cmd = sre.group('text')
lang = self.get_lang(user, channel)
status = self.check(user, channel, modules, 'global', True)
if status == locked:
if sre or privbot:
msg = localedata.get(lang)['you are locked']
usr = self.dbstore.get_user(user.account)
dat = time.localtime(usr.status[1])
dat = '%s/%s/%s %s:%s:%s' % (dat.tm_year, dat.tm_mon,
dat.tm_mday, dat.tm_hour, dat.tm_min, dat.tm_sec)
msg = msg % (usr.status[0], dat, usr.status[2])
self.irc.error(user.nick, msg)
return
elif status is deny or status is ignore:
return
_ = parser.replace(self.irc, match)
_.extend(locals())
msg = i18n['command info']
if not regexonly:
            # Process commands that have no regex trigger...
for handler, result in self.get_command(None, regex=False):
msg = msg.format(handler.mod_name, handler.name)
module = handler.module
if module is not None:
status = self.check(user, channel, module, 'module', False)
if status is deny or status is ignore:
continue
status = self.check(user, channel, handler, 'command', False)
if status is deny or status is ignore:
continue
stop = False
for need in handler.need:
watchdog = requires.get(need)
if not watchdog:
logging.error(msg % i18n['invalid requirements'] % need)
continue
if watchdog[0](locals()):
stop = True
break
if stop:
continue
if handler.i18n:
loader = handler.i18n['loader']
locale = loader.getfull(lang, handler.mod_name)
else:
locale = None
handler(self.irc, match, None, target, channel, _, locale)
if handler.rec_form:
abspath = envvars.records.join(handler.fullname)
with open(abspath, 'a') as fp:
fp.write(_(handler.rec_form, include_time=True) + '\n')
if not sre and not privbot:
return
        # Process regex-triggered commands...
        # get_command positional flags below: 0 -> False, 1 -> True
for handler, result in self.get_command(cmd, 0, 1, 1, privbot):
msg = msg.format(handler.mod_name, handler.name)
module = handler.module
if module is not None:
status = self.check(user, channel, module, 'module', False)
if status is deny or status is ignore:
continue
status = self.check(user, channel, handler, 'command', False)
if status is deny or status is ignore:
continue
var = locals()
for need in handler.need:
watchdog = requires.get(need)
if not watchdog:
logging.error(msg % i18n['invalid requirements'] % need)
continue
var['watchdog'] = watchdog
if watchdog[0](var):
return
_.addmatch(result)
channel = var['channel']
if handler.i18n:
loader = handler.i18n['loader']
locale = loader.getfull(lang, handler.mod_name)
else:
locale = None
handler(self.irc, match, result, target, channel, _, locale)
if handler.rec_form:
with open(envvars.records.join(handler.fullname), 'a') as fp:
fp.write(_(handler.rec_form, include_time=True) + '\n')
@thread
def loop(self):
while True:
if self.irc.connection_status == 's':
break
match = self.get()
if match is None:
continue
self.process(match)
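# Hedged usage sketch (not part of the original module): the IRC core is
# expected to create one ProccessCommands per connection and feed it regex
# match objects for incoming messages; 'irc' is an assumed simpbot client
# object exposing .dbstore, .request, .nickname, .prefix and .servname.
def _example_wire_processor(irc):
    """Illustrative only: queue parsed messages for the worker loop."""
    processor = ProccessCommands(irc, timeout=60)
    # a match produced by the core's PRIVMSG regex would be queued like this:
    # processor.put(privmsg_match)
    return processor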
|
|
from input import parse
from word2vec1 import word2vec, dictionaries
from collections import namedtuple,OrderedDict
import numpy as np
import json
import gensim
import copy
import logging
def training(fn, wordvecpath):
if not wordvecpath:
word2vec(fn)
wordvecpath = './tmpdata/vecs.bin'
ndeprel = dictionaries(fn)
X_lengths = np.array([])
Arcs = namedtuple('Arcs', ['headid', 'headform', 'tailid', 'tailform', 'deprel'])
Transition = namedtuple('Transition', ['transition', 'label'])
with open('./tmpdata/deprel.json', 'r') as fp:
dictionary2 = json.load(fp)
f = open(fn, 'r')
data = f.read()
mode = gensim.models.Word2Vec.load(wordvecpath)
model = mode.wv
vecdims = mode.layer1_size
vecdims = vecdims+11+2+2
del mode
Y2 = np.zeros([1, 4+ndeprel])
X2 = np.zeros([1, vecdims*5+4])
sid=0
buffer1 = []
stack = []
arcs = []
listofTransitions = []
for sent in parse(data):
del buffer1[:]
del stack[:]
del arcs[:]
buffer1 = copy.deepcopy(sent)
buffer1.append(OrderedDict(
[("id", 0), ("form", 'root'), ("lemma", 'root'), ("upostag", 'root'), ("xpostag", 'root'), ("feats", 'root'), ("head", -1),
("deprel", 'root'), ("deps", 'root'), ("misc", 'root'), ]))
flag=True
for word in sent:
if not pcheck(word['id'],word['head'],sent):
del buffer1[:]
flag=False
break
i=0
while buffer1:
transi, label = oracle(stack, buffer1, arcs)
trans = Transition(transi, label)
i+=1
X,t = nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel)
X2 = np.vstack((X2,X))
Y2 = np.vstack((Y2,t))
if trans.transition == 0: # SHIFT
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
elif trans.transition == 1: # REDUCE
del stack[0]
listofTransitions.append(trans.transition)
            elif trans.transition == 2: # LEFT ARC
arcs.append(Arcs(buffer1[0]['id'], buffer1[0]['form'], stack[0]['id'], stack[0]['form'], trans.label))
del stack[0]
listofTransitions.append(trans.transition)
elif trans.transition == 3: # RIGHT ARC
arcs.append(Arcs(stack[0]['id'], stack[0]['form'], buffer1[0]['id'], buffer1[0]['form'], trans.label))
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
if flag : X_lengths = np.append(X_lengths, i)
sid+=1
logging.info ('vectorising sentence : '+str(sid))
X2 = np.delete(X2, 0, axis=0)
Y2 = np.delete(Y2, 0, axis=0)
return X2,Y2,X_lengths
def oracle(stack, buffer1, arcs):
global i
if not stack:
return 0, ""
if not buffer1[0] :
del buffer1[:]
i-=1
return 1, ""
s0id = stack[0]['id']
s0head = stack[0]['head']
b0id = buffer1[0]['id']
b0head = buffer1[0]['head']
if b0id == s0head:
return 2, stack[0]['deprel']
elif s0id == b0head:
return 3, buffer1[0]['deprel']
elif head(stack[0], arcs) != -1 and b0head<s0head :
return 1, ""
return 0, ""
def head(stackc, arcs):
for a in arcs:
if a.headid == stackc['head']:
return a.headid
return -1
def nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel):
mones = [-1] * vecdims
ones = [1] * (vecdims-4)
zeros = [0] * (vecdims-15)
dep = [-1]*4
sentenc = np.array([])
words=["_","_","_","_","_"]
if stack:
words.pop(0)
words.insert(0,stack[0])
dep[0] = iofdeprel(rightchild(stack[0], arcs))
dep[1] = iofdeprel(leftchild(stack[0], arcs))
if len(stack) > 1:
words.pop(1)
words.insert(1,stack[1])
if buffer1:
words.pop(2)
words.insert(2,buffer1[0])
dep[2] = iofdeprel(rightchild(buffer1[0], arcs))
dep[3] = iofdeprel(leftchild(buffer1[0], arcs))
if len(buffer1) > 1:
words.pop(3)
words.insert(3,buffer1[1])
if len(buffer1) > 2:
words.pop(4)
words.insert(4, buffer1[2])
for w in words:
if w == '_':
sentenc = np.hstack((sentenc, mones))
elif w['form'] == 'root':
sentenc = np.hstack((sentenc, ones, D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] in model.vocab:
sentenc = np.hstack((sentenc, model[w['form']], featureids(w['feats'], dictionary2),D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] is not None:
sentenc = np.hstack((sentenc, zeros, featureids(w['feats'], dictionary2), D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
else:
sentenc = np.hstack((sentenc, mones))
sentenc = np.hstack((sentenc,dep))
t = trans.transition
if t > 1:
t = np.hstack((np.eye(4)[t], np.eye(ndeprel)[iofdeprel(trans.label)-1]))
else:
t = np.hstack((np.eye(4)[t], np.zeros(ndeprel)))
return sentenc, t
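# Target layout produced by nn(): the first four entries one-hot encode the
# transition (0 SHIFT, 1 REDUCE, 2 LEFT ARC, 3 RIGHT ARC); the remaining
# ndeprel entries one-hot encode the dependency label and stay all-zero for
# SHIFT and REDUCE, which carry no label.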
def D(key, dic):
if dic.get(key): return dic[key]
    return -1
def featureids(feats1, dic):
f=[-1]*11
if feats1['cat'] in dic: f[0] = dic[feats1['cat']]
if feats1['gen'] in dic: f[1] = dic[feats1['gen']]
if feats1['num'] in dic: f[2] = dic[feats1['num']]
if feats1['pers'] in dic: f[3] = dic[feats1['pers']]
if feats1['case'] in dic: f[4] = dic[feats1['case']]
if feats1['vib'] in dic: f[5] = dic[feats1['vib']]
if feats1['tam'] in dic: f[6] = dic[feats1['tam']]
if feats1['chunkId'] in dic: f[7] = dic[feats1['chunkId']]
if feats1['chunkType'] in dic: f[8] = dic[feats1['chunkType']]
if feats1['stype'] in dic: f[9] = dic[feats1['stype']]
    if feats1['voicetype'] in dic: f[10] = dic[feats1['voicetype']]
return f
def rightchild(stackc, arcs):
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid > stackc['id']:
if id==-1 :
id=a.tailid
deprel=a.deprel
else :
if id < a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def leftchild(stackc, arcs):
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid < stackc['id'] :
            if id == -1:
id = a.tailid
deprel = a.deprel
else :
if id > a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def iofdeprel(ele):
with open('./tmpdata/deprel.json', 'r') as fp:
dict = json.load(fp)
if ele in dict: return dict[ele]
return -1
def pcheck(id1,id2,sentence):
flag=True
if id2>id1:
for words in sentence[id1:id2-1]:
if words['head'] > id2 or words['head'] < id1:
flag=False
break
if id1>id2:
for words in sentence[id2:id1-1]:
if words['head'] > id1 or words['head'] < id2 :
flag=False
break
return flag
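# Hedged demo (not part of the original module): the oracle on a toy
# configuration. Token dicts mirror the CoNLL-U fields used above; only
# 'id', 'head' and 'deprel' matter here.
if __name__ == '__main__':
    toy_stack = [{'id': 1, 'head': 2, 'deprel': 'nsubj'}]
    toy_buffer = [{'id': 2, 'head': 0, 'deprel': 'root'}]
    # the buffer front (id 2) is the head of the stack top, so the oracle
    # emits a LEFT ARC labelled with the stack top's relation
    print(oracle(toy_stack, toy_buffer, []))  # -> (2, 'nsubj')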
|
|
#!/usr/bin/env python3
import argparse
import contextlib
import io
import math
import os
import os.path
import re
import string
import subprocess
import sys
import signal
import tempfile
import threading
if sys.platform == 'win32':
import winreg
else:
signal.signal(signal.SIGPIPE,signal.SIG_DFL)
@contextlib.contextmanager
def stdout_redirected(to=os.devnull):
fd = sys.stdout.fileno()
with os.fdopen(os.dup(fd), 'w') as old_stdout:
sys.stdout.close()
os.dup2(to.fileno(), fd)
try:
yield # allow code to be run with the redirected stdout
finally:
os.dup2(old_stdout.fileno(), fd)
sys.stdout = os.fdopen(fd, 'w')
def capture_output(func,args):
tf = tempfile.TemporaryFile()
with stdout_redirected(tf):
func(*args)
tf.seek(0)
return str(tf.read(), encoding='UTF-8').split('\r\n')
class CstudException(Exception):
def __init__(self,code,value):
self.code = code
self.value = value
def __str__(self):
return "cstud Error #{0}: {1}".format(self.code,self.value)
def simple_connect(instance_name, host,super_server_port,web_server_port,namespace,username,password,force=False,verbosity=False):
instance = InstanceDetails(instance_name, host, super_server_port, web_server_port)
bindings = getPythonBindings(instance.latest_location, force=force)
credentials = Credentials(username,password,namespace)
cacheDatabase = Cache(bindings, credentials, instance, verbosity=verbosity)
return cacheDatabase
def info_(bindings_location=False, **kwargs):
if bindings_location:
details = InstanceDetails()
print(details.latest_location)
class InstanceDetails:
def __init__(self, instanceName=None, host=None, super_server_port=None, web_server_port=None):
localInstances = self.getLocalInstances()
if not instanceName and not host and not super_server_port and not web_server_port:
instanceName = self.getDefaultCacheInstanceName()
if instanceName:
instance = self.getThisInstance(localInstances,instanceName)
host = '127.0.0.1'
super_server_port = instance['super_server_port']
web_server_port = instance['web_server_port']
self.latest_location = self.getLatestLocation(localInstances)
self.host = host
self.super_server_port = int(super_server_port)
self.web_server_port = int(web_server_port)
def iterateOverKey(self,key):
        i = 0
subKeys = []
while True:
try:
subKey = winreg.EnumKey(key, i)
subKeys.append(subKey)
i += 1
except WindowsError:
break
return subKeys
def isWin64(self):
return 'PROGRAMFILES(x86)' in os.environ
def getLocalInstances(self):
if sys.platform == 'win32':
cacheSubKeyName = 'SOFTWARE\\{0}Intersystems\\Cache'.format('Wow6432Node\\' if self.isWin64() else '')
configsSubKeyName = '{0}\\Configurations'.format(cacheSubKeyName)
serversSubKeyName = '{0}\\Servers'.format(cacheSubKeyName)
configsSubKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,configsSubKeyName)
localInstances = []
            for instanceName in self.iterateOverKey(configsSubKey):
                # use a fresh dict per instance so the list entries do not alias
                instance = {}
                instance['name'] = instanceName
instanceSubKeyName = '{0}\\{1}'.format(configsSubKeyName,instanceName)
instanceSubKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,instanceSubKeyName)
instance['version'] = winreg.QueryValueEx(instanceSubKey,'Version')[0]
directorySubKeyName = '{0}\\Directory'.format(instanceSubKeyName,instanceName)
instance['location'] = winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE,directorySubKeyName)
preferredServerSubKeyName = '{0}\\Manager\\PreferredServer'.format(instanceSubKeyName,instanceName)
preferredServerName = winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE,preferredServerSubKeyName)
if not hasattr(self,'defaultServerName'): self.defaultServerName = preferredServerName #cheating
serverSubKeyName = '{0}\\{1}'.format(serversSubKeyName,preferredServerName)
serverSubKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,serverSubKeyName)
instance['super_server_port'] = winreg.QueryValueEx(serverSubKey,'Port')[0]
instance['web_server_port'] = winreg.QueryValueEx(serverSubKey,'WebServerPort')[0]
localInstances.append(instance)
return localInstances
else:
try:
ccontrol = subprocess.Popen(['ccontrol', 'qlist'],stdout=subprocess.PIPE)
stdout = ccontrol.communicate()[0]
instanceStrings = stdout.decode('UTF-8').split('\n')
localInstances = []
for instanceString in instanceStrings:
if instanceString:
instanceArray = instanceString.split('^')
trueInstanceArray = instanceArray[0:3] + instanceArray[5:7]
instance = dict(zip(['name','location','version','super_server_port','web_server_port'],trueInstanceArray))
localInstances += [instance]
return localInstances
except FileNotFoundError:
raise CstudException(103,"ccontrol not on PATH")
except:
raise CstudException(201,"ccontrol qlist output not expected")
def getThisInstance(self,localInstances,instanceName):
for instance in localInstances:
if instance['name'] == instanceName.upper():
return instance
else:
raise CstudException(102,"Invalid Instance Name: {0}".format(instanceName.upper()))
def getLatestLocation(self,localInstances):
maxVersion = 0
maxLocation = ""
for instance in localInstances:
versionInt = self.convertVersionToInteger(instance['version'])
if versionInt > maxVersion:
maxVersion = versionInt
maxLocation = instance['location']
return maxLocation
def getDefaultCacheInstanceName(self):
if sys.platform == 'win32':
return self.defaultServerName
else:
try:
ccontrol = subprocess.Popen(['ccontrol','default'],stdout=subprocess.PIPE)
stdout = ccontrol.communicate()[0]
return stdout.decode('UTF-8').split('\n')[0]
except FileNotFoundError:
raise CstudException(103,"ccontrol not on PATH")
def convertVersionToInteger(self,version):
splitVersion = version.split('.')
splitVersion += ['']*(5-len(splitVersion))
paddedArray = [num.zfill(4) for num in splitVersion]
return int(''.join(paddedArray))
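    # Worked example (illustrative): convertVersionToInteger('2015.2.1')
    # splits into ['2015', '2', '1'], pads the list to five fields, zero-fills
    # each field to four digits and concatenates them, yielding
    # int('20150002000100000000'), so newer releases always compare greater.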
class Credentials:
def __init__(self, username, password, namespace):
self.username = username
self.password = password
self.namespace = namespace
def getPythonBindingsLEGACY(latest_location,force):
#Returns True if it was not already there, false if it was
def addToEnvPath(env,location):
changedIt = True
if not os.environ.get(env):
os.environ[env] = location
elif not location in os.environ.get(env):
os.environ[env] += ":"+location
else:
changedIt = False
return changedIt
binDirectory = os.path.join(latest_location,'bin')
if sys.platform.startswith('linux'):
libraryPath = 'LD_LIBRARY_PATH'
elif sys.platform == 'darwin':
libraryPath = 'DYLD_LIBRARY_PATH'
else:
sys.exit("Unsupported Platform")
rerun = addToEnvPath(libraryPath,binDirectory) and addToEnvPath('PATH',binDirectory)
if rerun:
os.execve(os.path.realpath(__file__), sys.argv, os.environ)
try:
if force:
raise ImportError
import intersys.pythonbind3
except ImportError:
try:
installerDirectory = os.path.join(latest_location, 'dev', 'python')
installerPath = os.path.join(installerDirectory, 'setup3.py')
installerProcess = subprocess.Popen([sys.executable, installerPath, 'install'], cwd=installerDirectory, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)
installerProcess.communicate(bytes(latest_location, 'UTF-8'))
import intersys.pythonbind3
except Exception as ex:
raise CstudException(301, 'Error installing Python Bindings: {0}'.format(ex))
return intersys.pythonbind3
def getPythonBindings(latest_location,force):
binDirectory = os.path.join(latest_location,'bin')
files = ['libcbind', 'libcppbind','libcacheodbciw']
newDir = ''
if sys.platform.startswith('linux'):
newDir = '/usr/lib64'
files = [file+".so" for file in files]
elif sys.platform == 'darwin':
newDir = os.path.join(os.environ['HOME'], 'lib')
files = [file+".dylib" for file in files]
else:
sys.exit("Unsupported Platform")
if not os.path.isdir(newDir):
os.mkdir(newDir)
for file in files:
newPath = os.path.join(newDir,file)
if force or not os.path.isfile(newPath):
if os.path.isfile(newPath):
os.unlink(newPath)
os.symlink(os.path.join(binDirectory,file), newPath)
try:
if force:
raise ImportError
import intersys.pythonbind3
except ImportError:
# try:
installerDirectory = os.path.join(latest_location, 'dev', 'python')
installerPath = os.path.join(installerDirectory, 'setup3.py')
installerProcess = subprocess.Popen([sys.executable, installerPath, 'install'], cwd=installerDirectory, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)
installerProcess.communicate(bytes(latest_location, 'UTF-8'))
import intersys.pythonbind3
# except Exception as ex:
# raise CstudException(301, 'Error installing Python Bindings: {0}'.format(ex))
return intersys.pythonbind3
class Cache:
def __init__(self, bindings, credentials, instanceDetails,verbosity=0):
self.pythonbind = bindings
self.credentials = credentials
self.instance = instanceDetails
url = '%s[%i]:%s' % (self.instance.host, self.instance.super_server_port, self.credentials.namespace)
conn = bindings.connection()
try:
conn.connect_now(url, self.credentials.username, self.credentials.password, None)
except Exception as ex:
raise CstudException(401, 'Unable to connect to Cache: {0}'.format(ex))
self.database = bindings.database(conn)
self.verbosity = verbosity
def deleteRoutine(self,routineName):
self.database.run_class_method('%Library.Routine',"Delete",[routineName])
def deleteClass(self,className):
flags = "d" if self.verbosity else "-d"
self.database.run_class_method('%SYSTEM.OBJ', 'Delete', [className,flags])
def routineExists(self,routineName):
exists = self.database.run_class_method('%Library.Routine','Exists',[routineName])
return exists
def classExists(self,className):
exists = self.database.run_class_method('%Dictionary.ClassDefinition', '%ExistsId', [className])
return exists
def classNameForText(self,text):
match = re.search(r'^Class\s',text,re.MULTILINE)
if match:
classNameIndexBegin = match.end()
classNameIndexEnd = text.find(' ', classNameIndexBegin)
className = text[classNameIndexBegin:classNameIndexEnd]
return className
return None
def uploadRoutine(self,text):
match = re.search(r'^(#; )?(?P<routine_name>(\w|%|\.)+)',text,re.MULTILINE)
routineName = match.group('routine_name')
# if routineExists(database,routineName):
# if verbose: print('Deleting %s' % routineName)
# deleteRoutine(database,routineName)
routine = self.database.run_class_method('%Library.Routine', '%New', [routineName])
crlfText = text.replace('\n','\r\n')
self.writeStream(routine,crlfText)
if self.verbosity: print('Uploading %s' % routineName)
flags = "ckd" if self.verbosity else "ck-d"
routine.run_obj_method('Save',[])
routine.run_obj_method('Compile',[flags])
def uploadClass(self,text):
stream = self.database.run_class_method('%Stream.GlobalCharacter', '%New', [])
name = self.classNameForText(text)
if self.classExists(name):
self.deleteClass(name)
crlfText = text.replace('\n','\r\n')
self.writeStream(stream,crlfText)
result = self.database.run_class_method('%Compiler.UDL.TextServices', 'SetTextFromStream',[None, name, stream])
if self.verbosity: print('Uploading %s: %s' % (name, result))
flags = "ckd" if self.verbosity else "ck-d"
self.database.run_class_method('%SYSTEM.OBJ','Compile',[name,flags])
def uploadOnce(self,text):
name = self.classNameForText(text)
if name:
self.uploadClass(text)
else:
self.uploadRoutine(text)
def upload_(self,files):
for openFile in files:
text = openFile.read()
self.uploadOnce(text)
def readStream(self,stream):
total = ""
while True:
content = stream.run_obj_method('Read',[])
if content:
if type(content) != str:
content = content.decode('utf-8')
lfcontent = content.replace('\r\n','\n')
total = total + lfcontent
else:
break
return total
def writeStream(self,stream,data):
for chunk in self.chunkString(data):
stream.run_obj_method('Write',[chunk])
def chunkString(self,string,chunkSize=32000):
return [string[i:i+chunkSize] for i in range(0, len(string), chunkSize)]
def downloadClass(self,className):
stream = self.database.run_class_method('%Stream.GlobalCharacter', '%New', [])
argList = [None,className,stream] #the last None is byref
self.database.run_class_method('%Compiler.UDL.TextServices', 'GetTextAsStream', argList)
outputStream = argList[2]
return self.readStream(outputStream)
def downloadRoutine(self,routineName):
routine = self.database.run_class_method('%Library.Routine','%OpenId',[routineName])
return self.readStream(routine)
def downloadOnce(self,name):
content = self.downloadClass(name)
if not content:
content = self.downloadRoutine(name)
return content
def download_(self,names):
for name in names:
print(self.downloadOnce(name))
def executeCode(self,code):
stream = self.database.run_class_method('%Stream.GlobalCharacter', '%New', [])
className = "ISCZZZZZZZZZZZZZZCSTUD.cstud"
methodName = "xecute"
classCode = """Class {0} Extends %RegisteredObject {{
ClassMethod {1}() {{
{2}
}}
}}
""".format(className,methodName,code).replace("\n","\r\n")
if self.classExists(className):
self.deleteClass(className)
self.writeStream(stream,classCode)
self.database.run_class_method('%Compiler.UDL.TextServices', 'SetTextFromStream',[None, className, stream])
flags = "ckd" if self.verbosity else "ck-d"
self.database.run_class_method('%SYSTEM.OBJ','Compile',[className,flags])
self.database.run_class_method(className,methodName,[])
print()
def executeFile(self,theFile):
self.executeCode(theFile.read())
def execute_(self,inline,files,stdin):
if inline:
self.executeCode(inline)
if stdin:
inlineCode = sys.stdin.read().replace("\n","\r\n")
self.executeCode(inlineCode)
for f in files:
print(self.executeFile(f))
def editOnce(self,name):
initialContent = self.downloadOnce(name)
editor = subprocess.Popen([os.environ['EDITOR']], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
finalContentTuple = editor.communicate(bytes(initialContent,'UTF-8'))
finalContent = finalContentTuple[0].decode('UTF-8')
self.uploadOnce(finalContent)
def edit_(self,names):
threads = [threading.Thread(target=self.editOnce, args=[name]) for name in names]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
### accepts runQuery("SELECT * FROM SAMPLE.PERSON") or runQuery("%SYS.Namespace::List")
def runQuery(self, sqlOrName):
query = self.pythonbind.query(self.database)
if '::' in sqlOrName:
query.prepare_class(*sqlOrName.split('::'))
else:
query.prepare(sqlOrName)
sql_code = query.execute()
while True:
cols = query.fetch([None])
if len(cols) == 0: break
yield cols
def listClasses(self,system):
sql = 'SELECT Name FROM %Dictionary.ClassDefinition'
[print(col[0]) for col in self.runQuery(sql)]
def listRoutines(self,type,system):
sql = "SELECT Name FROM %Library.Routine_RoutineList('*.{0},%*.{0}',1,0)".format(type)
[print(col[0]) for col in self.runQuery(sql)]
def listNamespaces(self):
sql = '%SYS.Namespace::List'
[print(col[0]) for col in self.runQuery(sql)]
def list_(self,listFunction,types=None,system=False):
if listFunction == 'classes':
self.listClasses(system)
elif listFunction == 'routines':
if types == None:
types = ['mac','int','inc','bas']
for theType in types:
self.listRoutines(theType,system)
elif listFunction == 'namespaces':
self.listNamespaces()
def export_(self,names,output=None):
namesWithCommas = ",".join(names)
flags = 'd' if self.verbosity else '-d'
args = [namesWithCommas, None, flags]
self.database.run_class_method('%SYSTEM.OBJ', 'ExportToStream', args)
resultStream = args[1]
print(self.readStream(resultStream), file=output)
def import_(self,files):
for file_ in files:
text = file_.read()
stream = self.database.run_class_method('%Stream.GlobalCharacter','%New',[])
self.writeStream(stream,text)
flags = 'ckd' if self.verbosity else 'ck-d'
self.database.run_class_method('%SYSTEM.OBJ', 'LoadStream', [stream, flags])
def loadWSDLFromURL(self,url):
reader = self.database.run_class_method('%SOAP.WSDL.Reader','%New',[])
reader.run_obj_method('Process',[url])
def loadWSDL_(self,urls):
for url in urls:
self.loadWSDLFromURL(url)
def findInFiles(self,term,fileFilter='*.*',system=True,whole_words=False,case_sensitive=False):
args = [term, fileFilter, system, whole_words, case_sensitive, 10000]
results = capture_output(self.database.run_class_method, ['%Studio.Project','FindInFiles', args])
[print(line) for line in results[2:-2]]
def findInDictionary(self,term,table,class_context=None):
sql = "SELECT parent FROM %Dictionary.{0} WHERE Name = '{1}'".format(table,term)
if class_context:
sql += " AND parent LIKE '%{0}'".format(class_context)
[print(row[0]) for row in self.runQuery(sql)]
def find_(self,term,type=None,class_context=None):
if not type:
self.findInFiles(term)
elif type == 'property':
self.findInDictionary(term,'CompiledProperty',class_context)
elif type == 'parameter':
self.findInDictionary(term,'CompiledParameter',class_context)
elif type == 'method':
self.findInDictionary(term,'CompiledMethod',class_context)
elif type == 'class':
self.findInDictionary(term,'CompiledClass')
elif type == 'routine':
pass
elif type == 'macro':
pass
elif type == 'table':
pass
def __main():
mainParser = argparse.ArgumentParser()
mainParser.add_argument('-V', '--verbose', action='store_const', const=1, help='output details')
mainParser.add_argument('-U', '--username', type=str, default='_SYSTEM')
mainParser.add_argument('-P', '--password', type=str, default='SYS')
mainParser.add_argument('-N', '--namespace', type=str, default='USER')
specificationGroup = mainParser.add_mutually_exclusive_group()
specificationGroup.add_argument('-I', '--instance', type=str, default=None)
locationGroup = specificationGroup.add_argument_group('location')
locationGroup.add_argument('-H', '--host', type=str)
locationGroup.add_argument('-S', '--super-server-port', type=int)
locationGroup.add_argument('-W', '--web-server-port', type=int)
mainParser.add_argument('--force-install', action='store_true')
subParsers = mainParser.add_subparsers(help='cstud commands',dest='function')
uploadParser = subParsers.add_parser('upload', help='Upload and compile classes or routines')
uploadParser.add_argument("files", metavar="F", type=argparse.FileType('r'), nargs="+", help="files to upload")
downloadParser = subParsers.add_parser('download', help='Download classes or routines')
downloadParser.add_argument("names", metavar="N", type=str, nargs="+", help="Classes or routines to download")
    importParser = subParsers.add_parser('import', help='Import and compile classes or routines from an export stream')
importParser.add_argument("files", metavar="F", type=argparse.FileType('r'), nargs="+", help="Files to import")
    exportParser = subParsers.add_parser('export', help='Export classes or routines')
exportParser.add_argument("-o", "--output", type=argparse.FileType('w'), help='File to output to. STDOUT if not specified.')
exportParser.add_argument("names", metavar="N", type=str, nargs="+", help="Classes or routines to export")
executeParser = subParsers.add_parser('execute', help='Execute arbitrary COS code')
    executeParser.add_argument('-i', '--inline', type=str, help='Execute the given code string')
executeParser.add_argument('-', dest="stdin", action='store_true', help='Take code from stdin')
executeParser.add_argument("files", metavar="F", type=str, nargs="*", help="Execute routine specified in a file")
    editParser = subParsers.add_parser('edit', help='Edit classes or routines in $EDITOR and re-upload them')
editParser.add_argument("names", metavar="N", type=str, nargs="+", help="Classes or routines to edit")
listParser = subParsers.add_parser('list', help='list server details')
listSubParsers = listParser.add_subparsers(help='list options',dest='listFunction')
    listClassesParser = listSubParsers.add_parser('classes', help='List all classes in the namespace')
    listClassesParser.add_argument('-s','--noSystem',action='store_false', help='hide system classes',dest="system")
    listRoutinesParser = listSubParsers.add_parser('routines', help='List all routines in the namespace')
    listRoutinesParser.add_argument('-t','--type',action='append',help='mac|int|obj|inc|bas',dest="types",choices=['obj','mac','int','inc','bas'])
    listRoutinesParser.add_argument('-s','--noSystem',action='store_false', help='hide system routines',dest="system")
    listNamespacesParser = listSubParsers.add_parser('namespaces', help='List all namespaces in the instance')
loadWSDLParser = subParsers.add_parser('loadWSDL', help='Load a WSDL from a URL or a file, and create classes')
loadWSDLParser.add_argument('urls', nargs='+', type=str, help='specify a URL')
infoParser = subParsers.add_parser('info', help='Get configuration information')
infoParser.add_argument('-l','--bindings-location', action='store_true', help='Print location of latest Cache instance installed')
findParser = subParsers.add_parser('find', help='Find things on the server')
findParser.add_argument('-t', '--type', type=str, help='property|parameter|method|class|routine|macro|table or blank for all', choices=['property','parameter','method','class','routine','macro','table'])
    findParser.add_argument('-c', '--class-context', type=str, help='class to search in (applies to property, parameter, and method)')
findParser.add_argument('term', type=str, help='term to search for')
results = mainParser.parse_args()
kwargs = dict(results._get_kwargs())
function = kwargs.pop('function')
if function == 'info':
info_(**kwargs)
else:
database = simple_connect(kwargs.pop('instance'),
kwargs.pop('host'),
kwargs.pop('super_server_port'),
kwargs.pop('web_server_port'),
kwargs.pop('namespace'),
kwargs.pop('username'),
kwargs.pop('password'),
force=kwargs.pop('force_install'),
verbosity=kwargs.pop('verbose'))
if function:
getattr(database,function + '_')(**kwargs)
if __name__ == "__main__":
try:
__main()
except CstudException as ex:
print(ex)
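# Hedged usage sketch (assumes a local Cache instance and the default
# _SYSTEM/SYS credentials wired into the argument parser above; the command
# name 'cstud' matches the parser help text):
#
#   cstud list classes                      # classes in the USER namespace
#   cstud upload MyClass.cls                # upload and compile a class
#   cstud -N SAMPLES download Sample.Person
#   cstud execute -i 'write "hello",!'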
|
|
import ldap
import ldap.filter
import logging
class LDAPConn(object):
"""
LDAP connector class
Defines methods for retrieving users and groups from LDAP server.
"""
def __init__(self, config):
self.uri = config.ldap_uri
self.base = config.ldap_base
self.ldap_user = config.ldap_user
self.ldap_pass = config.ldap_passwd
self.ldap_type = config.ldap_type
self.group_member_attribute = config.ldap_group_member_attribute
self.group_filter = config.ldap_group_filter
self.uid_attribute = config.ldap_uid_attribute
self.recursive = config.ldap_recursive
if self.recursive:
self.memberof_filter = config.ldap_memberof_filter
        self.skipdisabled = config.ldap_skipdisabled
        if self.skipdisabled:
            # assumed config attribute: the disabled-user filter referenced
            # by get_group_members() whenever skipdisabled is enabled
            self.disabled_filter = config.ldap_disabled_filter
self.lowercase = config.ldap_lowercase
self.user_filter = config.ldap_user_filter
self.active_directory = config.ldap_active_directory
self.verbose = config.verbose
# Use logger to log information
self.logger = logging.getLogger()
if self.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Log to stdout
ch = logging.StreamHandler()
if self.verbose:
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger.addHandler(ch) # Use logger to log information
# Log from pyldap
log = logging.getLogger('ldap')
log.addHandler(ch)
if self.verbose:
log.setLevel(logging.DEBUG)
ldap.set_option(ldap.OPT_DEBUG_LEVEL, 4095)
def connect(self):
"""
Establish a connection to the LDAP server.
Raises:
SystemExit
"""
self.conn = ldap.initialize(self.uri)
self.conn.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)
try:
self.conn.simple_bind_s(self.ldap_user, self.ldap_pass)
except ldap.SERVER_DOWN as e:
raise SystemExit('Cannot connect to LDAP server: %s' % e)
def disconnect(self):
"""
Disconnect from the LDAP server.
"""
self.conn.unbind()
def remove_ad_referrals(self, result):
"""
Remove referrals from AD query result
"""
        return [i for i in result if i[0] is not None]
def get_group_members(self, group):
"""
Retrieves the members of an LDAP group
Args:
group (str): The LDAP group name
Returns:
A list of all users in the LDAP group
"""
attrlist = [self.group_member_attribute]
filter = self.group_filter % group
result = self.conn.search_s(base=self.base,
scope=ldap.SCOPE_SUBTREE,
filterstr=filter,
attrlist=attrlist)
if not result:
self.logger.info('Unable to find group "%s" with filter "%s", skipping group' % (group, filter))
return None
# Get DN for each user in the group
if self.active_directory:
result = self.remove_ad_referrals(result)
final_listing = {}
for members in result:
result_dn = members[0]
result_attrs = members[1]
group_members = []
attrlist = [self.uid_attribute]
if self.recursive:
# Get a DN for all users in a group (recursive)
# It's available only on domain controllers with Windows Server 2003 SP2 or later
member_of_filter_dn = self.memberof_filter % result_dn
if self.skipdisabled:
filter = "(&%s%s%s)" % (self.user_filter, member_of_filter_dn, self.disabled_filter)
else:
filter = "(&%s%s)" % (self.user_filter, member_of_filter_dn)
uid = self.conn.search_s(base=self.base,
scope=ldap.SCOPE_SUBTREE,
filterstr=filter,
attrlist=attrlist)
for item in self.remove_ad_referrals(uid):
group_members.append(item)
else:
# Otherwise, just get a DN for each user in the group
for member in result_attrs[self.group_member_attribute]:
if self.skipdisabled:
filter = "(&%s%s)" % (self.user_filter, self.disabled_filter)
else:
filter = "(&%s)" % self.user_filter
uid = self.conn.search_s(base=member.decode('utf8'),
scope=ldap.SCOPE_BASE,
filterstr=filter,
attrlist=attrlist)
for item in uid:
group_members.append(item)
# Fill dictionary with usernames and corresponding DNs
for item in group_members:
dn = item[0]
username = item[1][self.uid_attribute]
if self.lowercase:
username = username[0].decode('utf8').lower()
else:
username = username[0].decode('utf8')
final_listing[username] = dn
return final_listing
else:
dn, users = result.pop()
final_listing = {}
group_members = []
# Get info for each user in the group
for memberid in users[self.group_member_attribute]:
                if self.ldap_type == "groupofnames":
filter = "(objectClass=*)"
# memberid is user dn
base = memberid
else:
# memberid is user attribute, most likely uid
filter = self.user_filter % memberid
base = self.base
attrlist = [self.uid_attribute]
# get the actual LDAP object for each group member
uid = self.conn.search_s(base=base,
scope=ldap.SCOPE_SUBTREE,
filterstr=filter,
attrlist=attrlist)
for item in uid:
group_members.append(item)
# Fill dictionary with usernames and corresponding DNs
for item in group_members:
dn = item[0]
username = item[1][self.uid_attribute]
user = ''.join(username)
final_listing[user] = dn
return final_listing
def get_groups_with_wildcard(self, groups_wildcard):
self.logger.info("Search group with wildcard: %s" % groups_wildcard)
filter = self.group_filter % groups_wildcard
result_groups = []
result = self.conn.search_s(base=self.base,
scope=ldap.SCOPE_SUBTREE,
filterstr=filter, )
for group in result:
# Skip refldap (when Active Directory used)
# [0]==None
if group[0]:
group_name = group[1]['name'][0]
self.logger.info("Find group %s" % group_name)
result_groups.append(group_name)
if not result_groups:
self.logger.info('Unable to find group "%s", skipping group wildcard' % groups_wildcard)
return result_groups
def get_user_media(self, dn, ldap_media):
"""
Retrieves the 'media' attribute of an LDAP user
Args:
dn (str): The distinguished name of the LDAP user to look up
ldap_media (str): The name of the attribute containing the media address
Returns:
The user's media attribute value
"""
attrlist = [ldap_media]
result = self.conn.search_s(base=dn,
scope=ldap.SCOPE_BASE,
attrlist=attrlist)
if not result:
return None
dn, data = result.pop()
mail = data.get(ldap_media)
if not mail:
return None
return mail.pop()
def get_user_sn(self, dn):
"""
Retrieves the 'sn' attribute of an LDAP user
Args:
dn (str): The distinguished name of the LDAP user to look up
Returns:
The user's surname attribute
"""
attrlist = ['sn']
result = self.conn.search_s(base=dn,
scope=ldap.SCOPE_BASE,
attrlist=attrlist)
if not result:
return None
dn, data = result.pop()
sn = data.get('sn')
if not sn:
return None
return sn.pop()
def get_user_givenName(self, dn):
"""
Retrieves the 'givenName' attribute of an LDAP user
Args:
dn (str): The distinguished name of the LDAP user to look up
Returns:
The user's given name attribute
"""
attrlist = ['givenName']
result = self.conn.search_s(base=dn,
scope=ldap.SCOPE_BASE,
attrlist=attrlist)
if not result:
return None
dn, data = result.pop()
name = data.get('givenName')
if not name:
return None
return name.pop()
def get_groups_with_wildcard(self):
"""
Set group from LDAP with wildcard
:return:
"""
result_groups = []
ldap_conn = LDAPConn(self.ldap_uri, self.ldap_base, self.ldap_user, self.ldap_pass)
ldap_conn.connect()
for group in self.ldap_groups:
groups = ldap_conn.get_groups_with_wildcard(group)
result_groups = result_groups + groups
if not result_groups:
raise SystemExit('ERROR - No groups found with wildcard')
return result_groups
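def _example_group_lookup(conn):
    """Illustrative sketch only (not part of the original module).

    `conn` is assumed to be an already-connected LDAPConn instance and the
    group name below is hypothetical. get_group_members() returns a dict
    mapping usernames to their DNs, or None when the group filter matches
    nothing.
    """
    members = conn.get_group_members('zabbix-admins')
    for username, dn in (members or {}).items():
        logging.getLogger().info('%s -> %s', username, dn)
    return members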
|
|
""" Module for reading the MPD file
Author: Parikshit Juluri
Contact : pjuluri@umkc.edu
"""
from __future__ import division
import re
import config_dash
FORMAT = 0
URL_LIST = list()
# Dictionary to convert size to bits
SIZE_DICT = {'bits': 1,
'Kbits': 1024,
'Mbits': 1024*1024,
'bytes': 8,
'KB': 1024*8,
'MB': 1024*1024*8,
}
# Try to import the C implementation of ElementTree which is faster
# In case of ImportError import the pure Python implementation
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
MEDIA_PRESENTATION_DURATION = 'mediaPresentationDuration'
MIN_BUFFER_TIME = 'minBufferTime'
def get_tag_name(xml_element):
""" Module to remove the xmlns tag from the name
eg: '{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate'
Return: SegmentTemplate
"""
try:
tag_name = xml_element[xml_element.find('}')+1:]
except TypeError:
config_dash.LOG.error("Unable to retrieve the tag. ")
return None
return tag_name
def get_playback_time(playback_duration):
""" Get the playback time(in seconds) from the string:
Eg: PT0H1M59.89S
"""
# Get all the numbers in the string
numbers = re.split('[PTHMS]', playback_duration)
# remove all the empty strings
numbers = [value for value in numbers if value != '']
numbers.reverse()
total_duration = 0
for count, val in enumerate(numbers):
if count == 0:
total_duration += float(val)
elif count == 1:
total_duration += float(val) * 60
elif count == 2:
total_duration += float(val) * 60 * 60
return total_duration
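def _example_playback_time():
    # Illustrative only (not part of the original module): the MPD duration
    # string "PT0H1M59.89S" parses to 0*3600 + 1*60 + 59.89 = 119.89 seconds.
    return get_playback_time("PT0H1M59.89S")  # -> 119.89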
class MediaObject(object):
"""Object to handel audio and video stream """
def __init__(self):
self.min_buffer_time = None
self.start = None
self.timescale = None
self.segment_duration = None
self.initialization = None
self.base_url = None
self.url_list = list()
class DashPlayback:
"""
Audio[bandwidth] : {duration, url_list}
Video[bandwidth] : {duration, url_list}
"""
def __init__(self):
self.min_buffer_time = None
self.playback_duration = None
self.audio = dict()
self.video = dict()
def get_url_list(media, segment_duration, playback_duration, bitrate):
"""
Build the list of segment URLs for the given media object.
"""
if FORMAT == 0:
# Counting the init file
total_playback = segment_duration
segment_count = media.start
# Get the Base URL string
base_url = media.base_url
if "$Bandwidth$" in base_url:
base_url = base_url.replace("$Bandwidth$", str(bitrate))
if "$Number" in base_url:
base_url = base_url.split('$')
base_url[1] = base_url[1].replace('$', '')
base_url[1] = base_url[1].replace('Number', '')
base_url = ''.join(base_url)
while True:
media.url_list.append(base_url % segment_count)
segment_count += 1
if total_playback > playback_duration:
break
total_playback += segment_duration
elif FORMAT == 1:
media.url_list = URL_LIST
#print media.url_list
return media
def read_mpd(mpd_file, dashplayback):
""" Module to read the MPD file"""
global FORMAT
config_dash.LOG.info("Reading the MPD file")
try:
tree = ET.parse(mpd_file)
except IOError:
config_dash.LOG.error("MPD file not found. Exiting")
return None
config_dash.JSON_HANDLE["video_metadata"] = {'mpd_file': mpd_file}
root = tree.getroot()
if 'MPD' in get_tag_name(root.tag).upper():
if MEDIA_PRESENTATION_DURATION in root.attrib:
dashplayback.playback_duration = get_playback_time(root.attrib[MEDIA_PRESENTATION_DURATION])
config_dash.JSON_HANDLE["video_metadata"]['playback_duration'] = dashplayback.playback_duration
if MIN_BUFFER_TIME in root.attrib:
dashplayback.min_buffer_time = get_playback_time(root.attrib[MIN_BUFFER_TIME])
if "Period" in get_tag_name(root[0].tag):
child_period = root[0]
FORMAT = 0
elif "Period" in get_tag_name(root[1].tag):
child_period = root[1]
FORMAT = 1
#print child_period
video_segment_duration = None
if FORMAT == 0:
for adaptation_set in child_period:
if 'mimeType' in adaptation_set.attrib:
media_found = False
if 'audio' in adaptation_set.attrib['mimeType']:
media_object = dashplayback.audio
media_found = False
config_dash.LOG.info("Found Audio")
elif 'video' in adaptation_set.attrib['mimeType']:
media_object = dashplayback.video
media_found = True
config_dash.LOG.info("Found Video")
if media_found:
config_dash.LOG.info("Retrieving Media")
config_dash.JSON_HANDLE["video_metadata"]['available_bitrates'] = list()
for representation in adaptation_set:
bandwidth = int(representation.attrib['bandwidth'])
config_dash.JSON_HANDLE["video_metadata"]['available_bitrates'].append(bandwidth)
media_object[bandwidth] = MediaObject()
media_object[bandwidth].segment_sizes = []
for segment_info in representation:
if "SegmentTemplate" in get_tag_name(segment_info.tag):
media_object[bandwidth].base_url = segment_info.attrib['media']
media_object[bandwidth].start = int(segment_info.attrib['startNumber'])
media_object[bandwidth].timescale = float(segment_info.attrib['timescale'])
media_object[bandwidth].initialization = segment_info.attrib['initialization']
if 'video' in adaptation_set.attrib['mimeType']:
if "SegmentSize" in get_tag_name(segment_info.tag):
try:
segment_size = float(segment_info.attrib['size']) * float(
SIZE_DICT[segment_info.attrib['scale']])
except KeyError as e:
config_dash.LOG.error("Error in reading Segment sizes :{}".format(e))
continue
media_object[bandwidth].segment_sizes.append(segment_size)
elif "SegmentTemplate" in get_tag_name(segment_info.tag):
video_segment_duration = (float(segment_info.attrib['duration'])/float(
segment_info.attrib['timescale']))
config_dash.LOG.debug("Segment Playback Duration = {}".format(video_segment_duration))
elif FORMAT == 1: #differentFormat
for adaptation_set in child_period:
for representation in adaptation_set:
media_found = False
if 'audio' in representation.attrib['mimeType']:
media_object = dashplayback.audio
media_found = False
config_dash.LOG.info("Found Audio")
elif 'video' in representation.attrib['mimeType']:
media_object = dashplayback.video
media_found = True
config_dash.LOG.info("Found Video")
if media_found:
config_dash.LOG.info("Retrieving Media")
config_dash.JSON_HANDLE["video_metadata"]['available_bitrates'] = list()
bandwidth = int(representation.attrib['bandwidth'])
config_dash.JSON_HANDLE["video_metadata"]['available_bitrates'].append(bandwidth)
media_object[bandwidth] = MediaObject()
media_object[bandwidth].segment_sizes = []
media_object[bandwidth].start = int(representation.attrib['startWithSAP'])
media_object[bandwidth].base_url = root[0].text
tempcut_url = root[0].text.split('/',3)[2:]
cut_url = tempcut_url[1]
print "cut_url = {}".format(cut_url)
#print root[0].text
for segment_info in representation:
if "SegmentBase" in get_tag_name(segment_info.tag):
for init in segment_info:
media_object[bandwidth].initialization = cut_url + init.attrib['sourceURL']
if 'video' in representation.attrib['mimeType']:
if "SegmentList" in get_tag_name(segment_info.tag):
video_segment_duration = (float(segment_info.attrib['duration']))
config_dash.LOG.debug("Segment Playback Duration = {}".format(video_segment_duration))
for segment_URL in segment_info:
if "SegmentURL" in get_tag_name(segment_URL.tag):
try:
Ssize = segment_URL.attrib['media'].split('/')[0]
Ssize = Ssize.split('_')[-1]
Ssize = Ssize.split('kbit')[0]
#print "ssize"
#print Ssize
segment_size = float(Ssize) * float(
SIZE_DICT["Kbits"])
except KeyError as e:
config_dash.LOG.error("Error in reading Segment sizes :{}".format(e))
continue
segurl = cut_url + segment_URL.attrib['media']
#print segurl
URL_LIST.append(segurl)
media_object[bandwidth].segment_sizes.append(segment_size)
else:
print "Error: UknownFormat of MPD file!"
return dashplayback, int(video_segment_duration)
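def _example_read_mpd(mpd_path):
    """Illustrative sketch only (not part of the original module).

    `mpd_path` is hypothetical, and config_dash.LOG / config_dash.JSON_HANDLE
    are assumed to be initialised by the caller, as in the rest of the player.
    Picks the lowest advertised video bitrate and builds its segment URL list.
    """
    dash_playback, segment_duration = read_mpd(mpd_path, DashPlayback())
    bitrate = min(dash_playback.video.keys())
    media = get_url_list(dash_playback.video[bitrate], segment_duration,
                         dash_playback.playback_duration, bitrate)
    return media.url_list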
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ndarray class."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
def convert_to_tensor(value, dtype=None, dtype_hint=None):
"""Wrapper over `tf.convert_to_tensor`.
Args:
value: value to convert
dtype: (optional) the type we would like it to be converted to.
dtype_hint: (optional) soft preference for the type we would like it to be
converted to. `tf.convert_to_tensor` will attempt to convert value to this
type first, but will not fail if conversion is not possible, falling back
to inferring the type instead.
Returns:
Value converted to tf.Tensor.
"""
# A safer version of `tf.convert_to_tensor` to work around b/149876037.
# TODO(wangpeng): Remove this function once the bug is fixed.
if (dtype is None and isinstance(value, six.integer_types) and
value >= 2**63):
dtype = dtypes.uint64
elif dtype is None and dtype_hint is None and isinstance(value, float):
dtype = np_dtypes.default_float_type()
return ops.convert_to_tensor(value, dtype=dtype, dtype_hint=dtype_hint)
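# Illustrative note (not part of the original module): with the workaround above,
# a Python integer too large for int64 is routed to uint64 and a bare Python
# float picks up the module's default float type, e.g.
#   convert_to_tensor(2**63)  # dtype forced to dtypes.uint64
#   convert_to_tensor(1.5)    # dtype from np_dtypes.default_float_type()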
class NdarraySpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.experiemntal.numpy.ndarray`."""
value_type = property(lambda self: ndarray)
def __init__(self, data_spec):
if not isinstance(data_spec, tensor_spec.TensorSpec):
raise ValueError('NdarraySpec.__init__ was expecting a tf.TypeSpec, '
'but got a {} instead.'.format(type(data_spec)))
self._data_spec = data_spec
self._hash = None
@property
def _component_specs(self):
return self._data_spec
def _to_components(self, value):
return value.data
def _from_components(self, data):
return tensor_to_ndarray(data)
def _serialize(self):
return (self._data_spec,)
def _batch(self, batch_size):
return NdarraySpec(self._data_spec._batch(batch_size)) # pylint: disable=protected-access
def _unbatch(self):
return NdarraySpec(self._data_spec._unbatch()) # pylint: disable=protected-access
def __hash__(self):
if self._hash is None:
self._hash = hash((type(self), self._data_spec))
return self._hash
@np_export.np_export('ndarray') # pylint: disable=invalid-name
class ndarray(composite_tensor.CompositeTensor):
"""Equivalent of numpy.ndarray backed by TensorFlow tensors.
This does not support all features of NumPy ndarrays e.g. strides and
memory order since, unlike NumPy, the backing storage is not a raw memory
buffer.
TODO(srbs): Clearly specify which attributes and methods are not supported
or if there are any differences in behavior.
"""
__slots__ = ['_data', '_dtype', '_type_spec_internal']
def __init__(self, shape, dtype=float, buffer=None): # pylint: disable=redefined-builtin
"""Initializes an ndarray.
This is a low level interface for building ndarrays and should be avoided.
Users should instead use methods in array_creation.py.
This class provides a numpy.ndarray like interface for a TF Tensor with a
fully-defined shape. Note that, unlike the backing buffer of np.ndarray,
Tensors are immutable. So, operations like `__setitem__` are performed by
replacing the Tensor. This restricts the ability to implement NumPy `view`
semantics.
Compared to numpy.ndarray, this does not support `offset`, `strides`
and `order` arguments.
Args:
shape: The shape of the array. Must be a scalar, an iterable of integers
or a `TensorShape` object.
dtype: Optional. The dtype of the array. Must be a python type, a numpy
type or a tensorflow `DType` object.
buffer: Optional. The backing buffer of the array. Must have shape
`shape`. Must be a `ndarray`, `np.ndarray` or a `Tensor`.
Raises:
ValueError: If `buffer` is specified and its shape does not match
`shape`.
"""
if dtype and not isinstance(dtype, dtypes.DType):
dtype = dtypes.as_dtype(np.dtype(dtype))
if buffer is None:
buffer = array_ops.zeros(shape, dtype=dtype)
else:
if isinstance(buffer, ndarray):
buffer = buffer.data
elif isinstance(buffer, np.ndarray):
# If `buffer` is a np.ndarray, the Tensor will share the underlying
# storage of the array.
buffer = convert_to_tensor(value=buffer, dtype=dtype)
elif not isinstance(buffer, ops.Tensor):
raise ValueError('Unexpected type for `buffer` {}. Must be an ndarray,'
' Tensor or np.ndarray.'.format(type(buffer)))
if shape is not None:
buffer.set_shape(shape)
assert isinstance(buffer, ops.Tensor)
if dtype and dtype != buffer.dtype:
buffer = math_ops.cast(buffer, dtype)
self._data = buffer
self._type_spec_internal = None
self._dtype = None
@classmethod
def from_tensor(cls, tensor):
o = cls.__new__(cls, None)
# pylint: disable=protected-access
o._data = tensor
o._dtype = None
o._type_spec_internal = None
# pylint: enable=protected-access
return o
@property
def _type_spec(self):
if self._type_spec_internal is None:
self._type_spec_internal = NdarraySpec(
type_spec.type_spec_from_value(self._data))
return self._type_spec_internal
@property
def data(self):
"""Tensor object containing the array data.
This has a few key differences from the Python buffer object used in
NumPy arrays.
1. Tensors are immutable. So operations requiring in-place edit, e.g.
__setitem__, are performed by replacing the underlying buffer with a new
one.
2. Tensors do not provide access to their raw buffer.
Returns:
A Tensor.
"""
return self._data
@property
def shape(self):
"""Returns a tuple or tf.Tensor of array dimensions."""
shape = self.data.shape
if shape.is_fully_defined():
return tuple(shape.as_list())
else:
return array_ops.shape(self.data)
@property
def dtype(self):
if self._dtype is None:
self._dtype = np_dtypes._get_cached_dtype(self._data.dtype) # pylint: disable=protected-access
return self._dtype
def _is_boolean(self):
return self._data.dtype == dtypes.bool
@property
def ndim(self):
ndims = self.data.shape.ndims
if ndims is None:
return array_ops.rank(self.data)
else:
return ndims
@property
def size(self):
"""Returns the number of elements in the array."""
shape = self.shape
if isinstance(shape, ops.Tensor):
return array_ops.size(self.data)
else:
return np.prod(self.shape)
@property
def T(self): # pylint: disable=invalid-name
return self.transpose()
def __len__(self):
shape = self.shape
if isinstance(shape, ops.Tensor):
raise TypeError('len() of symbolic tensor undefined')
elif shape:
return self.shape[0]
else:
raise TypeError('len() of unsized object.')
def astype(self, dtype):
if self.dtype == dtype:
return self
else:
return tensor_to_ndarray(math_ops.cast(self.data, dtype))
# Unary operations
def __neg__(self):
return tensor_to_ndarray(-self.data) # pylint: disable=invalid-unary-operand-type
def __pos__(self):
return self
__hash__ = None
def __int__(self):
return int(self.data)
def __float__(self):
return float(self.data)
def __bool__(self):
return bool(self.data)
def __nonzero__(self):
return self.__bool__()
def __iter__(self):
if not isinstance(self.data, ops.EagerTensor):
raise TypeError('Iteration over symbolic tensor is not allowed')
for i in range(self.shape[0]):
result_t = self.data[i]
yield tensor_to_ndarray(result_t)
return
def __array__(self, dtype=None):
"""Returns a NumPy ndarray.
This allows instances of this class to be directly used in NumPy routines.
However, doing that may force a copy to CPU.
Args:
dtype: A NumPy compatible type.
Returns:
A NumPy ndarray.
"""
return np.asarray(self.data, dtype)
# NOTE: we currently prefer interop with TF to allow TF to take precedence.
__array_priority__ = 90
def __array_module__(self, types):
# Experimental support for NumPy's module dispatch with NEP-37:
# https://numpy.org/neps/nep-0037-array-module.html
# Currently requires https://github.com/seberg/numpy-dispatch
# pylint: disable=g-import-not-at-top
import tensorflow.compat.v2 as tf
if all(issubclass(t, (ndarray, np.ndarray)) for t in types):
return tf.experimental.numpy
else:
return NotImplemented
def __index__(self):
"""Returns a python scalar.
This allows using an instance of this class as an array index.
Note that only arrays of integer types with size 1 can be used as array
indices.
Returns:
A Python scalar.
Raises:
TypeError: If the array is not of an integer type.
ValueError: If the array does not have size 1.
"""
# TODO(wangpeng): Handle graph mode
if not isinstance(self.data, ops.EagerTensor):
raise TypeError('Indexing using symbolic tensor is not allowed')
return self.data.numpy().item()
def tolist(self):
return self.data.numpy().tolist()
def __str__(self):
return 'ndarray<{}>'.format(self.data.__str__())
def __repr__(self):
return 'ndarray<{}>'.format(self.data.__repr__())
def tensor_to_ndarray(tensor):
return ndarray.from_tensor(tensor)
def ndarray_to_tensor(arr, dtype=None, name=None, as_ref=False):
if as_ref:
raise ValueError('as_ref is not supported.')
if dtype and dtypes.as_dtype(arr.dtype) != dtype:
return math_ops.cast(arr.data, dtype)
result_t = arr.data
if name:
result_t = array_ops.identity(result_t, name=name)
return result_t
ops.register_tensor_conversion_function(ndarray, ndarray_to_tensor)
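def _example_round_trip():
    """Illustrative sketch only (not part of the original module).

    Wraps an eager Tensor in the ndarray class above and converts it back;
    assumes an eager TensorFlow context (the default in TF 2.x).
    """
    t = array_ops.ones([2, 3], dtype=dtypes.float32)
    arr = tensor_to_ndarray(t)      # ndarray backed by the Tensor
    assert arr.shape == (2, 3) and arr.ndim == 2
    back = ndarray_to_tensor(arr)   # plain tf.Tensor again
    return np.asarray(arr), back    # __array__ yields a NumPy copy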
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=maybe-no-member, invalid-name
"""Test request import and updates."""
from ggrc import db
from ggrc import models
from ggrc.converters import errors
from integration.ggrc import TestCase
from flask.json import dumps
class TestAssessmentImport(TestCase):
"""Basic Assessment import tests with.
This test suite should test new Assessment imports, exports, and updates.
The main focus of these tests is checking error messages for invalid state
transitions.
"""
def setUp(self):
"""Set up for Assessment test cases."""
super(TestAssessmentImport, self).setUp()
self.client.get("/login")
def test_import_assessments_with_templates(self):
"""Test importing of assessments with templates."""
self.import_file("assessment_template_no_warnings.csv")
response = self.import_file("assessment_with_templates.csv")
self._check_csv_response(response, {})
assessment = models.Assessment.query.filter(
models.Assessment.slug == "A 4").first()
values = set(v.attribute_value for v in assessment.custom_attribute_values)
self.assertIn("abc", values)
self.assertIn("2015-07-15", values)
def _test_assessment_users(self, asmt, users):
""" Test that all users have correct roles on specified Assessment"""
verification_errors = ""
for user_name, expected_types in users.items():
try:
user = models.Person.query.filter_by(name=user_name).first()
rel = models.Relationship.find_related(asmt, user)
if expected_types:
self.assertNotEqual(
rel, None,
"User {} is not mapped to {}".format(user.email, asmt.slug))
self.assertIn("AssigneeType", rel.relationship_attrs)
self.assertEqual(
set(rel.relationship_attrs[
"AssigneeType"].attr_value.split(",")),
expected_types
)
else:
self.assertEqual(
rel, None,
"User {} is mapped to {}".format(user.email, asmt.slug))
except AssertionError as error:
verification_errors += "\n\nChecks for Users-Assessment mapping "\
"failed for user '{}' with:\n{}".format(user_name, str(error))
self.assertEqual(verification_errors, "", verification_errors)
def test_assessment_full_no_warnings(self):
""" Test full assessment import with no warnings
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
"""
response = self.import_file("assessment_full_no_warnings.csv")
self._check_csv_response(response, {})
# Test first Assessment line in the CSV file
asmt_1 = models.Assessment.query.filter_by(slug="Assessment 1").first()
users = {
"user 1": {"Assessor"},
"user 2": {"Assessor", "Creator"}
}
self._test_assessment_users(asmt_1, users)
self.assertEqual(asmt_1.status, models.Assessment.START_STATE)
# Test second Assessment line in the CSV file
asmt_2 = models.Assessment.query.filter_by(slug="Assessment 2").first()
users = {
"user 1": {"Assessor"},
"user 2": {"Creator"},
"user 3": {},
"user 4": {},
"user 5": {},
}
self._test_assessment_users(asmt_2, users)
self.assertEqual(asmt_2.status, models.Assessment.PROGRESS_STATE)
def test_assessment_import_states(self):
""" Test Assessment state imports
These tests are an intermediate part for zucchini release and will be
updated in the next release.
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=299569476
"""
self.import_file("assessment_full_no_warnings.csv")
response = self.import_file("assessment_update_intermediate.csv")
expected_errors = {
"Assessment": {
"block_errors": set(),
"block_warnings": set(),
"row_errors": set(),
"row_warnings": set(),
}
}
self._check_csv_response(response, expected_errors)
assessments = {r.slug: r for r in models.Assessment.query.all()}
self.assertEqual(assessments["Assessment 60"].status,
models.Assessment.START_STATE)
self.assertEqual(assessments["Assessment 61"].status,
models.Assessment.PROGRESS_STATE)
self.assertEqual(assessments["Assessment 62"].status,
models.Assessment.DONE_STATE)
self.assertEqual(assessments["Assessment 63"].status,
models.Assessment.FINAL_STATE)
self.assertEqual(assessments["Assessment 64"].status,
models.Assessment.FINAL_STATE)
self.assertEqual(assessments["Assessment 3"].status,
models.Assessment.FINAL_STATE)
self.assertEqual(assessments["Assessment 4"].status,
models.Assessment.FINAL_STATE)
# Check that there is only one attachment left
asmt1 = assessments["Assessment 1"]
self.assertEqual(len(asmt1.documents), 1)
# Check that only the two new URLs are present in Assessment 1
url_titles = set(obj.title for obj in asmt1.related_objects()
if isinstance(obj, models.Document))
self.assertEqual(url_titles, set(["a.b.com", "c.d.com"]))
def test_assessment_warnings_errors(self):
""" Test full assessment import with warnings and errors
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
"""
self.import_file("assessment_full_no_warnings.csv")
response = self.import_file("assessment_with_warnings_and_errors.csv")
expected_errors = {
"Assessment": {
"block_errors": set([]),
"block_warnings": {
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="error description - non existing column will "
"be ignored"
),
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="actual error message"
),
},
"row_errors": {
errors.MISSING_VALUE_ERROR.format(
line=19,
column_name="Audit"
),
errors.DUPLICATE_VALUE_IN_CSV.format(
line_list="20, 22",
column_name="Code",
value="Assessment 22",
s="",
ignore_lines="22",
),
},
"row_warnings": {
errors.UNKNOWN_OBJECT.format(
line=14,
object_type="Project",
slug="proj-55"
),
errors.UNKNOWN_OBJECT.format(
line=19,
object_type="Audit",
slug="not existing"
),
errors.WRONG_VALUE_DEFAULT.format(
line=20,
column_name="State",
value="open",
),
errors.WRONG_VALUE.format(line=3, column_name="Url"),
},
}
}
self._check_csv_response(response, expected_errors)
class TestAssessmentExport(TestCase):
"""Test Assessment object export."""
def setUp(self):
""" Set up for Assessment test cases """
super(TestAssessmentExport, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def export_csv(self, data):
return self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
def test_simple_export(self):
""" Test full assessment export with no warnings
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
"""
self.import_file("assessment_full_no_warnings.csv")
data = [{
"object_name": "Assessment",
"filters": {"expression": {}},
"fields": "all",
}]
response = self.export_csv(data)
self.assertIn(u"\u5555", response.data.decode("utf8"))
def test_export_assessments_with_filters_and_conflicting_ca_names(self):
"""Test exporting assessments with conflicting custom attribute names."""
self.import_file(u"assessment_template_no_warnings.csv")
self.import_file(u"assessment_with_templates.csv")
# also create an object level custom attribute with a name that clashes
# with a name of a "regular" attribute
assessment = models.Assessment.query.filter(
models.Assessment.slug == u"A 2").first()
cad = models.CustomAttributeDefinition(
attribute_type=u"Text",
title=u"ca title",
definition_type=u"assessment",
definition_id=assessment.id
)
db.session.add(cad)
db.session.commit()
data = [{
"object_name": "Assessment",
"fields": ["slug", "title", "description", "status"],
"filters": {
"expression": {
"left": {
"left": "code",
"op": {"name": "~"},
"right": "A 2"
},
"op": {"name": "AND"},
"right": {
"left": "title",
"op": {"name": "~"},
"right": "no template Assessment"
}
},
"keys": ["code", "title", "status"],
"order_by": {
"keys": [],
"order": "",
"compare": None
}
}
}]
response = self.export_csv(data)
self.assertIn(u"No template Assessment 2", response.data)
|
|
#!/usr/bin/env python
"""
@file HybridVAControl.py
@author Craig Rafter
@date 19/08/2016
class for hybrid vehicle-actuated signal control
"""
import signalControl, readJunctionData, traci
from math import atan2, degrees, hypot
import numpy as np
from collections import defaultdict
class HybridVAControl(signalControl.signalControl):
def __init__(self, junctionData, minGreenTime=10., maxGreenTime=60., scanRange=250, packetRate=0.2):
super(HybridVAControl, self).__init__()
self.junctionData = junctionData
self.firstCalled = traci.simulation.getCurrentTime()
self.lastCalled = self.firstCalled
self.lastStageIndex = 0
traci.trafficlights.setRedYellowGreenState(self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString)
self.packetRate = int(1000*packetRate)
self.transition = False
# self.CAMactive = False
# dict[vehID] = [position, heading, velocity, Tdetect]
self.newVehicleInfo = {}
self.oldVehicleInfo = {}
self.scanRange = scanRange
self.jcnPosition = np.array(traci.junction.getPosition(self.junctionData.id))
self.jcnCtrlRegion = self._getJncCtrlRegion()
# print(self.junctionData.id)
# print(self.jcnCtrlRegion)
self.controlledLanes = traci.trafficlights.getControlledLanes(self.junctionData.id)
# dict[laneID] = [heading, shape]
self.laneDetectionInfo = self._getIncomingLaneInfo()
self.stageTime = 0.0
self.minGreenTime = minGreenTime
self.maxGreenTime = maxGreenTime
self.secondsPerMeterTraffic = 0.45
self.nearVehicleCatchDistance = 25
self.extendTime = 1.0 # 5 m in 10 m/s (acceptable journey 1.333)
self.laneInductors = self._getLaneInductors()
self.TIME_MS = self.firstCalled
self.TIME_SEC = 0.001 * self.TIME_MS
'''def minmax(x, lower, upper):
return min(max(x, lower), upper)
'''
def process(self):
self.TIME_MS = traci.simulation.getCurrentTime()
self.TIME_SEC = 0.001 * self.TIME_MS
# Packets sent on this step
# packet delay + only get packets towards the end of the second
if (not self.TIME_MS % self.packetRate) and (not 50 < self.TIME_MS % 1000 < 650):
#self.CAMactive = True
self._getCAMinfo()
# else:
# self.CAMactive = False
# Update stage decisions
# If there's no ITS enabled vehicles present use VA ctrl
numCAVs = len(self.oldVehicleInfo)
isControlInterval = not self.TIME_MS % 1000
#if isControlInterval: print('CTRL')
if numCAVs < 1 and isControlInterval:
detectTimePerLane = self._getLaneDetectTime()
# Set adaptive time limit
if np.any(detectTimePerLane < 2):
extend = self.extendTime
else:
extend = 0.0
self.stageTime = max(self.stageTime + extend, self.minGreenTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
#print('A'+str(self.stageTime))
# If active and on the second, or in transition, then make stage decision
elif numCAVs >= 1 and isControlInterval:
oncomingVeh = self._getOncomingVehicles()
# If new stage get furthest from stop line whose velocity < 5% speed
# limit and determine queue length
if self.transition:
furthestVeh = self._getFurthestStationaryVehicle(oncomingVeh)
if furthestVeh[0] != '':
meteredTime = self.secondsPerMeterTraffic*furthestVeh[1]
self.stageTime = max(self.minGreenTime, meteredTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
# This should never happen in this state, but handle it just in case
else:
self.stageTime = self.minGreenTime
#print('B'+str(self.stageTime))
# If currently staging then extend time if there are vehicles close
# to the stop line
else:
nearestVeh = self._getNearestVehicle(oncomingVeh)
# If a vehicle detected
if nearestVeh != '' and nearestVeh[1] <= self.nearVehicleCatchDistance:
if (self.oldVehicleInfo[nearestVeh[0]][2] != 1e6
and self.oldVehicleInfo[nearestVeh[0]][2] > 1.0/self.secondsPerMeterTraffic):
meteredTime = nearestVeh[1]/self.oldVehicleInfo[nearestVeh[0]][2]
else:
meteredTime = self.secondsPerMeterTraffic*nearestVeh[1]
elapsedTime = 0.001*(self.TIME_MS - self.lastCalled)
Tremaining = self.stageTime - elapsedTime
self.stageTime = elapsedTime + max(meteredTime, Tremaining)
#self.stageTime = max(self.stageTime, self.minGreenTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
#print('C'+str(self.stageTime))
# No near vehicle detected; fall back to inductive loop info
elif nearestVeh == '' or nearestVeh[1] > self.nearVehicleCatchDistance:
detectTimePerLane = self._getLaneDetectTime()
# Set adaptive time limit
if np.any(detectTimePerLane < 2):
extend = self.extendTime
else:
extend = 0.0
self.stageTime = max(self.stageTime + extend, self.minGreenTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
#print('D'+str(self.stageTime))
else:
pass
# process stage as normal
else:
pass
# print(self.stageTime)
if isControlInterval:
self.transition = False
if self.transitionObject.active:
# If the transition object is active i.e. processing a transition
pass
# elif (self.TIME_MS - self.firstCalled) < (self.junctionData.offset*1000):
# # Process offset first
# pass
elif (self.TIME_MS - self.lastCalled) < self.stageTime*1000:
# Before the period of the next stage
pass
else:
# Not active, not in offset, stage time elapsed: move to the next stage
if len(self.junctionData.stages) != (self.lastStageIndex)+1:
# Proceed to the next stage
self.transitionObject.newTransition(
self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString,
self.junctionData.stages[self.lastStageIndex+1].controlString)
self.lastStageIndex += 1
else:
# Loop from the final stage back to the first stage
self.transitionObject.newTransition(
self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString,
self.junctionData.stages[0].controlString)
self.lastStageIndex = 0
#print(self.stageTime)
self.lastCalled = self.TIME_MS
self.transition = True
self.stageTime = 0.0
super(HybridVAControl, self).process()
def _getHeading(self, currentLoc, prevLoc):
dy = currentLoc[1] - prevLoc[1]
dx = currentLoc[0] - prevLoc[0]
if currentLoc[1] == prevLoc[1] and currentLoc[0] == prevLoc[0]:
heading = -1
else:
if dy >= 0:
heading = degrees(atan2(dy, dx))
else:
heading = 360 + degrees(atan2(dy, dx))
# Map angle to make compatible with SUMO heading
if 0 <= heading <= 90:
heading = 90 - heading
elif 90 < heading < 360:
heading = 450 - heading
return heading
def _getJncCtrlRegion(self):
jncPosition = traci.junction.getPosition(self.junctionData.id)
otherJuncPos = [traci.junction.getPosition(x) for x in traci.trafficlights.getIDList() if x != self.junctionData.id]
ctrlRegion = {'N':jncPosition[1]+self.scanRange, 'S':jncPosition[1]-self.scanRange,
'E':jncPosition[0]+self.scanRange, 'W':jncPosition[0]-self.scanRange}
TOL = 10 # Exclusion region around junction boundary
if otherJuncPos != []:
for pos in otherJuncPos:
dx = jncPosition[0] - pos[0]
dy = jncPosition[1] - pos[1]
# North/South Boundary
if abs(dy) < self.scanRange:
if dy < -TOL:
ctrlRegion['N'] = min(pos[1] - TOL, ctrlRegion['N'])
elif dy > TOL:
ctrlRegion['S'] = max(pos[1] + TOL, ctrlRegion['S'])
else:
pass
else:
pass
# East/West Boundary
if abs(dx) < self.scanRange:
if dx < -TOL:
ctrlRegion['E'] = min(pos[0] - TOL, ctrlRegion['E'])
elif dx > TOL:
ctrlRegion['W'] = max(pos[0] + TOL, ctrlRegion['W'])
else:
pass
else:
pass
return ctrlRegion
def _isInRange(self, vehPosition):
distance = np.linalg.norm(vehPosition - self.jcnPosition)
if (distance < self.scanRange
and self.jcnCtrlRegion['W'] <= vehPosition[0] <= self.jcnCtrlRegion['E']
and self.jcnCtrlRegion['S'] <= vehPosition[1] <= self.jcnCtrlRegion['N']):
return True
else:
return False
def _getVelocity(self, vehID, vehPosition, Tdetect):
if vehID in self.oldVehicleInfo.keys():
oldX = np.array(self.oldVehicleInfo[vehID][0])
newX = np.array(vehPosition)
dx = np.linalg.norm(newX - oldX)
dt = Tdetect - self.oldVehicleInfo[vehID][3]
velocity = dx/dt
return velocity
else:
return 1e6
def _getCAMinfo(self):
self.oldVehicleInfo = self.newVehicleInfo.copy()
self.newVehicleInfo = {}
Tdetect = self.TIME_SEC
for vehID in traci.vehicle.getIDList():
vehPosition = traci.vehicle.getPosition(vehID)
if traci.vehicle.getTypeID(vehID) == 'typeITSCV' and self._isInRange(vehPosition):
vehHeading = traci.vehicle.getAngle(vehID)
vehVelocity = self._getVelocity(vehID, vehPosition, Tdetect)
self.newVehicleInfo[vehID] = [vehPosition, vehHeading, vehVelocity, Tdetect]
def _getIncomingLaneInfo(self):
laneInfo = defaultdict(list)
for lane in list(np.unique(np.array(self.controlledLanes))):
shape = traci.lane.getShape(lane)
width = traci.lane.getWidth(lane)
heading = self._getHeading(shape[1], shape[0])
dx = shape[0][0] - shape[1][0]
dy = shape[0][1] - shape[1][1]
if abs(dx) > abs(dy):
roadBounds = ((shape[0][0], shape[0][1] + width), (shape[1][0], shape[1][1] - width))
else:
roadBounds = ((shape[0][0] + width, shape[0][1]), (shape[1][0] - width, shape[1][1]))
laneInfo[lane] = [heading, roadBounds]
return laneInfo
def _getOncomingVehicles(self):
# Oncoming if (in active lane & heading matches oncoming heading &
# is in lane bounds)
activeLanes = self._getActiveLanes()
vehicles = []
for lane in activeLanes:
for vehID in self.oldVehicleInfo.keys():
# If on the correct heading +/- 10 deg
if (np.isclose(self.oldVehicleInfo[vehID][1], self.laneDetectionInfo[lane][0], atol=10)
# If in lane x bounds
and min(self.laneDetectionInfo[lane][1][0][0], self.laneDetectionInfo[lane][1][1][0]) <
self.oldVehicleInfo[vehID][0][0] <
max(self.laneDetectionInfo[lane][1][0][0], self.laneDetectionInfo[lane][1][1][0])
# If in lane y bounds
and min(self.laneDetectionInfo[lane][1][0][1], self.laneDetectionInfo[lane][1][1][1]) <
self.oldVehicleInfo[vehID][0][1] <
max(self.laneDetectionInfo[lane][1][0][1], self.laneDetectionInfo[lane][1][1][1])):
# Then append vehicle
vehicles.append(vehID)
vehicles = list(np.unique(np.array(vehicles)))
return vehicles
def _getActiveLanes(self):
# Get the current control string to find the green lights
stageCtrlString = self.junctionData.stages[self.lastStageIndex].controlString
activeLanes = []
for i, letter in enumerate(stageCtrlString):
if letter == 'G':
activeLanes.append(self.controlledLanes[i])
# Get a list of the unique active lanes
activeLanes = list(np.unique(np.array(activeLanes)))
return activeLanes
def _getLaneInductors(self):
laneInductors = defaultdict(list)
for loop in traci.inductionloop.getIDList():
loopLane = traci.inductionloop.getLaneID(loop)
if loopLane in self.controlledLanes:
laneInductors[loopLane].append(loop)
return laneInductors
def _getFurthestStationaryVehicle(self, vehIDs):
furthestID = ''
maxDistance = -1
speedLimit = traci.lane.getMaxSpeed(self._getActiveLanes()[0])
for ID in vehIDs:
vehPosition = np.array(self.oldVehicleInfo[ID][0])
distance = np.linalg.norm(vehPosition - self.jcnPosition)
if distance > maxDistance and self.oldVehicleInfo[ID][2] < 0.05*speedLimit:
furthestID = ID
maxDistance = distance
return [furthestID, maxDistance]
def _getNearestVehicle(self, vehIDs):
nearestID = ''
minDistance = self.nearVehicleCatchDistance + 1
for ID in vehIDs:
vehPosition = np.array(self.oldVehicleInfo[ID][0])
distance = np.linalg.norm(vehPosition - self.jcnPosition)
if distance < minDistance:
nearestID = ID
minDistance = distance
return [nearestID, minDistance]
def _getLaneDetectTime(self):
activeLanes = self._getActiveLanes()
meanDetectTimePerLane = np.zeros(len(activeLanes))
for i, lane in enumerate(activeLanes):
detectTimes = []
for loop in self.laneInductors[lane]:
detectTimes.append(traci.inductionloop.getTimeSinceDetection(loop))
meanDetectTimePerLane[i] = np.mean(detectTimes)
return meanDetectTimePerLane
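def _example_heading(currentLoc=(0.0, 10.0), prevLoc=(0.0, 0.0)):
    """Illustrative re-statement of the mapping in _getHeading() above.

    Not part of the original module: a vehicle moving from (0, 0) to (0, 10)
    travels due north, i.e. 90 deg in the mathematical convention, which maps
    to 0 deg in SUMO's heading convention.
    """
    dy = currentLoc[1] - prevLoc[1]
    dx = currentLoc[0] - prevLoc[0]
    heading = degrees(atan2(dy, dx)) if dy >= 0 else 360 + degrees(atan2(dy, dx))
    return 90 - heading if 0 <= heading <= 90 else 450 - heading  # -> 0.0 (north)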
|
|
#
# Copyright (c) 2015, 2016, 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
GEOPM Plotter - Used to produce plots and other analysis files from report and/or trace files.
"""
import sys
import os
import subprocess
import traceback
import argparse
import math
from pkg_resources import parse_version
import pandas
if parse_version(pandas.__version__) < parse_version('0.19.2'):
raise ImportError('Pandas version must be >= v0.19.2!')
import numpy
import code
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
from natsort import natsorted
from cycler import cycler
from geopmpy import __version__
import geopmpy.io
class Config(object):
"""The base class for plot configuration objects.
This class contains the common options for all derived configuration types.
Attributes:
profile_name: The name string used for the title of the plot.
misc_text: Extra text string to append to the plot title.
datatype: The string name for a datatype to be examined in the plot.
normalize: A bool controlling whether or not the plot data is normalized.
output_dir: The output directory string for storing plots and associated data.
write_csv: A bool controlling whether or not CSV data for the plots is written.
output_types: The list of file formats (str) to be used to save the plots.
verbose: A bool to print extra information while parsing and plotting if true.
min_drop: The minimum power budget to include in the plotted data.
max_drop: The maximum power budget to include in the plotted data.
ref_version: The reference version string to include in the plotted data.
ref_profile_name: The reference profile name to include in the plotted data.
ref_plugin: The reference plugin to include in the plotted data.
tgt_version: The target version string to include in the plotted data.
tgt_profile_name: The target profile name to include in the plotted data.
tgt_plugin: The target plugin to include in the plotted data.
style: The Matplotlib style string to use when plotting.
fig_size: A 2-tuple of ints for the (X, Y) size of the plotted figure in inches.
fontsize: The int size of the font for text in the plot.
legend_fontsize: The int size of the font in the legend.
show: A bool to display an interactive plot if true.
shell: A bool to drop to a Python shell for further analysis if true.
"""
def __init__(self, profile_name='Default', misc_text = '', datatype=None, normalize=False,
output_dir='figures', write_csv=False, output_types=['svg'], verbose=False,
style='classic', fig_size=None, fontsize=None, legend_fontsize=None, show=False, shell=False,
min_drop=0, max_drop=999,
ref_version=None, ref_profile_name=None, ref_plugin=None,
tgt_version=None, tgt_profile_name=None, tgt_plugin=None):
# Custom params
self.profile_name = profile_name
self.misc_text = ' - {}'.format(misc_text) if misc_text else ''
self.datatype = datatype
self.normalize = normalize
self.output_dir = output_dir
self.write_csv = write_csv
self.output_types = output_types
self.verbose = verbose
# Indexing params
self.min_drop = min_drop
self.max_drop = max_drop
self.ref_version = ref_version
self.ref_profile_name = ref_profile_name
self.ref_plugin = ref_plugin
self.tgt_version = tgt_version
self.tgt_profile_name = tgt_profile_name
self.tgt_plugin = tgt_plugin
# Matplotlib params
self.fig_size = fig_size
self.fontsize = fontsize
self.legend_fontsize = legend_fontsize
self.show = show
self.shell = shell
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if self.show:
self.block = True
if self.shell:
self.block = False
plt.style.use(style)
plt.rcParams.update({'figure.figsize': self.fig_size})
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams.update({'figure.autolayout': True})
def check_plugins(self, df):
decider_list = df.index.get_level_values('tree_decider').unique().tolist()
if self.ref_plugin is not None and self.ref_plugin not in decider_list:
raise SyntaxError('Reference plugin {} not found in report dataframe!'.format(self.ref_plugin))
if self.tgt_plugin is not None and self.tgt_plugin not in decider_list:
raise SyntaxError('Target plugin {} not found in report dataframe!'.format(self.tgt_plugin))
class ReportConfig(Config):
"""The configuration for plots based on report data.
This class extends the Config class with the parameters specific to plotting report based data.
Attributes:
speedup: A bool to indicate whether or not to plot the target bars as a relative speedup compared
to the reference bars.
yspan: If speedup is true, the amount of units to include above and below 1.0.
datatype: The desired datatype from the report to plot (e.g. runtime, energy, frequency).
fig_size: A 2-tuple of ints for the (X, Y) size of the plotted figure in inches.
fontsize: The int size of the font for text in the plot.
legend_fontsize: The size of the font in the legend.
**kwargs: Arbitrary additional overrides for the Config object.
units: The keys are report datatypes and the values are the Y-axis label.
"""
def __init__(self, speedup=False, yspan = 0.35, # New args for this class
datatype='runtime', fig_size=(6.3, 4.8), fontsize=14, legend_fontsize=14, # Base class args to override
**kwargs): # User overridden args
super(ReportConfig, self).__init__(datatype=datatype, fig_size=fig_size, fontsize=fontsize,
legend_fontsize=legend_fontsize, **kwargs)
self.speedup = speedup
self.yspan = yspan
self.units = {
'energy': 'J',
'runtime': 's',
'frequency': '% of sticker',
}
class TraceConfig(Config):
"""The configuration for plots based on trace data.
This class extends the Config class with the parameters specific to plotting trace based data.
Attributes:
legend_label_spacing: The float spacing between legend labels.
smooth: The number of samples to use in a moving average for the plotted Y-axis data.
analyze: Flag to control whether basic analysis data is also plotted.
base_clock: The base clock frequency for the CPU used in the data.
focus_node: The node to highlight during per-node plots.
fig_size: A 2-tuple of ints for the (X, Y) size of the plotted figure in inches.
fontsize: The int size of the font for text in the plot.
legend_fontsize: The size of the font in the legend.
**kwargs: Arbitrary additional overrides for the Config object.
"""
def __init__(self, legend_label_spacing = 0.15, smooth=1, analyze=False, base_clock=None, # New args for this class
focus_node=None, # New args for this class
fig_size=(7, 6), fontsize=16, legend_fontsize=12, # Base class args to override
**kwargs): # User overridden args
super(TraceConfig, self).__init__(fig_size=fig_size, fontsize=fontsize, legend_fontsize=legend_fontsize,
**kwargs)
self.legend_label_spacing = legend_label_spacing
self.smooth = smooth
self.analyze = analyze
self.base_clock = base_clock
self.focus_node = focus_node
plt.rcParams.update({'legend.labelspacing': self.legend_label_spacing})
def get_node_dict(self, node_list):
"""Creates a dictionary of uniform names for node names present in the node list.
Args:
node_list: The list of node names for the current experiment.
Returns:
dict: The keys are the experiment node names with the uniform names as values.
"""
node_list = natsorted(node_list)
node_dict = {}
for i, name in enumerate(node_list):
if self.normalize:
node_dict[name] = 'Node {}'.format(i + 1)
else:
node_dict[name] = name
return node_dict
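# Illustrative example (not part of the original module): with normalize=True,
# TraceConfig.get_node_dict() above anonymises host names into uniform labels,
# e.g. for a hypothetical node list
#   get_node_dict(['mr-fusion2', 'mr-fusion10'])
#   -> {'mr-fusion2': 'Node 1', 'mr-fusion10': 'Node 2'}
# With normalize=False the original host names are returned unchanged.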
def generate_box_plot(report_df, config):
"""Plots boxes for all the input data.
This will generate a boxplot for every tree decider in the 'report_df' for the target version and profile name.
It is optionally normalized by the min of the means for the reference profile name, version, and plugin.
Args:
report_df: The multiindexed DataFrame with all the report data parsed from the
AppOutput class.
config: The ReportConfig object specifying the plotting and analysis parameters.
"""
config.check_plugins(report_df)
idx = pandas.IndexSlice
df = pandas.DataFrame()
normalization_factor = 1
if config.normalize:
# This is the min of the means of all the iterations for datatype per power budget (i.e. rightmost reference bar)
normalization_factor = report_df.loc[idx[config.ref_version:config.ref_version,
config.ref_profile_name:config.ref_profile_name,
config.min_drop:config.max_drop, config.ref_plugin, :, :, :, 'epoch'],
config.datatype].groupby(level='power_budget').mean().min()
# Select only the data we care about
report_df = report_df.loc[idx[config.tgt_version:config.tgt_version, config.tgt_profile_name:config.tgt_profile_name,
config.min_drop:config.max_drop, config.tgt_plugin:config.tgt_plugin],]
for decider, decider_df in report_df.groupby(level='tree_decider'):
f, ax = plt.subplots()
data_df = decider_df.loc[idx[config.tgt_version:config.tgt_version, config.tgt_profile_name:config.tgt_profile_name,
config.min_drop:config.max_drop, :, :, :, :, 'epoch'], config.datatype]
data_df /= normalization_factor
grouped = data_df.groupby(level='power_budget')
lines = pandas.tools.plotting.boxplot_frame_groupby(grouped, subplots=False, showmeans=True, whis='range',
ax=ax, return_type='both')
ylabel = config.datatype.title()
if config.normalize:
ylabel = 'Normalized {}'.format(ylabel)
else:
units_label = config.units.get(config.datatype)
ylabel = '{}{}'.format(ylabel, ' ({})'.format(units_label) if units_label else '')
ax.set_ylabel(ylabel)
ax.set_xlabel('Per-Node Socket+DRAM Power Limit (W)')
plt.title('{} {} Boxplot{}'.format(config.profile_name, config.datatype.title(), config.misc_text), y=1.06)
plt.suptitle(decider.title().replace('_', ' ') + ' Plugin', x=0.54, y=0.91, ha='center')
plt.margins(0.02, 0.01)
plt.axis('tight')
plt.tight_layout()
# Match the y-axis limits to the bar plot for easier comparison
ymax = report_df.loc[idx[:, :, config.min_drop:config.max_drop, :, :, :, :, 'epoch'], config.datatype].max()
ymax /= normalization_factor
ymax *= 1.1
ax.set_ylim(0, ymax)
# Write data/plot files
file_name = '{}_{}_{}_boxplot'.format(config.profile_name.lower().replace(' ', '_'), config.datatype, decider)
if config.verbose:
sys.stdout.write('Writing:\n')
if config.write_csv:
full_path = os.path.join(config.output_dir, '{}.csv'.format(file_name))
grouped.describe().to_csv(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
for ext in config.output_types:
full_path = os.path.join(config.output_dir, '{}.{}'.format(file_name, ext))
plt.savefig(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
sys.stdout.flush()
if config.show:
plt.show(block=config.block)
if config.shell:
code.interact(local=dict(globals(), **locals()))
plt.close()
def generate_bar_plot(report_df, config):
"""Plots bars comparing the reference and target plugins.
This will generate a plot with 2 bars at every power budget in the 'report_df' that compares the reference
plugin to the target plugin. This presently is configured only for tree decider comparisons. The bars are
optionally normalized by the min of the means of the reference data.
Args:
report_df: The multiindexed DataFrame with all the report data parsed from the
AppOutput class.
config: The config object specifying the plotting and analysis parameters.
Todo:
* Allow for a single plugin to be plotted (e.g. only a target)?
"""
if config.ref_plugin is None:
config.ref_plugin = 'static_policy'
sys.stdout.write('WARNING: No reference plugin set. Use "--ref_plugin" to override. ' +
'Assuming "static_policy".\n')
if config.tgt_plugin is None:
config.tgt_plugin = 'power_balancing'
sys.stdout.write('WARNING: No target plugin set. Use "--tgt_plugin" to override. ' +
'Assuming "power_balancing".\n')
sys.stdout.flush()
config.check_plugins(report_df)
idx = pandas.IndexSlice
df = pandas.DataFrame()
reference_g = report_df.loc[idx[config.ref_version:config.ref_version, config.ref_profile_name:config.ref_profile_name,
config.min_drop:config.max_drop, config.ref_plugin, :, :, :, 'epoch'],
config.datatype].groupby(level='power_budget')
df['reference_mean'] = reference_g.mean()
df['reference_max'] = reference_g.max()
df['reference_min'] = reference_g.min()
target_g = report_df.loc[idx[config.tgt_version:config.tgt_version, config.tgt_profile_name:config.tgt_profile_name,
config.min_drop:config.max_drop, config.tgt_plugin, :, :, :, 'epoch'],
config.datatype].groupby(level='power_budget')
df['target_mean'] = target_g.mean()
df['target_max'] = target_g.max()
df['target_min'] = target_g.min()
if config.normalize and not config.speedup: # Normalize the data against the rightmost reference bar
df /= df['reference_mean'].iloc[-1]
if config.speedup: # Plot the inverse of the target data to show speedup as a positive change
df = df.div(df['reference_mean'], axis='rows')
df['target_mean'] = 1 / df['target_mean']
df['target_max'] = 1 / df['target_max']
df['target_min'] = 1 / df['target_min']
# Convert the maxes and mins to be deltas from the mean; required for the errorbar API
df['reference_max_delta'] = df['reference_max'] - df['reference_mean']
df['reference_min_delta'] = df['reference_mean'] - df['reference_min']
df['target_max_delta'] = df['target_max'] - df['target_mean']
df['target_min_delta'] = df['target_mean'] - df['target_min']
# Begin plot setup
f, ax = plt.subplots()
bar_width = 0.35
index = numpy.arange(min(len(df['target_mean']), len(df['reference_mean'])))
plt.bar(index - bar_width / 2,
df['reference_mean'],
width=bar_width,
color='blue',
align='center',
label=config.ref_plugin.replace('_', ' ').title(),
zorder=3)
ax.errorbar(index - bar_width / 2,
df['reference_mean'],
xerr=None,
yerr=(df['reference_min_delta'], df['reference_max_delta']),
fmt=' ',
label='',
color='r',
elinewidth=2,
capthick=2,
zorder=10)
plt.bar(index + bar_width / 2,
df['target_mean'],
width=bar_width,
color='cyan',
align='center',
label=config.tgt_plugin.replace('_', ' ').title(),
zorder=3) # Forces grid lines to be drawn behind the bar
ax.errorbar(index + bar_width / 2,
df['target_mean'],
xerr=None,
yerr=(df['target_min_delta'], df['target_max_delta']),
fmt=' ',
label='',
color='r',
elinewidth=2,
capthick=2,
zorder=10)
ax.set_xticks(index)
ax.set_xticklabels(df.index)
ax.set_xlabel('Per-Node Socket+DRAM Power Limit (W)')
ylabel = config.datatype.title()
if config.normalize and not config.speedup:
ylabel = 'Normalized {}'.format(ylabel)
elif not config.normalize and not config.speedup:
units_label = config.units.get(config.datatype)
ylabel = '{}{}'.format(ylabel, ' ({})'.format(units_label) if units_label else '')
else: #if config.speedup:
ylabel = 'Normalized Speed-up'
ax.set_ylabel(ylabel)
ax.grid(axis='y', linestyle='--', color='black')
plt.title('{} {} Comparison{}'.format(config.profile_name, config.datatype.title(), config.misc_text), y=1.02)
plt.margins(0.02, 0.01)
plt.axis('tight')
plt.legend(shadow=True, fancybox=True, fontsize=config.legend_fontsize, loc='best').set_zorder(11)
plt.tight_layout()
if config.speedup:
ax.set_ylim(1 - config.yspan, 1 + config.yspan)
else:
ymax = ax.get_ylim()[1]
ymax *= 1.1
ax.set_ylim(0, ymax)
# Write data/plot files
file_name = '{}_{}_comparison'.format(config.profile_name.lower().replace(' ', '_'), config.datatype)
if config.speedup:
file_name += '_speedup'
if config.verbose:
sys.stdout.write('Writing:\n')
if config.write_csv:
full_path = os.path.join(config.output_dir, '{}.csv'.format(file_name))
df.T.to_csv(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
for ext in config.output_types:
full_path = os.path.join(config.output_dir, '{}.{}'.format(file_name, ext))
plt.savefig(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
sys.stdout.flush()
if config.show:
plt.show(block=config.block)
if config.shell:
code.interact(local=dict(globals(), **locals()))
plt.close()
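def _example_report_plots(report_df):
    """Illustrative sketch only (not part of the original module).

    `report_df` is assumed to be the multi-indexed report DataFrame described
    in the docstrings above (e.g. as parsed by geopmpy.io.AppOutput); the
    profile and plugin names are hypothetical.
    """
    config = ReportConfig(profile_name='My App', datatype='runtime',
                          normalize=True, write_csv=True, verbose=True,
                          ref_plugin='static_policy', tgt_plugin='power_balancing')
    generate_box_plot(report_df, config)
    generate_bar_plot(report_df, config)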
def generate_power_plot(trace_df, config):
"""Plots the power consumed per node at each sample.
This function will plot the power used at each sample for every node. Specifying the 'analyze' option in the
config object will also plot the power cap and aggregate power for all nodes. Specifying the 'normalize' option
in the config object will use the uniform node names in the plot's legend.
Args:
trace_df: The multiindexed DataFrame with all the trace data parsed from the
AppOutput class.
config: The object specifying the plotting and analysis parameters.
Raises:
SyntaxError: If the reference or target plugin was not found in the DataFrame.
Todo:
* Resample the median_df to ensure all nodes have the same number of samples. This can be a source of
minor error for especially long running apps.
"""
config.check_plugins(trace_df)
idx = pandas.IndexSlice
decider_list = trace_df.index.get_level_values('tree_decider').unique().tolist()
# Select only the data we care about
trace_df = trace_df.loc[idx[config.tgt_version:config.tgt_version, config.tgt_profile_name:config.tgt_profile_name,
config.min_drop:config.max_drop, config.tgt_plugin:config.tgt_plugin],]
# Do not include node_name, iteration or index in the groupby clause; The median iteration is extracted and used
    # below for every node together in a group. The index must be preserved to ensure the DFs stay in order.
for (version, name, power_budget, tree_decider, leaf_decider), df in \
trace_df.groupby(level=['version', 'name', 'power_budget', 'tree_decider', 'leaf_decider']):
# Diff the energy counters and determine the median iteration (if multiple runs)
median_df = geopmpy.io.Trace.get_median_df(df, 'energy', config)
# Calculate power from the diffed counters
pkg_energy_cols = [s for s in median_df.keys() if 'pkg_energy' in s]
dram_energy_cols = [s for s in median_df.keys() if 'dram_energy' in s]
median_df['socket_power'] = median_df[pkg_energy_cols].sum(axis=1) / median_df['elapsed_time']
median_df['dram_power'] = median_df[dram_energy_cols].sum(axis=1) / median_df['elapsed_time']
median_df['combined_power'] = median_df['socket_power'] + median_df['dram_power']
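        # Worked example with hypothetical numbers: if the diffed pkg_energy
        # columns sum to 2350 J over an elapsed_time of 10 s, then
        # socket_power = 2350 / 10 = 235 W; dram_power is computed the same
        # way, and combined_power is their sum.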
# Begin plot setup
node_names = df.index.get_level_values('node_name').unique().tolist()
node_dict = config.get_node_dict(node_names)
colors = [plt.get_cmap('plasma')(1. * i/len(node_names)) for i in range(len(node_names))]
plt.rc('axes', prop_cycle=(cycler('color', colors)))
f, ax = plt.subplots()
for node_name in natsorted(node_names):
node_df = median_df.loc[idx[:, :, :, :, :, node_name],]
if node_name == config.focus_node:
plt.plot(pandas.Series(numpy.arange(float(len(node_df))) / (len(node_df) - 1) * 100),
node_df['combined_power'].rolling(window=config.smooth, center=True).mean(),
label=node_dict[node_name],
color='red',
path_effects=[pe.Stroke(linewidth=3, foreground='black'), pe.Normal()],
zorder=10)
else:
plt.plot(pandas.Series(numpy.arange(float(len(node_df))) / (len(node_df) - 1) * 100),
node_df['combined_power'].rolling(window=config.smooth, center=True).mean(),
label=node_dict[node_name])
if config.analyze:
plt.plot(pandas.Series(numpy.arange(float(len(node_df))) / (len(node_df) - 1) * 100),
median_df['combined_power'].unstack(level=['node_name']).mean(axis=1),
label='Combined Average',
color='aqua',
linewidth=2.0,
path_effects=[pe.Stroke(linewidth=4, foreground='black'), pe.Normal()],
zorder=11)
plt.axhline(power_budget, linewidth=2, color='blue', label='Cap', zorder=11)
ax.set_xlabel('Iteration # (Normalized)')
ylabel = 'Socket+DRAM Power (W)'
if config.smooth > 1:
ylabel += ' Smoothed'
ax.set_ylabel(ylabel)
plt.title('{} Iteration Power\n@ {}W{}'.format(config.profile_name, power_budget, config.misc_text), y=1.02)
num_nodes = len(node_names)
if config.analyze:
num_nodes += 2 # Add 2 node spots for the cap and combined average
ncol = int(math.ceil(float(num_nodes)/4))
legend = plt.legend(loc="lower center", bbox_to_anchor=[0.5,0], ncol=ncol,
shadow=True, fancybox=True, fontsize=config.legend_fontsize)
for l in legend.legendHandles:
l.set_linewidth(2.0)
legend.set_zorder(11)
plt.tight_layout()
ax.set_ylim(ax.get_ylim()[0] * .93, ax.get_ylim()[1])
# Write data/plot files
file_name = '{}_power_{}_{}'.format(config.profile_name.lower().replace(' ', '_'), power_budget, tree_decider)
if config.verbose:
sys.stdout.write('Writing:\n')
if config.write_csv:
full_path = os.path.join(config.output_dir, '{}.csv'.format(file_name))
median_df.to_csv(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
if config.analyze:
full_path = os.path.join(config.output_dir, '{}_stats.txt'.format(file_name))
with open(full_path, 'w') as fd:
for node_name, node_data in natsorted(median_df.groupby(level='node_name')):
fd.write('{} ({}) statistics -\n\n{}\n\n'.format(node_name, node_dict[node_name], node_data.describe()))
fd.write('Aggregate (mean) power statistics -\n\n{}'.format(
median_df['combined_power'].unstack(level=['node_name']).mean(axis=1).describe()))
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
for ext in config.output_types:
full_path = os.path.join(config.output_dir, '{}.{}'.format(file_name, ext))
plt.savefig(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
sys.stdout.flush()
if config.show:
plt.show(block=config.block)
if config.shell:
code.interact(local=dict(globals(), **locals()))
plt.close()
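# A minimal usage sketch (not part of the original module): the generate_*_plot
# functions are normally driven by main() below, but they can also be called
# directly with a trace DataFrame and a TraceConfig.  The glob patterns and
# config values here are illustrative assumptions; other TraceConfig arguments
# may be required in practice.
#
#   app_output = geopmpy.io.AppOutput(None, '*trace-*', '.', True)
#   config = TraceConfig(profile_name='my_app', output_dir='figures',
#                        tgt_plugin='power_balancing', write_csv=True)
#   generate_power_plot(app_output.get_trace_df(), config)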
def generate_epoch_plot(trace_df, config):
"""Plots the max elapsed time for the nodes at each sample.
This function will plot the maximum elapsed time for all nodes present in the trace file for each sample.
    Specifying the 'analyze' option will adjust the Y-axis bounds to filter out outliers. Set '--ref_plugin'
and '--tgt_plugin' config parameters to the same plugin name to plot a single plugin.
Args:
trace_df: The multiindexed DataFrame with all the trace data parsed from the
AppOutput class.
config: The object specifying the plotting and analysis parameters.
Raises:
SyntaxError: If the reference or target plugin was not found in the DataFrame.
Todo:
* Resample the median_df to ensure all nodes have the same number of samples. This can be a source of
minor error for especially long running apps.
"""
if config.ref_plugin is None:
config.ref_plugin = 'static_policy'
sys.stdout.write('WARNING: No reference plugin set. Use "--ref_plugin" to override. ' +
'Assuming "static_policy".\n')
if config.tgt_plugin is None:
config.tgt_plugin = 'power_balancing'
sys.stdout.write('WARNING: No target plugin set. Use "--tgt_plugin" to override. ' +
'Assuming "power_balancing".\n')
sys.stdout.flush()
config.check_plugins(trace_df)
idx = pandas.IndexSlice
decider_list = trace_df.index.get_level_values('tree_decider').unique().tolist()
# Select only the data we care about
trace_df = trace_df.loc[idx[config.ref_version:config.tgt_version, config.ref_profile_name:config.tgt_profile_name,
config.min_drop:config.max_drop],]
# Group by power budget
for (version, name, power_budget), df in trace_df.groupby(level=['version', 'name', 'power_budget']):
reference_df = df.loc[idx[config.ref_version:config.ref_version, config.ref_profile_name:config.ref_profile_name,
:, config.ref_plugin],]
reference_median_df = geopmpy.io.Trace.get_median_df(reference_df, ' ', config)
reference_max_time_df = reference_median_df.unstack(level=['node_name']).max(axis=1)
target_df = df.loc[idx[config.tgt_version:config.tgt_version, config.tgt_profile_name:config.tgt_profile_name,
:, config.tgt_plugin],]
target_median_df = geopmpy.io.Trace.get_median_df(target_df, ' ', config)
target_max_time_df = target_median_df.unstack(level=['node_name']).max(axis=1)
if config.normalize:
normalization_factor = max(reference_max_time_df.max(), target_max_time_df.max())
reference_max_time_df /= normalization_factor
target_max_time_df /= normalization_factor
f, ax = plt.subplots()
plt.plot(numpy.arange(float(len(reference_max_time_df))) / len(reference_max_time_df) * 100,
reference_max_time_df.rolling(window=config.smooth, center=True).mean(),
label=config.ref_plugin.replace('_', ' ').title(),
color='blue',
linewidth=1.5)
if config.ref_plugin != config.tgt_plugin: # Do not plot the second line if there is no second plugin
plt.plot(numpy.arange(float(len(target_max_time_df))) / len(target_max_time_df) * 100,
target_max_time_df.rolling(window=config.smooth, center=True).mean(),
label=config.tgt_plugin.replace('_', ' ').title(),
color='cyan',
linewidth=1.5)
ax.set_xlabel('Iteration # (Normalized)')
if config.normalize:
ylabel = 'Normalized Elapsed Time'
else:
ylabel = 'Max Elapsed Time (s)'
if config.smooth > 1:
ylabel += ' Smoothed'
ax.set_ylabel(ylabel)
plt.title('{} Critical Path Iteration Loop Time\n@ {}W{}'.format(config.profile_name, power_budget,
config.misc_text), y=1.02)
plt.legend(shadow=True, fancybox=True, loc='best', fontsize=14).set_zorder(11)
plt.tight_layout()
if config.analyze:
# Set the ylim from 90% of the 25th percentile to 110% of the 75th percentile
lower_ylim = min(target_max_time_df.quantile(.25), reference_max_time_df.quantile(.25))
lower_ylim *= 0.9
upper_ylim = max(target_max_time_df.quantile(.75), reference_max_time_df.quantile(.75))
upper_ylim *= 1.1
ax.set_ylim(lower_ylim, upper_ylim)
# Write data/plot files
file_name = '{}_iteration_loop_time_{}'.format(config.profile_name.lower().replace(' ', '_'), power_budget)
if config.verbose:
sys.stdout.write('Writing:\n')
if config.write_csv:
full_path = os.path.join(config.output_dir, '{}_reference.csv'.format(file_name))
reference_median_df.to_csv(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
full_path = os.path.join(config.output_dir, '{}_target.csv'.format(file_name))
target_median_df.to_csv(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
if config.analyze:
full_path = os.path.join(config.output_dir, '{}_stats.txt'.format(file_name))
with open(full_path, 'w') as fd:
fd.write('Reference ({}) time statistics -\n\n{}'.format(config.ref_plugin,
reference_median_df.unstack(level=['node_name']).describe()))
fd.write('\n\nReference ({}) Aggregate (max) time statistics -\n\n{}'.format(config.ref_plugin,
reference_median_df.unstack(level=['node_name']).mean(axis=1).describe()))
fd.write('\n\nTarget ({}) time statistics -\n\n{}'.format(config.tgt_plugin,
target_median_df.unstack(level=['node_name']).describe()))
fd.write('\n\nTarget ({}) Aggregate (max) time statistics -\n\n{}'.format(config.tgt_plugin,
target_median_df.unstack(level=['node_name']).mean(axis=1).describe()))
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
for ext in config.output_types:
full_path = os.path.join(config.output_dir, '{}.{}'.format(file_name, ext))
plt.savefig(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
sys.stdout.flush()
if config.show:
plt.show(block=config.block)
if config.shell:
code.interact(local=dict(globals(), **locals()))
plt.close()
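# Usage note (an assumption based on the docstring above): passing the same
# plugin name for both the reference and the target makes generate_epoch_plot
# draw a single line, because the second plot call is skipped when the names
# match.  For example, via main() with a hypothetical data path:
#
#   main(['-p', 'epoch', '--ref_plugin', 'power_balancing',
#         '--tgt_plugin', 'power_balancing', '/path/to/trace/output'])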
def generate_freq_plot(trace_df, config):
"""Plots the per sample frequency per node per socket.
    This function will plot the frequency of each socket on each node per sample. It plots the sockets as separate
files denoted '...socket_0.svg', '...socket_1.svg', etc. Specifying the 'analyze' option in the config object
will also include a statistics print out of the data used in the plot. Specifying the 'normalize' option will
use uniform node names in the plot legend. Setting the 'config.base_clock' parameter in the config object will
convert the Y-axis to frequency in GHz as opposed to % of sticker frequency.
Args:
trace_df: The multiindexed DataFrame with all the trace data parsed from the
AppOutput class.
config: The object specifying the plotting and analysis parameters.
Raises:
SyntaxError: If the reference or target plugin was not found in the DataFrame.
Todo:
* Resample the median_df to ensure all nodes have the same number of samples. This can be a source of
minor error for especially long running apps.
"""
config.check_plugins(trace_df)
idx = pandas.IndexSlice
decider_list = trace_df.index.get_level_values('tree_decider').unique().tolist()
# Select only the data we care about
trace_df = trace_df.loc[idx[config.tgt_version:config.tgt_version, config.tgt_profile_name:config.tgt_profile_name,
config.min_drop:config.max_drop, config.tgt_plugin:config.tgt_plugin],]
for (version, name, power_budget, tree_decider, leaf_decider), df in \
trace_df.groupby(level=['version', 'name', 'power_budget', 'tree_decider', 'leaf_decider']):
# Get the diffed CLK counters, then determine the median iteration (if multiple runs)
median_df = geopmpy.io.Trace.get_median_df(df, 'clk', config)
# Begin plot setup
node_names = df.index.get_level_values('node_name').unique().tolist()
node_dict = config.get_node_dict(node_names)
colors = [plt.get_cmap('plasma')(1. * i/len(node_names)) for i in range(len(node_names))]
plt.rc('axes', prop_cycle=(cycler('color', colors)))
f, ax = plt.subplots()
clk_unhalted_core_cols = [s for s in median_df.keys() if 'clk_unhalted_core' in s]
clk_unhalted_ref_cols = [s for s in median_df.keys() if 'clk_unhalted_ref' in s]
for c, r in zip(clk_unhalted_core_cols, clk_unhalted_ref_cols): # Loop once per socket
frequency_data = median_df[c] / median_df[r]
if config.base_clock:
frequency_data *= config.base_clock
else:
frequency_data *= 100 # Convert from fraction of sticker to % of sticker
for node_name in natsorted(node_names):
node_data = frequency_data.loc[idx[:, :, :, :, :, node_name],]
if node_name == config.focus_node:
plt.plot(pandas.Series(numpy.arange(float(len(node_data))) / (len(node_data) - 1) * 100),
node_data.rolling(window=config.smooth, center=True).mean(),
label=node_dict[node_name],
color='red',
path_effects=[pe.Stroke(linewidth=3, foreground='black'), pe.Normal()],
zorder=10)
else:
plt.plot(pandas.Series(numpy.arange(float(len(node_data))) / (len(node_data) - 1) * 100),
node_data.rolling(window=config.smooth, center=True).mean(),
label=node_dict[node_name])
ax.set_xlabel('Iteration # (Normalized)')
if config.base_clock:
ylabel = 'Frequency (GHz)'
else:
ylabel = '% of Sticker Frequency'
if config.smooth > 1:
ylabel += ' Smoothed'
ax.set_ylabel(ylabel)
plt.title('{} Iteration Frequency\n@ {}W{}'.format(config.profile_name, power_budget, config.misc_text), y=1.02)
ncol = int(math.ceil(float(len(node_names))/4))
legend = plt.legend(loc="lower center", bbox_to_anchor=[0.5,0], ncol=ncol,
shadow=True, fancybox=True, fontsize=config.legend_fontsize)
for l in legend.legendHandles:
l.set_linewidth(2.0)
legend.set_zorder(11)
plt.tight_layout()
ax.set_ylim(0, ax.get_ylim()[1] * 1.1)
# Write data/plot files
file_name = '{}_frequency_{}_{}_socket_{}'.format(config.profile_name.lower().replace(' ', '_'),
power_budget, tree_decider, clk_unhalted_core_cols.index(c))
if config.verbose:
sys.stdout.write('Writing:\n')
if config.write_csv:
full_path = os.path.join(config.output_dir, '{}.csv'.format(file_name))
frequency_data.unstack(level=['node_name']).to_csv(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
if config.analyze:
full_path = os.path.join(config.output_dir, '{}_stats.txt'.format(file_name))
with open(full_path, 'w') as fd:
for node_name, node_data in natsorted(frequency_data.groupby(level='node_name')):
fd.write('{} ({}) frequency statistics -\n\n{}\n\n'.format(node_name, node_dict[node_name], node_data.describe()))
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
for ext in config.output_types:
full_path = os.path.join(config.output_dir, '{}.{}'.format(file_name, ext))
plt.savefig(full_path)
if config.verbose:
sys.stdout.write(' {}\n'.format(full_path))
sys.stdout.flush()
if config.show:
plt.show(block=config.block)
if config.shell:
code.interact(local=dict(globals(), **locals()))
plt.close()
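# Worked example with hypothetical counter values: if the diffed
# clk_unhalted_core value is 2.6e9 and clk_unhalted_ref is 2.0e9 for a sample,
# frequency_data is 1.3, which is plotted as 130 (% of sticker frequency) or,
# with --base_clock 2.1, as 1.3 * 2.1 = 2.73 GHz.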
def main(argv):
report_plots = {'box', 'bar'}
trace_plots = {'power', 'epoch', 'freq'}
_, os.environ['COLUMNS'] = subprocess.check_output(['stty', 'size']).split() # Ensures COLUMNS is set so text wraps
pandas.set_option('display.width', int(os.environ['COLUMNS'])) # Same tweak for Pandas
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('data_path', metavar='PATH',
help='the input path to be searched for report/trace files.',
action='store', default='.')
parser.add_argument('-r', '--report_base',
help='the base report string to be searched.',
action='store', default='')
parser.add_argument('-t', '--trace_base',
help='the base trace string to be searched.',
action='store', default='')
parser.add_argument('-p', '--plot_types',
help='the type of plot to be generated. (e.g. {})'.format(','.join(report_plots | trace_plots)),
action='store', default='bar', type=lambda s : s.split(','))
parser.add_argument('-s', '--shell',
help='drop to a Python shell after plotting.',
action='store_true')
parser.add_argument('-c', '--csv',
help='generate CSV files for the plotted data.',
action='store_true')
parser.add_argument('--normalize',
help='normalize the data that is plotted',
action='store_true')
parser.add_argument('-o', '--output_dir',
help='the output directory for the generated files',
action='store', default='figures')
parser.add_argument('-O', '--output_types',
help='the file type(s) for the plot file output (e.g. svg,png,eps).',
action='store', default='svg', type=lambda s : s.split(','))
parser.add_argument('-n', '--profile_name',
help='Name of the profile to be used in file names / plot titles.',
action='store')
parser.add_argument('-m', '--misc_text',
help='Text to be appended to the plot title.',
action='store', default='')
parser.add_argument('-v', '--verbose',
help='print debugging information.',
action='store_true')
parser.add_argument('--speedup',
help='plot the speedup instead of the raw data value.',
action='store_true')
parser.add_argument('--ref_version',
help='use this version as the reference to compare against.',
action='store', metavar='VERSION')
parser.add_argument('--ref_profile_name',
help='use this name as the reference to compare against.',
action='store', metavar='PROFILE_NAME')
parser.add_argument('--ref_plugin',
help='use this tree decider plugin as the reference to compare against.',
action='store', metavar='PLUGIN_NAME')
parser.add_argument('--tgt_version',
                        help='use this version as the target for analysis (to compare against the ref-plugin).',
action='store', metavar='VERSION')
parser.add_argument('--tgt_profile_name',
                        help='use this name as the target for analysis (to compare against the ref-plugin).',
action='store', metavar='PROFILE_NAME')
parser.add_argument('--tgt_plugin',
                        help='use this tree decider plugin as the target for analysis '
                             '(to compare against the ref-plugin).',
action='store', metavar='PLUGIN_NAME')
parser.add_argument('--datatype',
help='the datatype to be plotted.',
action='store', default='runtime')
parser.add_argument('--smooth',
help='apply a NUM_SAMPLES sample moving average to the data plotted on the y axis',
action='store', metavar='NUM_SAMPLES', type=int, default=1)
parser.add_argument('--analyze',
help='analyze the data that is plotted',
action='store_true')
parser.add_argument('--min_drop',
help='Minimum power budget to include in the plot.',
action='store', metavar='BUDGET_WATTS', type=int, default=1)
parser.add_argument('--max_drop',
help='Maximum power budget to include in the plot.',
action='store', metavar='BUDGET_WATTS', type=int, default=999)
parser.add_argument('--base_clock',
help='Set the base clock frequency (i.e. max non-turbo) for frequency related plots.',
action='store', metavar='FREQ_GHZ', type=float)
parser.add_argument('--focus_node',
help='Node to highlight in red during per-node plots.',
action='store', metavar='NODE_NAME')
parser.add_argument('--show',
help='show an interactive plot of the data',
action='store_true')
parser.add_argument('--version', action='version', version=__version__)
args = parser.parse_args(argv)
if report_plots.intersection(args.plot_types):
if not args.report_base:
report_glob = '*report'
else:
report_glob = args.report_base + '*'
else:
report_glob = None
if trace_plots.intersection(args.plot_types):
if not args.trace_base:
trace_glob = '*trace-*'
else:
trace_glob = args.trace_base + '*'
else:
trace_glob = None
app_output = geopmpy.io.AppOutput(report_glob, trace_glob, args.data_path, args.verbose)
if args.profile_name:
profile_name = args.profile_name
else:
if report_glob is not None:
profile_name_list = app_output.get_report_df().index.get_level_values('name').unique()
elif trace_glob is not None:
profile_name_list = app_output.get_trace_df().index.get_level_values('name').unique()
else:
raise SyntaxError('No glob pattern specified.')
if len(profile_name_list) > 1:
raise SyntaxError('Multiple profile names detected! Please provide the -n option to specify the profile name!')
profile_name = profile_name_list[0]
report_config = ReportConfig(shell=args.shell, profile_name=profile_name, misc_text=args.misc_text,
output_dir=args.output_dir, normalize=args.normalize, write_csv=args.csv,
output_types=args.output_types, verbose=args.verbose, speedup=args.speedup,
datatype=args.datatype, min_drop=args.min_drop, max_drop=args.max_drop,
ref_version=args.ref_version, ref_profile_name=args.ref_profile_name,
ref_plugin=args.ref_plugin, tgt_version=args.tgt_version,
tgt_profile_name=args.tgt_profile_name, tgt_plugin=args.tgt_plugin, show=args.show)
if trace_plots.intersection(args.plot_types):
trace_config = TraceConfig(shell=args.shell, profile_name=profile_name, misc_text=args.misc_text,
output_dir=args.output_dir, normalize=args.normalize, write_csv=args.csv,
output_types=args.output_types, verbose=args.verbose, smooth=args.smooth,
analyze=args.analyze, min_drop=args.min_drop, max_drop=args.max_drop,
ref_version=args.ref_version, base_clock=args.base_clock,
ref_profile_name=args.ref_profile_name,
ref_plugin=args.ref_plugin, tgt_version=args.tgt_version,
tgt_profile_name=args.tgt_profile_name, tgt_plugin=args.tgt_plugin,
focus_node=args.focus_node, show=args.show)
for plot in args.plot_types:
# This tries to create the name of the plot function based on what was parsed in args.plot_types. If it exists in
# the global namespace, it can be called through the namespace.
plot_func_name = 'generate_{}_plot'.format(plot)
if plot_func_name not in globals():
            raise KeyError('Invalid plot type "{}"! Valid plots are {}.'.format(plot, ', '.join(report_plots | trace_plots)))
if plot in trace_plots:
globals()[plot_func_name](app_output.get_trace_df(), trace_config)
else:
globals()[plot_func_name](app_output.get_report_df(), report_config)
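# Example invocations (a sketch only; assumes an entry point forwards
# sys.argv[1:] to main() and that report/trace files exist under the given
# paths):
#
#   main(['-p', 'bar', '-n', 'My App', '/path/to/report/files'])
#   main(['-p', 'power,freq', '--smooth', '5', '--analyze', '/path/to/traces'])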
|
|
#!/usr/bin/env python
"""
Module to test combinator-based grid match functions
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os, os.path
import sys
import re
import shutil
import unittest
import logging
import datetime
import StringIO
import json
import csv
import rdflib
log = logging.getLogger(__name__)
if __name__ == "__main__":
# Add main project directory at start of python path
sys.path.insert(0, "../..")
from MiscUtils import TestUtils
from rocommand import ro
from rocommand import ro_utils
from rocommand import ro_manifest
from rocommand.ro_namespaces import RDF, DCTERMS, RO, AO, ORE
from rocommand.test import TestROSupport
from rocommand.test import TestConfig
from rocommand.test import StdoutContext
from iaeval.ro_eval_minim import ValueList
from checklist.grid import (GridCSV, GridExcel)
from checklist import gridmatch
from checklist import checklist_template
from checklist import mkminim
# Base directory for RO tests in this module
testbase = os.path.dirname(os.path.realpath(__file__))
# Test suite
class TestMkMinim(TestROSupport.TestROSupport):
"""
Test ro annotation commands
"""
def setUp(self):
super(TestMkMinim, self).setUp()
return
def tearDown(self):
super(TestMkMinim, self).tearDown()
return
# Setup local config for Minim tests
def setupConfig(self):
return self.setupTestBaseConfig(testbase)
# Annotate RO with metadata file
def annotateResource(self, testbase, rodir, resref, annref):
"""
Annotate named resource with named annotation resource
Names are appended to the RO directory.
Returns RO directory.
"""
# $RO annotate $resuri -g $annuri
args = [
"ro", "annotate", rodir+"/"+resref, "-g", rodir+"/"+annref
]
with StdoutContext.SwitchStdout(self.outstr):
configdir = self.getConfigDir(testbase)
robasedir = self.getRoBaseDir(testbase)
status = ro.runCommand(configdir, robasedir, args)
outtxt = self.outstr.getvalue()
assert status == 0, outtxt
self.outstr = StringIO.StringIO()
return rodir
# Actual tests follow
def testNull(self):
assert True, 'Null test failed'
def testSetupConfig(self):
(configdir, robasedir) = self.setupConfig()
config = ro_utils.readconfig(configdir)
self.assertEqual(config["robase"], os.path.abspath(robasedir))
self.assertEqual(config["rosrs_uri"], TestConfig.ro_test_config.ROSRS_URI)
self.assertEqual(config["rosrs_access_token"], TestConfig.ro_test_config.ROSRS_ACCESS_TOKEN)
self.assertEqual(config["username"], TestConfig.ro_test_config.ROBOXUSERNAME)
self.assertEqual(config["useremail"], TestConfig.ro_test_config.ROBOXEMAIL)
return
def testGridRead(self):
"""
Basic test that Minim test file can be read
"""
self.setupConfig()
rodir = self.createTestRo(testbase, "testro", "RO for Minim creation test", "ro-testMkMinim")
rouri = ro_manifest.getRoUri(rodir)
gridname = "TestMkMinim.csv"
griduri = ro_manifest.getComponentUri(rodir, gridname)
gridcsv = os.path.join(rodir, gridname)
with open(gridcsv, "rU") as gridfile:
grid = GridCSV(gridfile, baseuri=griduri, dialect=csv.excel)
self.assertEqual(grid[0][0], "Minim definition for MkMinim testing")
self.deleteTestRo(rodir)
return
def testGridMatch(self):
self.setupConfig()
rodir = self.createTestRo(testbase, "testro", "RO for Minim creation test", "ro-testMkMinim")
rouri = ro_manifest.getRoUri(rodir)
gridname = "TestMkMinim.csv"
gridcsv = os.path.join(rodir, gridname)
gridbase = ""
with open(gridcsv, "rU") as gridfile:
grid = GridCSV(gridfile, baseuri=gridbase, dialect=csv.excel)
self.assertEqual(grid[0][0], "Minim definition for MkMinim testing")
(d,(r,c)) = checklist_template.checklist.match(grid, 0, 0)
self.assertEquals(r, 86, "newrow (%d)"%(r))
self.assertEquals(c, 1, "newcol (%d)"%(c))
### print repr(d)
self.assertEquals(d["matchtemplate"], 'checklist', "matchtemplate")
self.assertEquals(d["prefixes"]['ro'], 'http://purl.org/wf4ever/ro#', "Prefix 'ro'")
self.assertEquals(d["prefixes"]['minim'], 'http://purl.org/minim/minim#', "Prefix 'minim'")
self.assertEquals(d["prefixes"]['ao'], 'http://purl.org/ao/', "Prefix 'ao'")
self.assertEquals(d["prefixes"]['ex'], 'http://example.org/', "Prefix 'ex'")
self.assertEquals(len(d["checklists"]), 2, "Checklist count")
self.assertEquals(d["checklists"][0]["model"], '#model_test1', "Checklist[1] model")
self.assertEquals(d["checklists"][0]["target_urit"], '{+targetro}', "Checklist[1] uri")
self.assertEquals(d["checklists"][0]["purpose"], 'test1', "Checklist[1] purpose")
self.assertEquals(d["checklists"][1]["model"], '#model_test2', "Checklist[2] model")
self.assertEquals(d["checklists"][1]["target_urit"], '{+targetro}', "Checklist[2] uri")
self.assertEquals(d["checklists"][1]["purpose"], 'test2', "Checklist[2] purpose")
self.assertEquals(len(d["models"]), 2, "Model count")
self.assertEquals(d["models"][0]["modelid"], '#model_test1', "Model[1] id")
self.assertEquals(len(d["models"][0]["items"]), 7, "Model[1] item count")
self.assertEquals(d["models"][1]["modelid"], '#model_test2', "Model[2] id")
self.assertEquals(len(d["models"][1]["items"]), 5, "Model[2] item count")
self.assertEquals(len(d["requirements"]), 7, "Requirement count (%d found)"%(len(d["requirements"])))
self.assertEquals(d["requirements"][0]["reqid"], '#req_exists')
self.assertEquals(d["requirements"][0]["exists"], '?file rdf:type ex:Part')
self.assertEquals(d["requirements"][0]["pass"], 'File exists as a part')
self.assertEquals(d["requirements"][0]["fail"], 'File as part does not exist')
self.assertEquals(d["requirements"][0].get("miss"), None)
self.assertEquals(d["requirements"][1]["reqid"], '#req_foreach_exists')
self.assertEquals(d["requirements"][1]["foreach"], '?file rdf:type ex:Part')
self.assertEquals(d["requirements"][1]["result_mod"], 'ORDER BY ?file')
self.assertEquals(d["requirements"][1]["exists"], '?file ex:partOf [ rdf:type ex:Whole ]')
self.assertEquals(d["requirements"][1]["pass"], 'Files as part are partOf some indicated whole')
self.assertEquals(d["requirements"][1]["fail"], 'File as part %(file)s is not part of some whole')
self.assertEquals(d["requirements"][2]["reqid"], '#req_foreach_aggregated')
self.assertEquals(d["requirements"][2]["foreach"], '?file rdf:type ex:Part')
self.assertEquals(d["requirements"][2]["aggregates"], '{+file}')
self.assertEquals(d["requirements"][2]["pass"], 'All part resource files %(file_list)s are aggregated in RO')
self.assertEquals(d["requirements"][2]["fail"], 'File as part %(file)s is not aggregated in RO')
self.assertEquals(d["requirements"][2]["miss"], 'No file as part definitions are present')
self.assertEquals(len(d["requirements"][2]["collectall"]), 1)
self.assertEquals(d["requirements"][2]["collectall"][0]["collectvar"], "?file")
self.assertEquals(d["requirements"][2]["collectall"][0]["collectlist"], "?file_list")
self.assertEquals(d["requirements"][6]["reqid"], '#req_python')
self.assertEquals(d["requirements"][6]["command"], 'python --version')
self.assertEquals(d["requirements"][6]["response"], '^Python 2\.7.*$')
self.assertEquals(d["requirements"][6]["pass"], 'Python 2.7.x present')
self.assertEquals(d["requirements"][6]["fail"], 'Python 2.7.x not present')
self.deleteTestRo(rodir)
return
def testMkMinim(self):
self.setupConfig()
rodir = self.createTestRo(testbase, "testro", "RO for testMkMinim", "ro-testMkMinim")
rouri = ro_manifest.getRoUri(rodir)
# Create minim graph from CSV file
        # NOTE: a base URI may be specified when decoding the grid or when constructing the minim
# graph. The Minim graph uses its own relative references, so for consistency it may
# be necessary to pass the grid base URI to mkminim. The code below does this.
gridname = "TestMkMinim.csv"
griduri = ro_manifest.getComponentUri(rodir, gridname)
gridcsv = os.path.join(rodir, gridname)
gridbase = "http://example.org/base/"
with open(gridcsv, "rU") as gridfile:
grid = GridCSV(gridfile, baseuri=gridbase, dialect=csv.excel)
(status, minimgr) = mkminim.mkminim(grid, baseuri=grid.resolveUri(""))
self.assertEquals(status, 0)
# Read expected graph
graphname = os.path.join(rodir, "TestMkMinim.ttl")
expectgr = rdflib.Graph()
with open(graphname) as expectfile:
expectgr.parse(file=expectfile, publicID=gridbase, format="turtle")
# Check content of minim graph
###minimgr.serialize(sys.stdout, format="turtle")
self.checkTargetGraph(minimgr.graph(), expectgr, msg="Not found in constructed minim graph")
self.deleteTestRo(rodir)
return
def testChecklistEval(self):
"""
Test checklist evaluation with generated Minim file
"""
self.setupConfig()
rodir = self.createTestRo(testbase, "testro", "RO for testMkMinim", "ro-testMkMinim")
self.populateTestRo(testbase, rodir)
self.annotateResource(testbase, rodir, "", "FileAnnotations.ttl")
rouri = ro_manifest.getRoUri(rodir)
# Create minim graph from CSV file
gridname = "TestMkMinim.csv"
griduri = ro_manifest.getComponentUri(rodir, gridname)
gridcsv = os.path.join(rodir, gridname)
gridbase = "http://example.org/base/"
with open(gridcsv, "rU") as gridfile:
grid = GridCSV(gridfile, baseuri=gridbase, dialect=csv.excel)
(status, minimgr) = mkminim.mkminim(grid, baseuri=grid.resolveUri(""))
self.assertEquals(status, 0)
# Write Minim
minimname = "TestMkMinim_minim.ttl"
with open(rodir+"/"+minimname, "w") as minimfile:
minimgr.serialize(minimfile, format="turtle")
# Evaluate checklist
minimuri = ro_manifest.getComponentUri(rodir, minimname)
minimpurpose = "test1"
args = [ "ro", "evaluate", "checklist"
, "-a"
, "-d", rodir+"/"
, minimname
, minimpurpose
, "."
]
self.outstr.seek(0)
with StdoutContext.SwitchStdout(self.outstr):
status = ro.runCommand(
os.path.join(testbase, TestConfig.ro_test_config.CONFIGDIR),
os.path.join(testbase, TestConfig.ro_test_config.ROBASEDIR),
args)
outtxt = self.outstr.getvalue()
assert status == 0, "Status %d, outtxt: %s"%(status,outtxt)
log.debug("status %d, outtxt: %s"%(status, outtxt))
# Check response returned
filelist = ValueList( [ str(ro_manifest.getComponentUri(rodir, f))
for f in ["File1.txt", "File2.txt", "File3.txt"] ] )
expect = (
[ "Research Object file://%s/:"%(rodir)
, "Fully complete for test1 of resource ."
, "Satisfied requirements:"
, " At least 3 file as part values are present"
, " At most 3 file as part values are present"
, " All part resource files %s are aggregated in RO"%(filelist)
, " All file as part resources are accessible (live)"
, " Python 2.7.x present"
, " Files as part are partOf some indicated whole"
, " File exists as a part"
, "Research object URI: %s"%(rouri)
, "Minimum information URI: %s"%(minimuri)
])
self.outstr.seek(0)
for line in self.outstr:
self.assertIn(str(line)[:-1], expect)
self.deleteTestRo(rodir)
return
def testChecklistEvalExcel(self):
"""
Test checklist evaluation with generated Minim file from Excel source
"""
self.setupConfig()
rodir = self.createTestRo(testbase, "testro", "RO for testMkMinim", "ro-testMkMinim")
self.populateTestRo(testbase, rodir)
self.annotateResource(testbase, rodir, "", "FileAnnotations.ttl")
rouri = ro_manifest.getRoUri(rodir)
# Create minim graph from CSV file
gridname = "TestMkMinim.xls"
griduri = ro_manifest.getComponentUri(rodir, gridname)
gridxls = os.path.join(rodir, gridname)
gridbase = "http://example.org/base/"
grid = GridExcel(gridxls, baseuri=gridbase)
(status, minimgr) = mkminim.mkminim(grid, baseuri=grid.resolveUri(""))
self.assertEquals(status, 0)
# Write Minim
minimname = "TestMkMinim_minim.ttl"
with open(rodir+"/"+minimname, "w") as minimfile:
minimgr.serialize(minimfile, format="turtle")
# Evaluate checklist
minimuri = ro_manifest.getComponentUri(rodir, minimname)
minimpurpose = "test1"
args = [ "ro", "evaluate", "checklist"
, "-a"
, "-d", rodir+"/"
, minimname
, minimpurpose
, "."
]
self.outstr.seek(0)
with StdoutContext.SwitchStdout(self.outstr):
status = ro.runCommand(
os.path.join(testbase, TestConfig.ro_test_config.CONFIGDIR),
os.path.join(testbase, TestConfig.ro_test_config.ROBASEDIR),
args)
outtxt = self.outstr.getvalue()
assert status == 0, "Status %d, outtxt: %s"%(status,outtxt)
log.debug("status %d, outtxt: %s"%(status, outtxt))
# Check response returned
# filelist = [ unicode(ro_manifest.getComponentUri(rodir, f))
# for f in ["File1.txt", "File2.txt", "File3.txt"] ]
filelist = ValueList( [ str(ro_manifest.getComponentUri(rodir, f))
for f in ["File1.txt", "File2.txt", "File3.txt"] ] )
expect = (
[ "Research Object file://%s/:"%(rodir)
, "Fully complete for test1 of resource ."
, "Satisfied requirements:"
, " At least 3 file as part values are present"
, " At most 3 file as part values are present"
, " All part resource files %s are aggregated in RO"%(filelist)
, " All file as part resources are accessible (live)"
, " Python 2.7.x present"
, " Files as part are partOf some indicated whole"
, " File exists as a part"
, "Research object URI: %s"%(rouri)
, "Minimum information URI: %s"%(minimuri)
])
self.outstr.seek(0)
for line in self.outstr:
self.assertIn(str(line)[:-1], expect)
self.deleteTestRo(rodir)
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "Pending tests follow"
# Assemble test suite
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
, "testSetupConfig"
, "testGridRead"
, "testGridMatch"
, "testMkMinim"
, "testChecklistEval"
, "testChecklistEvalExcel"
],
"component":
[ "testComponents"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
]
}
return TestUtils.getTestSuite(TestMkMinim, testdict, select=select)
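# Example (an assumption, for interactive use): the suite returned above can
# also be run directly with the standard unittest runner instead of the
# command-line entry point below:
#
#   runner = unittest.TextTestRunner(verbosity=2)
#   runner.run(getTestSuite(select="unit"))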
if __name__ == "__main__":
TestUtils.runTests("TestMkMinim.log", getTestSuite, sys.argv)
# End.
|
|
# -*- coding: utf-8 -*-
"""Helper to create filters based on forensic artifact definitions."""
from artifacts import definitions as artifact_types
from dfwinreg import registry_searcher
from dfvfs.helpers import file_system_searcher
from plaso.engine import filters_helper
from plaso.engine import logger
from plaso.engine import path_helper
class ArtifactDefinitionsFiltersHelper(filters_helper.CollectionFiltersHelper):
"""Helper to create collection filters based on artifact definitions.
Builds collection filters from forensic artifact definitions.
For more information about Forensic Artifacts see:
https://github.com/ForensicArtifacts/artifacts/blob/main/docs/Artifacts%20definition%20format%20and%20style%20guide.asciidoc
Attributes:
file_system_artifact_names (set[str]): names of artifacts definitions that
generated file system find specifications.
registry_artifact_names (set[str]): names of artifacts definitions that
generated Windows Registry find specifications.
"""
_COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES = frozenset([
'HKEY_CURRENT_USER',
'HKEY_LOCAL_MACHINE\\SYSTEM',
'HKEY_LOCAL_MACHINE\\SOFTWARE',
'HKEY_LOCAL_MACHINE\\SAM',
'HKEY_LOCAL_MACHINE\\SECURITY',
'HKEY_USERS'])
def __init__(self, artifacts_registry, knowledge_base):
"""Initializes an artifact definitions filters helper.
Args:
artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifact
definitions registry.
knowledge_base (KnowledgeBase): contains information from the source
data needed for filtering.
"""
super(ArtifactDefinitionsFiltersHelper, self).__init__()
self._artifacts_registry = artifacts_registry
self._knowledge_base = knowledge_base
self.file_system_artifact_names = set()
self.registry_artifact_names = set()
def _BuildFindSpecsFromArtifact(self, definition, environment_variables):
"""Builds find specifications from an artifact definition.
Args:
definition (artifacts.ArtifactDefinition): artifact definition.
environment_variables (list[EnvironmentVariableArtifact]):
environment variables.
Returns:
list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find
specifications.
"""
find_specs = []
for source in definition.sources:
if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:
for path_entry in set(source.paths):
specifications = self._BuildFindSpecsFromFileSourcePath(
path_entry, source.separator, environment_variables,
self._knowledge_base.user_accounts)
find_specs.extend(specifications)
self.file_system_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
for key_path in set(source.keys):
if ArtifactDefinitionsFiltersHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
# TODO: Handle Registry Values Once Supported in dfwinreg.
# https://github.com/log2timeline/dfwinreg/issues/98
# Use set-comprehension to create a set of the source key paths.
key_paths = {key_value['key'] for key_value in source.key_value_pairs}
key_paths_string = ', '.join(key_paths)
logger.warning((
'Windows Registry values are not supported, extracting keys: '
'"{0!s}"').format(key_paths_string))
for key_path in key_paths:
if ArtifactDefinitionsFiltersHelper.CheckKeyCompatibility(key_path):
specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)
find_specs.extend(specifications)
self.registry_artifact_names.add(definition.name)
elif (source.type_indicator ==
artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
for name in source.names:
specifications = self._BuildFindSpecsFromGroupName(
name, environment_variables)
find_specs.extend(specifications)
else:
logger.warning(
'Unsupported artifact definition source type: "{0:s}"'.format(
source.type_indicator))
return find_specs
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
"""Builds find specifications from a artifact group name.
Args:
group_name (str): artifact group name.
      environment_variables (list[EnvironmentVariableArtifact]): environment
          variables used to dynamically expand paths in file and Windows
          Registry artifacts.
Returns:
list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no
artifact with the given name can be retrieved.
"""
definition = self._artifacts_registry.GetDefinitionByName(group_name)
if not definition:
return None
return self._BuildFindSpecsFromArtifact(definition, environment_variables)
def _BuildFindSpecsFromRegistrySourceKey(self, key_path):
"""Build find specifications from a Windows Registry source type.
Args:
key_path (str): Windows Registry key path defined by the source.
Returns:
list[dfwinreg.FindSpec]: find specifications for the Windows Registry
source type.
"""
find_specs = []
for key_path_glob in path_helper.PathHelper.ExpandGlobStars(key_path, '\\'):
logger.debug('building find spec from key path glob: {0:s}'.format(
key_path_glob))
key_path_glob_upper = key_path_glob.upper()
if key_path_glob_upper.startswith(
'HKEY_LOCAL_MACHINE\\SYSTEM\\CURRENTCONTROLSET'):
# Rewrite CurrentControlSet to ControlSet* for Windows NT.
key_path_glob = 'HKEY_LOCAL_MACHINE\\System\\ControlSet*{0:s}'.format(
key_path_glob[43:])
elif key_path_glob_upper.startswith('HKEY_USERS\\%%USERS.SID%%'):
key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:])
find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob)
find_specs.append(find_spec)
return find_specs
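  # Example of the rewriting above (illustrative key path): a source key of
  # 'HKEY_LOCAL_MACHINE\System\CurrentControlSet\Services\*' becomes the find
  # spec glob 'HKEY_LOCAL_MACHINE\System\ControlSet*\Services\*', and keys
  # under 'HKEY_USERS\%%users.sid%%' are remapped to 'HKEY_CURRENT_USER'.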
def _BuildFindSpecsFromFileSourcePath(
self, source_path, path_separator, environment_variables, user_accounts):
"""Builds find specifications from a file source type.
Args:
source_path (str): file system path defined by the source.
path_separator (str): file system path segment separator.
      environment_variables (list[EnvironmentVariableArtifact]): environment
          variables used to dynamically expand the source path.
user_accounts (list[str]): identified user accounts stored in the
knowledge base.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
"""
find_specs = []
for path_glob in path_helper.PathHelper.ExpandGlobStars(
source_path, path_separator):
logger.debug('building find spec from path glob: {0:s}'.format(
path_glob))
for path in path_helper.PathHelper.ExpandUsersVariablePath(
path_glob, path_separator, user_accounts):
logger.debug('building find spec from path: {0:s}'.format(path))
if '%' in path:
path = path_helper.PathHelper.ExpandWindowsPath(
path, environment_variables)
logger.debug('building find spec from expanded path: {0:s}'.format(
path))
if not path.startswith(path_separator):
logger.warning((
'The path filter must be defined as an absolute path: '
'"{0:s}"').format(path))
continue
try:
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location_glob=path,
location_separator=path_separator)
except ValueError as exception:
logger.error((
'Unable to build find specification for path: "{0:s}" with '
'error: {1!s}').format(path, exception))
continue
find_specs.append(find_spec)
return find_specs
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):
"""Builds find specifications from artifact definitions.
Args:
artifact_filter_names (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
environment_variables (Optional[list[EnvironmentVariableArtifact]]):
environment variables.
"""
find_specs = []
for name in artifact_filter_names:
definition = self._artifacts_registry.GetDefinitionByName(name)
if not definition:
logger.debug('undefined artifact definition: {0:s}'.format(name))
continue
logger.debug('building find spec from artifact definition: {0:s}'.format(
name))
artifact_find_specs = self._BuildFindSpecsFromArtifact(
definition, environment_variables)
find_specs.extend(artifact_find_specs)
for find_spec in find_specs:
if isinstance(find_spec, file_system_searcher.FindSpec):
self.included_file_system_find_specs.append(find_spec)
elif isinstance(find_spec, registry_searcher.FindSpec):
self.registry_find_specs.append(find_spec)
else:
logger.warning('Unsupported find specification type: {0!s}'.format(
type(find_spec)))
@classmethod
def CheckKeyCompatibility(cls, key_path):
"""Checks if a Windows Registry key path is supported by dfWinReg.
Args:
key_path (str): path of the Windows Registry key.
Returns:
bool: True if key is compatible or False if not.
"""
key_path_upper = key_path.upper()
for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:
if key_path_upper.startswith(key_path_prefix):
return True
logger.warning('Key path: "{0:s}" is currently not supported'.format(
key_path))
return False
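# A minimal usage sketch (hypothetical objects and artifact name; not part of
# plaso itself):
#
#   helper = ArtifactDefinitionsFiltersHelper(artifacts_registry, knowledge_base)
#   helper.BuildFindSpecs(
#       ['WindowsSystemRegistryFiles'],
#       environment_variables=environment_variables)
#   file_specs = helper.included_file_system_find_specs
#   registry_specs = helper.registry_find_specs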
|
|
from unicodedata import normalize
import datetime
from operator import itemgetter
import keys # local module handling API key
import analysis
from pprint import pprint
# set up access with these global vars
sp = None
def set_access(token=None):
global sp
global username
# if token == None:
# sp = keys.get_access()
# print "have public access only"
# return
sp = keys.get_access(token)
print "have private access"
def to_date(date):
    '''parses a date string in "YYYY[-MM[-DD]]" style format and returns
    the year as an int; the full datetime conversion (defaulting to 1/1
    when month or day is missing) is currently commented out below
    '''
year = int(date[0:4])
month = day = 1
if len(date) > 7:
day = int(date[8:])
if len(date) > 5:
month = int(date[5:7])
#return datetime.date(year, month, day)
return year
def correct_spaces(string):
    '''collapses repeated spaces to single spaces and strips leading
    and trailing spaces from a string
    '''
    while "  " in string:
        string = string.replace("  ", " ")
    return string.strip()
def feature(playlist, feature):
'''returns comma separated list (string) of specified feature value
in specifed playlist in order
'''
ids = []
for song in playlist['songs']:
ids.append(song[feature])
return ids
def get_playlists(user, token=None):
'''returns list of playlists for user as [name, id],...
--- 1 request per 50 playlists ---
'''
if token != None:
set_access(token)
else:
set_access()
if(user != ""):
playlists = []
fifty = "start"
start = 0
print "user = ", user
#to shorten delay, cap at 200 playlists
while (fifty == "start" or len(fifty['items']) == 50) and start < 200:
#monitor
fifty = sp.user_playlists(user, offset=start)
#-------#
playlists += fifty['items']
print u"retrieved {} playlists".format(len(playlists))
start += 50
pls = []
for playlist in playlists:
if playlist['name'] == None:
continue
pname = correct_spaces(playlist['name'])
pid = playlist['id']
puser = playlist['owner']['id']
pls.append([pname,pid,puser])
print "playlists successfully retrieved"
return sorted(pls, key=lambda d: d[0].lower())
print "username is blank"
return "no user"
def get_songs(p_id, p_name, userid):
'''returns songs in playlist as list of dicts
generates data: id, name, artists, popularity,
--- 1 request per 100 songs ---
'''
hundred = sp.user_playlist_tracks(userid, playlist_id=p_id)
playlist = hundred
start = 0
while len(hundred['items']) >= 100:
start += 100
hundred = sp.user_playlist_tracks(userid, playlist_id=p_id, offset=start)
playlist['items'] += hundred['items']
print u"retrieved {} songs".format(len(playlist['items']))
pl = {'id': p_id, 'name': p_name, 'songs': []}
for track in playlist['items']:
try:
artist = track['track']['artists'][0]['name']
artist_id = track['track']['artists'][0]['id']
name = track['track']['name']
s_id = track['track']['id']
if s_id == None:
continue
pop = track['track']['popularity']
if track['track']['preview_url'] != None:
preview = track['track']['preview_url']
else:
preview = ""
#cover = track['track']['album']['images'][2]['url'])
album_id = track['track']['album']['id']
song = {'id': s_id, 'name': name, 'artist': artist,
'popularity': pop, 'preview_url': preview,
'album_id': album_id, 'artist_id': artist_id}
pl['songs'].append(song)
        except (KeyError, IndexError, TypeError):
            # skip malformed or local tracks that lack the expected fields;
            # avoid mutating playlist['items'] while iterating over it
            print "song discarded"
return pl
def existing_playlist(name, username, token=None):
'''return type: Playlist with all Songs loaded
uses the username global var
'''
playlists = get_playlists(username, token)
playlist_id = user_id = None
for playlist in playlists:
if name == playlist[0]:
playlist_id = playlist[1]
user_id = playlist[2]
if playlist_id:
return get_songs(playlist_id, name, user_id)
print 'ERROR: playlist name invalid'
return ''
def clean_data(songs, l_features):
'''sets all class variables for the songs corresponding to each
'''
playlist = []
i = 1
for song, features in zip(songs, l_features):
if features == None:
continue
for k,v in features.iteritems():
if v == None or v == "":
features[k] = 0
song['order'] = i
song['danceability'] = round(features['danceability'] * 100, 2)
song['energy'] = round(features['energy'] * 100, 2)
song['loudness'] = round(features['loudness'], 1)
song['speechiness'] = round(features['speechiness'] * 100, 2)
song['acousticness'] = round((features['acousticness']) * 100, 2)
song['instrumentalness'] = round(features['instrumentalness'] * 100, 2)
song['valence'] = round(features['valence'] * 100, 2)
song['tempo'] = round(features['tempo'], 0)
song['duration'] = round(features['duration_ms'] / 1000, 0)
playlist.append(song)
i += 1
return playlist
def get_song_features(song_ids):
'''returns json of all song features corresponding to input ids
--- 1 request per 100 songs ---
'''
print "Getting song features"
features = []
while(song_ids != None):
print u"have {} song features to retrieve left".format(len(song_ids))
if len(song_ids) > 100:
hundred = song_ids[0:100]
song_ids = song_ids[100:]
else:
hundred = song_ids
song_ids = None
features += sp.audio_features(hundred)
return features
def get_genres(artist_ids):
'''returns genres for input artist_ids in list of lists
generates data: genres
--- 1 request per 50 songs ---
'''
artists = []
while(artist_ids != None):
print len(artist_ids)
if len(artist_ids) > 50:
fifty = artist_ids[0:50]
artist_ids = artist_ids[50:]
else:
fifty = artist_ids
artist_ids = None
artists += sp.artists(fifty)['artists']
sorted_genres = {}
genres = []
for artist in artists:
genres.append(artist['genres'])
for genre in artist['genres']:
sorted_genres[genre] = (sorted_genres.get(genre, 0) + 1)
sorted_genres = sorted(sorted_genres.items(), key=itemgetter(1),
reverse=True)
return genres, sorted_genres
def pl_data(pl_name, username, token=None):
'''returns Dict of specified playlist with all songs and features
'''
playlist = existing_playlist(pl_name, username, token)
if playlist == "":
return ""
features = get_song_features(feature(playlist,'id'))
songs = clean_data(playlist['songs'], features)
means = analysis.simple_stats(songs)
pca_data = analysis.pca(songs)
tsne_data = analysis.tSNE(songs) ## DEBUG
songs = analysis.merge_pca(songs, pca_data['coords'])
songs = analysis.merge_tsne(songs, tsne_data)
return {'songs': songs, 'means': means,'pcaweights': pca_data['weights']}
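# A minimal usage sketch (hypothetical playlist, user and token; requires a
# valid Spotify OAuth token via keys.get_access):
#
#   data = pl_data("Road Trip", "my_spotify_user", token="<oauth token>")
#   if data != "":
#       pprint(data['means'])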
def new_playlist(playlist_name, ids):
'''create playlist
'''
username = sp.current_user()['id']
playlist = sp.user_playlist_create(username, playlist_name)
pid = playlist['id']
ids = ids.split(",")
while(ids != None):
if len(ids) > 100:
hundred = ids[0:100]
ids = ids[100:]
else:
hundred = ids
ids = None
sp.user_playlist_add_tracks(username, pid, hundred)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from xml.dom import minidom
from lxml import etree
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from six.moves import range
import webob
from jacket.api.storage.storage.v2 import limits
from jacket.api.storage.storage import views
from jacket.api.storage.storage import xmlutil
import jacket.storage.context
from jacket.storage import test
TEST_LIMITS = [
limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
]
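# Each Limit above is (HTTP verb, human-readable URI, matching regex,
# max request count, time unit in seconds); e.g. the first entry allows a
# single GET per minute on paths matching "^/delayed".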
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0',
}
class BaseLimitTestSuite(test.TestCase):
"""Base test suite which provides relevant stubs and time abstraction."""
def setUp(self):
super(BaseLimitTestSuite, self).setUp()
self.time = 0.0
self.stubs.Set(limits.Limit, "_get_time", self._get_time)
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
self.stubs.Set(jacket.storage.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
def _get_time(self):
"""Return the "time" according to this test suite."""
return self.time
class LimitsControllerTest(BaseLimitTestSuite):
"""Tests for `limits.LimitsController` class."""
def setUp(self):
"""Run before each test."""
super(LimitsControllerTest, self).setUp()
self.controller = limits.create_resource()
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
request = webob.Request.blank("/")
request.accept = accept_header
request.environ["wsgiorg.routing_args"] = (None, {
"action": "index",
"controller": "",
})
context = jacket.storage.context.RequestContext('testuser', 'testproject')
request.environ["storage.context"] = context
return request
def _populate_limits(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
limits.Limit("GET", "changes-since*", "changes-since",
5, 60).display(),
]
request.environ["storage.limits"] = _limits
return request
def test_empty_index_json(self):
"""Test getting empty limit details in JSON."""
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def test_index_json(self):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
'gigabytes': 512,
'volumes': 5,
}
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
{
"verb": "POST",
"next-available": "1970-01-01T00:00:00",
"unit": "HOUR",
"value": 5,
"remaining": 5,
},
],
},
{
"regex": "changes-since",
"uri": "changes-since*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 5,
"remaining": 5,
},
],
},
],
"absolute": {"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _populate_limits_diff_regex(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("GET", "*", "*.*", 10, 60).display(),
]
request.environ["storage.limits"] = _limits
return request
def test_index_diff_regex(self):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
{
"regex": "*.*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _test_index_absolute_limits_json(self, expected):
request = self._get_index_request()
response = request.get_response(self.controller)
body = jsonutils.loads(response.body)
self.assertEqual(expected, body['limits']['absolute'])
def test_index_ignores_extra_absolute_limits_json(self):
self.absolute_limits = {'unknown_limit': 9001}
self._test_index_absolute_limits_json({})
class TestLimiter(limits.Limiter):
pass
class LimitMiddlewareTest(BaseLimitTestSuite):
"""Tests for the `limits.RateLimitingMiddleware` class."""
@webob.dec.wsgify
def _empty_app(self, request):
"""Do-nothing WSGI app."""
pass
def setUp(self):
"""Prepare middleware for use through fake WSGI app."""
super(LimitMiddlewareTest, self).setUp()
_limits = '(GET, *, .*, 1, MINUTE)'
self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
"%s.TestLimiter" %
self.__class__.__module__)
def test_limit_class(self):
"""Test that middleware selected correct limiter class."""
assert isinstance(self.app._limiter, TestLimiter)
def test_good_request(self):
"""Test successful GET request through middleware."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_limited_request_json(self):
"""Test a rate-limited (413) GET request through middleware."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(413, response.status_int)
self.assertIn('Retry-After', response.headers)
retry_after = int(response.headers['Retry-After'])
self.assertAlmostEqual(retry_after, 60, 1)
body = jsonutils.loads(response.body)
expected = "Only 1 GET request(s) can be made to * every minute."
value = body["overLimitFault"]["details"].strip()
self.assertEqual(expected, value)
def test_limited_request_xml(self):
"""Test a rate-limited (413) response as XML."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
request = webob.Request.blank("/")
request.accept = "application/xml"
response = request.get_response(self.app)
self.assertEqual(413, response.status_int)
root = minidom.parseString(response.body).childNodes[0]
expected = "Only 1 GET request(s) can be made to * every minute."
details = root.getElementsByTagName("details")
self.assertEqual(1, details.length)
value = details.item(0).firstChild.data.strip()
self.assertEqual(expected, value)
class LimitTest(BaseLimitTestSuite):
"""Tests for the `limits.Limit` class."""
def test_GET_no_delay(self):
"""Test a limit handles 1 GET per second."""
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertIsNone(delay)
self.assertEqual(0, limit.next_request)
self.assertEqual(0, limit.last_request)
def test_GET_delay(self):
"""Test two calls to 1 GET per second limit."""
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertIsNone(delay)
delay = limit("GET", "/anything")
self.assertEqual(1, delay)
self.assertEqual(1, limit.next_request)
self.assertEqual(0, limit.last_request)
self.time += 4
delay = limit("GET", "/anything")
self.assertIsNone(delay)
self.assertEqual(4, limit.next_request)
self.assertEqual(4, limit.last_request)
class ParseLimitsTest(BaseLimitTestSuite):
"""Tests for the default limits parser in the `limits.Limiter` class."""
def test_invalid(self):
"""Test that parse_limits() handles invalid input correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
';;;;;')
def test_bad_rule(self):
"""Test that parse_limits() handles bad rules correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'GET, *, .*, 20, minute')
def test_missing_arg(self):
"""Test that parse_limits() handles missing args correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20)')
def test_bad_value(self):
"""Test that parse_limits() handles bad values correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, foo, minute)')
def test_bad_unit(self):
"""Test that parse_limits() handles bad units correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20, lightyears)')
def test_multiple_rules(self):
"""Test that parse_limits() handles multiple rules correctly."""
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
'(POST, /bar*, /bar.*, 5, second);'
'(Say, /derp*, /derp.*, 1, day)')
except ValueError as e:
assert False, six.text_type(e)
# Make sure the number of returned limits are correct
self.assertEqual(4, len(l))
# Check all the verbs...
expected = ['GET', 'PUT', 'POST', 'SAY']
self.assertEqual(expected, [t.verb for t in l])
# ...the URIs...
expected = ['*', '/foo*', '/bar*', '/derp*']
self.assertEqual(expected, [t.uri for t in l])
# ...the regexes...
expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
self.assertEqual(expected, [t.regex for t in l])
# ...the values...
expected = [20, 10, 5, 1]
self.assertEqual(expected, [t.value for t in l])
# ...and the units...
expected = [limits.PER_MINUTE, limits.PER_HOUR,
limits.PER_SECOND, limits.PER_DAY]
self.assertEqual(expected, [t.unit for t in l])
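# Editorial note (not in the original test module): the limit strings parsed
# above follow the grammar
#     (<HTTP verb>, <display URI>, <regex>, <value>, <unit>)
# with rules separated by ';' and <unit> one of SECOND, MINUTE, HOUR or DAY
# (case-insensitive), e.g.:
#     '(GET, *, .*, 20, MINUTE);(PUT, /foo*, /foo.*, 10, HOUR)'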
class LimiterTest(BaseLimitTestSuite):
"""Tests for the in-memory `limits.Limiter` class."""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
userlimits = {'limits.user3': '',
'limits.user0': '(get, *, .*, 4, minute);'
'(put, *, .*, 2, minute)'}
self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
def _check(self, num, verb, url, username=None):
"""Check and yield results from checks."""
for x in range(num):
yield self.limiter.check_for_delay(verb, url, username)[0]
def _check_sum(self, num, verb, url, username=None):
"""Check and sum results from checks."""
results = self._check(num, verb, url, username)
return sum(item for item in results if item)
def test_no_delay_GET(self):
"""Ensure no delay on a single call for a limit verb we didn't set."""
delay = self.limiter.check_for_delay("GET", "/anything")
self.assertEqual((None, None), delay)
def test_no_delay_PUT(self):
"""Ensure no delay on a single call for a known limit."""
delay = self.limiter.check_for_delay("PUT", "/anything")
self.assertEqual((None, None), delay)
def test_delay_PUT(self):
"""Test delay on 11th PUT request.
Ensure the 11th PUT will result in a delay of 6.0 seconds before
the next request is granted.
"""
expected = [None] * 10 + [6.0]
results = list(self._check(11, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_delay_POST(self):
"""Test delay on 8th POST request.
Ensure the 8th POST results in a delay of 60/7 (~8.57) seconds before
the next request is granted.
"""
expected = [None] * 7
results = list(self._check(7, "POST", "/anything"))
self.assertEqual(expected, results)
expected = 60.0 / 7.0
results = self._check_sum(1, "POST", "/anything")
self.assertAlmostEqual(expected, results, 8)
def test_delay_GET(self):
"""Ensure the 11th GET will result in NO delay."""
expected = [None] * 11
results = list(self._check(11, "GET", "/anything"))
self.assertEqual(expected, results)
expected = [None] * 4 + [15.0]
results = list(self._check(5, "GET", "/foo", "user0"))
self.assertEqual(expected, results)
def test_delay_PUT_volumes(self):
"""Test delay on /volumes.
Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere
is still OK after 5 requests...but then after 11 total requests,
PUT limiting kicks in.
"""
# First 6 requests on PUT /volumes
expected = [None] * 5 + [12.0]
results = list(self._check(6, "PUT", "/volumes"))
self.assertEqual(expected, results)
# Next 5 requests on PUT /anything
expected = [None] * 4 + [6.0]
results = list(self._check(5, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_delay_PUT_wait(self):
"""Test limit is lifted again.
Ensure after hitting the limit and then waiting for
the correct amount of time, the limit will be lifted.
"""
expected = [None] * 10 + [6.0]
results = list(self._check(11, "PUT", "/anything"))
self.assertEqual(expected, results)
# Advance time
self.time += 6.0
expected = [None, 6.0]
results = list(self._check(2, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_multiple_delays(self):
"""Ensure multiple requests still get a delay."""
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything"))
self.assertEqual(expected, results)
self.time += 1.0
expected = [5.0] * 10
results = list(self._check(10, "PUT", "/anything"))
self.assertEqual(expected, results)
expected = [None] * 2 + [30.0] * 8
results = list(self._check(10, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
def test_user_limit(self):
"""Test user-specific limits."""
self.assertEqual([], self.limiter.levels['user3'])
self.assertEqual(2, len(self.limiter.levels['user0']))
def test_multiple_users(self):
"""Tests involving multiple users."""
# User0
expected = [None] * 2 + [30.0] * 8
results = list(self._check(10, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
# User1
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything", "user1"))
self.assertEqual(expected, results)
# User2
expected = [None] * 10 + [6.0] * 5
results = list(self._check(15, "PUT", "/anything", "user2"))
self.assertEqual(expected, results)
# User3
expected = [None] * 20
results = list(self._check(20, "PUT", "/anything", "user3"))
self.assertEqual(expected, results)
self.time += 1.0
# User1 again
expected = [5.0] * 10
results = list(self._check(10, "PUT", "/anything", "user1"))
self.assertEqual(expected, results)
self.time += 1.0
# User2 again
expected = [4.0] * 5
results = list(self._check(5, "PUT", "/anything", "user2"))
self.assertEqual(expected, results)
# User0 again
expected = [28.0]
results = list(self._check(1, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
self.time += 28.0
expected = [None, 30.0]
results = list(self._check(2, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
"""Tests for `limits.WsgiLimiter` class."""
def setUp(self):
"""Run before each test."""
super(WsgiLimiterTest, self).setUp()
self.app = limits.WsgiLimiter(TEST_LIMITS)
def _request_data(self, verb, path):
"""Get data describing a limit request verb/path."""
return jsonutils.dump_as_bytes({"verb": verb, "path": path})
def _request(self, verb, url, username=None):
"""POST request to given url by given username.
Make sure that POSTing to the given url causes the given username
to perform the given action. Make the internal rate limiter return
delay and make sure that the WSGI app returns the correct response.
"""
if username:
request = webob.Request.blank("/%s" % username)
else:
request = webob.Request.blank("/")
request.method = "POST"
request.body = self._request_data(verb, url)
response = request.get_response(self.app)
if "X-Wait-Seconds" in response.headers:
self.assertEqual(403, response.status_int)
return response.headers["X-Wait-Seconds"]
self.assertEqual(204, response.status_int)
def test_invalid_methods(self):
"""Only POSTs should work."""
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
self.assertEqual(405, response.status_int)
def test_good_url(self):
delay = self._request("GET", "/something")
self.assertIsNone(delay)
def test_escaping(self):
delay = self._request("GET", "/something/jump%20up")
self.assertIsNone(delay)
def test_response_to_delays(self):
delay = self._request("GET", "/delayed")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed")
self.assertEqual('60.00', delay)
def test_response_to_delays_usernames(self):
delay = self._request("GET", "/delayed", "user1")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed", "user2")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed", "user1")
self.assertEqual('60.00', delay)
delay = self._request("GET", "/delayed", "user2")
self.assertEqual('60.00', delay)
class FakeHttplibSocket(object):
"""Fake `http_client.HTTPResponse` replacement."""
def __init__(self, response_string):
"""Initialize new `FakeHttplibSocket`."""
if isinstance(response_string, six.text_type):
response_string = response_string.encode('utf-8')
self._buffer = six.BytesIO(response_string)
def makefile(self, mode, *args):
"""Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
"""Fake `http_client.HTTPConnection`."""
def __init__(self, app, host):
"""Initialize `FakeHttplibConnection`."""
self.app = app
self.host = host
def request(self, method, path, body="", headers=None):
"""Fake request handler.
Requests made via this connection actually get translated and
routed into our WSGI app, we then wait for the response and turn
it back into an `http_client.HTTPResponse`.
"""
if not headers:
headers = {}
req = webob.Request.blank(path)
req.method = method
req.headers = headers
req.host = self.host
req.body = body
resp = str(req.get_response(self.app))
resp = "HTTP/1.0 %s" % resp
sock = FakeHttplibSocket(resp)
self.http_response = http_client.HTTPResponse(sock)
self.http_response.begin()
def getresponse(self):
"""Return our generated response from the request."""
return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
"""Monkeypatches HTTPConnection.
Monkeypatches HTTPConnection so that if you try to connect to host, you
are instead routed straight to the given WSGI app.
After calling this method, when any code calls
http_client.HTTPConnection(host)
the connection object will be a fake. Its requests will be sent directly
to the given WSGI app rather than through a socket.
Code connecting to hosts other than host will not be affected.
This method may be called multiple times to map different hosts to
different apps.
This method returns the original HTTPConnection object, so that the caller
can restore the default HTTPConnection interface (for all hosts).
"""
class HTTPConnectionDecorator(object):
"""Decorator to mock the HTTPConecction class.
Wraps the real HTTPConnection class so that when you instantiate
the class you might instead get a fake instance.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
def __call__(self, connection_host, *args, **kwargs):
if connection_host == host:
return FakeHttplibConnection(app, host)
else:
return self.wrapped(connection_host, *args, **kwargs)
oldHTTPConnection = http_client.HTTPConnection
new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection)
http_client.HTTPConnection = new_http_connection
return oldHTTPConnection
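# Editorial usage sketch (hypothetical host and app names, not part of the
# original suite) showing the intended wire/request/restore cycle for the
# helper above:
#
#     original = wire_HTTPConnection_to_WSGI("fake.example:80", wsgi_app)
#     try:
#         conn = http_client.HTTPConnection("fake.example:80")  # fake conn
#         conn.request("GET", "/anything")
#         response = conn.getresponse()
#     finally:
#         http_client.HTTPConnection = original  # restore the real class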
class WsgiLimiterProxyTest(BaseLimitTestSuite):
"""Tests for the `limits.WsgiLimiterProxy` class."""
def setUp(self):
"""setUp() for WsgiLimiterProxyTest.
Do some nifty HTTP/WSGI magic which allows for WSGI to be called
directly by something like the `http_client` library.
"""
super(WsgiLimiterProxyTest, self).setUp()
self.app = limits.WsgiLimiter(TEST_LIMITS)
oldHTTPConnection = (
wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
self.addCleanup(self._restore, oldHTTPConnection)
def _restore(self, oldHTTPConnection):
# restore original HTTPConnection object
http_client.HTTPConnection = oldHTTPConnection
def test_200(self):
"""Successful request test."""
delay = self.proxy.check_for_delay("GET", "/anything")
self.assertEqual((None, None), delay)
def test_403(self):
"""Forbidden request test."""
delay = self.proxy.check_for_delay("GET", "/delayed")
self.assertEqual((None, None), delay)
delay, error = self.proxy.check_for_delay("GET", "/delayed")
error = error.strip()
expected = ("60.00",
b"403 Forbidden\n\nOnly 1 GET request(s) can be "
b"made to /delayed every minute.")
self.assertEqual(expected, (delay, error))
class LimitsViewBuilderTest(test.TestCase):
def setUp(self):
super(LimitsViewBuilderTest, self).setUp()
self.view_builder = views.limits.ViewBuilder()
self.rate_limits = [{"URI": "*",
"regex": ".*",
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"resetTime": 1311272226},
{"URI": "*/volumes",
"regex": "^/volumes",
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"resetTime": 1311272226}]
self.absolute_limits = {"metadata_items": 1,
"injected_files": 5,
"injected_file_content_bytes": 5}
def test_build_limits(self):
tdate = "2011-07-21T18:17:06"
expected_limits = {
"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
self.assertDictMatch(expected_limits, output)
def test_build_limits_empty_limits(self):
expected_limits = {"limits": {"rate": [],
"absolute": {}}}
abs_limits = {}
rate_limits = []
output = self.view_builder.build(rate_limits, abs_limits)
self.assertDictMatch(expected_limits, output)
class LimitsXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = limits.LimitsTemplate()
fixture = {"limits": {
"rate": [],
"absolute": {}}}
output = serializer.serialize(fixture)
has_dec = output.startswith(b"<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_index(self):
tdate = "2011-12-15T22:42:45Z"
serializer = limits.LimitsTemplate()
fixture = {"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'limits')
# verify absolute limits
absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
self.assertEqual(4, len(absolutes))
for limit in absolutes:
name = limit.get('name')
value = limit.get('value')
self.assertEqual(str(fixture['limits']['absolute'][name]), value)
# verify rate limits
rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
self.assertEqual(2, len(rates))
for i, rate in enumerate(rates):
for key in ['uri', 'regex']:
self.assertEqual(str(fixture['limits']['rate'][i][key]),
rate.get(key))
rate_limits = rate.xpath('ns:limit', namespaces=NS)
self.assertEqual(1, len(rate_limits))
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
self.assertEqual(
str(fixture['limits']['rate'][i]['limit'][j][key]),
limit.get(key))
def test_index_no_limits(self):
serializer = limits.LimitsTemplate()
fixture = {"limits": {
"rate": [],
"absolute": {}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'limits')
# verify absolute limits
absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
self.assertEqual(0, len(absolutes))
# verify rate limits
rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
self.assertEqual(0, len(rates))
|
|
# ===================================================================
#
# Copyright (c) 2016, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""RSA public-key cryptography algorithm (signature and encryption).
RSA_ is the most widespread and used public key algorithm. Its security is
based on the difficulty of factoring large integers. The algorithm has
withstood attacks for 30 years, and it is therefore considered reasonably
secure for new designs.
The algorithm can be used for both confidentiality (encryption) and
authentication (digital signature). It is worth noting that signing and
decryption are significantly slower than verification and encryption.
The cryptographic strength is primarily linked to the length of the modulus *n*.
In 2012, a sufficient length is deemed to be 2048 bits. For more information,
see the most recent ECRYPT_ report.
Both RSA ciphertext and RSA signature are as big as the modulus *n* (256
bytes if *n* is 2048 bit long).
This module provides facilities for generating fresh, new RSA keys,
constructing them from known components, exporting them, and importing them.
>>> from Cryptodome.PublicKey import RSA
>>>
>>> key = RSA.generate(2048)
>>> f = open('mykey.pem','wb')
>>> f.write(key.exportKey('PEM'))
>>> f.close()
...
>>> f = open('mykey.pem','r')
>>> key = RSA.import_key(f.read())
Even though you may choose to directly use the methods of an RSA key object
to perform the primitive cryptographic operations (e.g. `RsaKey._encrypt`),
it is recommended to use one of the standardized schemes instead (like
`Cryptodome.Cipher.PKCS1_v1_5` or `Cryptodome.Signature.PKCS1_v1_5`).
.. _RSA: http://en.wikipedia.org/wiki/RSA_%28algorithm%29
.. _ECRYPT: http://www.ecrypt.eu.org/documents/D.SPA.17.pdf
:sort: generate,construct,import_key
"""
__all__ = ['generate', 'construct', 'import_key',
'RsaKey', 'oid']
import binascii
import struct
from Cryptodome import Random
from Cryptodome.IO import PKCS8, PEM
from Cryptodome.Util.py3compat import tobytes, bord, bchr, b, tostr
from Cryptodome.Util.asn1 import DerSequence
from Cryptodome.Math.Numbers import Integer
from Cryptodome.Math.Primality import (test_probable_prime,
generate_probable_prime, COMPOSITE)
from Cryptodome.PublicKey import (_expand_subject_public_key_info,
_create_subject_public_key_info,
_extract_subject_public_key_info)
class RsaKey(object):
"""Class defining an actual RSA key.
:undocumented: __init__, __repr__, __getstate__, __eq__, __ne__, __str__,
sign, verify, encrypt, decrypt, blind, unblind, size
"""
def __init__(self, **kwargs):
"""Build an RSA key.
:Keywords:
n : integer
The modulus.
e : integer
The public exponent.
d : integer
The private exponent. Only required for private keys.
p : integer
The first factor of the modulus. Only required for private keys.
q : integer
The second factor of the modulus. Only required for private keys.
u : integer
The CRT coefficient (inverse of p modulo q). Only required for
private keys.
"""
input_set = set(kwargs.keys())
public_set = set(('n', 'e'))
private_set = public_set | set(('p', 'q', 'd', 'u'))
if input_set not in (private_set, public_set):
raise ValueError("Some RSA components are missing")
for component, value in kwargs.items():
setattr(self, "_" + component, value)
@property
def n(self):
"""Modulus"""
return int(self._n)
@property
def e(self):
"""Public exponent"""
return int(self._e)
@property
def d(self):
"""Private exponent"""
if not self.has_private():
raise AttributeError("No private exponent available for public keys")
return int(self._d)
@property
def p(self):
"""First factor of the modulus"""
if not self.has_private():
raise AttributeError("No CRT component 'p' available for public keys")
return int(self._p)
@property
def q(self):
"""Second factor of the modulus"""
if not self.has_private():
raise AttributeError("No CRT component 'q' available for public keys")
return int(self._q)
@property
def u(self):
"""Chinese remainder component (inverse of *p* modulo *q*)"""
if not self.has_private():
raise AttributeError("No CRT component 'u' available for public keys")
return int(self._u)
def size_in_bits(self):
"""Size of the RSA modulus in bits"""
return self._n.size_in_bits()
def size_in_bytes(self):
"""The minimal amount of bytes that can hold the RSA modulus"""
return (self._n.size_in_bits() - 1) // 8 + 1
def _encrypt(self, plaintext):
if not 0 < plaintext < self._n:
raise ValueError("Plaintext too large")
return int(pow(Integer(plaintext), self._e, self._n))
def _decrypt(self, ciphertext):
if not 0 < ciphertext < self._n:
raise ValueError("Ciphertext too large")
if not self.has_private():
raise TypeError("This is not a private key")
# Blinded RSA decryption (to prevent timing attacks):
# Step 1: Generate random secret blinding factor r,
# such that 0 < r < n-1
r = Integer.random_range(min_inclusive=1, max_exclusive=self._n)
# Step 2: Compute c' = c * r**e mod n
cp = Integer(ciphertext) * pow(r, self._e, self._n) % self._n
# Step 3: Compute m' = c'**d mod n (ordinary RSA decryption)
m1 = pow(cp, self._d % (self._p - 1), self._p)
m2 = pow(cp, self._d % (self._q - 1), self._q)
h = m2 - m1
while h < 0:
h += self._q
h = (h * self._u) % self._q
mp = h * self._p + m1
# Step 4: Compute m = m' * r**(-1) mod n (unblind the result)
result = (r.inverse(self._n) * mp) % self._n
# Verify no faults occurred
if ciphertext != pow(result, self._e, self._n):
raise ValueError("Fault detected in RSA decryption")
return result
def has_private(self):
return hasattr(self, "_d")
def can_encrypt(self):
return True
def can_sign(self):
return True
def publickey(self):
return RsaKey(n=self._n, e=self._e)
def __eq__(self, other):
if self.has_private() != other.has_private():
return False
if self.n != other.n or self.e != other.e:
return False
if not self.has_private():
return True
return (self.d == other.d and
self.q == other.q and
self.p == other.p and
self.u == other.u)
def __ne__(self, other):
return not (self == other)
def __getstate__(self):
# RSA key is not picklable
from pickle import PicklingError
raise PicklingError
def __repr__(self):
if self.has_private():
extra = ", d=%d, p=%d, q=%d, u=%d" % (int(self._d), int(self._p),
int(self._q), int(self._u))
else:
extra = ""
return "RsaKey(n=%d, e=%d%s)" % (int(self._n), int(self._e), extra)
def __str__(self):
if self.has_private():
key_type = "Private"
else:
key_type = "Public"
return "%s RSA key at 0x%X" % (key_type, id(self))
def exportKey(self, format='PEM', passphrase=None, pkcs=1,
protection=None, randfunc=None):
"""Export this RSA key.
:Parameters:
format : string
The format to use for wrapping the key:
- *'DER'*. Binary encoding.
- *'PEM'*. Textual encoding, done according to `RFC1421`_/`RFC1423`_.
- *'OpenSSH'*. Textual encoding, done according to OpenSSH specification.
Only suitable for public keys (not private keys).
passphrase : string
For private keys only. The pass phrase used for deriving the encryption
key.
pkcs : integer
For *DER* and *PEM* format only.
The PKCS standard to follow for assembling the components of the key.
You have two choices:
- **1** (default): the public key is embedded into
an X.509 ``SubjectPublicKeyInfo`` DER SEQUENCE.
The private key is embedded into a `PKCS#1`_
``RSAPrivateKey`` DER SEQUENCE.
- **8**: the private key is embedded into a `PKCS#8`_
``PrivateKeyInfo`` DER SEQUENCE. This value cannot be used
for public keys.
protection : string
The encryption scheme to use for protecting the private key.
If ``None`` (default), the behavior depends on ``format``:
- For *DER*, the *PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC*
scheme is used. The following operations are performed:
1. A 16 byte Triple DES key is derived from the passphrase
using `Cryptodome.Protocol.KDF.PBKDF2` with 8 bytes salt,
and 1 000 iterations of `Cryptodome.Hash.HMAC`.
2. The private key is encrypted using CBC.
3. The encrypted key is encoded according to PKCS#8.
- For *PEM*, the obsolete PEM encryption scheme is used.
It is based on MD5 for key derivation, and Triple DES for encryption.
Specifying a value for ``protection`` is only meaningful for PKCS#8
(that is, ``pkcs=8``) and only if a pass phrase is present too.
The supported schemes for PKCS#8 are listed in the
`Cryptodome.IO.PKCS8` module (see ``wrap_algo`` parameter).
randfunc : callable
A function that provides random bytes. Only used for PEM encoding.
The default is `Cryptodome.Random.get_random_bytes`.
:Return: A byte string with the encoded public or private half
of the key.
:Raise ValueError:
When the format is unknown or when you try to encrypt a private
key with *DER* format and PKCS#1.
:attention:
If you don't provide a pass phrase, the private key will be
exported in the clear!
.. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt
.. _RFC1423: http://www.ietf.org/rfc/rfc1423.txt
.. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt
.. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt
"""
if passphrase is not None:
passphrase = tobytes(passphrase)
if randfunc is None:
randfunc = Random.get_random_bytes
if format == 'OpenSSH':
e_bytes, n_bytes = [x.to_bytes() for x in (self._e, self._n)]
if bord(e_bytes[0]) & 0x80:
e_bytes = bchr(0) + e_bytes
if bord(n_bytes[0]) & 0x80:
n_bytes = bchr(0) + n_bytes
keyparts = [b('ssh-rsa'), e_bytes, n_bytes]
keystring = b('').join([struct.pack(">I", len(kp)) + kp for kp in keyparts])
return b('ssh-rsa ') + binascii.b2a_base64(keystring)[:-1]
# DER format is always used, even in case of PEM, which simply
# encodes it into BASE64.
if self.has_private():
binary_key = DerSequence([0,
self.n,
self.e,
self.d,
self.p,
self.q,
self.d % (self.p-1),
self.d % (self.q-1),
Integer(self.q).inverse(self.p)
]).encode()
if pkcs == 1:
key_type = 'RSA PRIVATE KEY'
if format == 'DER' and passphrase:
raise ValueError("PKCS#1 private key cannot be encrypted")
else: # PKCS#8
if format == 'PEM' and protection is None:
key_type = 'PRIVATE KEY'
binary_key = PKCS8.wrap(binary_key, oid, None)
else:
key_type = 'ENCRYPTED PRIVATE KEY'
if not protection:
protection = 'PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC'
binary_key = PKCS8.wrap(binary_key, oid,
passphrase, protection)
passphrase = None
else:
key_type = "RSA PUBLIC KEY"
binary_key = _create_subject_public_key_info(oid,
DerSequence([self.n,
self.e])
)
if format == 'DER':
return binary_key
if format == 'PEM':
pem_str = PEM.encode(binary_key, key_type, passphrase, randfunc)
return tobytes(pem_str)
raise ValueError("Unknown key format '%s'. Cannot export the RSA key." % format)
# Methods defined in PyCryptodome that we don't support anymore
def sign(self, M, K):
raise NotImplementedError("Use module Cryptodome.Signature.pkcs1_15 instead")
def verify(self, M, signature):
raise NotImplementedError("Use module Cryptodome.Signature.pkcs1_15 instead")
def encrypt(self, plaintext, K):
raise NotImplementedError("Use module Cryptodome.Cipher.PKCS1_OAEP instead")
def decrypt(self, ciphertext):
raise NotImplementedError("Use module Cryptodome.Cipher.PKCS1_OAEP instead")
def blind(self, M, B):
raise NotImplementedError
def unblind(self, M, B):
raise NotImplementedError
def size(self):
raise NotImplementedError
def generate(bits, randfunc=None, e=65537):
"""Create a new RSA key.
The algorithm closely follows NIST `FIPS 186-4`_ in its
sections B.3.1 and B.3.3. The modulus is the product of
two non-strong probable primes.
Each prime passes a suitable number of Miller-Rabin tests
with random bases and a single Lucas test.
:Parameters:
bits : integer
Key length, or size (in bits) of the RSA modulus.
It must be at least 1024.
The FIPS standard only defines 1024, 2048 and 3072.
randfunc : callable
Function that returns random bytes.
The default is `Cryptodome.Random.get_random_bytes`.
e : integer
Public RSA exponent. It must be an odd positive integer.
It is typically a small number with very few ones in its
binary representation.
The FIPS standard requires the public exponent to be
at least 65537 (the default).
:Return: An RSA key object (`RsaKey`).
.. _FIPS 186-4: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
if bits < 1024:
raise ValueError("RSA modulus length must be >= 1024")
if e % 2 == 0 or e < 3:
raise ValueError("RSA public exponent must be a positive, odd integer larger than 2.")
if randfunc is None:
randfunc = Random.get_random_bytes
d = n = Integer(1)
e = Integer(e)
while n.size_in_bits() != bits and d < (1 << (bits // 2)):
# Generate the prime factors of n: p and q.
# By construction, their product is always
# 2^{bits-1} < p*q < 2^bits.
size_q = bits // 2
size_p = bits - size_q
min_p = min_q = (Integer(1) << (2 * size_q - 1)).sqrt()
if size_q != size_p:
min_p = (Integer(1) << (2 * size_p - 1)).sqrt()
def filter_p(candidate):
return candidate > min_p and (candidate - 1).gcd(e) == 1
p = generate_probable_prime(exact_bits=size_p,
randfunc=randfunc,
prime_filter=filter_p)
min_distance = Integer(1) << (bits // 2 - 100)
def filter_q(candidate):
return (candidate > min_q and
(candidate - 1).gcd(e) == 1 and
abs(candidate - p) > min_distance)
q = generate_probable_prime(exact_bits=size_q,
randfunc=randfunc,
prime_filter=filter_q)
n = p * q
lcm = (p - 1).lcm(q - 1)
d = e.inverse(lcm)
if p > q:
p, q = q, p
u = p.inverse(q)
return RsaKey(n=n, e=e, d=d, p=p, q=q, u=u)
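# Editorial usage sketch for generate() (2048 bits, as in the module
# docstring); the returned RsaKey carries both halves:
#
#     key = generate(2048)
#     pub = key.publickey()
#     assert pub.n == key.n and not pub.has_private()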
def construct(rsa_components, consistency_check=True):
"""Construct an RSA key from a tuple of valid RSA components.
The modulus **n** must be the product of two primes.
The public exponent **e** must be odd and larger than 1.
In case of a private key, the following equations must apply:
- e != 1
- p*q = n
- e*d = 1 mod lcm[(p-1)(q-1)]
- p*u = 1 mod q
:Parameters:
rsa_components : tuple
A tuple of long integers, with at least 2 and no
more than 6 items. The items come in the following order:
1. RSA modulus (*n*).
2. Public exponent (*e*).
3. Private exponent (*d*).
Only required if the key is private.
4. First factor of *n* (*p*).
Optional, but factor q must also be present.
5. Second factor of *n* (*q*). Optional.
6. CRT coefficient, *(1/p) mod q* (*u*). Optional.
consistency_check : boolean
If *True*, the library will verify that the provided components
fulfil the main RSA properties.
:Raise ValueError:
When the key being imported fails the most basic RSA validity checks.
:Return: An RSA key object (`RsaKey`).
"""
class InputComps(object):
pass
input_comps = InputComps()
for (comp, value) in zip(('n', 'e', 'd', 'p', 'q', 'u'), rsa_components):
setattr(input_comps, comp, Integer(value))
n = input_comps.n
e = input_comps.e
if not hasattr(input_comps, 'd'):
key = RsaKey(n=n, e=e)
else:
d = input_comps.d
if hasattr(input_comps, 'q'):
p = input_comps.p
q = input_comps.q
else:
# Compute factors p and q from the private exponent d.
# We assume that n has no more than two factors.
# See 8.2.2(i) in Handbook of Applied Cryptography.
ktot = d * e - 1
# The quantity d*e-1 is a multiple of phi(n), even,
# and can be represented as t*2^s.
t = ktot
while t % 2 == 0:
t //= 2
# Cycle through all multiplicative inverses in Zn.
# The algorithm is non-deterministic, but there is a 50% chance
# any candidate a leads to successful factoring.
# See "Digitalized Signatures and Public Key Functions as Intractable
# as Factorization", M. Rabin, 1979
spotted = False
a = Integer(2)
while not spotted and a < 100:
k = Integer(t)
# Cycle through all values a^{t*2^i}=a^k
while k < ktot:
cand = pow(a, k, n)
# Check if a^k is a non-trivial root of unity (mod n)
if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
# We have found a number such that (cand-1)(cand+1)=0 (mod n).
# Either of the terms divides n.
p = Integer(n).gcd(cand + 1)
spotted = True
break
k *= 2
# This value was not any good... let's try another!
a += 2
if not spotted:
raise ValueError("Unable to compute factors p and q from exponent d.")
# Found !
assert ((n % p) == 0)
q = n // p
if hasattr(input_comps, 'u'):
u = input_comps.u
else:
u = p.inverse(q)
# Build key object
key = RsaKey(n=n, e=e, d=d, p=p, q=q, u=u)
# Verify consistency of the key
fmt_error = False
if consistency_check:
# Modulus and public exponent must be coprime
fmt_error = e <= 1 or e >= n
fmt_error |= Integer(n).gcd(e) != 1
# For RSA, modulus must be odd
fmt_error |= not n & 1
if not fmt_error and key.has_private():
# Modulus and private exponent must be coprime
fmt_error = d <= 1 or d >= n
fmt_error |= Integer(n).gcd(d) != 1
# Modulus must be product of 2 primes
fmt_error |= (p * q != n)
fmt_error |= test_probable_prime(p) == COMPOSITE
fmt_error |= test_probable_prime(q) == COMPOSITE
# See Carmichael theorem
phi = (p - 1) * (q - 1)
lcm = phi // (p - 1).gcd(q - 1)
fmt_error |= (e * d % int(lcm)) != 1
if hasattr(key, 'u'):
# CRT coefficient
fmt_error |= u <= 1 or u >= q
fmt_error |= (p * u % q) != 1
else:
fmt_error = True
if fmt_error:
raise ValueError("Invalid RSA key components")
return key
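# Editorial usage sketch for construct() (toy numbers, far too small for any
# real use): a public key needs only (n, e); a private key can be rebuilt
# from (n, e, d) alone, p and q being recovered from d as implemented above.
#
#     pub = construct((3233, 17))        # n = 61 * 53, e = 17
#     priv = construct((3233, 17, 413))  # d = e**-1 mod lcm(60, 52) = 413
#     assert priv.p * priv.q == priv.n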
def _import_pkcs1_private(encoded, *kwargs):
# RSAPrivateKey ::= SEQUENCE {
# version Version,
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
# privateExponent INTEGER, -- d
# prime1 INTEGER, -- p
# prime2 INTEGER, -- q
# exponent1 INTEGER, -- d mod (p-1)
# exponent2 INTEGER, -- d mod (q-1)
# coefficient INTEGER -- (inverse of q) mod p
# }
#
# Version ::= INTEGER
der = DerSequence().decode(encoded, nr_elements=9, only_ints_expected=True)
if der[0] != 0:
raise ValueError("No PKCS#1 encoding of an RSA private key")
return construct(der[1:6] + [Integer(der[4]).inverse(der[5])])
def _import_pkcs1_public(encoded, *kwargs):
# RSAPublicKey ::= SEQUENCE {
# modulus INTEGER, -- n
# publicExponent INTEGER -- e
# }
der = DerSequence().decode(encoded, nr_elements=2, only_ints_expected=True)
return construct(der)
def _import_subjectPublicKeyInfo(encoded, *kwargs):
algoid, encoded_key, params = _expand_subject_public_key_info(encoded)
if algoid != oid or params is not None:
raise ValueError("No RSA subjectPublicKeyInfo")
return _import_pkcs1_public(encoded_key)
def _import_x509_cert(encoded, *kwargs):
sp_info = _extract_subject_public_key_info(encoded)
return _import_subjectPublicKeyInfo(sp_info)
def _import_pkcs8(encoded, passphrase):
k = PKCS8.unwrap(encoded, passphrase)
if k[0] != oid:
raise ValueError("No PKCS#8 encoded RSA key")
return _import_keyDER(k[1], passphrase)
def _import_keyDER(extern_key, passphrase):
"""Import an RSA key (public or private half), encoded in DER form."""
decodings = (_import_pkcs1_private,
_import_pkcs1_public,
_import_subjectPublicKeyInfo,
_import_x509_cert,
_import_pkcs8)
for decoding in decodings:
try:
return decoding(extern_key, passphrase)
except ValueError:
pass
raise ValueError("RSA key format is not supported")
def import_key(extern_key, passphrase=None):
"""Import an RSA key (public or private half), encoded in standard
form.
:Parameter extern_key:
The RSA key to import, encoded as a byte string.
An RSA public key can be in any of the following formats:
- X.509 certificate (binary or PEM format)
- X.509 ``subjectPublicKeyInfo`` DER SEQUENCE (binary or PEM
encoding)
- `PKCS#1`_ ``RSAPublicKey`` DER SEQUENCE (binary or PEM encoding)
- OpenSSH (textual public key only)
An RSA private key can be in any of the following formats:
- PKCS#1 ``RSAPrivateKey`` DER SEQUENCE (binary or PEM encoding)
- `PKCS#8`_ ``PrivateKeyInfo`` or ``EncryptedPrivateKeyInfo``
DER SEQUENCE (binary or PEM encoding)
- OpenSSH (textual public key only)
For details about the PEM encoding, see `RFC1421`_/`RFC1423`_.
The private key may be encrypted by means of a certain pass phrase
either at the PEM level or at the PKCS#8 level.
:Type extern_key: string
:Parameter passphrase:
In case of an encrypted private key, this is the pass phrase from
which the decryption key is derived.
:Type passphrase: string
:Return: An RSA key object (`RsaKey`).
:Raise ValueError/IndexError/TypeError:
When the given key cannot be parsed (possibly because the pass
phrase is wrong).
.. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt
.. _RFC1423: http://www.ietf.org/rfc/rfc1423.txt
.. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt
.. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt
"""
extern_key = tobytes(extern_key)
if passphrase is not None:
passphrase = tobytes(passphrase)
if extern_key.startswith(b('-----')):
# This is probably a PEM encoded key.
(der, marker, enc_flag) = PEM.decode(tostr(extern_key), passphrase)
if enc_flag:
passphrase = None
return _import_keyDER(der, passphrase)
if extern_key.startswith(b('ssh-rsa ')):
# This is probably an OpenSSH key
keystring = binascii.a2b_base64(extern_key.split(b(' '))[1])
keyparts = []
while len(keystring) > 4:
l = struct.unpack(">I", keystring[:4])[0]
keyparts.append(keystring[4:4 + l])
keystring = keystring[4 + l:]
e = Integer.from_bytes(keyparts[1])
n = Integer.from_bytes(keyparts[2])
return construct([n, e])
if bord(extern_key[0]) == 0x30:
# This is probably a DER encoded key
return _import_keyDER(extern_key, passphrase)
raise ValueError("RSA key format is not supported")
# Backward compatibility
importKey = import_key
#: `Object ID`_ for the RSA encryption algorithm. This OID often indicates
#: a generic RSA key, even when such key will be actually used for digital
#: signatures.
#:
#: .. _`Object ID`: http://www.alvestrand.no/objectid/1.2.840.113549.1.1.1.html
oid = "1.2.840.113549.1.1.1"
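# Editorial demo (not part of the original module): a generate/export/import
# round trip, run only when this file is executed directly. It relies only on
# the functions defined above and the Cryptodome packages already imported.
if __name__ == "__main__":
    _demo_key = generate(2048)
    _pem = _demo_key.exportKey('PEM')
    assert import_key(_pem) == _demo_key
    print(_demo_key)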
|
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import warnings
from ._DataCollection_class import DataCollection
from . import _comp_spectrallines
from . import _DataCollection_comp
from . import _DataCollection_plot
__all__ = ['SpectralLines', 'TimeTraces']
_OPENADAS_ONLINE = True
_GROUP_LINES = 'lines'
_GROUP_NE = 'ne'
_GROUP_TE = 'Te'
_UNITS_LAMBDA0 = 'm'
#############################################
#############################################
# Spectral Lines
#############################################
class SpectralLines(DataCollection):
_ddef = {
'Id': {'include': ['Mod', 'Cls', 'Name', 'version']},
'params': {
'lambda0': (float, 0.),
'source': (str, 'unknown'),
'transition': (str, 'unknown'),
'element': (str, 'unknown'),
'charge': (int, 0),
'ion': (str, 'unknown'),
'symbol': (str, 'unknown'),
},
}
_forced_group = [_GROUP_NE, _GROUP_TE]
_data_none = True
_show_in_summary_core = ['shape', 'ref', 'group']
_show_in_summary = 'all'
_grouplines = _GROUP_LINES
_groupne = _GROUP_NE
_groupte = _GROUP_TE
_units_lambda0 = _UNITS_LAMBDA0
def add_line(
self,
key=None,
lambda0=None,
pec=None,
source=None,
transition=None,
ion=None,
symbol=None,
**kwdargs,
):
""" Add a spectral line by key and rest wavelength, optionally with
"""
self.add_obj(
which='lines',
key=key,
lambda0=lambda0,
pec=pec,
source=source,
transition=transition,
ion=ion,
symbol=symbol,
**kwdargs,
)
def add_pec(self, key=None, pec=None, ref=None):
pass
def remove_line(
self,
key=None,
ion=None,
source=None,
lambda0=None,
):
# Check inputs
lc = [
key is not None,
ion is not None or source is not None or lambda0 is not None,
]
raise NotImplementedError
# -----------------
# from openadas
# ------------------
@classmethod
def _from_openadas(
cls,
lambmin=None,
lambmax=None,
element=None,
charge=None,
online=None,
update=None,
create_custom=None,
dsource0=None,
dref0=None,
ddata0=None,
dlines0=None,
grouplines=None,
):
"""
Load lines and pec from openadas, either:
- online = True: directly from the website
- online = False: from pre-downloaded files in ~/.tofu/openadas/
Provide wavelengths in m
Example:
--------
>>> import tofu as tf
>>> lines_mo = tf.data.SpectralLines.from_openadas(
element='Mo',
lambmin=3.94e-10,
lambmax=4e-10,
)
"""
# Preliminary import and checks
from ..openadas2tofu import _requests
from ..openadas2tofu import _read_files
if online is None:
online = _OPENADAS_ONLINE
if grouplines is None:
grouplines = cls._grouplines
else:
cls._grouplines = grouplines
# Load from online if relevant
if online is True:
try:
out = _requests.step01_search_online_by_wavelengthA(
lambmin=lambmin*1e10,
lambmax=lambmax*1e10,
element=element,
charge=charge,
verb=False,
returnas=np.ndarray,
resolveby='file',
)
lf = sorted(set([oo[0] for oo in out]))
out = _requests.step02_download_all(
files=lf,
update=update,
create_custom=create_custom,
verb=False,
)
except Exception as err:
msg = (
"""
{}
For some reason data could not be downloaded from openadas
=> see error message above
=> maybe check your internet connection?
""".format(err)
)
raise Exception(msg)
# Load for local files
dne, dte, dpec, lion, dsource, dlines = _read_files.step03_read_all(
lambmin=lambmin,
lambmax=lambmax,
element=element,
charge=charge,
pec_as_func=False,
format_for_DataCollection=True,
dsource0=dsource0,
dref0=dref0,
ddata0=ddata0,
dlines0=dlines0,
verb=False,
)
# # dgroup
# dgroup = ['Te', 'ne']
# dref - Te + ne
dref = dte
dref.update(dne)
# ddata - pec
ddata = dpec
# dref_static
dref_static = {
'ion': {k0: {} for k0 in lion},
'source': dsource,
}
# dobj (lines)
dobj = {
grouplines: dlines,
}
return ddata, dref, dref_static, dobj
@classmethod
def from_openadas(
cls,
lambmin=None,
lambmax=None,
element=None,
charge=None,
online=None,
update=None,
create_custom=None,
grouplines=None,
):
"""
Load lines and pec from openadas, either:
- online = True: directly from the website
- online = False: from pre-downloaded files in ~/.tofu/openadas/
"""
ddata, dref, dref_static, dobj = cls._from_openadas(
lambmin=lambmin,
lambmax=lambmax,
element=element,
charge=charge,
online=online,
update=update,
create_custom=create_custom,
grouplines=grouplines,
)
return cls(ddata=ddata, dref=dref, dref_static=dref_static, dobj=dobj)
def add_from_openadas(
self,
lambmin=None,
lambmax=None,
element=None,
charge=None,
online=None,
update=None,
create_custom=None,
):
"""
Load and add lines and pec from openadas, either:
- online = True: directly from the website
- online = False: from pre-downloaded files in ~/.tofu/openadas/
"""
ddata, dref, dref_static, dobj = self._from_openadas(
lambmin=lambmin,
lambmax=lambmax,
element=element,
charge=charge,
online=online,
update=update,
create_custom=create_custom,
dsource0=self._dref_static.get('source'),
dref0=self._dref,
ddata0=self._ddata,
dlines0=self._dobj.get('lines'),
)
self.update(ddata=ddata, dref=dref, dref_static=dref_static, dobj=dobj)
# -----------------
# from nist
# ------------------
@classmethod
def _from_nist(
cls,
lambmin=None,
lambmax=None,
element=None,
charge=None,
ion=None,
wav_observed=None,
wav_calculated=None,
transitions_allowed=None,
transitions_forbidden=None,
cache_from=None,
cache_info=None,
verb=None,
create_custom=None,
dsource0=None,
dlines0=None,
grouplines=None,
):
"""
Load lines from nist, either:
- cache_from = False: directly from the website
- cache_from = True: from pre-downloaded files in ~/.tofu/nist/
Provide wavelengths in m
Example:
--------
>>> import tofu as tf
>>> lines_mo = tf.data.SpectralLines.from_nist(
element='Mo',
lambmin=3.94e-10,
lambmax=4e-10,
)
"""
# Preliminary import and checks
from ..nist2tofu import _requests
if grouplines is None:
grouplines = cls._grouplines
else:
cls._grouplines = grouplines
if verb is None:
verb = False
if cache_info is None:
cache_info = False
# Load from online if relevant
dlines, dsources = _requests.step01_search_online_by_wavelengthA(
element=element,
charge=charge,
ion=ion,
lambmin=lambmin*1e10,
lambmax=lambmax*1e10,
wav_observed=wav_observed,
wav_calculated=wav_calculated,
transitions_allowed=transitions_allowed,
transitions_forbidden=transitions_forbidden,
info_ref=True,
info_conf=True,
info_term=True,
info_J=True,
info_g=True,
cache_from=cache_from,
cache_info=cache_info,
return_dout=True,
return_dsources=True,
verb=verb,
create_custom=create_custom,
format_for_DataCollection=True,
dsource0=dsource0,
dlines0=dlines0,
)
# dref_static
lion = sorted(set([dlines[k0]['ion'] for k0 in dlines.keys()]))
dref_static = {
'ion': {k0: {} for k0 in lion},
'source': dsources,
}
# dobj (lines)
dobj = {
grouplines: dlines,
}
return dref_static, dobj
@classmethod
def from_nist(
cls,
lambmin=None,
lambmax=None,
element=None,
charge=None,
ion=None,
wav_observed=None,
wav_calculated=None,
transitions_allowed=None,
transitions_forbidden=None,
cache_from=None,
cache_info=None,
verb=None,
create_custom=None,
grouplines=None,
):
"""
Load lines from nist, either:
- cache_from = False: directly from the website
- cache_from = True: from pre-downloaded files in ~/.tofu/nist/
"""
dref_static, dobj = cls._from_nist(
lambmin=lambmin,
lambmax=lambmax,
element=element,
charge=charge,
ion=ion,
wav_observed=wav_observed,
wav_calculated=wav_calculated,
transitions_allowed=transitions_allowed,
transitions_forbidden=transitions_forbidden,
cache_from=cache_from,
cache_info=cache_info,
verb=verb,
create_custom=create_custom,
grouplines=grouplines,
)
return cls(dref_static=dref_static, dobj=dobj)
def add_from_nist(
self,
lambmin=None,
lambmax=None,
element=None,
charge=None,
ion=None,
wav_observed=None,
wav_calculated=None,
transitions_allowed=None,
transitions_forbidden=None,
cache_from=None,
cache_info=None,
verb=None,
create_custom=None,
grouplines=None,
):
"""
Load and add lines from nist, either:
- cache_from = False: directly from the website
- cache_from = True: from pre-downloaded files in ~/.tofu/nist/
"""
dref_static, dobj = self._from_nist(
lambmin=lambmin,
lambmax=lambmax,
element=element,
charge=charge,
ion=ion,
wav_observed=wav_observed,
wav_calculated=wav_calculated,
transitions_allowed=transitions_allowed,
transitions_forbidden=transitions_forbidden,
cache_from=cache_from,
cache_info=cache_info,
verb=verb,
create_custom=create_custom,
dsource0=self._dref_static.get('source'),
dlines0=self._dobj.get('lines'),
grouplines=grouplines,
)
self.update(dref_static=dref_static, dobj=dobj)
# -----------------
# from file (.py)
# ------------------
@staticmethod
def _check_extract_dict_from_mod(mod, k0):
lk1 = [
k0, k0.upper(),
'_'+k0, '_'+k0.upper(),
'_d'+k0, '_D'+k0.upper(),
'd'+k0, 'D'+k0.upper(),
k0+'s', k0.upper()+'S',
'_d'+k0+'s', '_D'+k0.upper()+'S',
'd'+k0+'s', 'D'+k0.upper()+'S',
]
lk1 = [k1 for k1 in lk1 if hasattr(mod, k1)]
if len(lk1) > 1:
msg = "Ambiguous attributes: {}".format(lk1)
raise Exception(msg)
elif len(lk1) == 0:
return
if hasattr(mod, lk1[0]):
return getattr(mod, lk1[0])
else:
return
@classmethod
def from_module(cls, pfe=None):
# Check input
c0 = (
os.path.isfile(pfe)
and pfe[-3:] == '.py'
)
if not c0:
msg = (
"\nProvided Path-File-Extension (pfe) not valid!\n"
+ "\t- expected: absolute path to python module\n"
+ "\t- provided: {}".format(pfe)
)
raise Exception(msg)
pfe = os.path.abspath(pfe)
# Load module
path, fid = os.path.split(pfe)
import importlib.util
spec = importlib.util.spec_from_file_location(fid[:-3], pfe)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
# extract dref_static
dref_static = {}
for k0 in ['source', 'transition', 'ion', 'element']:
dd = cls._check_extract_dict_from_mod(mod, k0)
if dd is not None:
dref_static[k0] = dd
# add ion
if 'ion' not in dref_static.keys():
lions = np.array([
v0['ion'] for k0, v0 in mod.dlines.items()
if 'ion' in v0.keys()
]).ravel()
if len(lions) > 0:
dref_static['ion'] = {
k0: {'ion': k0} for k0 in lions
}
else:
lIONS = np.array([
v0['ION'] for k0, v0 in mod.dlines.items()
if 'ION' in v0.keys()
]).ravel()
if len(lIONS) > 0:
dref_static['ION'] = {
k0: {'ION': k0} for k0 in lIONS
}
# extract lines
dobj = {
'lines': mod.dlines
}
# Create collection
out = cls(dref_static=dref_static, dobj=dobj)
# Replace ION by ion if relevant
c0 = (
'ion' in out.dref_static.keys()
and 'ion' not in out.get_lparam(which='lines')
and 'ION' in out.get_lparam(which='lines')
)
if c0:
for k0, v0 in out._dobj['lines'].items():
ion = [
k1 for k1, v1 in out._dref_static['ion'].items()
if out._dobj['lines'][k0]['ION'] == v1['ION']
][0]
out._dobj['lines'][k0]['ion'] = ion
del out._dobj['lines'][k0]['ION']
return out
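    # Editorial sketch of the kind of module from_module() expects
    # (hypothetical file 'my_lines.py'; only `dlines` is mandatory, the other
    # dicts are picked up by _check_extract_dict_from_mod() if present):
    #
    #     dsource = {'mysrc': {}}
    #     dlines = {
    #         'l0': {'lambda0': 3.96e-10, 'ion': 'Ar16+',
    #                'symbol': 'w', 'source': 'mysrc'},
    #     }
    #
    # and then:  SpectralLines.from_module(pfe='/abs/path/to/my_lines.py')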
# -----------------
# summary
# ------------------
# -----------------
# conversion wavelength - energy - frequency
# ------------------
def convert_lines(self, units=None, key=None, ind=None, returnas=None):
""" Convert wavelength (m) to other units or other quantities
Avalaible units:
wavelength: km, m, mm, um, nm, pm, A
energy: J, eV, keV, MeV, GeV
frequency: Hz, kHz, MHz, GHz, THz
Return the result as a np.ndarray (returnas = 'data')
Can also just return the conversion coef if returnas='coef'
In that case, a bool is also returned indicating whether the result is
proportional to the inverse of lambda0::
- False: data = coef * lambda0
- True: data = coef / lambda0
"""
if units is None:
units = self._units_lambda0
if returnas is None:
returnas = dict
lok = [dict, np.ndarray, 'data', 'coef']
if returnas not in lok:
msg = (
"Arg returnas must be in:\n"
+ "\t- {}\n".format(lok)
+ "\t- provided: {}".format(returnas)
)
raise Exception(msg)
if returnas in [dict, np.ndarray, 'data']:
returnas2 = 'data'
else:
returnas2 = 'coef'
key = self._ind_tofrom_key(
which=self._grouplines, key=key, ind=ind, returnas=str,
)
lamb_in = self.get_param(
'lambda0', key=key, returnas=np.ndarray,
)['lambda0']
out = self.convert_spectral(
data=lamb_in, units_in='m', units_out=units, returnas=returnas2,
)
if returnas is dict:
out = {k0: out[ii] for ii, k0 in enumerate(key)}
return out
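    # Editorial usage sketch for convert_lines() (`sl` is a hypothetical
    # SpectralLines instance):
    #
    #     dE = sl.convert_lines(units='keV')           # {line key: energy}
    #     out = sl.convert_lines(units='eV', returnas='coef')
    #     # -> conversion coefficient plus the flag telling whether
    #     #    data = coef * lambda0 or data = coef / lambda0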
# -----------------
# PEC interpolation
# ------------------
def calc_pec(
self,
key=None,
ind=None,
ne=None,
Te=None,
deg=None,
grid=None,
):
""" Compute the pec (<sigma v>) by interpolation for chosen lines
Assumes Maxwellian electron distribution
Provide ne and Te as 1d np.ndarrays
if grid=False:
- ne is a (n,) 1d array
- Te is a (n,) 1d array
=> the result is a dict of (n,) 1d array
if grid=True:
- ne is a (n1,) 1d array
- Te is a (n2,) 1d array
=> the result is a dict of (n1, n2) 2d array
"""
# Check keys
key = self._ind_tofrom_key(
which=self._grouplines, key=key, ind=ind, returnas=str,
)
dlines = self._dobj[self._grouplines]
if deg is None:
deg = 2
# Check data conformity
lg = (self._groupne, self._groupte)
lc = [
k0 for k0 in key
if (
dlines[k0].get('pec') is None
or self._ddata[dlines[k0]['pec']]['group'] != lg
)
]
if len(lc) > 0:
msg = (
"The following lines have non-conform pec data:\n"
+ "\t- {}\n\n".format(lc)
+ " => pec data should be tabulated vs (ne, Te)"
)
warnings.warn(msg)
key = [kk for kk in key if kk not in lc]
# Check ne, Te
ltype = [int, float, np.integer, np.floating]
dnTe = {'ne': ne, 'Te': Te}
for k0, v0 in dnTe.items():
if type(v0) in ltype:
dnTe[k0] = np.r_[v0]
if isinstance(dnTe[k0], list) or isinstance(dnTe[k0], tuple):
dnTe[k0] = np.array([dnTe[k0]])
if not (isinstance(dnTe[k0], np.ndarray) and dnTe[k0].ndim == 1):
msg = (
"Arg {} should be a 1d np.ndarray!".format(k0)
)
raise Exception(msg)
# Interpolate
dout = {}
derr = {}
for k0 in key:
try:
ne0 = [
kk for kk in self._ddata[dlines[k0]['pec']]['ref']
if self._ddata[kk]['group'] == (self._groupne,)
][0]
ne0 = self._ddata[ne0]['data']
Te0 = [
kk for kk in self._ddata[dlines[k0]['pec']]['ref']
if self._ddata[kk]['group'] == (self._groupte,)
][0]
Te0 = self._ddata[Te0]['data']
dout[k0] = _comp_spectrallines._interp_pec(
ne0=ne0,
Te0=Te0,
pec0=self._ddata[dlines[k0]['pec']]['data'],
ne=dnTe['ne'],
Te=dnTe['Te'],
deg=deg,
grid=grid,
)
except Exception as err:
derr[k0] = str(err)
if len(derr) > 0:
msg = (
"The pec could not be interpolated for the following lines:\n"
+ "\n".join([
'\t- {} : {}'.format(k0, v0) for k0, v0 in derr.items()
])
)
raise Exception(msg)
return dout
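    # Editorial usage sketch for calc_pec() (made-up values; ne in m^-3 and
    # Te in eV, consistent with the plotting methods below):
    #
    #     ne = np.r_[1e19, 5e19, 1e20]
    #     Te = np.r_[1e3, 2e3, 5e3]
    #     dpec = sl.calc_pec(ne=ne, Te=Te, grid=False)  # {key: (3,) arrays}
    #     dpec = sl.calc_pec(ne=ne, Te=Te, grid=True)   # {key: (3, 3) arrays}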
def calc_intensity(
self,
key=None,
ind=None,
ne=None,
Te=None,
concentration=None,
deg=None,
grid=None,
):
""" Compute the lines intensities by pec interpolation for chosen lines
Assumes Maxwellian electron distribution
Provide ne and Te as 1d np.ndarrays
Provide concentration as:
- a np.ndarray (same concentration assumed for all lines)
- a dict of {key: np.ndarray}
if grid=False:
- ne is a (n,) 1d array
- Te is a (n,) 1d array
- concentration is a (dict of) (n,) 1d array(s)
=> the result is a dict of (n,) 1d array(s)
if grid=True:
- ne is a (n1,) 1d array
- Te is a (n2,) 1d array
- concentration is a (dict of) (n1, n2) 2d array(s)
=> the result is a dict of (n1, n2) 2d array
"""
# check inputs
if grid is None:
grid = ne.size != Te.size
# Check keys
key = self._ind_tofrom_key(
which=self._grouplines, key=key, ind=ind, returnas=str,
)
if isinstance(concentration, np.ndarray):
concentration = {k0: concentration for k0 in key}
c0 = (
isinstance(concentration, dict)
and all([
k0 in key
and isinstance(cc, np.ndarray)
and (
(grid is False and cc.shape == ne.shape == Te.shape)
or
(grid is True and cc.shape == (ne.size, Te.size))
)
and np.all((cc > 0.) & (cc <= 1.))
for k0, cc in concentration.items()
])
)
if not c0:
shape = ne.shape if grid is False else (ne.size, Te.size)
msg = (
"Arg concentration is non-conform:\n"
+ "\t- Expected: dict of {} arrays in [0, 1]\n".format(shape)
+ "\t- Provided: {}".format(concentration)
)
raise Exception(msg)
# interpolate pec
dpec = self.calc_pec(
key=key,
ind=ind,
ne=ne,
Te=Te,
grid=grid,
deg=deg,
)
# ne for broadcasting
if grid is True:
neb = ne[:, None]
else:
neb = ne
# Derive intensity
dint = {
k0: v0*neb**2*concentration[k0] for k0, v0 in dpec.items()
}
return dint
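    # Illustration of the intensity formula used above (a sketch, not part of
    # the class API; the line key is hypothetical). For each line,
    # I = pec * ne**2 * concentration; when grid=True, ne is broadcast to
    # (n1, 1) so that it multiplies the (n1, n2) pec array column-wise.
    #
    #   dint = sl.calc_intensity(
    #       key=['line0'], ne=ne, Te=Te,
    #       concentration=np.full(ne.shape, 1e-3),   # impurity fraction
    #       grid=False,
    #   )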
# -----------------
# plotting
# ------------------
def plot(
self,
key=None,
ind=None,
ax=None,
sortby=None,
param_txt=None,
ymin=None,
ymax=None,
ls=None,
lw=None,
fontsize=None,
side=None,
dcolor=None,
fraction=None,
figsize=None,
dmargin=None,
wintit=None,
tit=None,
):
""" plot rest wavelengths as vertical lines """
if param_txt is None:
param_txt = 'symbol'
return super()._plot_axvlines(
which='lines',
key=key,
param_x='lambda0',
param_txt=param_txt,
sortby=sortby,
sortby_def='ion',
sortby_lok=['ion', 'ION', 'source'],
ax=ax, ymin=ymin, ymax=ymax,
ls=ls, lw=lw, fontsize=fontsize,
side=side, dcolor=dcolor, fraction=fraction,
figsize=figsize, dmargin=dmargin,
wintit=wintit, tit=tit,
)
def plot_pec_single(
self,
key=None,
ind=None,
ne=None,
Te=None,
concentration=None,
deg=None,
grid=None,
ax=None,
sortby=None,
param_txt=None,
ymin=None,
ymax=None,
ls=None,
lw=None,
fontsize=None,
side=None,
dcolor=None,
fraction=None,
figsize=None,
dmargin=None,
wintit=None,
tit=None,
):
# Check input
if param_txt is None:
param_txt = 'symbol'
# Check ne, Te
ltypes = [int, float, np.integer, np.floating]
dnTe = {'ne': ne, 'Te': Te}
single = all([
type(v0) in ltypes or len(v0) == 1 for v0 in dnTe.values()
])
if not single:
msg = ("Arg ne and Te must be floats!")
raise Exception(msg)
# Get dpec
dpec = self.calc_pec(
key=key,
ind=ind,
ne=ne,
Te=Te,
deg=deg,
grid=grid,
)
key = list(dpec.keys())
ne = float(ne)
Te = float(Te)
tit = (
r'$n_e$' + '= {} '.format(ne) + r'$/m^3$'
+ r' - $T_e$ = ' + '{} keV'.format(Te/1000.)
)
pmax = np.max([np.log10(v0) for v0 in dpec.values()])
pmin = np.min([np.log10(v0) for v0 in dpec.values()])
dsize = {
k0: (np.log10(v0)-pmin)/(pmax-pmin)*19 + 1
for k0, v0 in dpec.items()
}
sortby_lok = ['ion', 'ION', 'source']
lk0 = [k0 for k0 in sortby_lok if k0 in self._dref_static.keys()]
if len(lk0) > 0:
sortby_def = lk0[0]
else:
sortby_def = None
return super()._plot_axvlines(
which='lines',
key=key,
param_x='lambda0',
param_txt=param_txt,
sortby=sortby,
sortby_def=sortby_def,
sortby_lok=sortby_lok,
dsize=dsize,
ax=ax, ymin=ymin, ymax=ymax,
ls=ls, lw=lw, fontsize=fontsize,
side=side, dcolor=dcolor, fraction=fraction,
figsize=figsize, dmargin=dmargin,
wintit=wintit, tit=tit,
)
def plot_pec(
self,
key=None,
ind=None,
ne=None,
Te=None,
norder=None,
ne_scale=None,
Te_scale=None,
param_txt=None,
param_color=None,
deg=None,
dax=None,
proj=None,
ymin=None,
ymax=None,
ls=None,
lw=None,
fontsize=None,
side=None,
dcolor=None,
fraction=None,
figsize=None,
dmargin=None,
dtit=None,
tit=None,
wintit=None,
):
# Check input
if param_txt is None:
param_txt = 'symbol'
if param_color is None:
param_color = 'ion'
if norder is None:
norder = 0
if ne_scale is None:
ne_scale = 'log'
if Te_scale is None:
Te_scale = 'linear'
# Check ne, Te
ltypes = [int, float, np.integer, np.floating]
dnTe = {
'ne': type(ne) in ltypes or len(ne) == 1,
'Te': type(Te) in ltypes or len(Te) == 1,
}
if all([v0 for v0 in dnTe.values()]):
msg = (
"For a single point in (ne, Te) space, use plot_pec_singe()"
)
raise Exception(msg)
elif dnTe['ne']:
ne = np.r_[ne].ravel()
ne = np.full((Te.size), ne[0])
elif dnTe['Te']:
Te = np.r_[Te].ravel()
Te = np.full((ne.size), Te[0])
if len(ne) != len(Te):
msg = (
"Please provide ne and Te as vectors of same size!"
)
raise Exception(msg)
# Get dpec
dpec = self.calc_pec(
key=key,
ind=ind,
ne=ne,
Te=Te,
deg=deg,
grid=False,
)
damp = {k0: {'data': v0} for k0, v0 in dpec.items()}
# Create grid
ne_grid = _DataCollection_comp._get_grid1d(
ne, scale=ne_scale, npts=ne.size*2, nptsmin=3,
)
Te_grid = _DataCollection_comp._get_grid1d(
Te, scale=Te_scale, npts=Te.size*2, nptsmin=3,
)
dpec_grid = self.calc_pec(
key=key,
ind=ind,
ne=ne_grid,
Te=Te_grid,
deg=deg,
grid=True,
)
# Get dcolor
lcol = plt.rcParams['axes.prop_cycle'].by_key()['color']
dcolor = {}
if param_color != 'key':
lion = [self._dobj['lines'][k0][param_color] for k0 in dpec.keys()]
for ii, k0 in enumerate(set(lion)):
dcolor[k0] = mcolors.to_rgb(lcol[ii % len(lcol)])
lk1 = [
k2 for k2 in dpec.keys()
if self._dobj['lines'][k2][param_color] == k0
]
for k1 in lk1:
damp[k1]['color'] = k0
else:
for ii, k0 in enumerate(dpec.keys()):
                dcolor[k0] = mcolors.to_rgb(lcol[ii % len(lcol)])
damp[k0]['color'] = k0
# Create image
im_data = np.full((ne_grid.size, Te_grid.size), np.nan)
im = np.full((ne_grid.size, Te_grid.size, 4), np.nan)
dom_val = np.concatenate(
[v0[None, :, :] for v0 in dpec_grid.values()],
axis=0,
)
if norder == 0:
im_ind = np.nanargmax(dom_val, axis=0)
else:
im_ind = np.argsort(dom_val, axis=0)[-norder, :, :]
for ii in np.unique(im_ind):
ind = im_ind == ii
im_data[ind] = dom_val[ii, ind]
pmin = np.nanmin(np.log10(im_data))
pmax = np.nanmax(np.log10(im_data))
for ii, k0 in enumerate(dpec_grid.keys()):
if ii in np.unique(im_ind):
ind = im_ind == ii
im[ind, :-1] = dcolor[damp[k0]['color']]
im[ind, -1] = (
(np.log10(im_data[ind])-pmin)/(pmax-pmin)*0.9 + 0.1
)
extent = (ne_grid.min(), ne_grid.max(), Te_grid.min(), Te_grid.max())
if tit is None:
tit = 'spectral lines PEC interpolations'
if dtit is None:
dtit = {'map': 'norder = {}'.format(norder)}
return _DataCollection_plot.plot_dominance_map(
din=self._dobj['lines'], im=im, extent=extent,
xval=ne, yval=Te, damp=damp,
x_scale=ne_scale, y_scale=Te_scale, amp_scale='log',
param_txt='symbol',
dcolor=dcolor,
dax=dax, proj=proj,
figsize=figsize, dmargin=dmargin,
wintit=wintit, tit=tit, dtit=dtit,
)
#############################################
#############################################
# Time traces
#############################################
# TBC
class TimeTraces(DataCollection):
""" A generic class for handling multiple time traces """
_forced_group = 'time'
_dallowed_params = {
'time': {
'origin': (str, 'unknown'),
'dim': (str, 'time'),
'quant': (str, 't'),
'name': (str, 't'),
'units': (str, 's')},
}
_plot_vignettes = False
def fit(self, ind=None, key=None,
Type='staircase', func=None,
plot=True, fs=None, ax=None, draw=True, **kwdargs):
""" Fit the times traces with a model
Typically try to fit plateaux and ramps i.e.: Type = 'staircase')
Return a dictionary of the fitted parameters, ordered by data key
"""
dout = self._fit_one_dim(ind=ind, key=key, group=self._forced_group,
Type=Type, func=func, **kwdargs)
if plot:
kh = _DataCollection_plot.plot_fit_1d(self, dout)
return dout
    def add_plateaux(self, ind=None, key=None, verb=False):
        dout = self.fit(
            ind=ind, key=key, Type='staircase',
        )
# Make Pandas Dataframe attribute
self.plateaux = None
if verb:
msg = ""
def plot(self, **kwdargs):
return self._plot_timetraces(**kwdargs)
def plot_incremental(self, key=None, ind=None,
plateaux=True, connect=True):
return
def plot_plateau_validate(self, key=None, ind=None):
return
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU specific APIs to be used in conjunction with TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import topology
from tensorflow.python.tpu import tpu
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_INITIALIZED_TPU_SYSTEMS = {}
_LOCAL_MASTERS = ("", "local")
@tf_export("tpu.experimental.initialize_tpu_system")
def initialize_tpu_system(cluster_resolver=None):
"""Initialize the TPU devices.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Returns:
The tf.tpu.Topology object for the topology of the TPU cluster. If called
inside tf.function, it returns the serialized topology object instead.
Raises:
RuntimeError: If running inside a tf.function.
NotFoundError: If no TPU devices found in eager mode.
"""
job = None
if cluster_resolver is None:
# If no cluster resolver is specified, and running eagerly, execute the init
# ops in the current device scope.
if context.executing_eagerly():
curr_device = device.DeviceSpec.from_string(context.context().device_name)
if curr_device.job is not None:
job = "{}/replica:0/task:0".format(curr_device.job)
cluster_resolver = TPUClusterResolver("")
assert isinstance(cluster_resolver, TPUClusterResolver)
tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access
if tpu_name in _INITIALIZED_TPU_SYSTEMS:
logging.warning(
"TPU system %s has already been initialized. "
"Reinitializing the TPU can cause previously created "
"variables on TPU to be lost.", tpu_name)
logging.info("Initializing the TPU system: %s", tpu_name)
  # This function is structured this way for the following non-intuitive reasons.
# tpu.initialize_system creates a dummy op whose sole purpose is to trigger
# DistributedTPURewritePass. This pass actually adds real ops that
# initialize the TPU system. Thus, we can't simply run tpu.initialize_system
# eagerly. We need to wrap it in defun and trigger the rewrite passes on it.
if tpu_name not in _LOCAL_MASTERS:
    # Explicitly place the tpu.initialize_system in the first worker to
    # avoid an error where the output node matches multiple devices.
job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name())
if context.executing_eagerly():
@function.defun
def _tpu_init_fn():
# In TF1, we usually close chips when compilation fails to clear the data
# in infeed. In TF2, we don't need to do this because infeed is no longer
# used, so user can recover from TPU compilation failures more smoothly.
return tpu.initialize_system(
job=job, compilation_failure_closes_chips=False)
# The TPU_SYSTEM device must match the device used in tpu.initialize_system
# exactly, otherwise you can get errors if there are multiple TPU_SYSTEM
# devices available.
try:
with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access
output = _tpu_init_fn()
context.async_wait()
except errors.InvalidArgumentError as e:
raise errors.NotFoundError(
None, None,
"TPUs not found in the cluster. Failed in initialization: "
+ str(e))
# Clear out the eager context caches since the memory is invalid now.
logging.info("Clearing out eager caches")
context.context()._clear_caches() # pylint: disable=protected-access
serialized_topology = output.numpy()
elif not ops.executing_eagerly_outside_functions():
master = cluster_resolver.master()
cluster_spec = cluster_resolver.cluster_spec()
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with ops.Graph().as_default():
with session_lib.Session(config=session_config, target=master) as sess:
serialized_topology = sess.run(tpu.initialize_system())
else:
with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access
serialized_topology = tpu.initialize_system(
job=job, compilation_failure_closes_chips=False)
# If initialize_tpu_system is called inside tf.function, we only return
# the serialized topology object as the tf.tpu.Topology object has to be
# constructed in eager mode.
return serialized_topology
logging.info("Finished initializing TPU system.")
tpu_topology = topology.Topology(serialized=serialized_topology)
_INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology
return tpu_topology
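# Minimal usage sketch for initialize_tpu_system (comments only, not executed
# at import; the TPU name "my-tpu" is a placeholder). In eager mode the call
# returns a tf.tpu.Topology object describing the TPU mesh.
#
#   import tensorflow as tf
#   resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="my-tpu")
#   topology = tf.tpu.experimental.initialize_tpu_system(resolver)
#   print(topology.mesh_shape)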
@tf_export("tpu.experimental.shutdown_tpu_system")
def shutdown_tpu_system(cluster_resolver=None):
"""Shuts down the TPU devices.
This will clear all caches, even those that are maintained through sequential
calls to tf.tpu.experimental.initialize_tpu_system, such as the compilation
cache.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Raises:
RuntimeError: If no TPU devices found for eager execution or if run in a
tf.function.
"""
job = None
if cluster_resolver is None:
# If no cluster resolver is specified, and running eagerly, execute the init
# ops in the current device scope.
if context.executing_eagerly():
curr_device = device.DeviceSpec.from_string(context.context().device_name)
if curr_device.job is not None:
job = "{}/replica:0/task:0".format(curr_device.job)
cluster_resolver = TPUClusterResolver("")
assert isinstance(cluster_resolver, TPUClusterResolver)
tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access
if tpu_name not in _INITIALIZED_TPU_SYSTEMS:
logging.warning("You are shutting down a TPU system %s that has not been "
"initialized." % tpu_name)
logging.info("Shutting down the TPU system: %s", tpu_name)
if context.executing_eagerly():
    # This function is structured this way for the following non-intuitive reasons.
# tpu.shutdown_system creates a dummy op whose sole purpose is to trigger
# DistributedTPURewritePass. This pass actually adds real ops that
# shutdown the TPU system. Thus, we can't simply run tpu.shutdown_system
# eagerly. We need to wrap it in defun and trigger the rewrite passes on it.
if tpu_name not in _LOCAL_MASTERS:
      # Explicitly place the tpu.shutdown_system in the first worker to
      # avoid an error where the output node matches multiple devices.
job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name())
@function.defun
def _tpu_shutdown_fn():
tpu.shutdown_system(job=job)
# The TPU_SYSTEM device must match the device used in tpu.shutdown_system
# exactly, otherwise you can get errors if there are multiple TPU_SYSTEM
# devices available.
with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access
_tpu_shutdown_fn()
# Clear out the eager context caches since the memory is invalid now.
logging.info("Clearing out eager caches")
context.context()._clear_caches() # pylint: disable=protected-access
elif not ops.executing_eagerly_outside_functions():
master = cluster_resolver.master()
cluster_spec = cluster_resolver.cluster_spec()
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with ops.Graph().as_default():
with session_lib.Session(config=session_config, target=master) as sess:
sess.run(tpu.shutdown_system())
else:
raise RuntimeError("initialize_tpu_system is not supported within "
"tf.functions.")
logging.info("Finished shutting down TPU system.")
if tpu_name in _INITIALIZED_TPU_SYSTEMS:
del _INITIALIZED_TPU_SYSTEMS[tpu_name]
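# Sketch of pairing the two entry points (comments only): shutting down and
# then re-initializing the TPU system clears the compilation cache between
# experiments, using the same `resolver` as in the sketch above.
#
#   tf.tpu.experimental.shutdown_tpu_system(resolver)
#   tf.tpu.experimental.initialize_tpu_system(resolver)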
|
|
"""
pyapi-gitlab tests
"""
import unittest2 as unittest
import gitlab
import os
import time
import random
import string
try:
from Crypto.PublicKey import RSA
ssh_test = True
except ImportError:
ssh_test = False
user = os.environ.get('gitlab_user', 'root')
password = os.environ.get('gitlab_password', '5iveL!fe')
host = os.environ.get('gitlab_host', 'http://192.168.1.100')
class GitlabTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.git = gitlab.Gitlab(host=host)
cls.git.login(user=user, password=password)
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
cls.project = cls.git.createproject(name=name, visibility_level="private",
import_url="https://github.com/Itxaka/pyapi-gitlab.git")
# wait a bit for the project to be fully imported
time.sleep(20)
cls.project_id = cls.project['id']
cls.user_id = cls.git.currentuser()['id']
@classmethod
def tearDownClass(cls):
cls.git.deleteproject(cls.project_id)
def test_user(self):
assert isinstance(self.git.createuser(name="test", username="test",
password="test1234", email="test@test.com",
skype="this", linkedin="that"), dict)
# get all users
assert isinstance(self.git.getusers(), list) # compatible with 2.6
assert isinstance(self.git.currentuser(), dict)
user = self.git.getusers(search="test")[0]
self.assertTrue(self.git.deleteuser(user["id"]))
# check can_create_user
user = self.git.createuser("random", "random", "random1234", "random@random.org",
can_create_group="false")
self.assertFalse(self.git.getuser(user['id'])['can_create_group'])
self.git.deleteuser(user['id'])
user = self.git.createuser("random", "random", "random1234", "random@random.org",
can_create_group="true")
self.assertTrue(self.git.getuser(user['id'])['can_create_group'])
assert isinstance(self.git.edituser(user['id'], can_create_group="false"), dict)
# Check that indeed the user details were changed
self.assertFalse(self.git.getuser(user['id'])['can_create_group'])
self.git.deleteuser(user['id'])
# get X pages
assert isinstance(self.git.getusers(page=2), list) # compatible with 2.6
assert isinstance(self.git.getusers(per_page=4), list) # compatible with 2.6
self.assertEqual(self.git.getusers(page=800), list("")) # check against empty list
self.assertTrue(self.git.getusers(per_page=43)) # check against false
def test_project(self):
# test project
assert isinstance(self.git.getprojects(), list)
assert isinstance(self.git.getprojects(page=5), list)
assert isinstance(self.git.getprojects(per_page=7), list)
assert isinstance(self.git.getproject(self.project_id), dict)
assert isinstance(self.git.getproject(self.project['path_with_namespace']), dict)
self.assertFalse(self.git.getproject("wrong"))
# test getprojectsall
assert isinstance(self.git.getprojectsall(), list)
assert isinstance(self.git.getprojectsall(page=5), list)
assert isinstance(self.git.getprojectsall(per_page=7), list)
# test getownprojects
assert isinstance(self.git.getprojectsowned(), list)
assert isinstance(self.git.getprojectsowned(page=5), list)
assert isinstance(self.git.getprojectsowned(per_page=7), list)
# test events
assert isinstance(self.git.getprojectevents(self.project_id), list)
assert isinstance(self.git.getprojectevents(self.project_id, page=3), list)
assert isinstance(self.git.getprojectevents(self.project_id, per_page=4), list)
# add-remove project members
self.assertTrue(self.git.addprojectmember(self.project_id, user_id=self.user_id, access_level="reporter"))
assert isinstance(self.git.getprojectmembers(self.project_id), list)
self.assertTrue(self.git.editprojectmember(self.project_id, user_id=self.user_id, access_level="master"))
self.assertTrue(self.git.deleteprojectmember(self.project_id, user_id=1))
# Hooks testing
assert isinstance(self.git.addprojecthook(self.project_id, "http://web.com"), dict)
assert isinstance(self.git.getprojecthooks(self.project_id), list)
assert isinstance(self.git.getprojecthook(self.project_id,
self.git.getprojecthooks(self.project_id)[0]['id']), dict)
self.assertTrue(self.git.editprojecthook(self.project_id,
self.git.getprojecthooks(self.project_id)[0]['id'], "http://another.com"))
self.assertTrue(self.git.deleteprojecthook(self.project_id,
self.git.getprojecthooks(self.project_id)[0]['id']))
# Forks testing
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
newproject = self.git.createproject(name)
# set it as forker from the main project
self.git.createforkrelation(newproject["id"], self.project_id)
newproject = self.git.getproject(newproject["id"])
self.assertIn("forked_from_project", newproject)
# remove the fork relation
self.assertTrue(self.git.removeforkrelation(newproject["id"]))
newproject = self.git.getproject(newproject["id"])
with self.assertRaises(KeyError) as raises:
_ = newproject["forked_from_project"]
# test moveproject
for group in self.git.getgroups():
self.git.deletegroup(group["id"])
group = self.git.creategroup("movegroup", "movegroup")
assert isinstance(group, dict)
assert isinstance(self.git.moveproject(group["id"], newproject["id"]), dict)
project = self.git.getproject(newproject["id"])
self.assertEqual("movegroup", project["namespace"]["name"])
# Clean up the newgroup
self.git.deleteproject(newproject["id"])
# Create an actual fork of the main project
self.git.createfork(self.project_id)
def test_deploykeys(self):
keys = self.git.getdeploykeys(self.project_id)
assert isinstance(keys, list)
self.assertEqual(len(keys), 0)
if ssh_test:
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
rsa_key = RSA.generate(1024)
assert isinstance(self.git.adddeploykey(project_id=self.project_id, title=name,
key=str(rsa_key.publickey().exportKey(format="OpenSSH"))), dict)
keys = self.git.getdeploykeys(self.project_id)
self.assertGreater(len(keys), 0)
key = keys[0]
assert isinstance(self.git.getdeploykey(self.project_id, key["id"]), dict)
self.assertTrue(self.git.deletedeploykey(self.project_id, key["id"]))
keys = self.git.getdeploykeys(self.project_id)
self.assertEqual(len(keys), 0)
def test_branch(self):
sha1 = self.git.getrepositorycommits(project_id=self.project_id)[0]["id"]
assert isinstance(self.git.createbranch(self.project_id, branch="deleteme", ref=sha1), dict)
self.assertTrue(self.git.deletebranch(self.project_id, branch="deleteme"))
assert isinstance(self.git.getbranches(self.project_id), list)
assert isinstance(self.git.getbranch(self.project_id, branch="develop"), dict)
self.assertTrue(self.git.protectbranch(self.project_id, branch="develop"))
self.assertTrue(self.git.unprotectbranch(self.project_id, branch="develop"))
def test_sshkeys(self):
assert isinstance(self.git.getsshkeys(), list)
self.assertEquals(len(self.git.getsshkeys()), 0)
        # not working due to a possible bug in pycrypto: https://github.com/dlitz/pycrypto/issues/99
if ssh_test:
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
rsa_key = RSA.generate(1024)
self.assertTrue(self.git.addsshkey(title=name, key=str(rsa_key.publickey().exportKey(format="OpenSSH"))))
self.assertGreater(self.git.getsshkeys(), 0)
keys = self.git.getsshkeys()
assert isinstance(keys, list)
key = self.git.getsshkeys()[0]
assert isinstance(key, dict)
self.assertTrue(self.git.deletesshkey(key["id"]))
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
rsa_key = RSA.generate(1024)
self.assertTrue(self.git.addsshkeyuser(self.user_id, title=name,
key=str(rsa_key.publickey().exportKey(format="OpenSSH"))))
self.assertGreater(self.git.getsshkeys(), 0)
keys = self.git.getsshkeys()
assert isinstance(keys, list)
key = self.git.getsshkeys()[0]
assert isinstance(key, dict)
self.assertTrue(self.git.deletesshkey(key["id"]))
def test_snippets(self):
assert isinstance(self.git.createsnippet(self.project_id, "test", "test", "codeee"), dict)
assert isinstance(self.git.getsnippets(self.project_id), list)
snippet = self.git.getsnippets(self.project_id)[0]
assert isinstance(self.git.getsnippet(self.project_id, snippet["id"]), dict)
self.assertTrue(self.git.deletesnippet(self.project_id, snippet["id"]))
def test_repositories(self):
assert isinstance(self.git.getrepositories(self.project_id), list)
assert isinstance(self.git.getrepositorybranch(self.project_id, "develop"), dict)
assert isinstance(self.git.protectrepositorybranch(self.project_id, "develop"), dict)
assert isinstance(self.git.unprotectrepositorybranch(self.project_id, "develop"), dict)
assert isinstance(self.git.getrepositorycommits(self.project_id), list)
assert isinstance(self.git.getrepositorycommits(self.project_id, page=1), list)
assert isinstance(self.git.getrepositorycommits(self.project_id, per_page=7), list)
commit = self.git.getrepositorycommits(self.project_id)[0]
# tags
tags = self.git.getrepositorytags(self.project_id)
assert isinstance(tags, list)
tag = self.git.createrepositorytag(self.project_id, "test_tag", commit["id"], "test_tag_message")
assert isinstance(tag, dict)
self.assertEqual(tag["name"], "test_tag")
assert isinstance(self.git.getrepositorycommit(self.project_id, commit["id"]), dict)
assert isinstance(self.git.getrepositorycommitdiff(self.project_id, commit["id"]), list)
assert isinstance(self.git.getrepositorytree(self.project_id), list)
assert isinstance(self.git.getrepositorytree(self.project_id, path="docs"), list)
assert isinstance(self.git.getrepositorytree(self.project_id, ref_name="develop"), list)
assert isinstance(str(self.git.getrawblob(self.project_id, commit['id'])), str)
assert isinstance(str(self.git.getrawfile(self.project_id, commit['id'], "setup.py")), str)
commit = self.git.getrepositorycommits(self.project_id)
assert isinstance(self.git.compare_branches_tags_commits(self.project_id,
from_id=commit[1]["id"],
to_id=commit[0]["id"]), dict)
self.assertTrue(self.git.createfile(self.project_id, "test.file", "develop", "00000000", "testfile0"))
firstfile = self.git.getfile(self.project_id, "test.file", "develop")
self.assertTrue(self.git.updatefile(self.project_id, "test.file", "develop", "11111111", "testfile1"))
secondfile = self.git.getfile(self.project_id, "test.file", "develop")
self.assertNotEqual(firstfile["commit_id"], secondfile["commit_id"])
self.assertNotEqual(firstfile["content"], secondfile["content"])
self.assertTrue(self.git.deletefile(self.project_id, "test.file", "develop", "remove_testfile"))
assert self.git.getcontributors(self.project_id) is not False
def test_search(self):
self.assertGreater(len(self.git.searchproject(self.project['name'])), 0)
assert isinstance(self.git.searchproject(self.project['name']), list)
def test_filearchive(self):
# test it works
self.assertTrue(self.git.getfilearchive(self.project_id, self.project["name"] + ".tar.gz"))
# test for failure
self.failUnlessRaises(gitlab.exceptions.HttpError, self.git.getfilearchive, 999999)
def test_group(self):
for group in self.git.getgroups():
self.git.deletegroup(group["id"])
assert isinstance(self.git.creategroup("test_group", "test_group"), dict)
assert isinstance(self.git.getgroups(), list)
group = self.git.getgroups()[0]
assert isinstance(self.git.getgroupmembers(group["id"]), list)
try:
# Gitlab < 7.8
self.assertEqual(len(self.git.getgroupmembers(group["id"])), 0)
self.assertTrue(self.git.addgroupmember(group["id"], self.user_id, "master"))
except AssertionError:
# In Gitlab > 7.7, Admin is automatically added to all groups. Keep breaking that api champs.
self.assertEqual(len(self.git.getgroupmembers(group["id"])), 1)
assert isinstance(self.git.getgroupmembers(group["id"]), list)
self.assertGreater(len(self.git.getgroupmembers(group["id"])), 0)
self.assertTrue(self.git.deletegroupmember(group["id"], self.user_id))
self.assertFalse(self.git.addgroupmember(group["id"], self.user_id, "nonexistant"))
self.assertTrue(self.git.deletegroup(group_id=group["id"]))
def test_issues(self):
issue = self.git.createissue(self.project_id, title="Test_issue", description="blaaaaa")
assert isinstance(issue, dict)
self.assertEqual(issue["title"], "Test_issue")
issue = self.git.editissue(self.project_id, issue["id"], title="Changed")
assert isinstance(issue, dict)
self.assertEqual(issue["title"], "Changed")
issue = self.git.editissue(self.project_id, issue["id"], state_event="close")
self.assertEqual(issue["state"], "closed")
self.assertGreater(len(self.git.getprojectissues(self.project_id)), 0)
assert isinstance(self.git.getprojectissue(self.project_id, issue["id"]), dict)
self.assertGreater(len(self.git.getissues()), 0)
def test_system_hooks(self):
# clean up before
for hook in self.git.getsystemhooks():
self.git.deletesystemhook(hook["id"])
self.assertTrue(self.git.addsystemhook("http://github.com"))
self.assertEqual(len(self.git.getsystemhooks()), 1)
hook = self.git.getsystemhooks()[0]
assert isinstance(self.git.testsystemhook(hook["id"]), list)
self.assertTrue(self.git.deletesystemhook(hook["id"]))
self.assertEqual(len(self.git.getsystemhooks()), 0)
def test_milestones(self):
milestone = self.git.createmilestone(self.project_id, title="test")
assert isinstance(milestone, dict)
self.assertGreater(len(self.git.getmilestones(self.project_id)), 0)
assert isinstance(self.git.getmilestone(self.project_id, milestone["id"]), dict)
self.assertEqual(milestone["title"], "test")
milestone = self.git.editmilestone(self.project_id, milestone["id"], title="test2")
self.assertEqual(milestone["title"], "test2")
def test_merge(self):
# prepare for the merge
commit = self.git.getrepositorycommits(self.project_id)[5]
branch = self.git.createbranch(self.project_id, "mergebranch", commit["id"])
merge = self.git.createmergerequest(self.project_id, "develop", "mergebranch", "testmerge")
assert isinstance(self.git.getmergerequests(self.project_id), list)
merge_request = self.git.getmergerequest(self.project_id, merge["id"])
assert isinstance(merge_request, dict)
self.assertEqual(merge_request["title"], "testmerge")
self.assertEqual(len(self.git.getmergerequestcomments(self.project_id, merge["id"])), 0)
self.assertTrue(self.git.addcommenttomergerequest(self.project_id, merge["id"], "Hello"))
comments = self.git.getmergerequestcomments(self.project_id, merge["id"])
self.assertEqual(comments[0]["note"], "Hello")
self.assertTrue(self.git.updatemergerequest(self.project_id, merge["id"], title="testmerge2"))
merge_request = self.git.getmergerequest(self.project_id, merge["id"])
self.assertEqual(merge_request["title"], "testmerge2")
self.assertEqual(self.git.getmergerequest(self.project_id, merge["id"])["state"], "opened")
self.assertTrue(self.git.acceptmergerequest(self.project_id, merge["id"], "closed!"))
self.assertEqual(self.git.getmergerequest(self.project_id, merge["id"])["state"], "merged")
def test_notes(self):
# issue wallnotes
issue = self.git.createissue(self.project_id, title="test_issue")
note = self.git.createissuewallnote(self.project_id, issue["id"], content="Test_note")
assert isinstance(issue, dict)
assert isinstance(note, dict)
self.assertEqual(note["body"], "Test_note")
assert isinstance(self.git.getissuewallnotes(self.project_id, issue["id"]), list)
note2 = self.git.getissuewallnote(self.project_id, issue["id"], note["id"])
assert isinstance(note2, dict)
self.assertEqual(note["body"], note2["body"])
# snippet wallnotes
snippet = self.git.createsnippet(self.project_id, "test_snippet", "test.py", "import this")
note = self.git.createsnippetewallnote(self.project_id, snippet["id"], "test_snippet_content")
assert isinstance(self.git.getsnippetwallnotes(self.project_id, snippet["id"]), list)
note2 = self.git.getsnippetwallnote(self.project_id, snippet["id"], note["id"])
assert isinstance(note2, dict)
self.assertEqual(note["body"], note2["body"])
# merge request wallnotes
commit = self.git.getrepositorycommits(self.project_id)[5]
branch = self.git.createbranch(self.project_id, "notesbranch", commit["id"])
merge = self.git.createmergerequest(self.project_id, "develop", "notesbranch", "testnotes")
self.assertEqual(len(self.git.getmergerequestwallnotes(self.project_id, merge["id"])), 0)
note = self.git.createmergerequestewallnote(self.project_id, merge["id"], "test_content")
assert isinstance(note, dict)
note2 = self.git.getmergerequestwallnote(self.project_id, merge["id"], note["id"])
assert isinstance(note2, dict)
self.assertEqual(note["body"], note2["body"])
self.assertEqual(len(self.git.getmergerequestwallnotes(self.project_id, merge["id"])), 1)
def test_labels(self):
labels = self.git.getlabels(self.project_id)
assert isinstance(labels, list)
self.assertEqual(len(labels), 0)
assert self.git.createlabel(self.project_id, "test_label", "#FFAABB") is not False
labels = self.git.getlabels(self.project_id)
self.assertEqual(len(labels), 1)
self.assertTrue(self.git.deletelabel(self.project_id, "test_label"))
labels = self.git.getlabels(self.project_id)
self.assertEqual(len(labels), 0)
def test_sudo(self):
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
newuser = self.git.createuser(name, name, "sudo_user", "{}@user.org".format(name))
# change to the new user
self.git.setsudo(user=newuser["id"])
self.assertEqual(len(self.git.getprojects()), 0)
self.assertEqual(self.git.currentuser()["username"], name)
# change back to logged user
self.git.setsudo()
self.assertGreaterEqual(len(self.git.getprojects()), 1)
self.assertEqual(self.git.currentuser()["username"], "root")
self.git.deleteuser(newuser["id"])
|
|
from datanator.data_source import sabio_rk_nosql
from datanator.util import file_util
import datanator.config.core
import unittest
import tempfile
import shutil
import requests
import libsbml
import bs4
import time
class TestSabioRk(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cache_dirname = tempfile.mkdtemp()
db = 'test'
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
password = datanator.config.core.get_config(
)['datanator']['mongodb']['password']
MongoDB = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
cls.src = sabio_rk_nosql.SabioRk(cache_dirname=cls.cache_dirname,
MongoDB=MongoDB, db=db,
verbose=True, max_entries=20, username=username,
password=password, webservice_batch_size = 10)
cls.sbml = requests.get('http://sabiork.h-its.org/sabioRestWebServices/kineticLaws', params={
'kinlawids': '4096'}).text
cls.reader = libsbml.SBMLReader()
cls.doc = cls.reader.readSBMLFromString(cls.sbml)
cls.test_model = cls.doc.getModel()
cls.species_sbml = cls.test_model.getListOfSpecies()
cls.reactions_sbml = cls.test_model.getListOfReactions()
cls.file_manager = file_util.FileUtil()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.cache_dirname)
cls.src.db_obj.drop_collection("sabio_rk")
cls.src.client.close()
@unittest.skip('passed, avoid unnecessary http requests')
def test_load_kinetic_law_ids(self):
ids = self.src.load_kinetic_law_ids()
self.assertEqual(ids[0:10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
self.assertGreater(len(ids), 55000)
# @unittest.skip('passed')
def test_create_cross_references_from_sbml(self):
x_refs = self.src.create_cross_references_from_sbml(self.species_sbml.get(0))
exp = [{'chebi': 'CHEBI:16810'}, {'chebi': 'CHEBI:30915'},
{'kegg.compound': 'C00026'}]
self.assertEqual(exp, x_refs)
# @unittest.skip('passed')
def test_parse_enzyme_name(self):
name, is_wildtype, variant = self.src.parse_enzyme_name(self.species_sbml.get(5).getName())
self.assertEqual('E211S/I50N/V80T', variant)
self.assertEqual('4-aminobutyrate transaminase', name)
# @unittest.skip('passed')
def test_get_specie_from_sbml(self):
specie, properties = self.src.get_specie_from_sbml(self.species_sbml.get(5))
specie_exp = {'_id': 141214, 'molecular_weight': None, 'name': '4-aminobutyrate transaminase', 'subunits': [{'uniprot': 'P22256'}, {'uniprot': 'P50457'}],
'cross_references': []}
properties_exp = {'is_wildtype': False, 'variant': 'E211S/I50N/V80T', 'modifier_type': 'Modifier-Catalyst'}
self.assertEqual(specie['_id'], specie_exp['_id'])
self.assertEqual(properties_exp['variant'], properties['variant'])
# @unittest.skip('passed')
def test_get_specie_reference_from_sbml(self):
species = []
for i_specie in range(self.species_sbml.size()):
specie_sbml = self.species_sbml.get(i_specie)
specie, properties = self.src.get_specie_from_sbml(specie_sbml)
species.append(specie)
specie, compartment = self.src.get_specie_reference_from_sbml('ENZ_141214_Cell', species)
self.assertEqual(compartment, None)
self.assertEqual(specie[0]['subunits'], [{'uniprot': 'P22256'},
{'uniprot': 'P50457'}])
# @unittest.skip('passed')
def test_create_kinetic_law_from_sbml(self):
species = []
specie_properties = {}
for i_specie in range(self.species_sbml.size()):
specie_sbml = self.species_sbml.get(i_specie)
specie, properties = self.src.get_specie_from_sbml(specie_sbml)
species.append(specie)
specie_properties[specie_sbml.getId()] = properties
units = {}
units_sbml = self.test_model.getListOfUnitDefinitions()
for i_unit in range(units_sbml.size()):
unit_sbml = units_sbml.get(i_unit)
units[unit_sbml.getId()] = unit_sbml.getName()
functions = {}
functions_sbml = self.test_model.getListOfFunctionDefinitions()
for i_function in range(functions_sbml.size()):
function_sbml = functions_sbml.get(i_function)
math_sbml = function_sbml.getMath()
if math_sbml.isLambda() and math_sbml.getNumChildren():
eq = libsbml.formulaToL3String(math_sbml.getChild(math_sbml.getNumChildren() - 1))
else:
eq = None
if eq in ('', 'NaN'):
eq = None
functions[function_sbml.getId()] = eq
result = self.src.create_kinetic_law_from_sbml(4096, self.reactions_sbml.get(0), species,
specie_properties, functions, units)
test_1 = 1922
self.assertEqual(result['reactants'][0]['_id'], test_1)
# @unittest.skip('passed')
def test_create_kinetic_laws_from_sbml(self):
ids = [4096]
self.src.create_kinetic_laws_from_sbml(ids, self.sbml)
time.sleep(0.5)
doc = self.src.collection.find_one({'kinlaw_id':ids[0]})
self.assertEqual(doc["parameters"][0]['observed_value'], 0.00014)
# @unittest.skip('passed')
def test_load_compounds(self):
compound_1 = {
"_id" : 1922,
"name" : "2-Oxoglutarate",
"cross_references" : [
{
"namespace" : "chebi",
"id" : "CHEBI:16810"
},
{
"namespace" : "chebi",
"id" : "CHEBI:30915"
},
{
"namespace" : "kegg.compound",
"id" : "C00026"
}
]
}
compound_2 = {
"_id" : 21128,
"name" : "2-Methylaspartic acid",
"cross_references" : []
}
self.src.load_compounds(compounds = [compound_1, compound_2])
test_1 = self.src.collection_compound.find_one({'_id': compound_1['_id']})
test_2 = self.src.collection_compound.find_one({'_id': compound_2['_id']})
self.assertTrue('synonyms' in test_1)
self.assertTrue(isinstance(test_2['structures'], list))
# @unittest.skip('passed')
def test_get_parameter_by_properties(self):
kinetic_law_mock = {'kinlaw_id': 4096, 'mechanism': 'mock_mechanism',
'tissue': 'mock_tissue', 'enzyme_type': 'mock_et',
'parameters': [{'observed_type': ['mock_ot', 'ssss'], 'compound': None,
'observed_value': ['mock_ov', 'some_1']}]}
parameter_properties_mock = {'type_code': ['mock_ot'], 'associatedSpecies': None,
'startValue': ['mock_ov', 'some_2'], 'type': 'some_type'}
result = self.src.get_parameter_by_properties(kinetic_law_mock, parameter_properties_mock)
exp = {'observed_type': ['mock_ot', 'ssss'], 'compound': None, 'observed_value': ['mock_ov', 'some_1']}
self.assertEqual(result, exp)
# @unittest.skip('passed')
def test_load_missing_kinetic_law_information_from_tsv_helper(self):
url = 'http://sabiork.h-its.org/entry/exportToExcelCustomizable'
response = requests.get(url, params={
'entryIDs[]': [4096],
'fields[]': [
'EntryID',
'KineticMechanismType',
'Tissue',
'Parameter',
],
'preview': False,
'format': 'tsv',
'distinctRows': 'false',
})
tsv = response.text
self.src.load_missing_kinetic_law_information_from_tsv_helper(tsv)
result = self.src.collection.find_one({'kinlaw_id': 4096})
        self.assertEqual(result.get('mechanism', 'no mechanism field'), None)
# @unittest.skip('passed')
def test_infer_compound_structures_from_names(self):
compound_1 = {
"_id" : 73,
"name" : "L-Glutamate",
"cross_references" : [
{
"namespace" : "chebi",
"id" : "CHEBI:16015"
},
{
"namespace" : "chebi",
"id" : "CHEBI:29972"
},
{
"namespace" : "chebi",
"id" : "CHEBI:29985"
},
{
"namespace" : "chebi",
"id" : "CHEBI:29988"
},
{
"namespace" : "kegg.compound",
"id" : "C00025"
}
]
}
compound_2 = {
"_id" : 1922,
"name" : "2-Oxoglutarate",
"cross_references" : [
{
"namespace" : "kegg.compound",
"id" : "C00026"
},
{
"namespace" : "pubchem.substance",
"id" : "3328"
},
{
"namespace" : "chebi",
"id" : "CHEBI:16810"
},
{
"namespace" : "chebi",
"id" : "CHEBI:30915"
},
{
"namespace" : "reactome",
"id" : "113671"
},
{
"namespace" : "biocyc",
"id" : "2-KETOGLUTARATE"
},
{
"namespace" : "metanetx.chemical",
"id" : "MNXM20"
},
{
"namespace" : "BioModels",
"id" : "16810"
},
{
"namespace" : "BioModels",
"id" : "30915"
}
],
"structures" : [
{
"inchi" : "InChI=1S/C5H6O5/c6-3(5(9)10)1-2-4(7)8/h1-2H2,(H,7,8)(H,9,10)"
},
{
"smiles" : "OC(=O)CCC(=O)C(O)=O"
}
]
}
result = self.src.infer_compound_structures_from_names([compound_1, compound_2])
self.assertEqual(result[1], compound_2)
self.assertTrue('structures' in result[0])
# @unittest.skip('passed')
def test_calc_inchi_formula_connectivity(self):
s = {'smiles': '[H]O[H]'}
test_1 = self.src.calc_inchi_formula_connectivity(s)
self.assertEqual(test_1['_value_inchi'], 'InChI=1S/H2O/h1H2')
self.assertEqual(test_1['_value_inchi_formula_connectivity'], 'H2O')
s = {'inchi': 'InChI=1S/H2O/h1H2'}
test_2 = self.src.calc_inchi_formula_connectivity(s)
self.assertEqual(test_2['_value_inchi'], 'InChI=1S/H2O/h1H2')
self.assertEqual(test_2['_value_inchi_formula_connectivity'], 'H2O')
s = {'inchi': 'InChI=1S/C9H10O3/c10-8(9(11)12)6-7-4-2-1-3-5-7/h1-5,8,10H,6H2,(H,11,12)/t8-/m1/s1'}
test_3 = self.src.calc_inchi_formula_connectivity(s)
self.assertEqual(test_3['_value_inchi'], 'InChI=1S/C9H10O3/c10-8(9(11)12)6-7-4-2-1-3-5-7/h1-5,8,10H,6H2,(H,11,12)/t8-/m1/s1')
self.assertEqual(test_3['_value_inchi_formula_connectivity'], 'C9H10O3/c10-8(9(11)12)6-7-4-2-1-3-5-7')
# @unittest.skip('passed')
def test_parse_complex_subunit_structure(self):
response = requests.get('http://sabiork.h-its.org/kindatadirectiframe.jsp', params={
'kinlawid': 4096, 'newinterface': 'true'})
doc = bs4.BeautifulSoup(response.text, 'html.parser')
td = doc.find('td', text='Modifier-Catalyst')
tr = td.parent
td = tr.find_all('td')[-1]
inner_html = td.decode_contents(formatter='html').strip() + ' '
test_1 = self.src.parse_complex_subunit_structure(inner_html)
self.assertEqual({'P22256': 2, 'P50457': 1}, test_1)
    @unittest.skip('temporary')
def test_load_missing_enzyme_information_from_html(self):
ids = [4096]
self.src.load_missing_enzyme_information_from_html(ids)
projection = {'enzyme':1}
test_doc = self.src.collection.find_one(filter={'kinlaw_id': { '$in': ids }}, projection=projection)
print(test_doc)
l = self.file_manager.search_dict_list(test_doc['enzyme'], 'coeffcient')
self.assertFalse(len(l)>0)
# @unittest.skip('passed')
def test_calc_enzyme_molecular_weights(self):
null = None
enzyme = [{
"_id" : 141214,
"molecular_weight" : null,
"name" : "4-aminobutyrate transaminase",
"subunits" : [
{
"uniprot" : "P22256",
"coefficient" : 2
},
{
"uniprot" : "P50457",
"coefficient" : 1
}
],
"cross_references" : [ ]
}]
results = self.src.calc_enzyme_molecular_weights(enzyme, len(enzyme))
self.assertTrue(results[0]['molecular_weight'] != None)
# @unittest.skip('takes too long')
def test_load_content(self):
self.src.load_content()
@unittest.skip('function obsolete')
def test_add_inchi_hash(self):
result = self.src.add_inchi_hash()
query = {}
projection = {'products': 1, 'kinlaw_id': 1, 'reactants': 1, '_id': 0}
col = self.src.collection.find(filter=query, projection=projection)
for doc in col:
self.assertTrue('InChI_Key' in doc['products'][0]['structures'][0] )
|
|
import seabreeze.backends
# get the backend and add some functions/classes to this module
lib = seabreeze.backends.get_backend()
# from . import cseabreeze as lib
list_devices = lib.device_list_devices
SeaBreezeError = lib.SeaBreezeError
SeaBreezeDevice = lib.SeaBreezeDevice
import numpy
class _HelperFeatureAdder(object):
def __init__(self, other):
self._other = other
def add(self, feature):
ffunc = getattr(lib, "device_get_%s_feature_id" % feature)
fids = ffunc(self._other._dev)
if fids:
return fids[0]
else:
return -1 # It seems as if negative numbers are not used for featureIDs
class LightSource(object):
def __init__(self, device, featureId, index):
ident = device, featureId, index
self._ident = ident
self._has_enable = lib.light_source_has_enable(*ident)
self._has_varint = lib.light_source_has_variable_intensity(*ident)
def set_enable(self, enable):
if self._has_enable:
d, f, i = self._ident
lib.light_source_set_enable(d, f, i, enable)
else:
raise SeaBreezeError("Light source #%d can't be enabled or disabled.")
def set_intensity(self, intensity):
if self._has_varint:
d, f, i = self._ident
lib.light_source_set_intensity(d, f, i, intensity)
else:
raise SeaBreezeError("Light source #%d intensity can't be set.")
def get_intensity(self):
try:
return lib.light_source_get_intensity(*self._ident)
except SeaBreezeError:
raise
def __repr__(self):
d, f, i = self._ident
return "<LightSource #%d at %s:%s>" % (i, d.model, d.serial)
class Spectrometer(object):
def __init__(self, device):
self._open_device(device)
@classmethod
def from_serial_number(cls, serial=None):
if serial is None: # pick first spectrometer
for dev in lib.device_list_devices():
if not lib.device_is_open(dev):
return cls(dev)
else:
raise SeaBreezeError("No unopened device found.")
else: # pick spectrometer with correct serial
for dev in lib.device_list_devices():
if dev.serial == str(serial):
if lib.device_is_open(dev):
raise SeaBreezeError("Device already opened.")
else:
return cls(dev)
else:
raise SeaBreezeError("No device attached with serial number '%s'." % serial)
def _open_device(self, device):
if not isinstance(device, SeaBreezeDevice):
raise SeaBreezeError("Not a SeaBreezeDevice")
if lib.device_is_open(device):
raise SeaBreezeError("Device already opened.")
if hasattr(self, '_dev') and lib.device_is_open(getattr(self, '_dev')):
lib.device_close(getattr(self, '_dev'))
self._dev = device
print "about to open"
lib.device_open(self._dev)
print "opened, now getting default info"
# get default information
self._serial = self._dev.serial
self._model = self._dev.model
# get features
feature = _HelperFeatureAdder(self)
self._fidsp = feature.add('spectrometer')
self._fidsh = feature.add('shutter')
self._fidls = feature.add('light_source')
self._fidcs = feature.add('continuous_strobe')
self._fidee = feature.add('eeprom')
self._fidic = feature.add('irrad_calibration')
self._fidla = feature.add('lamp')
self._fidte = feature.add('tec')
self._fidnc = feature.add('nonlinearity_coeffs') # Added
self._fidsl = feature.add('stray_light_coeffs')
# get additional information
print "get additional info"
self._pixels = lib.spectrometer_get_formatted_spectrum_length(self._dev, self._fidsp)
self._minimum_integration_time_micros = (
lib.spectrometer_get_minimum_integration_time_micros(self._dev, self._fidsp))
# get wavelengths
print "get wavelengths"
self._wavelengths = numpy.zeros((self._pixels,), dtype=numpy.double)
transfered_N = 0
while True:
transfered_N += lib.spectrometer_get_wavelengths(self._dev, self._fidsp,
self._wavelengths[transfered_N:])
if transfered_N >= self._pixels:
break
# get dark pixel indices
print "get dark pixel indicies"
self._dark = lib.spectrometer_get_electric_dark_pixel_indices(self._dev, self._fidsp)
self._has_dark_pixels = True if self._dark.size > 0 else False
# get nonlinearity coefficients
print "get nonlinearity"
try:
sbnc = lib.nonlinearity_coeffs_get(self._dev, self._fidnc)
self._nc = numpy.poly1d(sbnc[::-1])
self._has_nonlinearity_coeffs = True
except SeaBreezeError:
self._has_nonlinearity_coeffs = False
# if lightsources
try:
N_light_sources = lib.light_source_get_count(self._dev, self._fidls)
self._light_sources = tuple(LightSource(self._dev, self._fidls, i)
for i in range(N_light_sources))
except SeaBreezeError:
self._light_sources = tuple()
def wavelengths(self):
return self._wavelengths
def intensities(self, correct_dark_counts=False, correct_nonlinearity=False):
if correct_dark_counts and not self._has_dark_pixels:
raise SeaBreezeError("This device does not support dark count correction.")
if correct_nonlinearity and not self._has_nonlinearity_coeffs:
raise SeaBreezeError("This device does not support nonlinearity correction.")
# Get the intensities
out = numpy.empty((self._pixels,), dtype=numpy.double)
transfered_N = 0
while True:
transfered_N += lib.spectrometer_get_formatted_spectrum(self._dev, self._fidsp,
out[transfered_N:])
if transfered_N >= self._pixels:
break
# Do corrections if requested
if (correct_nonlinearity or correct_dark_counts):
dark_offset = numpy.mean(out[self._dark]) if self._has_dark_pixels else 0.
out -= dark_offset
if (correct_nonlinearity):
out = out / numpy.polyval(self._nc, out)
if correct_nonlinearity and (not correct_dark_counts):
out += dark_offset
return out
def spectrum(self, correct_dark_counts=False, correct_nonlinearity=False):
return numpy.vstack((self._wavelengths,
self.intensities(correct_dark_counts, correct_nonlinearity)))
def integration_time_micros(self, integration_time_micros):
lib.spectrometer_set_integration_time_micros(self._dev,
self._fidsp, integration_time_micros)
def trigger_mode(self, mode):
lib.spectrometer_set_trigger_mode(self._dev, self._fidsp, mode)
@property
def serial_number(self):
return self._serial
@property
def model(self):
return self._model
@property
def pixels(self):
return self._pixels
@property
def minimum_integration_time_micros(self):
return self._minimum_integration_time_micros
@property
def light_sources(self):
return self._light_sources
def eeprom_read_slot(self, slot):
return lib.eeprom_read_slot(self._dev, self._fidee, slot)
def tec_set_enable(self, enable):
lib.tec_set_enable(self._dev, self._fidte, enable)
def tec_set_temperature_C(self, set_point_C):
lib.tec_set_temperature_setpoint_degrees_C(self._dev, self._fidte, set_point_C)
def tec_get_temperature_C(self):
return lib.tec_read_temperature_degrees_C(self._dev, self._fidte)
def lamp_set_enable(self, enable):
lib.lamp_set_lamp_enable(self._dev, self._fidla, enable)
def shutter_set_open(self, state):
lib.shutter_set_shutter_open(self._dev, self._fidsh, state)
def stray_light_coeffs(self):
return lib.stray_light_coeffs_get(self._dev, self._fidsl)
def irrad_calibration(self):
out = numpy.empty((self._pixels,), dtype=numpy.float32)
lib.irrad_calibration_read(self._dev, self._fidic, out)
return out
def irrad_calibration_collection_area(self):
if lib.irrad_calibration_has_collection_area(self._dev, self._fidic):
return lib.irrad_calibration_read_collection_area(self._dev, self._fidic)
else:
raise SeaBreezeError("Device does not store irrad calibration area.")
def continuous_strobe_set_enable(self, enable):
lib.continuous_strobe_set_enable(self._dev, self._fidcs, enable)
def continuous_strobe_set_period_micros(self, period_micros):
lib.continuous_strobe_set_period_micros(self._dev, self._fidcs, period_micros)
def close(self):
lib.device_close(self._dev)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
return "<Spectrometer %s:%s>" % (self.model, self.serial_number)
|
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator computing metrics over given pairs of predictions and labels."""
import os
import pickle
from absl import logging
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.math.interpolation import trilinear
from tensorflow_graphics.projects.points_to_3Dobjects.models import centernet_utils
from tensorflow_graphics.projects.points_to_3Dobjects.utils import tf_utils
from google3.pyglib import gfile
from google3.third_party.google_research.google_research.tf3d.object_detection.box_utils import np_box_ops
class ShapeAccuracyMetric:
"""Computes the accuracy of shpe prediction."""
def __init__(self, k=1):
self.metric = tf.keras.metrics.SparseTopKCategoricalAccuracy(k)
def update(self, sparse_labels, predicted_probabilities, sample_weights=None):
self.metric.update_state(sparse_labels, predicted_probabilities,
sample_weights)
def evaluate(self):
return self.metric.result().numpy()
def reset(self):
self.metric.reset_states()
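# Usage sketch for ShapeAccuracyMetric (comments only; the tensors shown are
# made-up examples). Labels are sparse class ids and predictions are per-class
# probabilities.
#
#   metric = ShapeAccuracyMetric(k=1)
#   metric.update(sparse_labels=tf.constant([2]),
#                 predicted_probabilities=tf.constant([[0.1, 0.2, 0.7]]))
#   print(metric.evaluate())   # -> 1.0, since class 2 has the highest score
#   metric.reset()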
def get_2d_bounding_box_iou(box1, box2):
"""Compute IoU between two 2D bounding boxes.
Args:
box1: Input tensor with shape [4] [x_min, y_min, x_max, y_max]
box2: Input tensor with shape [4] [x_min, y_min, x_max, y_max]
Returns:
The intersection over union as a float.
"""
x_min1, y_min1, x_max1, y_max1 = box1
x_min2, y_min2, x_max2, y_max2 = box2
ma = np.maximum
mi = np.minimum
intersection = ma(0, mi(x_max1, x_max2) - ma(x_min1, x_min2)) * \
ma(0, mi(y_max1, y_max2) - ma(y_min1, y_min2))
area1 = (x_max1 - x_min1) * (y_max1 - y_min1)
area2 = (x_max2 - x_min2) * (y_max2 - y_min2)
union = area1 + area2 - intersection
print(intersection / union)
return intersection / (union + 1e-5)
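# Worked example for get_2d_bounding_box_iou (comments only): with
# box1 = [0, 0, 2, 2] and box2 = [1, 1, 3, 3], the intersection is a 1 x 1
# square (area 1) and the union is 4 + 4 - 1 = 7, so the returned IoU is
# roughly 1 / 7 ~= 0.143 (the 1e-5 in the denominator only guards against
# division by zero).
#
#   iou = get_2d_bounding_box_iou(np.array([0., 0., 2., 2.]),
#                                 np.array([1., 1., 3., 3.]))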
def get_3d_bounding_box_iou(box1, box2):
"""Computes intersection between two given 3d bounding boxes.
Args:
box1: Input tensor with shape [B, 7] where the inner dimensions are as
follows:[x, y, z, length, width, height, yaw].
box2: Input tensor with shape [B, 7] where the inner dimensions are as
follows:[x, y, z, length, width, height, yaw].
Returns:
The IoU between the two bounding boxes.
"""
box1 = box1.numpy() if isinstance(box1, tf.Tensor) else box1
box2 = box2.numpy() if isinstance(box2, tf.Tensor) else box2
box1 = box1.astype(np.float32)
box2 = box2.astype(np.float32)
  # The box IoU op rotates around z, while these boxes rotate around y, so the
  # y and z axes need to be swapped.
center_1 = tf.reshape(box1[0:3][[0, 2, 1]], [1, 3])
center_2 = tf.reshape(box2[0:3][[0, 2, 1]], [1, 3])
rotation_z_1 = tf.reshape(box1[-1], [1])
rotation_z_2 = tf.reshape(box2[-1], [1])
length_1 = tf.reshape(box1[3 + 0], [1])
height_1 = tf.reshape(box1[3 + 2], [1])
width_1 = tf.reshape(box1[3 + 1], [1])
length_2 = tf.reshape(box2[3 + 0], [1])
height_2 = tf.reshape(box2[3 + 2], [1])
width_2 = tf.reshape(box2[3 + 1], [1])
iou = np.squeeze(np_box_ops.iou3d_7dof_box(
length_1, height_1, width_1, center_1, rotation_z_1,
length_2, height_2, width_2, center_2, rotation_z_2))
return iou
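# Call sketch for get_3d_bounding_box_iou (comments only; the values are made
# up): each box is [x, y, z, length, width, height, yaw] in the y-up
# convention handled above.
#
#   iou = get_3d_bounding_box_iou(
#       np.array([0., 0., 0., 2., 2., 2., 0.]),
#       np.array([0.5, 0., 0., 2., 2., 2., 0.]))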
class IoUMetric:
"""IoU metric."""
def __init__(self, max_num_classes=6, resolution=128, tol=0.05, slave=False,
path=None):
self.max_num_classes = max_num_classes
self.iou_per_class = {i: [] for i in range(self.max_num_classes)}
self.resolution = resolution
self.slave = slave
self.path = path
self.tol = tol
def update(self, labeled_sdfs, labeled_classes, labeled_poses,
predicted_sdfs, predicted_classes, predicted_poses):
"""Update."""
labeled_rotations = labeled_poses[0]
labeled_translations = labeled_poses[1]
labeled_sizes = labeled_poses[2]
status = True
if status:
box_limits_x = [100, -100]
# box_limits_y = [100, -100]
box_limits_z = [100, -100]
for i in range(labeled_translations.shape[0]):
rot = tf.reshape(tf.gather(labeled_rotations[i], [0, 2, 6, 8]), [2, 2])
min_x = tf.cast(0.0 - labeled_sizes[i][0] / 2.0, dtype=tf.float32)
max_x = tf.cast(0.0 + labeled_sizes[i][0] / 2.0, dtype=tf.float32)
# min_y = tf.cast(0.0 - labeled_sizes[i][1] / 2.0, dtype=tf.float32)
# max_y = tf.cast(0.0 + labeled_sizes[i][1] / 2.0, dtype=tf.float32)
min_z = tf.cast(0.0 - labeled_sizes[i][2] / 2.0, dtype=tf.float32)
max_z = tf.cast(0.0 + labeled_sizes[i][2] / 2.0, dtype=tf.float32)
translation = tf.reshape([labeled_translations[i][0],
labeled_translations[i][2]], [2, 1])
pt_0 = rot @ tf.reshape([min_x, min_z], [2, 1]) + translation
pt_1 = rot @ tf.reshape([min_x, max_z], [2, 1]) + translation
pt_2 = rot @ tf.reshape([max_x, min_z], [2, 1]) + translation
pt_3 = rot @ tf.reshape([max_x, max_z], [2, 1]) + translation
for pt in [pt_0, pt_1, pt_2, pt_3]:
if pt[0] < box_limits_x[0]:
box_limits_x[0] = pt[0]
if pt[0] > box_limits_x[1]:
box_limits_x[1] = pt[0]
if pt[1] < box_limits_z[0]:
box_limits_z[0] = pt[1]
if pt[1] > box_limits_z[1]:
box_limits_z[1] = pt[1]
mean_x = tf.reduce_mean(box_limits_x)
mean_z = tf.reduce_mean(box_limits_z)
else:
mean_x = tf.reduce_mean(labeled_translations[:, 0])
mean_z = tf.reduce_mean(labeled_translations[:, 2])
samples_world = grid.generate(
(mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5),
[self.resolution, self.resolution, self.resolution])
# samples_world = grid.generate(
# (box_limits_x[0][0], box_limits_y[0], box_limits_z[0][0]),
# (box_limits_x[1][0], box_limits_y[1], box_limits_z[1][0]),
# [self.resolution, self.resolution, self.resolution])
# samples_world = grid.generate(
# (-5.0, -5.0, -5.0),
# (5.0, 5.0, 5.0),
# [self.resolution, self.resolution, self.resolution])
samples_world = tf.reshape(samples_world, [-1, 3])
ious = []
status = False
if status:
_, axs = plt.subplots(labeled_translations.shape[0], 5)
fig_obj_count = 0
for class_id in range(self.max_num_classes):
# Do the same for the ground truth and predictions
sdf_values = tf.zeros_like(samples_world)[:, 0:1]
for mtype, (classes, sdfs, poses) in enumerate([
(labeled_classes, labeled_sdfs, labeled_poses),
(predicted_classes, predicted_sdfs, predicted_poses)]):
for i in range(classes.shape[0]):
if class_id == classes[i]:
sdf = tf.expand_dims(sdfs[i], -1)
sdf = sdf * -1.0 # inside positive, outside zero
samples_object = centernet_utils.transform_pointcloud(
tf.reshape(samples_world, [1, 1, -1, 3]),
tf.reshape(poses[2][i], [1, 1, 3]),
tf.reshape(poses[0][i], [1, 1, 3, 3]),
tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
samples_object = \
(samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
samples = tf.squeeze(samples_object)
interpolated = trilinear.interpolate(sdf, samples)
sdf_values += tf.math.sign(tf.nn.relu(interpolated + self.tol))
status2 = False
if status2:
a = 2
values = interpolated
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0])
print(mtype, fig_obj_count, 0)
values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])
print(mtype, fig_obj_count, 1)
if mtype == 1:
values = sdf_values
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 4].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 4])
print(mtype, fig_obj_count, 2)
fig_obj_count += 1
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
union = tf.reduce_sum(tf.math.sign(sdf_values))
iou = intersection / union
if not tf.math.is_nan(iou):
ious.append(iou)
status3 = False
if status3:
_ = plt.figure(figsize=(5, 5))
plt.clf()
# mask = (sdf_values.numpy() > 0)[:, 0]
# plt.scatter(samples_world.numpy()[mask, 0],
# samples_world.numpy()[mask, 1],
# marker='.', c=sdf_values.numpy()[mask, 0])
plt.scatter(samples_world.numpy()[:, 0],
samples_world.numpy()[:, 1],
marker='.', c=sdf_values.numpy()[:, 0])
plt.colorbar()
if not tf.math.is_nan(iou):
self.iou_per_class[class_id].append(iou)
    if not ious:
ious = [0]
return np.mean(ious), np.min(ious)
def evaluate(self):
"""Evaluate."""
if self.slave:
data = self.iou_per_class
with gfile.Open(self.path, 'wb') as file:
pickle.dump(data, file)
logging.info(file)
return
else:
iou_per_class_means = []
for _, v in self.iou_per_class.items():
if v:
iou_per_class_means.append(np.mean(v))
return np.mean(iou_per_class_means)
def reset(self):
self.iou_per_class = {i: [] for i in range(self.max_num_classes)}
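# Illustrative sketch of the voxel-occupancy IoU idea used by IoUMetric.update:
# each shape adds 1 to every sample point it covers, the intersection is every
# point covered by at least two shapes, and the union is every point covered at
# all. This is a standalone numpy toy; the real metric derives occupancies by
# trilinearly interpolating SDFs in world space. Names here are illustrative only.
def _example_occupancy_iou(occupancy_a, occupancy_b):
  """IoU of two boolean occupancy grids of identical shape."""
  occupancy_a = np.asarray(occupancy_a, dtype=bool)
  occupancy_b = np.asarray(occupancy_b, dtype=bool)
  counts = occupancy_a.astype(np.float32) + occupancy_b.astype(np.float32)
  intersection = np.sum(counts > 1.0)   # points claimed by both shapes
  union = np.sum(counts > 0.0)          # points claimed by either shape
  if union == 0:
    return float('nan')                 # mirrors the is_nan guard above
  return intersection / union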
class CollisionMetric:
"""Collision."""
def __init__(self, max_num_classes=6, resolution=128,
tol=0.04, slave=False, path=None):
self.max_num_classes = max_num_classes
self.collisions = []
self.intersections = []
self.ious = []
self.resolution = resolution
self.slave = slave
self.path = path
self.tol = tol
def update(self, labeled_sdfs, labeled_classes, labeled_poses,
predicted_sdfs, predicted_classes, predicted_poses):
"""Update."""
if labeled_sdfs or labeled_classes:
print(labeled_sdfs)
mean_x = tf.reduce_mean(labeled_poses[1][:, 0])
mean_z = tf.reduce_mean(labeled_poses[1][:, 2])
samples_world = grid.generate(
(mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5),
[self.resolution, self.resolution, self.resolution])
samples_world = tf.reshape(samples_world, [-1, 3])
status = False
if status:
_, axs = plt.subplots(3, 3)
fig_obj_count = 0
# Do the same for the ground truth and predictions
num_collisions = 0
prev_intersection = 0
sdf_values = tf.zeros_like(samples_world)[:, 0:1]
for classes, sdfs, poses in [(predicted_classes,
predicted_sdfs,
predicted_poses)]:
for i in range(classes.shape[0]):
sdf = tf.expand_dims(sdfs[i], -1)
sdf = sdf * -1.0 # inside positive, outside zero
samples_object = centernet_utils.transform_pointcloud(
tf.reshape(samples_world, [1, 1, -1, 3]),
tf.reshape(poses[2][i], [1, 1, 3]),
tf.reshape(poses[0][i], [1, 1, 3, 3]),
tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
samples = tf.squeeze(samples_object)
interpolated = trilinear.interpolate(sdf, samples)
occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol))
sdf_values += occupancy_value
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
if intersection > prev_intersection:
prev_intersection = intersection
num_collisions += 1
status2 = False
if status2:
a = 1
values = interpolated
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 0].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 0])
values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 1])
values = sdf_values
inter = tf.reshape(values, [self.resolution,
self.resolution,
self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, 2].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, 2])
fig_obj_count += 1
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
union = tf.reduce_sum(tf.math.sign(sdf_values))
iou = intersection / union
self.collisions.append(num_collisions)
self.intersections.append(intersection)
self.ious.append(iou)
return num_collisions, intersection, iou
def evaluate(self):
"""Evaluate."""
if self.slave:
data = {'collisions': self.collisions,
'intersections': self.intersections,
'ious': self.ious}
with gfile.Open(self.path, 'wb') as file:
pickle.dump(data, file)
logging.info(file)
return
else:
# self.collisions = []
# for k, v in self.iou_per_class.items():
# if len(v) > 0:
# iou_per_class_means.append(np.mean(v))
return np.sum(self.collisions)
def reset(self):
self.intersections = []
self.ious = []
self.collisions = []
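# Illustrative sketch of the collision-counting logic in CollisionMetric.update:
# objects are added to a shared occupancy grid one at a time, and a collision is
# recorded whenever adding an object increases the number of multiply-occupied
# points. Standalone numpy toy; the real metric derives occupancies from SDFs.
def _example_count_collisions(object_occupancies):
  """Counts objects whose occupancy overlaps previously added objects."""
  num_collisions = 0
  prev_intersection = 0
  accumulated = None
  for occupancy in object_occupancies:
    occupancy = np.asarray(occupancy, dtype=np.float32)
    accumulated = occupancy if accumulated is None else accumulated + occupancy
    intersection = np.sum(accumulated > 1.0)  # points claimed by 2+ objects
    if intersection > prev_intersection:
      prev_intersection = intersection
      num_collisions += 1
  return num_collisions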
class BoxIoUMetric:
"""BoxIOU."""
def __init__(self, t=0.5, threed=False):
self.labeled_boxes = {}
self.predicted_boxes = {}
self.threshold = t
self.threed = threed
self.get_iou_func = get_2d_bounding_box_iou
if self.threed:
self.get_iou_func = get_3d_bounding_box_iou
def update(self, scene_id, labeled_boxes, labeled_classes, predicted_boxes,
predicted_classes, confidences):
"""For one scene, provide all ground-truth and all predicted detections."""
self.labeled_boxes[scene_id] = (labeled_boxes, labeled_classes)
self.predicted_boxes[scene_id] = (predicted_boxes, predicted_classes,
confidences)
def evaluate(self):
"""Eval."""
predictions_per_class = {} # map {classname: pred}
labels_per_class = {} # map {classname: gt}
for scene_id in self.predicted_boxes:
bboxes, classnames, scores = self.predicted_boxes[scene_id]
classnames = classnames.numpy()
bboxes = bboxes.numpy()
scores = scores.numpy()
for i in range(classnames.shape[0]):
classname = classnames[i]
bbox = bboxes[i]
score = scores[i]
# for classname, bbox, score in self.predicted_boxes[scene_id]:
if classname not in predictions_per_class:
predictions_per_class[classname] = {}
if scene_id not in predictions_per_class[classname]:
predictions_per_class[classname][scene_id] = []
if classname not in labels_per_class:
labels_per_class[classname] = {}
if scene_id not in labels_per_class[classname]:
labels_per_class[classname][scene_id] = []
predictions_per_class[classname][scene_id].append((bbox, score))
for scene_id in self.labeled_boxes:
bboxes, classnames = self.labeled_boxes[scene_id]
classnames = classnames.numpy()
bboxes = bboxes.numpy()
for i in range(classnames.shape[0]):
classname = classnames[i]
bbox = bboxes[i]
if classname not in labels_per_class:
labels_per_class[classname] = {}
if scene_id not in labels_per_class[classname]:
labels_per_class[classname][scene_id] = []
labels_per_class[classname][scene_id].append(bbox)
recall_per_class = {}
precision_per_class = {}
ap_per_class = {}
for classname in labels_per_class:
print('Computing AP for class: ', classname)
if classname in predictions_per_class:
recall, precision, ap = self._eval_detections_per_class(
# this does not work when class was never predicted
predictions_per_class[classname],
labels_per_class[classname],
self.threshold)
else:
recall, precision, ap = 0.0, 0.0, 0.0
recall_per_class[classname] = recall
precision_per_class[classname] = precision
ap_per_class[classname] = ap
print(classname, ap)
# return recall_per_class, precision_per_class, ap_per_class
mean = np.mean(np.array([v for k, v in ap_per_class.items()]))
print(mean)
return mean
def _get_iou_main(self, get_iou_func, args):
return get_iou_func(*args)
def _voc_ap(self, rec, prec):
"""Compute VOC AP given precision and recall."""
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
def _eval_detections_per_class(self, pred, gt, ovthresh=0.25):
"""Generic functions to compute precision/recall for object detection."""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
# pad empty list to all other imgids
for img_id in pred:
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
# construct dets
image_ids = []
confidence = []
bb = []
for img_id in pred:
for box, score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
bb.append(box)
confidence = np.array(confidence)
bb = np.array(bb) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence)
bb = bb[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
r = class_recs[image_ids[d]]
      box = bb[d, ...].astype(float)  # use a separate name so the full detection array `bb` is not overwritten inside the loop
ovmax = -np.inf
bbgt = r['bbox'].astype(float)
if bbgt.size > 0:
# compute overlaps
for j in range(bbgt.shape[0]):
          iou = self._get_iou_main(self.get_iou_func, (box, bbgt[j, ...]))
if iou > ovmax:
ovmax = iou
jmax = j
if ovmax > ovthresh:
if not r['det'][jmax]:
tp[d] = 1.
r['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos + 1e-5)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = self._voc_ap(rec, prec)
return rec, prec, ap
def reset(self):
self.labeled_boxes = {}
self.predicted_boxes = {}
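# Worked example of the precision-envelope AP computed by BoxIoUMetric._voc_ap,
# using made-up recall/precision values. With recall [0.25, 0.5, 0.5, 0.75] and
# precision [1.0, 1.0, 0.667, 0.75], the padded envelope becomes
# [1.0, 1.0, 1.0, 0.75, 0.75, 0.0] and the AP is
# 0.25*1.0 + 0.25*1.0 + 0.25*0.75 = 0.6875 (the final recall step contributes 0
# because the padded precision ends at 0). Illustrative only; never called here.
def _example_voc_ap_usage():
  """Tiny, self-contained demonstration of BoxIoUMetric._voc_ap."""
  metric = BoxIoUMetric(t=0.5, threed=False)
  rec = np.array([0.25, 0.5, 0.5, 0.75])
  prec = np.array([1.0, 1.0, 0.667, 0.75])
  ap = metric._voc_ap(rec, prec)
  assert abs(ap - 0.6875) < 1e-6
  return ap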
class Evaluator:
"""Evaluator for specified metrics."""
def __init__(self, metrics, split, shapenet_dir):
self.metrics = metrics
self.split = split
self.shapenet_dir = shapenet_dir
def add_detections(self, sample, detections):
"""Add detections to evaluation.
Args:
sample: the ground truth information
detections: the predicted detections
Returns:
dict of intermediate results.
"""
result_dict = {'iou_mean': -1, 'iou_min': -1, 'collisions': 0,
'collision_intersection': 0, 'collision_iou': 0}
num_boxes = sample['num_boxes'].numpy()
for _, metric in self.metrics.items():
if isinstance(metric, ShapeAccuracyMetric):
labels = sample['shapes']
weights = tf.math.sign(labels + 1) # -1 is mapped to zero, else 1
metric.update(labels, detections['shapes_logits'], weights)
elif isinstance(metric, BoxIoUMetric):
scene_id = str(sample['scene_filename'].numpy(), 'utf-8')
# Get ground truth boxes
labeled_boxes = tf.gather(
sample['groundtruth_boxes'], axis=1, indices=[1, 0, 3, 2]) * 256.0
if metric.threed:
rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(
tf.reshape(detections['rotations_3d'][i], [3, 3]),
1) for i in range(num_boxes)], axis=0)
rotations_y = tf.reshape(rotations_y, [-1, 1])
labeled_boxes = tf.concat([sample['translations_3d'],
sample['sizes_3d'],
rotations_y], axis=1)
# Get predicted boxes
predicted_boxes = detections['detection_boxes']
if metric.threed:
rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(
tf.reshape(detections['rotations_3d'][i], [3, 3]),
1) for i in range(num_boxes)], axis=0)
rotations_y = tf.reshape(rotations_y, [-1, 1])
predicted_boxes = tf.concat([detections['translations_3d'],
detections['sizes_3d'],
rotations_y], axis=1)
labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
predicted_classes = tf.cast(detections['detection_classes'], tf.int64)
confidences = detections['detection_scores']
metric.update(scene_id, labeled_boxes, labeled_classes, predicted_boxes,
predicted_classes, confidences)
elif isinstance(metric, IoUMetric):
classes = sample['classes']
mesh_names = sample['mesh_names']
labeled_sdfs = []
for i in range(num_boxes):
class_id = str(classes[i].numpy()).zfill(8)
model_name = str(mesh_names[i].numpy(), 'utf-8')
path_prefix = os.path.join(self.shapenet_dir, class_id, model_name)
file_sdf = os.path.join(path_prefix, 'model_normalized_sdf.npy')
with gfile.Open(file_sdf, 'rb') as f:
labeled_sdfs.append(tf.expand_dims(np.load(f).astype(np.float32),
0))
labeled_sdfs = tf.concat(labeled_sdfs, axis=0)
labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
labeled_permutation = np.argsort(labeled_classes)
labeled_sdfs = labeled_sdfs.numpy()[labeled_permutation]
labeled_classes = labeled_classes.numpy()[labeled_permutation]
labeled_rotations_3d = sample['rotations_3d'].numpy()
labeled_rotations_3d = labeled_rotations_3d[labeled_permutation]
labeled_translations_3d = sample['translations_3d'].numpy()
labeled_translations_3d = labeled_translations_3d[labeled_permutation]
labeled_sizes_3d = sample['sizes_3d'].numpy()[labeled_permutation]
labeled_poses = (labeled_rotations_3d, labeled_translations_3d,
labeled_sizes_3d)
# Predictions
predicted_classes = tf.cast(detections['detection_classes'], tf.int64)
predicted_permutation = np.argsort(predicted_classes)
predicted_classes = predicted_classes.numpy()[predicted_permutation]
predicted_sdfs = \
detections['predicted_sdfs'].numpy()[predicted_permutation]
predicted_rotations_3d = \
detections['rotations_3d'].numpy()[predicted_permutation]
predicted_translations_3d = \
detections['translations_3d'].numpy()[predicted_permutation]
predicted_sizes_3d = \
detections['sizes_3d'].numpy()[predicted_permutation]
predicted_poses = (predicted_rotations_3d, predicted_translations_3d,
predicted_sizes_3d)
full_oracle = False
if full_oracle:
predicted_sdfs = detections['groundtruth_sdfs'].numpy()
predicted_sdfs = predicted_sdfs[labeled_permutation]
predicted_classes = labeled_classes
predicted_poses = labeled_poses
print('----------------------------')
print(predicted_sdfs.shape)
print(predicted_classes.shape)
print(predicted_poses[0].shape)
print(predicted_poses[1].shape)
print(predicted_poses[2].shape)
pose_oracle = False
if pose_oracle:
predicted_sdfs = detections['predicted_sdfs'].numpy()
predicted_sdfs = predicted_sdfs[predicted_permutation]
predicted_poses = (labeled_rotations_3d, labeled_translations_3d,
labeled_sizes_3d)
class_oracle = True
if class_oracle:
predicted_classes *= 0
labeled_classes *= 0
iou_mean, iou_min = metric.update(
labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs,
            predicted_classes, predicted_poses)
result_dict['iou_mean'] = iou_mean
result_dict['iou_min'] = iou_min
elif isinstance(metric, CollisionMetric):
labeled_sdfs = detections['groundtruth_sdfs']
labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
labeled_poses = (sample['rotations_3d'],
sample['translations_3d'],
sample['sizes_3d'])
predicted_classes = tf.cast(detections['detection_classes'], tf.int64)
predicted_sdfs = detections['predicted_sdfs']
predicted_poses = (detections['rotations_3d'],
detections['translations_3d'],
detections['sizes_3d'])
full_oracle = False
if full_oracle:
predicted_sdfs = detections['groundtruth_sdfs'].numpy()
predicted_classes = labeled_classes
predicted_poses = labeled_poses
num_collisions, intersection, iou = metric.update(
labeled_sdfs, labeled_classes, labeled_poses,
predicted_sdfs, predicted_classes, predicted_poses)
result_dict['collisions'] = num_collisions
result_dict['collision_intersection'] = intersection
result_dict['collision_iou'] = iou
return result_dict
def evaluate(self):
"""Runs metrics over provided pairs and returns metric dict."""
metrics = {}
for name, metric in self.metrics.items():
metrics[name] = metric.evaluate()
return metrics
def reset_metrics(self):
for _, metric in self.metrics.items():
metric.reset()
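# Typical orchestration of the classes above (illustrative sketch only; real
# usage needs a data pipeline that yields the `sample` and `detections`
# dictionaries consumed by add_detections, and `eval_pairs` below is a
# hypothetical iterable):
#
#   metrics = {'box_iou_2d': BoxIoUMetric(t=0.5, threed=False),
#              'collisions': CollisionMetric(resolution=128)}
#   evaluator = Evaluator(metrics, split='val', shapenet_dir='/path/to/shapenet')
#   for sample, detections in eval_pairs:
#     evaluator.add_detections(sample, detections)
#   results = evaluator.evaluate()
#   evaluator.reset_metrics()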
|
|
#!/usr/bin/env python
import numpy as np
import itertools
from grid import Grid
import tools
# A class that carries the logic of evaluating the energy, force and torque
# of a pair of rigid molecules. The coordinates of each molecule are given
# in the form of Xcom and q, with Xcom being the Cartesian coordinates of the
# center of mass, q being the quaternion representation of its orientation
# wrt a reference pose. The class evaluates the EFTs for a pair of such
# coordinates by
# 1. Apply translational and rotational operations to the pair to align the
# COM of the first molecule with the origin and its orientation to the
# reference pose.
# 2. Convert the modified Xcom and q of the second molecule into spherical
# coordinates.
# 3. Use the resulting six-dimensional coordinate to query a six-dimensional
# grid that stores precomputed EFTs.
# 4. Unapply rotation in step 1 to obtain correctly oriented forces and torques
class EFT_calculator:
def __init__(self, order=2):
self.mol = Water()
self.grid = Grid()
self.order = order # order of the interpolant, 1 for linear
# Setup the grid structure. If provided with a data file, load it
def setup(self, filename=None):
if not filename:
self.grid.setup()
else:
self.grid.load(filename)
    # Given a calculator that evaluates the atomic coordinates of a pair,
# use the results to fill the grid
def fill_grid(self, calculator, filename='grid_data.txt'):
def f(x):
coor = self._spherical2Atomic(x)
return calculator.eval(coor)
if not self.grid.n:
raise Exception('setup() before fill')
self.grid.fill(f)
self.grid.save(filename)
def fill_with_QM(self, logfilelist):
""" input filename is a file with all gird GAMESS result log in order."""
loglist = open(logfilelist, 'r').readlines()
for i in range(len(loglist)):
loglist[i] = loglist[i].rstrip()
i = 0
for leaf, x in self.grid._gen_leaves_with_x():
            leaf.y, coord = self._parseQMlog(loglist[i])  # coord is not used here
            i += 1
            if i >= len(loglist):
                break
def _parseQMlog(self, logname):
"""extract energy, force from GAMESS log file and
return (energy, force[0],force[1],force[2], torque[0],torque[1],torque[2])
ni, nj is the atom num. of framgment i,j
"""
AU2KCAL = 23.0605*27.2116
HperB2toque = 1185.82 # 1Hartree/Bohr = 1185.82 kcal/mol/Angstrom
frgE1 = -76.2987810745 * AU2KCAL
frgE2 = -76.2987810745 * AU2KCAL
e = 0.0
f = np.zeros(3)
t = np.zeros(3)
logf = open(logname, 'r')
log = logf.readlines()
logf.close()
coords = []
gradients = []
for idx, i in enumerate(log):
if i[0:13] == " INPUT CARD> " and len(i.split()) == 7:
                try: coords.append([float(v) for v in i.split()[4:7]])  # use v, not i, to avoid rebinding the loop variable
except ValueError:continue
if 'E(MP2)=' in i : e = float(i.split()[1]) * AU2KCAL - frgE1 - frgE2
if 'GRADIENT OF THE ENERGY' in i:
for gline in log[idx+4:idx+10]:
gradients.append([float(g) * HperB2toque for g in gline.split()[2:5]])
break
coords = np.array(coords)
gradients = np.array(gradients)
# from com => probe
com1 = self.mol.getCOM(coords[3:])
coord1 = coords[:3]
grad1 = gradients[:3]
for idx in range(len(grad1)):
f += grad1[idx]
t += np.cross(coord1[idx] - com1, grad1[idx])
return np.array([e, f[0], f[1], f[2], t[0], t[1], t[2]]), coords
# Evaluate the Xcom and q for a pair of mols by querying the grid
def eval(self, Xcom0, q0, Xcom1, q1):
# move COM of mol0 to origin
X = Xcom1 - Xcom0
# reorient to align mol0 with refCoor
R = tools.q2R(q0)
X = np.dot(X, R)
q = tools.qdiv(q1, q0)
# Use mirror symmetry of mol0 to move mol1 such that its COM has positive y and z values
reflections = []
qsub = q[1:]
for i in self.mol.refl_axes:
if X[i] < 0:
X[i] = -X[i]
# the following operation on q is equivalent to changing R to MRM
# i.e., the probe mol is reflected twice, once in the reference frame,
# once in the molecular frame.
qsub[i] = -qsub[i]
qsub[:] = -qsub
reflections.append(i)
# Use mirror symmetry of mol1 to orient it such that it has positive q[0] and q[1] values
if q[0] < 0:
q = -q
if q[1] < 0:
q[0], q[1], q[2], q[3] = -q[1], q[0], q[3], -q[2]
# convert X, q to polar coordinates
r, phi, theta = tools.xyz2spherical(X)
ophi1, ophi2, otheta = tools.q2spherical(q)
coor = [r, phi, theta, ophi1, ophi2, otheta]
# use the grid to obtain results
eft = self.grid.interpolate(coor, self.order)
ener = eft[0]
force = eft[1:4]
torque = eft[4:7]
# Reverse the operations for mol0 mirror symmetry back
for i in reflections:
force[i] = -force[i]
torque[i] = -torque[i]
torque[:] = -torque
# Reverse the reorientation applied to align mol0 with refCoor
force[:] = np.dot(force, R.T)
torque[:] = np.dot(torque, R.T)
return eft
# Generate atomic coordinates for mol pair for grid points along with
# an id. The optional arguments can be used to specify a range for the id.
# The coordinates are in the form of [XO0, XH0, XH0, XO1, XH1, XH1], where 0 indicates
# the center molecule, 1 the probe molecule.
def gen_atomic_coors(self, start=None, stop=None):
if stop is None:
if start is not None:
raise Exception('Specify start and stop at the same time!')
start = 0
stop = self.grid.n
gen_x = itertools.islice(self.grid.gen_x(), start, stop)
for i in range(start, stop):
            x = next(gen_x)
coors = self._spherical2Atomic(x)
yield i, coors
def gen_PDB(self, confs=None):
if confs is None:confs=self.grid.gen_x()
for i, x in enumerate(confs):
#if np.linalg.norm(conf.q) > 1: pdb.set_trace()
coors = self._spherical2PDB(x)
yield i, coors
# Construct atomic coordinates for a pair from grid coordinate
def _spherical2Atomic(self, coor):
r, phi, theta, ophi1, ophi2, otheta = coor
Xcom = tools.spherical2xyz(r, phi, theta)
q = tools.spherical2q(ophi1, ophi2, otheta)
coor = self.mol.Xq2Atomic(Xcom, q)
return np.concatenate((self.mol.refCoor, coor), axis=0)
def _spherical2PDB(self, coor, NdxAtom=1,NdxRes=1):
c= self._spherical2Atomic(coor)
mol = 'TITLE para:' + '%8.3f'*6%tuple(coor) + '\n'
for i in range(self.mol.n1+self.mol.n2):
mol += "ATOM %5d%3s%6s A%4d%12.3f%8.3f%8.3f 1.00 0.00\n" % (
NdxAtom, self.mol.ele[i], self.mol.frg, NdxRes,c[i][0],c[i][1],c[i][2])
if NdxAtom == self.mol.n1:NdxRes += 1
NdxAtom += 1
return mol
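# Illustrative sketch of the quaternion-to-rotation-matrix conversion that
# tools.q2R is assumed to perform, for a unit quaternion q = (w, x, y, z).
# The exact convention used by tools (component ordering, row vs. column
# vectors, cf. the np.dot(X, R) calls above) is not shown in this file, so
# treat this as a reference formula rather than a drop-in replacement.
def _example_q_to_R(q):
    """Rotation matrix of a unit quaternion (w, x, y, z)."""
    w, x, y, z = q
    return np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)]])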
# A class that holds information related to the atomic structure of a water
# molecule. It also includes several methods that carries out operations
# related to the atomic coordinates.
class Water:
def __init__(self):
self.frg = "HOH"
self.n1 = 3
self.n2 = 3
self.ele = "OHHOHH"
self.mass = np.array([15.99900, 1.00800, 1.00800])
refCoor = np.array([ [-0.06556939, 0.00000000, 0.00000000],
[0.52035943, 0.76114632, 0.00000000],
[0.52035943, -0.76114632, 0.00000000] ])
# The following code ensures that refCoor has COM at origin and orientation
# aligned with the getR() method
refCoor = refCoor - self.getCOM(refCoor)
R = self.getR(refCoor)
refCoor = np.dot(refCoor, R)
self.refCoor = refCoor
# refl_axes is a list of indices of axes. Reflection along each of these
# axes corresponds to a mirror symmetry of the molecule
self.refl_axes = [1, 2]
# Calculate the rotation matrix R that relates coors to self.refCoor
# vec_in_reference_frame = R \dot vec_in_body_frame
# R(coors) \dot refCoor = coors - COM(coors)
# This function defines the orientation of self.refCoor
# Need to be consistent with self.refl_axes
def getR(self, coors):
coors = np.copy(coors)
offset = coors[0]
coors -= offset
xvec = coors[1] + coors[2]
zvec = np.cross(coors[1], coors[2])
yvec = np.cross(zvec, xvec)
xvec /= np.linalg.norm(xvec)
yvec /= np.linalg.norm(yvec)
zvec /= np.linalg.norm(zvec)
R = np.array([xvec, yvec, zvec]).T
return R
# Calculate the center of mass
def getCOM(self, coors):
return np.dot(self.mass, coors) / self.mass.sum()
# Convert atomic coordinates to Xcom and q
def atomic2Xq(self, coors):
Xcom = self.getCOM(coors)
R = self.getR(coors)
q = tools.R2q(R)
return Xcom, q
# Given Xcom and q, rebuild the atomic coordinates
def Xq2Atomic(self, Xcom, q):
R = tools.q2R(q)
coor = np.dot(self.refCoor, R.T)
coor += Xcom
return coor
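# Illustrative sketch of step 2 of the procedure described at the top of this
# file (Cartesian -> spherical), assuming the common physics convention
# r = |X|, theta = polar angle from +z, phi = azimuth in the x-y plane. The
# actual convention lives in tools.xyz2spherical and may differ; this
# standalone example only shows the kind of conversion being performed, and it
# returns (r, phi, theta) in the same order used by EFT_calculator.eval.
def _example_xyz_to_spherical(x, y, z):
    """Toy Cartesian-to-spherical conversion (convention assumed, see above)."""
    r = np.sqrt(x * x + y * y + z * z)
    theta = np.arccos(z / r) if r > 0 else 0.0  # polar angle from +z
    phi = np.arctan2(y, x)                      # azimuth in the x-y plane
    return r, phi, theta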
|
|
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2012-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import time
import datetime
import pwd
import grp
import os
import IECore
import Gaffer
import GafferTest
class FileSystemPathTest( GafferTest.TestCase ) :
def test( self ) :
p = Gaffer.FileSystemPath( __file__ )
self.assert_( p.isValid() )
self.assert_( p.isLeaf() )
while len( p ) :
del p[-1]
self.assert_( p.isValid() )
self.assert_( not p.isLeaf() )
def testIsLeaf( self ) :
path = Gaffer.FileSystemPath( "/this/path/doesnt/exist" )
self.assert_( not path.isLeaf() )
def testConstructWithFilter( self ) :
p = Gaffer.FileSystemPath( __file__ )
self.failUnless( p.getFilter() is None )
f = Gaffer.FileNamePathFilter( [ "*.exr" ] )
p = Gaffer.FileSystemPath( __file__, filter = f )
self.failUnless( p.getFilter().isSame( f ) )
def testBrokenSymbolicLinks( self ) :
os.symlink( self.temporaryDirectory() + "/nonExistent", self.temporaryDirectory() + "/broken" )
# we do want symlinks to appear in children, even if they're broken
d = Gaffer.FileSystemPath( self.temporaryDirectory() )
c = d.children()
self.assertEqual( len( c ), 1 )
l = c[0]
self.assertEqual( str( l ), self.temporaryDirectory() + "/broken" )
# we also want broken symlinks to report themselves as "valid",
# because having a path return a child and then claim the child
# is invalid seems rather useless. admittedly this is a bit of
# a compromise.
self.assertEqual( l.isValid(), True )
# since we said it was valid, it ought to have some info
info = l.info()
self.failUnless( info is not None )
def testSymLinkInfo( self ) :
with open( self.temporaryDirectory() + "/a", "w" ) as f :
f.write( "AAAA" )
os.symlink( self.temporaryDirectory() + "/a", self.temporaryDirectory() + "/l" )
# symlinks should report the info for the file
# they point to.
a = Gaffer.FileSystemPath( self.temporaryDirectory() + "/a" )
l = Gaffer.FileSystemPath( self.temporaryDirectory() + "/l" )
aInfo = a.info()
self.assertEqual( aInfo["fileSystem:size"], l.info()["fileSystem:size"] )
# unless they're broken
os.remove( str( a ) )
self.assertNotEqual( aInfo["fileSystem:size"], l.info()["fileSystem:size"] )
def testCopy( self ) :
p = Gaffer.FileSystemPath( self.temporaryDirectory() )
p2 = p.copy()
self.assertEqual( p, p2 )
self.assertEqual( str( p ), str( p2 ) )
def testEmptyPath( self ) :
p = Gaffer.FileSystemPath()
self.assertEqual( str( p ), "" )
self.assertTrue( p.isEmpty() )
self.assertFalse( p.isValid() )
def testRelativePath( self ) :
os.chdir( self.temporaryDirectory() )
with open( self.temporaryDirectory() + "/a", "w" ) as f :
f.write( "AAAA" )
p = Gaffer.FileSystemPath( "a" )
self.assertEqual( str( p ), "a" )
self.assertFalse( p.isEmpty() )
self.assertTrue( p.isValid() )
p2 = Gaffer.FileSystemPath( "nonexistent" )
self.assertEqual( str( p2 ), "nonexistent" )
self.assertFalse( p2.isEmpty() )
self.assertFalse( p2.isValid() )
def testRelativePathChildren( self ) :
os.chdir( self.temporaryDirectory() )
os.mkdir( "dir" )
with open( self.temporaryDirectory() + "/dir/a", "w" ) as f :
f.write( "AAAA" )
p = Gaffer.FileSystemPath( "dir" )
c = p.children()
self.assertEqual( len( c ), 1 )
self.assertEqual( str( c[0] ), "dir/a" )
self.assertTrue( c[0].isValid() )
def testChildrenOfFile( self ) :
p = Gaffer.FileSystemPath( __file__ )
self.assertEqual( p.children(), [] )
def testModificationTimes( self ) :
p = Gaffer.FileSystemPath( self.temporaryDirectory() )
p.append( "t" )
with open( str( p ), "w" ) as f :
f.write( "AAAA" )
mt = p.property( "fileSystem:modificationTime" )
self.assertTrue( isinstance( mt, datetime.datetime ) )
self.assertLess( (datetime.datetime.utcnow() - mt).total_seconds(), 2 )
time.sleep( 1 )
with open( str( p ), "w" ) as f :
f.write( "BBBB" )
mt = p.property( "fileSystem:modificationTime" )
self.assertTrue( isinstance( mt, datetime.datetime ) )
self.assertLess( (datetime.datetime.utcnow() - mt).total_seconds(), 2 )
def testOwner( self ) :
p = Gaffer.FileSystemPath( self.temporaryDirectory() )
p.append( "t" )
with open( str( p ), "w" ) as f :
f.write( "AAAA" )
o = p.property( "fileSystem:owner" )
self.assertTrue( isinstance( o, str ) )
self.assertEqual( o, pwd.getpwuid( os.stat( str( p ) ).st_uid ).pw_name )
def testGroup( self ) :
p = Gaffer.FileSystemPath( self.temporaryDirectory() )
p.append( "t" )
with open( str( p ), "w" ) as f :
f.write( "AAAA" )
g = p.property( "fileSystem:group" )
self.assertTrue( isinstance( g, str ) )
self.assertEqual( g, grp.getgrgid( os.stat( str( p ) ).st_gid ).gr_name )
def testPropertyNames( self ) :
p = Gaffer.FileSystemPath( self.temporaryDirectory() )
a = p.propertyNames()
self.assertTrue( isinstance( a, list ) )
self.assertTrue( "fileSystem:group" in a )
self.assertTrue( "fileSystem:owner" in a )
self.assertTrue( "fileSystem:modificationTime" in a )
self.assertTrue( "fileSystem:size" in a )
self.assertTrue( "fileSystem:frameRange" not in a )
p = Gaffer.FileSystemPath( self.temporaryDirectory(), includeSequences = True )
self.assertTrue( "fileSystem:frameRange" in p.propertyNames() )
def testSequences( self ) :
os.mkdir( self.temporaryDirectory() + "/dir" )
for n in [ "singleFile.txt", "a.001.txt", "a.002.txt", "a.004.txt", "b.003.txt" ] :
with open( self.temporaryDirectory() + "/" + n, "w" ) as f :
f.write( "AAAA" )
p = Gaffer.FileSystemPath( self.temporaryDirectory(), includeSequences = True )
self.assertTrue( p.getIncludeSequences() )
c = p.children()
self.assertEqual( len( c ), 8 )
s = sorted( c, key=str )
self.assertEqual( str(s[0]), self.temporaryDirectory() + "/a.###.txt" )
self.assertEqual( str(s[1]), self.temporaryDirectory() + "/a.001.txt" )
self.assertEqual( str(s[2]), self.temporaryDirectory() + "/a.002.txt" )
self.assertEqual( str(s[3]), self.temporaryDirectory() + "/a.004.txt" )
self.assertEqual( str(s[4]), self.temporaryDirectory() + "/b.###.txt" )
self.assertEqual( str(s[5]), self.temporaryDirectory() + "/b.003.txt" )
self.assertEqual( str(s[6]), self.temporaryDirectory() + "/dir" )
self.assertEqual( str(s[7]), self.temporaryDirectory() + "/singleFile.txt" )
for x in s :
self.assertTrue( x.isValid() )
if not os.path.isdir( str(x) ) :
self.assertTrue( x.isLeaf() )
self.assertEqual( x.property( "fileSystem:owner" ), pwd.getpwuid( os.stat( str( p ) ).st_uid ).pw_name )
self.assertEqual( x.property( "fileSystem:group" ), grp.getgrgid( os.stat( str( p ) ).st_gid ).gr_name )
self.assertLess( (datetime.datetime.utcnow() - x.property( "fileSystem:modificationTime" )).total_seconds(), 2 )
if "###" not in str(x) :
self.assertFalse( x.isFileSequence() )
self.assertEqual( x.fileSequence(), None )
self.assertEqual( x.property( "fileSystem:frameRange" ), "" )
if os.path.isdir( str(x) ) :
self.assertEqual( x.property( "fileSystem:size" ), 0 )
else :
self.assertEqual( x.property( "fileSystem:size" ), 4 )
self.assertEqual( s[0].property( "fileSystem:frameRange" ), "1-2,4" )
self.assertTrue( s[0].isFileSequence() )
self.assertTrue( isinstance( s[0].fileSequence(), IECore.FileSequence ) )
self.assertEqual( s[0].fileSequence(), IECore.FileSequence( str(s[0]), IECore.frameListFromList( [ 1, 2, 4 ] ) ) )
self.assertEqual( s[0].property( "fileSystem:size" ), 4 * 3 )
self.assertEqual( s[4].property( "fileSystem:frameRange" ), "3" )
self.assertTrue( s[4].isFileSequence() )
self.assertTrue( isinstance( s[4].fileSequence(), IECore.FileSequence ) )
self.assertEqual( s[4].fileSequence(), IECore.FileSequence( str(s[4]), IECore.frameListFromList( [ 3 ] ) ) )
self.assertEqual( s[4].property( "fileSystem:size" ), 4 )
# make sure we can copy
p2 = p.copy()
self.assertTrue( p2.getIncludeSequences() )
self.assertEqual( len( p2.children() ), 8 )
# make sure we can still exclude the sequences
p = Gaffer.FileSystemPath( self.temporaryDirectory(), includeSequences = False )
self.assertFalse( p.getIncludeSequences() )
c = p.children()
self.assertEqual( len( c ), 6 )
s = sorted( c, key=str )
self.assertEqual( str(s[0]), self.temporaryDirectory() + "/a.001.txt" )
self.assertEqual( str(s[1]), self.temporaryDirectory() + "/a.002.txt" )
self.assertEqual( str(s[2]), self.temporaryDirectory() + "/a.004.txt" )
self.assertEqual( str(s[3]), self.temporaryDirectory() + "/b.003.txt" )
self.assertEqual( str(s[4]), self.temporaryDirectory() + "/dir" )
self.assertEqual( str(s[5]), self.temporaryDirectory() + "/singleFile.txt" )
# and we can include them again
p.setIncludeSequences( True )
self.assertTrue( p.getIncludeSequences() )
c = p.children()
self.assertEqual( len( c ), 8 )
def setUp( self ) :
GafferTest.TestCase.setUp( self )
self.__originalCWD = os.getcwd()
def tearDown( self ) :
GafferTest.TestCase.tearDown( self )
os.chdir( self.__originalCWD )
if __name__ == "__main__":
unittest.main()
|
|
from logging import debug, error, exception
import sqlite3, re
from functools import wraps, lru_cache
from unidecode import unidecode
from typing import Set, List, Optional
from .models import Show, ShowType, Stream, Service, LinkSite, Link, Episode, EpisodeScore, UnprocessedStream, UnprocessedShow
def living_in(the_database):
"""
	Open a connection to the SQLite database at the given path and return it
	wrapped in a DatabaseDatabase, or None if it could not be opened.
	:param the_database: path to the SQLite database file
	:return: a DatabaseDatabase instance, or None on failure
"""
try:
db = sqlite3.connect(the_database)
db.execute("PRAGMA foreign_keys=ON")
except sqlite3.OperationalError:
error("Failed to open database, {}".format(the_database))
return None
return DatabaseDatabase(db)
# Database
def db_error(f):
@wraps(f)
def protected(*args, **kwargs):
try:
f(*args, **kwargs)
return True
except:
exception("Database exception thrown")
return False
return protected
def db_error_default(default_value):
value = default_value
def decorate(f):
@wraps(f)
def protected(*args, **kwargs):
nonlocal value
try:
return f(*args, **kwargs)
except:
exception("Database exception thrown")
return value
return protected
return decorate
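# Minimal illustration of the decorators above: db_error swallows exceptions and
# reports success as a boolean, while db_error_default swallows exceptions and
# falls back to a caller-supplied value. The toy function below is illustrative
# only and is never called by this module.
@db_error_default(list())
def _example_query_that_may_fail(cursor, sql):
	cursor.execute(sql)
	return cursor.fetchall()
# A call such as _example_query_that_may_fail(db.cursor(), "SELECT * FROM Missing")
# would log the error via exception() and return [] instead of raising.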
class DatabaseDatabase:
def __init__(self, db):
self._db = db
self.q = db.cursor()
# Set up collations
self._db.create_collation("alphanum", _collate_alphanum)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self._db, attr)
def get_count(self):
return self.q.fetchone()[0]
def save(self):
self.commit()
# Setup
def setup_tables(self):
self.q.execute("""CREATE TABLE IF NOT EXISTS ShowTypes (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
key TEXT NOT NULL
)""")
self.q.executemany("INSERT OR IGNORE INTO ShowTypes (id, key) VALUES (?, ?)", [(t.value, t.name.lower()) for t in ShowType])
self.q.execute("""CREATE TABLE IF NOT EXISTS Shows (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT NOT NULL,
length INTEGER,
type INTEGER NOT NULL,
has_source INTEGER NOT NULL DEFAULT 0,
enabled INTEGER NOT NULL DEFAULT 1,
delayed INTEGER NOT NULL DEFAULT 0,
FOREIGN KEY(type) REFERENCES ShowTypes(id)
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS ShowNames (
show INTEGER NOT NULL,
name TEXT NOT NULL
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS Services (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
key TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 0,
use_in_post INTEGER NOT NULL DEFAULT 1
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS Streams (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
service TEXT NOT NULL,
show INTEGER,
show_id TEXT,
show_key TEXT NOT NULL,
name TEXT,
remote_offset INTEGER NOT NULL DEFAULT 0,
display_offset INTEGER NOT NULL DEFAULT 0,
active INTEGER NOT NULL DEFAULT 1,
FOREIGN KEY(service) REFERENCES Services(id),
FOREIGN KEY(show) REFERENCES Shows(id)
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS Episodes (
show INTEGER NOT NULL,
episode INTEGER NOT NULL,
post_url TEXT,
FOREIGN KEY(show) REFERENCES Shows(id)
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS LinkSites (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
key TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS Links (
show INTEGER NOT NULL,
site INTEGER NOT NULL,
site_key TEXT NOT NULL,
FOREIGN KEY(site) REFERENCES LinkSites(id)
FOREIGN KEY(show) REFERENCES Shows(id)
)""")
self.q.execute("""CREATE TABLE IF NOT EXISTS Scores (
show INTEGER NOT NULL,
episode INTEGER NOT NULL,
site INTEGER NOT NULL,
score REAL NOT NULL,
FOREIGN KEY(show) REFERENCES Shows(id),
FOREIGN KEY(site) REFERENCES LinkSites(id)
)""")
self.commit()
def register_services(self, services):
self.q.execute("UPDATE Services SET enabled = 0")
for service_key in services:
service = services[service_key]
self.q.execute("INSERT OR IGNORE INTO Services (key, name) VALUES (?, '')", (service.key,))
self.q.execute("UPDATE Services SET name = ?, enabled = 1 WHERE key = ?", (service.name, service.key))
self.commit()
def register_link_sites(self, sites):
self.q.execute("UPDATE LinkSites SET enabled = 0")
for site_key in sites:
site = sites[site_key]
self.q.execute("INSERT OR IGNORE INTO LinkSites (key, name) VALUES (?, '')", (site.key,))
self.q.execute("UPDATE LinkSites SET name = ?, enabled = 1 WHERE key = ?", (site.name, site.key))
self.commit()
# Services
@db_error_default(None)
@lru_cache(10)
def get_service(self, id=None, key=None) -> Optional[Service]:
if id is not None:
self.q.execute("SELECT id, key, name, enabled, use_in_post FROM Services WHERE id = ?", (id,))
elif key is not None:
self.q.execute("SELECT id, key, name, enabled, use_in_post FROM Services WHERE key = ?", (key,))
else:
error("ID or key required to get service")
return None
service = self.q.fetchone()
return Service(*service)
@db_error_default(list())
def get_services(self, enabled=True, disabled=False) -> List[Service]:
services = list()
if enabled:
self.q.execute("SELECT id, key, name, enabled, use_in_post FROM Services WHERE enabled = 1")
for service in self.q.fetchall():
services.append(Service(*service))
if disabled:
self.q.execute("SELECT id, key, name, enabled, use_in_post FROM Services WHERE enabled = 0")
for service in self.q.fetchall():
services.append(Service(*service))
return services
@db_error_default(None)
def get_stream(self, id=None, service_tuple=None) -> Optional[Stream]:
if id is not None:
debug("Getting stream for id {}".format(id))
self.q.execute("SELECT id, service, show, show_id, show_key, name, remote_offset, display_offset, active FROM Streams WHERE id = ?", (id,))
stream = self.q.fetchone()
if stream is None:
error("Stream {} not found".format(id))
return None
stream = Stream(*stream)
return stream
elif service_tuple is not None:
service, show_key = service_tuple
debug("Getting stream for {}/{}".format(service, show_key))
self.q.execute("SELECT id, service, show, show_id, show_key, name, remote_offset, display_offset, active FROM Streams WHERE service = ? AND show_key = ?",
(service.id, show_key))
stream = self.q.fetchone()
if stream is None:
error("Stream {} not found".format(id))
return None
stream = Stream(*stream)
return stream
else:
error("Nothing provided to get stream")
return None
@db_error_default(list())
def get_streams(self, service=None, show=None, active=True, unmatched=False, missing_name=False) -> List[Stream]:
		# Not the best combination of options, but it covers the only usage needed
if service is not None:
debug("Getting all streams for service {}".format(service.key))
service = self.get_service(key=service.key)
self.q.execute("SELECT id, service, show, show_id, show_key, name, remote_offset, display_offset, active FROM Streams \
WHERE service = ? AND active = ?", (service.id, 1 if active else 0))
elif show is not None:
debug("Getting all streams for show {}".format(show.id))
self.q.execute("SELECT id, service, show, show_id, show_key, name, remote_offset, display_offset, active FROM Streams \
WHERE show = ? AND active = ?", (show.id, active))
elif unmatched:
debug("Getting unmatched streams")
self.q.execute("SELECT id, service, show, show_id, show_key, name, remote_offset, display_offset, active FROM Streams \
WHERE show IS NULL")
elif missing_name:
self.q.execute("SELECT id, service, show, show_id, show_key, name, remote_offset, display_offset, active FROM Streams \
WHERE (name IS NULL OR name = '') AND active = ?", (active,))
else:
error("A service or show must be provided to get streams")
return list()
streams = self.q.fetchall()
streams = [Stream(*stream) for stream in streams]
return streams
@db_error_default(False)
def has_stream(self, service_key, key) -> bool:
service = self.get_service(key=service_key)
self.q.execute("SELECT count(*) FROM Streams WHERE service = ? AND show_key = ?", (service.id, key))
return self.get_count() > 0
@db_error
def add_stream(self, raw_stream: UnprocessedStream, show_id, commit=True):
debug("Inserting stream: {}".format(raw_stream))
service = self.get_service(key=raw_stream.service_key)
self.q.execute("INSERT INTO Streams (service, show, show_id, show_key, name, remote_offset, display_offset, active) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
(service.id, show_id, raw_stream.show_id, raw_stream.show_key, raw_stream.name, raw_stream.remote_offset, raw_stream.display_offset, show_id is not None))
if commit:
self.commit()
@db_error
def update_stream(self, stream: Stream, show=None, active=None, name=None, show_id=None, show_key=None, remote_offset=None, commit=True):
debug("Updating stream: id={}".format(stream.id))
if show is not None:
self.q.execute("UPDATE Streams SET show = ? WHERE id = ?", (show, stream.id))
if active is not None:
self.q.execute("UPDATE Streams SET active = ? WHERE id = ?", (active, stream.id))
if name is not None:
self.q.execute("UPDATE Streams SET name = ? WHERE id = ?", (name, stream.id))
if show_id is not None:
self.q.execute("UPDATE Streams SET show_id = ? WHERE id = ?", (show_id, stream.id))
if show_key is not None:
self.q.execute("UPDATE Streams SET show_key = ? WHERE id = ?", (show_key, stream.id))
if remote_offset is not None:
self.q.execute("UPDATE Streams SET remote_offset = ? WHERE id = ?", (remote_offset, stream.id))
if commit:
self.commit()
# Links
@db_error_default(None)
def get_link_site(self, id:str=None, key:str=None) -> Optional[LinkSite]:
if id is not None:
self.q.execute("SELECT id, key, name, enabled FROM LinkSites WHERE id = ?", (id,))
elif key is not None:
self.q.execute("SELECT id, key, name, enabled FROM LinkSites WHERE key = ?", (key,))
else:
error("ID or key required to get link site")
return None
site = self.q.fetchone()
if site is None:
return None
return LinkSite(*site)
@db_error_default(list())
def get_link_sites(self, enabled=True, disabled=False) -> List[LinkSite]:
sites = list()
if enabled:
self.q.execute("SELECT id, key, name, enabled FROM LinkSites WHERE enabled = 1")
for link in self.q.fetchall():
sites.append(LinkSite(*link))
if disabled:
self.q.execute("SELECT id, key, name, enabled FROM LinkSites WHERE enabled = 0")
for link in self.q.fetchall():
sites.append(LinkSite(*link))
return sites
@db_error_default(list())
def get_links(self, show:Show=None) -> List[Link]:
if show is not None:
debug("Getting all links for show {}".format(show.id))
# Get all streams with show ID
self.q.execute("SELECT site, show, site_key FROM Links WHERE show = ?", (show.id,))
links = self.q.fetchall()
links = [Link(*link) for link in links]
return links
else:
error("A show must be provided to get links")
return list()
@db_error_default(None)
def get_link(self, show: Show, link_site: LinkSite) -> Optional[Link]:
debug("Getting link for show {} and site {}".format(show.id, link_site.key))
self.q.execute("SELECT site, show, site_key FROM Links WHERE show = ? AND site = ?", (show.id, link_site.id))
link = self.q.fetchone()
if link is None:
return None
link = Link(*link)
return link
@db_error_default(False)
def has_link(self, site_key, key) -> bool:
site = self.get_link_site(key=site_key)
self.q.execute("SELECT count(*) FROM Links WHERE site = ? AND site_key = ?",
(site.id, key))
return self.get_count() > 0
@db_error
def add_link(self, raw_show: UnprocessedShow, show_id, commit=True):
debug("Inserting link: {}/{}".format(show_id, raw_show))
site = self.get_link_site(key=raw_show.site_key)
if site is None:
error(" Invalid site \"{}\"".format(raw_show.site_key))
return
site_key = raw_show.show_key
self.q.execute("INSERT INTO Links (show, site, site_key) VALUES (?, ?, ?)",
(show_id, site.id, site_key))
if commit:
self.commit()
# Shows
@db_error_default(list())
	def get_shows(self, missing_length=False, missing_stream=False, enabled=True, delayed=False) -> List[Show]:
shows = list()
if missing_length:
self.q.execute("SELECT id, name, length, type, has_source, enabled, delayed FROM Shows WHERE (length IS NULL OR length = '' OR length = 0) AND enabled = ?", (enabled,))
elif missing_stream:
self.q.execute(
"SELECT id, name, length, type, has_source, enabled, delayed FROM Shows show \
WHERE (SELECT count(*) FROM Streams stream WHERE stream.show = show.id AND stream.active = 1) = 0 AND enabled = ?",
(enabled,))
elif delayed:
self.q.execute("SELECT id, name, length, type, has_source, enabled, delayed FROM Shows WHERE delayed = 1 AND enabled = ?", (enabled,))
else:
self.q.execute("SELECT id, name, length, type, has_source, enabled, delayed FROM Shows WHERE enabled = ?", (enabled,))
for show in self.q.fetchall():
shows.append(Show(*show))
return shows
@db_error_default(None)
def get_show(self, id=None, stream=None) -> Optional[Show]:
#debug("Getting show from database")
# Get show ID
if stream and not id:
id = stream.show
# Get show
if id is None:
error("Show ID not provided to get_show")
return None
self.q.execute("SELECT id, name, length, type, has_source, enabled, delayed FROM Shows WHERE id = ?", (id,))
show = self.q.fetchone()
if show is None:
return None
		show_type = to_show_type(show[3])
		show = Show(*show[:3], show_type, *show[4:])
return show
@db_error_default(None)
def add_show(self, raw_show: UnprocessedShow, commit=True) -> int:
debug("Inserting show: {}".format(raw_show))
name = raw_show.name
length = raw_show.episode_count
show_type = from_show_type(raw_show.show_type)
has_source = raw_show.has_source
self.q.execute("INSERT INTO Shows (name, length, type, has_source) VALUES (?, ?, ?, ?)", (name, length, show_type, has_source))
show_id = self.q.lastrowid
self.add_show_names(raw_show.name, *raw_show.more_names, id=show_id, commit=commit)
if commit:
self.commit()
return show_id
@db_error_default(None)
def update_show(self, show_id: str, raw_show: UnprocessedShow, commit=True):
debug("Updating show: {}".format(raw_show))
#name = raw_show.name
length = raw_show.episode_count
show_type = from_show_type(raw_show.show_type)
has_source = raw_show.has_source
self.q.execute("UPDATE Shows SET length = ?, type = ?, has_source = ? WHERE id = ?",
(length, show_type, has_source, show_id))
if commit:
self.commit()
@db_error
def add_show_names(self, *names, id=None, commit=True):
self.q.executemany("INSERT INTO ShowNames (show, name) VALUES (?, ?)", [(id, name) for name in names])
if commit:
self.commit()
@db_error
def set_show_episode_count(self, show, length):
debug("Updating show episode count in database: {}, {}".format(show.name, length))
self.q.execute("UPDATE Shows SET length = ? WHERE id = ?", (length, show.id))
self.commit()
@db_error
def set_show_delayed(self, show: Show, delayed=True):
debug("Marking show {} as delayed: {}".format(show.name, delayed))
self.q.execute("UPDATE Shows SET delayed = ? WHERE id = ?", (delayed, show.id))
self.commit()
@db_error
def set_show_enabled(self, show: Show, enabled=True, commit=True):
debug("Marking show {} as {}".format(show.name, "enabled" if enabled else "disabled"))
self.q.execute("UPDATE Shows SET enabled = ? WHERE id = ?", (enabled, show.id))
if commit:
self.commit()
# Episodes
@db_error_default(True)
def stream_has_episode(self, stream: Stream, episode_num) -> bool:
self.q.execute("SELECT count(*) FROM Episodes WHERE show = ? AND episode = ?", (stream.show, episode_num))
num_found = self.get_count()
debug("Found {} entries matching show {}, episode {}".format(num_found, stream.show, episode_num))
return num_found > 0
@db_error_default(None)
def get_latest_episode(self, show: Show) -> Optional[Episode]:
self.q.execute("SELECT episode, post_url FROM Episodes WHERE show = ? ORDER BY episode DESC LIMIT 1", (show.id,))
data = self.q.fetchone()
if data is not None:
return Episode(data[0], None, data[1], None)
return None
@db_error
def add_episode(self, show_id, episode_num, post_url):
debug("Inserting episode {} for show {} ({})".format(episode_num, show_id, post_url))
self.q.execute("INSERT INTO Episodes (show, episode, post_url) VALUES (?, ?, ?)", (show_id, episode_num, post_url))
self.commit()
@db_error_default(list())
def get_episodes(self, show, ensure_sorted=True) -> List[Episode]:
episodes = list()
self.q.execute("SELECT episode, post_url FROM Episodes WHERE show = ?", (show.id,))
for data in self.q.fetchall():
episodes.append(Episode(data[0], None, data[1], None))
if ensure_sorted:
episodes = sorted(episodes, key=lambda e: e.number)
return episodes
# Scores
@db_error_default(list())
def get_show_scores(self, show: Show) -> List[EpisodeScore]:
self.q.execute("SELECT episode, site, score FROM Scores WHERE show=?", (show.id,))
return [EpisodeScore(show.id, *s) for s in self.q.fetchall()]
@db_error_default(list())
def get_episode_scores(self, show: Show, episode: Episode) -> List[EpisodeScore]:
self.q.execute("SELECT site, score FROM Scores WHERE show=? AND episode=?", (show.id, episode.number))
return [EpisodeScore(show.id, episode.number, *s) for s in self.q.fetchall()]
@db_error_default(None)
def get_episode_score_avg(self, show: Show, episode: Episode) -> Optional[EpisodeScore]:
debug("Calculating avg score for {} ({})".format(show.name, show.id))
self.q.execute("SELECT score FROM Scores WHERE show=? AND episode=?", (show.id, episode.number))
scores = [s[0] for s in self.q.fetchall()]
if len(scores) > 0:
score = sum(scores)/len(scores)
debug(" Score: {} (from {} scores)".format(score, len(scores)))
return EpisodeScore(show.id, episode.number, None, score)
return None
@db_error
def add_episode_score(self, show: Show, episode: Episode, site: LinkSite, score: float, commit=True):
self.q.execute("INSERT INTO Scores (show, episode, site, score) VALUES (?, ?, ?, ?)", (show.id, episode.number, site.id, score))
if commit:
self.commit()
# Searching
@db_error_default(set())
def search_show_ids_by_names(self, *names, exact=False) -> Set[Show]:
shows = set()
for name in names:
debug("Searching shows by name: {}".format(name))
if exact:
self.q.execute("SELECT show, name FROM ShowNames WHERE name = ?", (name,))
else:
self.q.execute("SELECT show, name FROM ShowNames WHERE name = ? COLLATE alphanum", (name,))
matched = self.q.fetchall()
for match in matched:
debug(" Found match: {} | {}".format(match[0], match[1]))
shows.add(match[0])
return shows
# Helper methods
## Conversions
def to_show_type(db_val: str) -> ShowType:
for st in ShowType:
if st.value == db_val:
return st
return ShowType.UNKNOWN
def from_show_type(st: ShowType) -> Optional[str]:
if st is None:
return None
return st.value
## Collations
def _collate_alphanum(str1, str2):
str1 = _alphanum_convert(str1)
str2 = _alphanum_convert(str2)
if str1 == str2:
return 0
elif str1 < str2:
return -1
else:
return 1
_alphanum_regex = re.compile("[^a-zA-Z0-9]+")
_romanization_o = re.compile(r"\bwo\b")
def _alphanum_convert(s):
#TODO: punctuation is important for some shows to distinguish between seasons (ex. K-On! and K-On!!)
# 6/28/16: The purpose of this function is weak collation; use of punctuation to distinguish between seasons can be done later when handling multiple found shows.
# Characters to words
s = s.replace("&", "and")
# Japanese romanization differences
s = _romanization_o.sub("o", s)
s = s.replace("uu", "u")
s = s.replace("wo", "o")
s = _alphanum_regex.sub("", s)
s = s.lower()
return unidecode(s)
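# Standalone sketch of how the "alphanum" collation is used for weak name
# matching (see search_show_ids_by_names): two differently punctuated or cased
# spellings compare equal under the collation. Illustrative only; nothing in
# this module calls it.
def _example_alphanum_collation_demo():
	db = sqlite3.connect(":memory:")
	db.create_collation("alphanum", _collate_alphanum)
	q = db.cursor()
	q.execute("CREATE TABLE ShowNames (show INTEGER NOT NULL, name TEXT NOT NULL)")
	q.execute("INSERT INTO ShowNames (show, name) VALUES (1, 'K-ON!')")
	# Punctuation and case are ignored by the weak collation, so this matches.
	q.execute("SELECT show FROM ShowNames WHERE name = ? COLLATE alphanum", ("k on",))
	return q.fetchall()  # [(1,)]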
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import re
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.build_graph.intermediate_target_factory import hash_target
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.util.process_handler import subprocess
from pants_test.backend.project_info.tasks.resolve_jars_test_mixin import ResolveJarsTestMixin
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
from pants_test.subsystem.subsystem_util import global_subsystem_instance
class ExportIntegrationTest(ResolveJarsTestMixin, PantsRunIntegrationTest):
_confs_args = [
'--export-libraries-sources',
'--export-libraries-javadocs',
]
def run_export(self, test_target, workdir, load_libs=False, only_default=False, extra_args=None):
"""Runs ./pants export ... and returns its json output.
:param string|list test_target: spec of the targets to run on.
:param string workdir: working directory to run pants with.
:param bool load_libs: whether to load external libraries (of any conf).
:param bool only_default: if loading libraries, whether to only resolve the default conf, or to
additionally resolve sources and javadocs.
:param list extra_args: list of extra arguments for the pants invocation.
:return: the json output of the console task.
:rtype: dict
"""
export_out_file = os.path.join(workdir, 'export_out.txt')
args = ['export',
'--output-file={out_file}'.format(out_file=export_out_file)] + maybe_list(test_target)
libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
if load_libs and only_default:
libs_args = []
pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
self.assert_success(pants_run)
self.assertTrue(os.path.exists(export_out_file),
msg='Could not find export output file in {out_file}'
.format(out_file=export_out_file))
with open(export_out_file) as json_file:
json_data = json.load(json_file)
if not load_libs:
self.assertIsNone(json_data.get('libraries'))
return json_data
def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
json_data = self.run_export(targets, workdir, load_libs=True, only_default=not load_extra_confs,
extra_args=extra_args)
for jar in expected_jars:
self.assertIn(jar, json_data['libraries'])
for path in json_data['libraries'][jar].values():
self.assertTrue(os.path.exists(path), 'Expected jar at {} to actually exist.'.format(path))
def test_export_code_gen(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
'.precipitation-java')
codegen_target_regex = os.path.join(os.path.relpath(workdir, get_buildroot()),
'gen/thrift-java/[^/]*/[^/:]*/[^/:]*:{0}'.format(thrift_target_name))
p = re.compile(codegen_target_regex)
print(json_data.get('targets').keys())
self.assertTrue(any(p.match(target) for target in json_data.get('targets').keys()))
def test_export_json_transitive_jar(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
targets = json_data.get('targets')
self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])
def test_export_jar_path_with_excludes(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
json_data = self.run_export(test_target, workdir, load_libs=True)
self.assertIsNone(json_data
.get('libraries')
.get('com.typesafe.sbt:incremental-compiler:0.13.7'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
def test_export_jar_path_with_excludes_soft(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
json_data = self.run_export(test_target,
workdir,
load_libs=True,
extra_args=['--resolve-ivy-soft-excludes'])
self.assertIsNotNone(json_data
.get('libraries')
.get('com.martiansoftware:nailgun-server:0.9.1'))
self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))
# This test fails when the `PANTS_IVY_CACHE_DIR` is set to something that isn't
# the default location. The set cache_dir likely needs to be plumbed down
# to the sub-invocation of pants.
# https://github.com/pantsbuild/pants/issues/3126
def test_export_jar_path(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
ivy_subsystem = global_subsystem_instance(IvySubsystem)
ivy_cache_dir = ivy_subsystem.get_options().cache_dir
common_lang_lib_info = json_data.get('libraries').get('junit:junit:4.12')
self.assertIsNotNone(common_lang_lib_info)
self.assertEquals(
common_lang_lib_info.get('default'),
os.path.join(ivy_cache_dir, 'junit/junit/jars/junit-4.12.jar')
)
self.assertEquals(
common_lang_lib_info.get('javadoc'),
os.path.join(ivy_cache_dir,
'junit/junit/javadocs/junit-4.12-javadoc.jar')
)
self.assertEquals(
common_lang_lib_info.get('sources'),
os.path.join(ivy_cache_dir,
'junit/junit/sources/junit-4.12-sources.jar')
)
def test_dep_map_for_java_sources(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
json_data = self.run_export(test_target, workdir)
targets = json_data.get('targets')
self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)
def test_sources_and_javadocs(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/scala/org/pantsbuild/testproject/unicode/shapeless'
json_data = self.run_export(test_target, workdir, load_libs=True)
shapeless_lib = json_data.get('libraries').get('com.chuusai:shapeless_2.11:2.2.5')
self.assertIsNotNone(shapeless_lib)
self.assertIsNotNone(shapeless_lib['default'])
self.assertIsNotNone(shapeless_lib['sources'])
self.assertIsNotNone(shapeless_lib['javadoc'])
# This test fails when the `PANTS_IVY_CACHE_DIR` is set to something that isn't
# the default location. The set cache_dir likely needs to be plumbed down
# to the sub-invocation of pants.
# See https://github.com/pantsbuild/pants/issues/3126
def test_ivy_classifiers(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
json_data = self.run_export(test_target, workdir, load_libs=True)
ivy_subsystem = global_subsystem_instance(IvySubsystem)
ivy_cache_dir = ivy_subsystem.get_options().cache_dir
avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
self.assertIsNotNone(avro_lib_info)
self.assertEquals(
avro_lib_info.get('default'),
os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7.jar')
)
self.assertEquals(
avro_lib_info.get('tests'),
os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7-tests.jar')
)
self.assertEquals(
avro_lib_info.get('javadoc'),
os.path.join(ivy_cache_dir, 'org.apache.avro/avro/javadocs/avro-1.7.7-javadoc.jar')
)
self.assertEquals(
avro_lib_info.get('sources'),
os.path.join(ivy_cache_dir, 'org.apache.avro/avro/sources/avro-1.7.7-sources.jar')
)
def test_distributions_and_platforms(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
'--jvm-platform-default-platform=java7',
'--jvm-platform-platforms={'
' "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
' "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
'}',
'--jvm-distributions-paths={'
' "macos": [ "/Library/JDK" ],'
' "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
'}'
])
self.assertFalse('python_setup' in json_data)
target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
targets = json_data.get('targets')
self.assertEquals('java7', targets[target_name]['platform'])
self.assertEquals(
{
'default_platform' : 'java7',
'platforms': {
'java7': {
'source_level': '1.7',
'args': ['-X123'],
'target_level': '1.7'},
'java8': {
'source_level': '1.8',
'args': ['-X456'],
'target_level': '1.8'},
}
},
json_data['jvm_platforms'])
def test_test_platform(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform'
json_data = self.run_export(test_target, workdir)
self.assertEquals('java7', json_data['targets'][test_target]['platform'])
self.assertEquals('java8', json_data['targets'][test_target]['test_platform'])
@ensure_engine
def test_intellij_integration(self):
with self.temporary_workdir() as workdir:
exported_file = os.path.join(workdir, "export_file.json")
p = subprocess.Popen(['build-support/pants-intellij.sh', '--export-output-file=' + exported_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
self.assertEqual(p.returncode, 0)
with open(exported_file) as data_file:
json_data = json.load(data_file)
python_setup = json_data['python_setup']
self.assertIsNotNone(python_setup)
self.assertIsNotNone(python_setup['interpreters'])
default_interpreter = python_setup['default_interpreter']
self.assertIsNotNone(default_interpreter)
self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
python_target = json_data['targets']['src/python/pants/backend/python/targets:targets']
self.assertIsNotNone(python_target)
self.assertEquals(default_interpreter, python_target['python_interpreter'])
def test_intransitive_and_scope(self):
with self.temporary_workdir() as workdir:
test_path = 'testprojects/maven_layout/provided_patching/one/src/main/java'
test_target = '{}:common'.format(test_path)
json_data = self.run_export(test_target, workdir)
h = hash_target('{}:shadow'.format(test_path), 'provided')
synthetic_target = '{}:shadow-unstable-provided-{}'.format(test_path, h)
self.assertEquals(False, json_data['targets'][synthetic_target]['transitive'])
self.assertEquals('compile test', json_data['targets'][synthetic_target]['scope'])
def test_export_is_target_roots(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/::'
json_data = self.run_export(test_target, workdir, load_libs=False)
for target_address, attributes in json_data['targets'].items():
# Make sure all targets under `test_target`'s directory are target roots.
self.assertEqual(
attributes['is_target_root'],
target_address.startswith("examples/tests/java/org/pantsbuild/example")
)
|
|
################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
r"""
General functions for random sampling and distributions.
"""
import numpy as np
from scipy import special
from . import linalg
from . import misc
def intervals(N, length, amount=1, gap=0):
r"""
Return random non-overlapping parts of a sequence.
For instance, N=16, length=2 and amount=4:
[0, |1, 2|, 3, 4, 5, |6, 7|, 8, 9, |10, 11|, |12, 13|, 14, 15]
that is,
[1,2,6,7,10,11,12,13]
However, the function returns only the indices of the beginning of the
sequences, that is, in the example:
[1,6,10,12]
"""
if length * amount + gap * (amount-1) > N:
raise ValueError("Too short sequence")
# In practice, we draw the sizes of the gaps between the sequences
total_gap = N - length*amount - gap*(amount-1)
gaps = np.random.multinomial(total_gap, np.ones(amount+1)/(amount+1))
# And then we get the beginning index of each sequence
intervals = np.cumsum(gaps[:-1]) + np.arange(amount)*(length+gap)
return intervals
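# Illustrative usage, not part of the original module: with N=16, length=2 and
# amount=4, the call returns the four random start indices of non-overlapping
# length-2 parts, mirroring the docstring example (the actual indices vary).
def _example_intervals():
    starts = intervals(16, 2, amount=4)
    # Each returned index i marks the part [i, i+1] of the length-16 sequence.
    return starts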
def mask(*shape, p=0.5):
r"""
Return a boolean array of the given shape.
Parameters
----------
d0, d1, ..., dn : int
Shape of the output.
p : value in range [0,1]
A probability that the elements are `True`.
"""
return np.random.rand(*shape) < p
def wishart(nu, V):
r"""
Draw a random sample from the Wishart distribution.
Parameters
----------
nu : int
"""
# TODO/FIXME: Are these correct..
D = np.shape(V)[0]
if nu < D:
raise ValueError("Degrees of freedom must be equal or greater than the "
"dimensionality of the matrix.")
X = np.random.multivariate_normal(np.zeros(D), V, size=nu)
return np.dot(X, X.T)
wishart_rand = wishart
def invwishart_rand(nu, V):
# TODO/FIXME: Are these correct..
return np.linalg.inv(wishart_rand(nu, V))
def covariance(D, size=(), nu=None):
r"""
Draw a random covariance matrix.
Draws from inverse-Wishart distribution. The distribution of each element is
independent of the dimensionality of the matrix.
C ~ Inv-W(I, D)
Parameters
----------
D : int
Dimensionality of the covariance matrix.
    Returns
    -------
C : (D,D) ndarray
Positive-definite symmetric :math:`D\times D` matrix.
"""
if nu is None:
nu = D
if nu < D:
raise ValueError("nu must be greater than or equal to D")
try:
size = tuple(size)
except TypeError:
size = (size,)
shape = size + (D,nu)
C = np.random.randn(*shape)
C = linalg.dot(C, np.swapaxes(C, -1, -2)) / nu
return linalg.inv(C)
#return np.linalg.inv(np.dot(C, C.T))
def correlation(D):
r"""
Draw a random correlation matrix.
"""
    X = np.random.randn(D,D)
s = np.sqrt(np.sum(X**2, axis=-1, keepdims=True))
X = X / s
return np.dot(X, X.T)
def gaussian_logpdf(yVy, yVmu, muVmu, logdet_V, D):
r"""
Log-density of a Gaussian distribution.
:math:`\mathcal{G}(\mathbf{y}|\boldsymbol{\mu},\mathbf{V}^{-1})`
Parameters
-----------
yVy : ndarray or double
:math:`\mathbf{y}^T\mathbf{Vy}`
yVmu : ndarray or double
:math:`\mathbf{y}^T\mathbf{V}\boldsymbol{\mu}`
muVmu : ndarray or double
:math:`\boldsymbol{\mu}^T\mathbf{V}\boldsymbol{\mu}`
logdet_V : ndarray or double
Log-determinant of the precision matrix, :math:`\log|\mathbf{V}|`.
D : int
Dimensionality of the distribution.
"""
return -0.5*yVy + yVmu - 0.5*muVmu + 0.5*logdet_V - 0.5*D*np.log(2*np.pi)
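# Illustrative check, not part of the original module: for a standard 1-D Gaussian
# evaluated at its mean (yVy = yVmu = muVmu = 0 and logdet_V = 0) the expression
# reduces to -0.5*log(2*pi), the familiar normal log-density at the mean.
def _example_gaussian_logpdf():
    return gaussian_logpdf(0.0, 0.0, 0.0, 0.0, 1)  # == -0.5*np.log(2*np.pi)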
def gaussian_entropy(logdet_V, D):
r"""
Compute the entropy of a Gaussian distribution.
If you want to get the gradient, just let each parameter be a gradient of
that term.
Parameters
----------
logdet_V : ndarray or double
The log-determinant of the precision matrix.
D : int
The dimensionality of the distribution.
"""
return -0.5*logdet_V + 0.5*D + 0.5*D*np.log(2*np.pi)
def gamma_logpdf(bx, logx, a_logx, a_logb, gammaln_a):
r"""
Log-density of :math:`\mathcal{G}(x|a,b)`.
If you want to get the gradient, just let each parameter be a gradient of
that term.
Parameters
----------
bx : ndarray
:math:`bx`
logx : ndarray
:math:`\log(x)`
a_logx : ndarray
:math:`a \log(x)`
a_logb : ndarray
:math:`a \log(b)`
gammaln_a : ndarray
:math:`\log\Gamma(a)`
"""
return a_logb - gammaln_a + a_logx - logx - bx
#def gamma_logpdf(a, log_b, gammaln_a,
def gamma_entropy(a, log_b, gammaln_a, psi_a, a_psi_a):
r"""
Entropy of :math:`\mathcal{G}(a,b)`.
If you want to get the gradient, just let each parameter be a gradient of
that term.
Parameters
----------
a : ndarray
:math:`a`
log_b : ndarray
:math:`\log(b)`
gammaln_a : ndarray
:math:`\log\Gamma(a)`
psi_a : ndarray
:math:`\psi(a)`
a_psi_a : ndarray
:math:`a\psi(a)`
"""
return a - log_b + gammaln_a + psi_a - a_psi_a
def orth(D):
r"""
Draw random orthogonal matrix.
"""
Q = np.random.randn(D,D)
(Q, _) = np.linalg.qr(Q)
return Q
def svd(s):
r"""
Draw a random matrix given its singular values.
"""
D = len(s)
U = orth(D) * s
V = orth(D)
return np.dot(U, V.T)
def sphere(N=1):
r"""
Draw random points uniformly on a unit sphere.
Returns (latitude,longitude) in degrees.
"""
lon = np.random.uniform(-180, 180, N)
lat = (np.arccos(np.random.uniform(-1, 1, N)) * 180 / np.pi) - 90
return (lat, lon)
def bernoulli(p, size=None):
r"""
Draw random samples from the Bernoulli distribution.
"""
if isinstance(size, int):
size = (size,)
if size is None:
size = np.shape(p)
return (np.random.rand(*size) < p)
def categorical(p, size=None):
r"""
Draw random samples from a categorical distribution.
"""
if size is None:
size = np.shape(p)[:-1]
if isinstance(size, int):
size = (size,)
if np.any(np.asanyarray(p)<0):
raise ValueError("Array contains negative probabilities")
if not misc.is_shape_subset(np.shape(p)[:-1], size):
raise ValueError("Probability array shape and requested size are "
"inconsistent")
size = tuple(size)
# Normalize probabilities
p = p / np.sum(p, axis=-1, keepdims=True)
# Compute cumulative probabilities (p_1, p_1+p_2, ..., p_1+...+p_N):
P = np.cumsum(p, axis=-1)
# Draw samples from interval [0,1]
x = np.random.rand(*size)
# For simplicity, repeat p to the size of the output (plus probability axis)
K = np.shape(p)[-1]
P = P * np.ones(tuple(size)+(K,))
if size == ():
z = np.searchsorted(P, x)
else:
        # Search the indices
z = np.zeros(size)
inds = misc.nested_iterator(size)
for ind in inds:
z[ind] = np.searchsorted(P[ind], x[ind])
    return z.astype(int)
def multinomial(n, p, size=None):
plates_n = np.shape(n)
plates_p = np.shape(p)[:-1]
k = np.shape(p)[-1]
if size is None:
size = misc.broadcasted_shape(plates_n, plates_p)
if not misc.is_shape_subset(plates_n, size):
raise ValueError("Shape of n does not broadcast to the given size")
if not misc.is_shape_subset(plates_p, size):
raise ValueError("Shape of p does not broadcast to the given size")
# This isn't a very efficient implementation. One could use NumPy's
# multinomial once for all those plates for which n and p is the same.
n = np.broadcast_to(n, size)
p = np.broadcast_to(p, size + (k,))
x = np.empty(size + (k,))
for i in misc.nested_iterator(size):
x[i] = np.random.multinomial(n[i], p[i])
    return x.astype(int)
def gamma(a, b, size=None):
x = np.random.gamma(a, b, size=size)
if np.any(x == 0):
raise RuntimeError(
"Numerically zero samples. Try using a larger shape parameter in "
"the gamma distribution."
)
return x
def dirichlet(alpha, size=None):
r"""
Draw random samples from the Dirichlet distribution.
"""
if isinstance(size, int):
size = (size,)
if size is None:
size = np.shape(alpha)
else:
size = size + np.shape(alpha)[-1:]
p = np.random.gamma(alpha, size=size)
sump = np.sum(p, axis=-1, keepdims=True)
if np.any(sump == 0):
raise RuntimeError(
"Numerically zero samples. Try using a larger Dirichlet "
"concentration parameter value."
)
p /= sump
return p
def logodds_to_probability(x):
r"""
Solves p from log(p/(1-p))
"""
return 1 / (1 + np.exp(-x))
def alpha_beta_recursion(logp0, logP):
r"""
Compute alpha-beta recursion for Markov chain
    Initial state log-probabilities are in `logp0` and state transition
    log-probabilities are in `logP`. The probabilities do not need to be scaled to
sum to one, but they are interpreted as below:
logp0 = log P(z_0) + log P(y_0|z_0)
logP[...,n,:,:] = log P(z_{n+1}|z_n) + log P(y_{n+1}|z_{n+1})
"""
logp0 = misc.atleast_nd(logp0, 1)
logP = misc.atleast_nd(logP, 3)
D = np.shape(logp0)[-1]
N = np.shape(logP)[-3]
plates = misc.broadcasted_shape(np.shape(logp0)[:-1], np.shape(logP)[:-3])
if np.shape(logP)[-2:] != (D,D):
raise ValueError("Dimension mismatch %s != %s"
% (np.shape(logP)[-2:],
(D,D)))
#
# Run the recursion algorithm
#
# Allocate memory
logalpha = np.zeros(plates+(N,D))
logbeta = np.zeros(plates+(N,D))
g = np.zeros(plates)
# Forward recursion
logalpha[...,0,:] = logp0
for n in range(1,N):
# Compute: P(z_{n-1},z_n|x_1,...,x_n)
v = logalpha[...,n-1,:,None] + logP[...,n-1,:,:]
c = misc.logsumexp(v, axis=(-1,-2))
# Sum over z_{n-1} to get: log P(z_n|x_1,...,x_n)
logalpha[...,n,:] = misc.logsumexp(v - c[...,None,None], axis=-2)
g -= c
# Compute the normalization of the last term
v = logalpha[...,N-1,:,None] + logP[...,N-1,:,:]
g -= misc.logsumexp(v, axis=(-1,-2))
# Backward recursion
logbeta[...,N-1,:] = 0
for n in reversed(range(N-1)):
v = logbeta[...,n+1,None,:] + logP[...,n+1,:,:]
c = misc.logsumexp(v, axis=(-1,-2))
logbeta[...,n,:] = misc.logsumexp(v - c[...,None,None], axis=-1)
v = logalpha[...,:,:,None] + logbeta[...,:,None,:] + logP[...,:,:,:]
c = misc.logsumexp(v, axis=(-1,-2))
zz = np.exp(v - c[...,None,None])
# The logsumexp normalization is not numerically accurate, so do
# normalization again:
zz /= np.sum(zz, axis=(-1,-2), keepdims=True)
z0 = np.sum(zz[...,0,:,:], axis=-1)
z0 /= np.sum(z0, axis=-1, keepdims=True)
return (z0, zz, g)
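# Illustrative sketch, not part of the original module: running the recursion for a
# single chain with D=2 states, where logP stacks N=3 transition+emission terms. The
# log-probabilities below are arbitrary and unnormalized.
def _example_alpha_beta():
    logp0 = np.log(np.array([0.6, 0.4]))          # shape (D,)
    logP = np.log(0.5 * np.ones((3, 2, 2)))       # shape (N, D, D)
    z0, zz, g = alpha_beta_recursion(logp0, logP)
    # z0: marginal of the first state, zz: pairwise state marginals,
    # g: accumulated normalization term.
    return z0, zz, g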
def gaussian_gamma_to_t(mu, Cov, a, b, ndim=1):
r"""
Integrates gamma distribution to obtain parameters of t distribution
"""
alpha = a/b
nu = 2*a
S = Cov / misc.add_trailing_axes(alpha, 2*ndim)
return (mu, S, nu)
def t_logpdf(z2, logdet_cov, nu, D):
r"""
"""
return (special.gammaln((nu+D)/2)
- special.gammaln(nu/2)
- 0.5 * D * np.log(nu*np.pi)
- 0.5 * logdet_cov
- 0.5 * (nu+D) * np.log(1 + z2/nu))
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High-level wrapper for datastore queries."""
from google.cloud.ndb import exceptions
from google.cloud.ndb import model
__all__ = [
"Cursor",
"QueryOptions",
"RepeatedStructuredPropertyPredicate",
"ParameterizedThing",
"Parameter",
"ParameterizedFunction",
"Node",
"FalseNode",
"ParameterNode",
"FilterNode",
"PostFilterNode",
"ConjunctionNode",
"DisjunctionNode",
"AND",
"OR",
"Query",
"gql",
"QueryIterator",
]
Cursor = NotImplemented # From `google.appengine.datastore.datastore_query`
_EQ_OP = "="
_NE_OP = "!="
_IN_OP = "in"
_LT_OP = "<"
_GT_OP = ">"
_OPS = frozenset([_EQ_OP, _NE_OP, _LT_OP, "<=", _GT_OP, ">=", _IN_OP])
class QueryOptions:
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class RepeatedStructuredPropertyPredicate:
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class ParameterizedThing:
"""Base class for :class:`Parameter` and :class:`ParameterizedFunction`.
This exists purely for :func:`isinstance` checks.
"""
__slots__ = ()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
return not self == other
class Parameter(ParameterizedThing):
"""Represents a bound variable in a GQL query.
``Parameter(1)`` corresponds to a slot labeled ``:1`` in a GQL query.
``Parameter('xyz')`` corresponds to a slot labeled ``:xyz``.
The value must be set (bound) separately.
Args:
key (Union[str, int]): The parameter key.
Raises:
TypeError: If the ``key`` is not a string or integer.
"""
__slots__ = ("_key",)
def __init__(self, key):
if not isinstance(key, (int, str)):
raise TypeError(
"Parameter key must be an integer or string, not {}".format(
key
)
)
self._key = key
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self._key)
def __eq__(self, other):
if not isinstance(other, Parameter):
return NotImplemented
return self._key == other._key
@property
def key(self):
"""Retrieve the key."""
return self._key
def resolve(self, bindings, used):
"""Resolve the current parameter from the parameter bindings.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): A mapping of already used
parameters. This will be modified if the current parameter
is in ``bindings``.
Returns:
Any: The bound value for the current parameter.
Raises:
.BadArgumentError: If the current parameter is not in ``bindings``.
"""
key = self._key
if key not in bindings:
raise exceptions.BadArgumentError(
"Parameter :{} is not bound.".format(key)
)
value = bindings[key]
used[key] = True
return value
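# Illustrative sketch, not part of the original module: resolving a Parameter against a
# bindings mapping. The binding value below is arbitrary.
def _example_parameter_resolve():
    param = Parameter("xyz")  # corresponds to the GQL slot :xyz
    used = {}
    value = param.resolve({"xyz": 42}, used)
    # value == 42 and used == {"xyz": True}
    return value, used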
class ParameterizedFunction(ParameterizedThing):
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
class Node:
"""Base class for filter expression tree nodes.
Tree nodes are considered immutable, even though they can contain
Parameter instances, which are not. In particular, two identical
trees may be represented by the same Node object in different
contexts.
Raises:
TypeError: Always, only subclasses are allowed.
"""
__slots__ = ()
def __new__(cls):
if cls is Node:
raise TypeError("Cannot instantiate Node, only a subclass.")
return super(Node, cls).__new__(cls)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
return not self == other
def __le__(self, unused_other):
raise TypeError("Nodes cannot be ordered")
def __lt__(self, unused_other):
raise TypeError("Nodes cannot be ordered")
def __ge__(self, unused_other):
raise TypeError("Nodes cannot be ordered")
def __gt__(self, unused_other):
raise TypeError("Nodes cannot be ordered")
def _to_filter(self, post=False):
"""Helper to convert to low-level filter, or :data:`None`.
Raises:
NotImplementedError: Always. This method is virtual.
"""
raise NotImplementedError
def _post_filters(self):
"""Helper to extract post-filter nodes, if any.
Returns:
None: Always. Because this is the base implementation.
"""
return None
def resolve(self, bindings, used):
"""Return a node with parameters replaced by the selected values.
.. note::
Both ``bindings`` and ``used`` are unused by this base class
implementation.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): A mapping of already used
parameters. This will be modified if the current parameter
is in ``bindings``.
Returns:
Node: The current node.
"""
return self
class FalseNode(Node):
"""Tree node for an always-failing filter."""
__slots__ = ()
def __eq__(self, other):
"""Equality check.
An instance will always equal another :class:`FalseNode` instance. This
is because they hold no state.
"""
if not isinstance(other, FalseNode):
return NotImplemented
return True
def _to_filter(self, post=False):
"""(Attempt to) convert to a low-level filter instance.
Args:
post (bool): Indicates if this is a post-filter node.
Raises:
.BadQueryError: If ``post`` is :data:`False`, because there's no
point submitting a query that will never return anything.
"""
if post:
return None
raise exceptions.BadQueryError("Cannot convert FalseNode to predicate")
class ParameterNode(Node):
"""Tree node for a parameterized filter.
Args:
prop (~google.cloud.ndb.model.Property): A property describing a value
type.
op (str): The comparison operator. One of ``=``, ``!=``, ``<``, ``<=``,
``>``, ``>=`` or ``in``.
param (ParameterizedThing): The parameter corresponding to the node.
Raises:
TypeError: If ``prop`` is not a
:class:`~google.cloud.ndb.model.Property`.
TypeError: If ``op`` is not one of the accepted operators.
TypeError: If ``param`` is not a :class:`.Parameter` or
:class:`.ParameterizedFunction`.
"""
__slots__ = ("_prop", "_op", "_param")
def __new__(cls, prop, op, param):
if not isinstance(prop, model.Property):
raise TypeError("Expected a Property, got {!r}".format(prop))
if op not in _OPS:
raise TypeError("Expected a valid operator, got {!r}".format(op))
if not isinstance(param, ParameterizedThing):
raise TypeError(
"Expected a ParameterizedThing, got {!r}".format(param)
)
obj = super(ParameterNode, cls).__new__(cls)
obj._prop = prop
obj._op = op
obj._param = param
return obj
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method only applies if the ``pickle`` protocol is 2 or
greater.
Returns:
Tuple[~google.cloud.ndb.model.Property, str, ParameterizedThing]:
A tuple containing the internal state: the property, operation and
parameter.
"""
return self._prop, self._op, self._param
def __repr__(self):
return "ParameterNode({!r}, {!r}, {!r})".format(
self._prop, self._op, self._param
)
def __eq__(self, other):
if not isinstance(other, ParameterNode):
return NotImplemented
return (
self._prop._name == other._prop._name
and self._op == other._op
and self._param == other._param
)
def _to_filter(self, post=False):
"""Helper to convert to low-level filter, or :data:`None`.
Args:
post (bool): Indicates if this is a post-filter node.
Raises:
.BadArgumentError: Always. This is because this node represents
a parameter, i.e. no value exists to be filtered on.
"""
raise exceptions.BadArgumentError(
"Parameter :{} is not bound.".format(self._param.key)
)
def resolve(self, bindings, used):
"""Return a node with parameters replaced by the selected values.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): A mapping of already used
parameters.
Returns:
Union[~google.cloud.ndb.query.DisjunctionNode, \
~google.cloud.ndb.query.FilterNode, \
~google.cloud.ndb.query.FalseNode]: A node corresponding to
the value substituted.
"""
value = self._param.resolve(bindings, used)
if self._op == _IN_OP:
return self._prop._IN(value)
else:
return self._prop._comparison(self._op, value)
class FilterNode(Node):
"""Tree node for a single filter expression.
For example ``FilterNode("a", ">", 3)`` filters for entities where the
value ``a`` is greater than ``3``.
.. warning::
The constructor for this type may not always return a
:class:`FilterNode`. For example:
* The filter ``name != value`` is converted into
``(name > value) OR (name < value)`` (a :class:`DisjunctionNode`)
* The filter ``name in (value1, ..., valueN)`` is converted into
``(name = value1) OR ... OR (name = valueN)`` (also a
:class:`DisjunctionNode`)
* The filter ``name in ()`` (i.e. a property is among an empty list
of values) is converted into a :class:`FalseNode`
* The filter ``name in (value1,)`` (i.e. a list with one element) is
converted into ``name = value1``, a related :class:`FilterNode`
with a different ``opsymbol`` and ``value`` than what was passed
to the constructor
Args:
name (str): The name of the property being filtered.
opsymbol (str): The comparison operator. One of ``=``, ``!=``, ``<``,
``<=``, ``>``, ``>=`` or ``in``.
value (Any): The value to filter on / relative to.
Raises:
TypeError: If ``opsymbol`` is ``"in"`` but ``value`` is not a
basic container (:class:`list`, :class:`tuple`, :class:`set` or
:class:`frozenset`)
"""
__slots__ = ("_name", "_opsymbol", "_value")
def __new__(cls, name, opsymbol, value):
if isinstance(value, model.Key):
value = value._key
if opsymbol == _NE_OP:
node1 = FilterNode(name, _LT_OP, value)
node2 = FilterNode(name, _GT_OP, value)
return DisjunctionNode(node1, node2)
if opsymbol == _IN_OP:
if not isinstance(value, (list, tuple, set, frozenset)):
raise TypeError(
"in expected a list, tuple or set of values; "
"received {!r}".format(value)
)
nodes = [
FilterNode(name, _EQ_OP, sub_value) for sub_value in value
]
if not nodes:
return FalseNode()
if len(nodes) == 1:
return nodes[0]
return DisjunctionNode(*nodes)
instance = super(FilterNode, cls).__new__(cls)
instance._name = name
instance._opsymbol = opsymbol
instance._value = value
return instance
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method only applies if the ``pickle`` protocol is 2 or
greater.
Returns:
Tuple[str, str, Any]: A tuple containing the
internal state: the name, ``opsymbol`` and value.
"""
return self._name, self._opsymbol, self._value
def __repr__(self):
return "{}({!r}, {!r}, {!r})".format(
type(self).__name__, self._name, self._opsymbol, self._value
)
def __eq__(self, other):
if not isinstance(other, FilterNode):
return NotImplemented
return (
self._name == other._name
and self._opsymbol == other._opsymbol
and self._value == other._value
)
def _to_filter(self, post=False):
"""Helper to convert to low-level filter, or :data:`None`.
Args:
post (bool): Indicates if this is a post-filter node.
Returns:
None: If this is a post-filter.
Raises:
NotImplementedError: If the ``opsymbol`` is ``!=`` or ``in``, since
they should correspond to a composite filter. This should
never occur since the constructor will create ``OR`` nodes for
``!=`` and ``in``
NotImplementedError: If not a post-filter and the ``opsymbol``
is a simple comparison. (For now) this is because the original
implementation relied on a low-level datastore query module.
"""
if post:
return None
if self._opsymbol in (_NE_OP, _IN_OP):
raise NotImplementedError(
"Inequality filters are not single filter "
"expressions and therefore cannot be converted "
"to a single filter ({!r})".format(self._opsymbol)
)
raise NotImplementedError("Missing datastore_query.make_filter")
class PostFilterNode(Node):
"""Tree node representing an in-memory filtering operation.
This is used to represent filters that cannot be executed by the
datastore, for example a query for a structured value.
Args:
predicate (Callable[[Any], bool]): A filter predicate that
takes a datastore entity (typically as a protobuf) and
returns :data:`True` or :data:`False` if the entity matches
the given filter.
"""
__slots__ = ("predicate",)
def __new__(cls, predicate):
instance = super(PostFilterNode, cls).__new__(cls)
instance.predicate = predicate
return instance
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method only applies if the ``pickle`` protocol is 2 or
greater.
Returns:
Tuple[Callable[[Any], bool],]: A tuple containing a single value,
the ``predicate`` attached to this node.
"""
return (self.predicate,)
def __repr__(self):
return "{}({})".format(type(self).__name__, self.predicate)
def __eq__(self, other):
if not isinstance(other, PostFilterNode):
return NotImplemented
return self is other or self.predicate == other.predicate
def _to_filter(self, post=False):
"""Helper to convert to low-level filter, or :data:`None`.
Args:
post (bool): Indicates if this is a post-filter node.
Returns:
Tuple[Callable[[Any], bool], None]: If this is a post-filter, this
returns the stored ``predicate``, otherwise it returns
:data:`None`.
"""
if post:
return self.predicate
else:
return None
class _BooleanClauses:
"""This type will be used for symbolically performing boolean operations.
Internally, the state will track a symbolic expression like::
A or (B and C) or (A and D)
as a list of the ``OR`` components::
[A, B and C, A and D]
When ``combine_or=False``, it will track ``AND`` statements as a list,
making the final simplified form of our example::
[[A], [B, C], [A, D]]
Via :meth:`add_node`, we will ensure that new nodes will be correctly
combined (via ``AND`` or ``OR``) with the current expression.
Args:
name (str): The name of the class that is tracking a
boolean expression.
combine_or (bool): Indicates if new nodes will be combined
with the current boolean expression via ``AND`` or ``OR``.
"""
__slots__ = ("name", "combine_or", "or_parts")
def __init__(self, name, combine_or):
self.name = name
self.combine_or = combine_or
if combine_or:
# For ``OR()`` the parts are just nodes.
self.or_parts = []
else:
# For ``AND()`` the parts are "segments", i.e. node lists.
self.or_parts = [[]]
def add_node(self, node):
"""Update the current boolean expression.
This uses the distributive law for sets to combine as follows:
- ``(A or B or C or ...) or D`` -> ``A or B or C or ... or D``
- ``(A or B or C or ...) and D`` ->
``(A and D) or (B and D) or (C and D) or ...``
Args:
node (Node): A node to add to the list of clauses.
Raises:
TypeError: If ``node`` is not a :class:`.Node`.
"""
if not isinstance(node, Node):
raise TypeError(
"{}() expects Node instances as arguments; "
"received a non-Node instance {!r}".format(self.name, node)
)
if self.combine_or:
if isinstance(node, DisjunctionNode):
# [S1 or ... or Sn] or [A1 or ... or Am]
# -> S1 or ... Sn or A1 or ... or Am
self.or_parts.extend(node._nodes)
else:
# [S1 or ... or Sn] or [A1]
# -> S1 or ... or Sn or A1
self.or_parts.append(node)
else:
if isinstance(node, DisjunctionNode):
# [S1 or ... or Sn] and [A1 or ... or Am]
# -> [S1 and A1] or ... or [Sn and A1] or
# ... or [Sn and Am] or ... or [Sn and Am]
new_segments = []
for segment in self.or_parts:
# ``segment`` represents ``Si``
for sub_node in node:
# ``sub_node`` represents ``Aj``
new_segment = segment + [sub_node]
new_segments.append(new_segment)
# Replace wholesale.
self.or_parts[:] = new_segments
elif isinstance(node, ConjunctionNode):
# [S1 or ... or Sn] and [A1 and ... and Am]
# -> [S1 and A1 and ... and Am] or ... or
# [Sn and A1 and ... and Am]
for segment in self.or_parts:
# ``segment`` represents ``Si``
segment.extend(node._nodes)
else:
# [S1 or ... or Sn] and [A1]
# -> [S1 and A1] or ... or [Sn and A1]
for segment in self.or_parts:
segment.append(node)
class ConjunctionNode(Node):
"""Tree node representing a boolean ``AND`` operator on multiple nodes.
.. warning::
The constructor for this type may not always return a
:class:`ConjunctionNode`. For example:
* If the passed in ``nodes`` has only one entry, that single node
will be returned by the constructor
* If the resulting boolean expression has an ``OR`` in it, then a
:class:`DisjunctionNode` will be returned; e.g.
``AND(OR(A, B), C)`` becomes ``OR(AND(A, C), AND(B, C))``
Args:
nodes (Tuple[Node, ...]): A list of nodes to be joined.
Raises:
TypeError: If ``nodes`` is empty.
RuntimeError: If the ``nodes`` combine to an "empty" boolean
expression.
"""
__slots__ = ("_nodes",)
def __new__(cls, *nodes):
if not nodes:
raise TypeError("ConjunctionNode() requires at least one node.")
elif len(nodes) == 1:
return nodes[0]
clauses = _BooleanClauses("ConjunctionNode", combine_or=False)
for node in nodes:
clauses.add_node(node)
if not clauses.or_parts:
# NOTE: The original implementation returned a ``FalseNode``
# here but as far as I can tell this code is unreachable.
raise RuntimeError("Invalid boolean expression")
if len(clauses.or_parts) > 1:
return DisjunctionNode(
*[ConjunctionNode(*segment) for segment in clauses.or_parts]
)
instance = super(ConjunctionNode, cls).__new__(cls)
instance._nodes = clauses.or_parts[0]
return instance
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method only applies if the ``pickle`` protocol is 2 or
greater.
Returns:
Tuple[Node, ...]: The list of stored nodes, converted to a
:class:`tuple`.
"""
return tuple(self._nodes)
def __iter__(self):
return iter(self._nodes)
def __repr__(self):
all_nodes = ", ".join(map(str, self._nodes))
return "AND({})".format(all_nodes)
def __eq__(self, other):
if not isinstance(other, ConjunctionNode):
return NotImplemented
return self._nodes == other._nodes
def _to_filter(self, post=False):
"""Helper to convert to low-level filter, or :data:`None`.
Args:
post (bool): Indicates if this is a post-filter node.
Returns:
Optional[Node]: The single or composite filter corresponding to
the pre- or post-filter nodes stored.
Raises:
NotImplementedError: If a composite filter must be returned. This
is because the original implementation relied on a low-level
datastore query module.
"""
filters = []
for node in self._nodes:
if isinstance(node, PostFilterNode) == post:
as_filter = node._to_filter(post=post)
if as_filter:
filters.append(as_filter)
if not filters:
return None
if len(filters) == 1:
return filters[0]
raise NotImplementedError("Missing datastore_query.CompositeFilter")
def _post_filters(self):
"""Helper to extract post-filter nodes, if any.
Filters all of the stored nodes that are :class:`PostFilterNode`.
Returns:
Optional[Node]: One of the following:
* :data:`None` if there are no post-filter nodes in this ``AND()``
clause
* The single node if there is exactly one post-filter node, e.g.
if the only node in ``AND(A, B, ...)`` that is a post-filter
node is ``B``
            * The current node if every stored node is a post-filter node, e.g.
if all nodes ``A, B, ...`` in ``AND(A, B, ...)`` are
post-filter nodes
* A **new** :class:`ConjunctionNode` containing the post-filter
nodes, e.g. if only ``A, C`` are post-filter nodes in
``AND(A, B, C)``, then the returned node is ``AND(A, C)``
"""
post_filters = [
node for node in self._nodes if isinstance(node, PostFilterNode)
]
if not post_filters:
return None
if len(post_filters) == 1:
return post_filters[0]
if post_filters == self._nodes:
return self
return ConjunctionNode(*post_filters)
def resolve(self, bindings, used):
"""Return a node with parameters replaced by the selected values.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): A mapping of already used
parameters. This will be modified for each parameter found
in ``bindings``.
Returns:
Node: The current node, if all nodes are already resolved.
            Otherwise returns a modified :class:`ConjunctionNode` with
each individual node resolved.
"""
resolved_nodes = [node.resolve(bindings, used) for node in self._nodes]
if resolved_nodes == self._nodes:
return self
return ConjunctionNode(*resolved_nodes)
class DisjunctionNode(Node):
"""Tree node representing a boolean ``OR`` operator on multiple nodes.
.. warning::
This constructor may not always return a :class:`DisjunctionNode`.
If the passed in ``nodes`` has only one entry, that single node
will be returned by the constructor.
Args:
nodes (Tuple[Node, ...]): A list of nodes to be joined.
Raises:
TypeError: If ``nodes`` is empty.
"""
__slots__ = ("_nodes",)
def __new__(cls, *nodes):
if not nodes:
raise TypeError("DisjunctionNode() requires at least one node")
elif len(nodes) == 1:
return nodes[0]
instance = super(DisjunctionNode, cls).__new__(cls)
instance._nodes = []
clauses = _BooleanClauses("DisjunctionNode", combine_or=True)
for node in nodes:
clauses.add_node(node)
instance._nodes[:] = clauses.or_parts
return instance
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method only applies if the ``pickle`` protocol is 2 or
greater.
Returns:
Tuple[Node, ...]: The list of stored nodes, converted to a
:class:`tuple`.
"""
return tuple(self._nodes)
def __iter__(self):
return iter(self._nodes)
def __repr__(self):
all_nodes = ", ".join(map(str, self._nodes))
return "OR({})".format(all_nodes)
def __eq__(self, other):
if not isinstance(other, DisjunctionNode):
return NotImplemented
return self._nodes == other._nodes
def resolve(self, bindings, used):
"""Return a node with parameters replaced by the selected values.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): A mapping of already used
parameters. This will be modified for each parameter found
in ``bindings``.
Returns:
Node: The current node, if all nodes are already resolved.
            Otherwise returns a modified :class:`DisjunctionNode` with
each individual node resolved.
"""
resolved_nodes = [node.resolve(bindings, used) for node in self._nodes]
if resolved_nodes == self._nodes:
return self
return DisjunctionNode(*resolved_nodes)
# AND and OR are preferred aliases for these.
AND = ConjunctionNode
OR = DisjunctionNode
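# Illustrative sketch, not part of the original module: combining filter nodes with the
# AND/OR aliases. Per the ConjunctionNode docstring, an OR nested inside an AND is
# distributed, so AND(OR(A, B), C) becomes OR(AND(A, C), AND(B, C)).
def _example_and_or_distribution():
    a = FilterNode("x", "=", 1)
    b = FilterNode("x", "=", 2)
    c = FilterNode("y", ">", 0)
    node = AND(OR(a, b), c)
    # isinstance(node, DisjunctionNode) is True here.
    return node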
class Query:
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
def gql(*args, **kwargs):
raise NotImplementedError
class QueryIterator:
__slots__ = ()
def __init__(self, *args, **kwargs):
raise NotImplementedError
|
|
#!/usr/bin/env python3
#
# Copyright (C) 2016-2017 ShadowMan
#
import os
import json
import time
import flask
import logging
import asyncio
import aiohttp
from collections import namedtuple
from functools import partial
from aiohttp import web
from trainquery import train_station, utils, train_query, train_query_result, exceptions
async_loop = asyncio.get_event_loop()
train_station.init(async_loop)
with open('templates/index.html') as html:
response_text = html.read()
async def index_handler(request):
return web.Response(text = response_text, content_type = 'text/html', charset = 'utf-8')
WS_Response_Data = namedtuple('WS_Response_Data', 'event data')
def _server_emit(ws, ws_response):
if ws_response is None:
        raise RuntimeWarning('the event handler must return a response')
if isinstance(ws_response, WS_Response_Data):
if isinstance(ws_response.data, dict):
response_event = ws_response.event
ws_response = ws_response.data
ws_response.update({'event': response_event})
if isinstance(ws_response, dict):
ws.send_json(ws_response)
if isinstance(ws_response, str):
ws.send_str(ws_response)
def check_train_station(request, emit):
if (train_station.get(request.get('station_name'))):
emit(WS_Response_Data(
# event
'response.train.station',
# data
{
'status': True, 'message': 'success',
'key': request.get('key'),
'station_code': train_station.get(request.get('station_name'))
}
))
else:
emit(WS_Response_Data(
# event
'response.train.station',
# data
{
'status': False, 'message': 'the station not found',
'key': request.get('key'),
'station_code': None
}
))
async def foreach_train(result, emit):
if isinstance(result, train_query_result.ResultParser):
# emit response.train.count
emit(WS_Response_Data(
# event
'response.train.count',
# data
{
'status': True,
'message': 'succeed',
'count': len(result.get_trains_code())
}
))
# emit response.train.list
emit(WS_Response_Data(
# event
'response.train.list',
# data
{
'status': True,
'message': 'succeed',
'list': result.get_trains_code()
}
))
# all train information
for train_code in result.get_trains_code():
selector = result.select(train_code)
emit(WS_Response_Data(
# event
'response.train.profile',
# data
{
'train_code': selector.train_code,
'start_time': selector.start_time,
'arrive_time': selector.arrive_time,
'total_time': selector.total_time,
'start_station': selector.start_station,
'end_station': selector.end_station,
'available': selector.purchase_flag
}
))
# try:
# print('\t', selector.train_code, await selector.seat())
# except exceptions.ReTryExceed as e:
# logging.info('query seat retry count exceeded. ignore this train[{}]'.format(selector.train_code))
# print('\t\t', selector.train_code, await selector.check())
async def query_train_list(request, emit):
emit(WS_Response_Data(
# event
'response.train.query.progress',
# data
{
'status': True,
'progress': 0,
'message': 'start'
}
))
try:
from_station = request.get('from')
to_station = request.get('to')
train_date = request.get('date')
ts = request.get('ts')
train_ts = time.mktime(time.strptime(train_date, '%Y-%m-%d'))
await train_query.TrainQuery().query(
from_station, to_station,
train_ts,
result_handler = foreach_train, args = (emit,)
)
except KeyError:
        raise RuntimeWarning('this frame is not a valid frame')
except Exception as e:
raise RuntimeError(e)
# ending
emit(WS_Response_Data(
# event
'response.train.query.progress',
# data
{
'status': True,
'progress': 100,
'message': 'end'
}
))
global_event_handlers = {
'request.train.station': check_train_station,
'request.train.list': query_train_list
}
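# Illustrative sketch, not part of the original module: the websocket handler below
# expects JSON text frames carrying an "event" key plus handler-specific fields. The
# field names besides "event" mirror the handlers above; the concrete values are made up.
def _example_client_frames():
    station_frame = {'event': 'request.train.station', 'station_name': 'Beijing', 'key': 1}
    list_frame = {'event': 'request.train.list',
                  'from': 'BJP', 'to': 'SHH', 'date': '2017-10-01', 'ts': 0}
    return json.dumps(station_frame), json.dumps(list_frame)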
async def web_socket_handler(request):
# WebSocket Response Instance
ws = web.WebSocketResponse()
# Prepare Request
await ws.prepare(request)
# emit methods
emit = partial(_server_emit, ws)
async for message in ws:
if message.type == aiohttp.WSMsgType.ERROR:
            logging.warning('ws connection closed with exception %s', ws.exception())
elif message.type == aiohttp.WSMsgType.TEXT:
try:
message_data = json.loads(message.data)
except Exception:
await ws.close()
logging.warning('the ws message is not json string')
raise RuntimeWarning('the ws message is not json string')
try:
event = message_data['event']
message_data.pop('event')
if event in global_event_handlers and callable(global_event_handlers[event]):
if asyncio.iscoroutinefunction(global_event_handlers[event]):
await global_event_handlers[event](message_data, emit)
else:
global_event_handlers.get(event)(message_data, emit)
else:
                    raise RuntimeWarning('event \'{}\' not found'.format(event))
except RuntimeWarning:
raise
except KeyError:
                raise RuntimeWarning('the ws message must have an event name')
except Exception:
                emit(WS_Response_Data('error', 'an error occurred in the ws message handler'))
return ws
if __name__ == '__main__':
app = web.Application()
app.router.add_get("/", index_handler)
app.router.add_get('/socket', web_socket_handler)
app.router.add_static('/static', './static')
web.run_app(app, host = '127.0.0.1', port = 5000)
|
|
import re
import sys
for path in sys.path:
if path and 'anaconda' in path:
sys.path.remove(path)
import numpy as np
from pybedtools import *
import subprocess, os, shutil
from collections import *
import time
import dill as pickle
from difflib import SequenceMatcher
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
# help from online sources ^^^^, helps verify backtracking from MAFs to the original fastas
def parseConfigFindPath(stringFind,configFile):
"""findPath will find path of associated specified string or info from config file"""
for line in configFile:
if stringFind in line: # if find string specified, return pathname or info
configFile.seek(0)
return line.split()[-1].strip('\n')
configFile.seek(0)
def parseConfigFindList(stringFind,configFile):
"""parseConfigFindList inputs a particular string to find and read file after and a configuration file object
outputs list of relevant filenames"""
read = 0
listOfItems = []
for line in configFile:
if line:
if read == 1:
if 'Stop' in line:
configFile.seek(0)
break # exit the function and return the list of files or list information
listOfItems.append(line.strip('\n'))
if stringFind in line:
read = 1 # if find string specified, begin reading lines
configFile.seek(0)
return listOfItems
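# Illustrative sketch, not part of the original script: the two parsers above expect a
# plain-text config where a scalar entry keeps its value as the last whitespace-separated
# token on its line, and a list entry is the block of lines between its key and a line
# containing 'Stop'. The keys mirror the parseConfig* calls below; the values are made up.
exampleConfigText = (
    'root_folder /path/to/analysis\n'
    'pickleSkip 0\n'
    'masterListSpecies\n'
    'PvirgatumN_383_N\n'
    'PvirgatumK_383_K\n'
    'Stop\n'
)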
start = time.clock()
pickleSkip = 0
print 'Loading CNS configuration file...','time=',time.clock()-start
configFile = open('configCNSAnalysis.txt','r')
rootfolder = parseConfigFindPath('root_folder',configFile)
pathPython = parseConfigFindPath('pathPython',configFile)
# species names and IDs; FIXME: this info could also be added to a config file
masterListSpecies = parseConfigFindList('masterListSpecies',configFile)
checkValidity = parseConfigFindPath('checkValidity',configFile)
intragenus = parseConfigFindList('intragenus',configFile)
intergenus = parseConfigFindList('intergenus',configFile)
subgenome = parseConfigFindList('subgenome',configFile)
conservedFastaPath = parseConfigFindPath('conservedFastaPath',configFile)
pickleSkip = parseConfigFindPath('pickleSkip',configFile)
pickleName = parseConfigFindPath('pickleName',configFile)
fasta2phylip = parseConfigFindPath('fasta2phylip',configFile)
phyML = parseConfigFindPath('PhyML',configFile)
bootstrap = parseConfigFindPath('bootstrap',configFile)
treeFile = parseConfigFindPath('treeFile',configFile)
treeOut = parseConfigFindPath('treeOut',configFile)
ratioCopy = parseConfigFindPath('ratioCopy',configFile)
outputTreeImages = parseConfigFindPath('outputTreeImages',configFile)
configFile.close()
if phyML == '1':
phyML = 1
else:
phyML = 0
if outputTreeImages == '1':
outputTreeImages = 1
else:
outputTreeImages = 0
if ratioCopy == '1':
ratioCopy = 1
else:
ratioCopy = 0
if fasta2phylip == '1':
fasta2phylip = 1
else:
fasta2phylip = 0
if treeOut == '1':
treeOut = 1
else:
treeOut = 0
if pickleSkip == '1':
pickleSkip = 1
else:
pickleSkip = 0
if checkValidity == '0':
checkValidity = 0
sys.path.append(pathPython) # add python path
class speciesClass(): # add information about species that stores name and protyome ID, genome .fa file Fasta object, conserved Bed Element files
#and you can generate bed files for genes of species and CDS of species
def __init__(self,speciesNumber,genomeFileList,gffFileList,speciesName,speciesShortName):
self.speciesNumber = speciesNumber
for file in gffFileList:
if self.speciesNumber in file:
self.gffFile = file
self.speciesName = speciesName
self.speciesShortName = speciesShortName
self.conservedElementsBed = '%s_ConservedElements.bed'%self.speciesName
#self.conservedElementsBedFile = open(self.conservedElementsBed, 'w')
def count2Conditional(countSeq,speciesList):
global speciesInfo
return ''.join('%s%d'%(speciesInfo[[speciesName for speciesName in speciesInfo.keys() if species in speciesName][0]].speciesShortName,countSeq[species]) for species in speciesList)
speciesInfo = {}
conditionalDictionary = defaultdict(list)
# list all files in analysis directory
listALLFiles = str(subprocess.Popen('ls', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
# FIXME ['ls', '%s' % '']
print 'Generating File List','time=',time.clock()-start
# generate list of MAF, GFF and .fa files
listMAFfiles = []
listGFFFiles = []
listGenomeFiles = []
for file in listALLFiles:
if file.endswith('.maf'):
listMAFfiles.append(file.strip('\n'))
if file.endswith('.gff') or file.endswith('.gff3'):
listGFFFiles.append(file.strip('\n'))
if file.endswith('.fa') or file.endswith('.fasta'):
listGenomeFiles.append(file.strip('\n'))
print 'Initializing instances of species class...','time=',time.clock()-start
# generate speciesClass objects with relevant info seen above, for each species on masterListSpecies
for species in masterListSpecies:
speciesInfo[species] = speciesClass(species.split('_')[1], listGenomeFiles, listGFFFiles, species.split('_')[0], species.split('_')[2])
"""
def turnSixteen(x):
if x == -1:
return 16
else:
return x
"""
print 'Generating list of intragenus, intergenus species and complete species list','time=',time.clock()-start
listIntraGenus = []
listInterGenus = []
listSubgenomes = []
for shortName in intragenus:
listIntraGenus.append([species.split('_')[0] for species in masterListSpecies if shortName == species.split('_')[-1].strip('\n')][0])
if shortName in subgenome:
listSubgenomes.append(
[species.split('_')[0] for species in masterListSpecies if shortName == species.split('_')[-1].strip('\n')][
0])
for shortName in intergenus:
listInterGenus.append([species.split('_')[0] for species in masterListSpecies if shortName == species.split('_')[-1].strip('\n')][0])
listIncludedSpecies = listIntraGenus+listInterGenus
def findBadCharPosition(strSeq):
"""for each MAF sequence, output maximum number of valid characters in a row, exclude duplicates/lowercase/N/softmask <- invalid
only accept sequence in analysis if at least 15 valid characters in a row"""
#minVal = np.min(np.vectorize(lambda y: turnSixteen(y))(np.vectorize(lambda x: strSeq.find(x))(np.array(['a','t','c','g','N']))))
if 'a' in strSeq or 'c' in strSeq or 'N' in strSeq or 'g' in strSeq or 't' in strSeq:
return np.max(np.vectorize(lambda x: len(x))(np.array(strSeq.replace('a','N').replace('c','N').replace('t','N').replace('g','N').strip('-').split('N'))))
else:
return 16
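# Illustrative check, not part of the original script: the longest run of valid
# (uppercase, unmasked, non-N) characters decides whether a MAF sequence is kept in the
# loop below; anything under 15 in a row is rejected.
def exampleFindBadCharPosition():
    return (findBadCharPosition('ACGT' * 5),       # 16: no lowercase/N characters at all
            findBadCharPosition('ACGTACGTaACGT'))  # 8: the lowercase 'a' breaks the run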
# if checking whether original fasta sequences are valid, MAF info can backtrack to find bed information/fasta DNA sequence start stop info
segmentCount = 0
# original conditionals, two-copy ratios between species, NKH 111 is PvirN has 1 sequence, K has 1, Hallii has 1
# can design something similar for Bhybridum analysis... eg. BdDSBs1011 D and S are subgenomes
# check out each MAF
mafAnalysisStructure = defaultdict(list)
if pickleSkip == 0:
print 'Reading MAF Segments and inputting them into MAF analysis structure','time=',time.clock()-start
with open('mafAllOnes.maf','a') as f:
for file in listMAFfiles:
inputMAF = open(file,'r')
# for each segment in MAF file
for segment in inputMAF.read().split('\n\n'):
writeLines = []
countSeq = Counter()
# dont skip analyzing segment or writing lines
skipSegment = 0
# set count of # seq for each species in segment to 0, can have multiple copies throughout genome
for species in masterListSpecies:
                    countSeq[species.split('_')[0]] = 0  # FIXME change _ to -; the newest MAF files will likely use - instead of _,
                    # so the code may have to be modified to reflect this
speciesList = []
outputInfo = []
if '#' in segment:
skipSegment = 1
for line in segment.split('\n'):
if line and skipSegment == 0:
if line[0] == 's' and 'Anc' not in line.split()[1]: # if this is a sequence from an actual species
# if length of sequence is >= 20 and over 15 valid characters in row
try:
if int(line.split()[3]) >= 20 and findBadCharPosition(line.split()[-1]) >= 15:
lineList = line.split()
lineList2 = lineList[1].split('.')
speciesName = lineList2[0]
countSeq[speciesName] += 1
lineList3 = lineList2[-1].split('_')
writeLines.append(line+'\n')
# writing to a bed file for a particular species, generate list to write for each species (chr,xi,xf,orientation)!!!!
else: # if dont meet reqs, skip analysis of this segment
skipSegment = 1
except:
skipSegment = 1
# FIXME need INCLUDE AT LEAST TWO SPECIES ELSE SKIP SEGMENT ^^^ CAN FIX ABOVE
if skipSegment == 0: # if staying with segment for all species, write the lines to output bed and add respective
# number of sequences per species for each segment, also output MASegment ID number
if all(countSeq[species] == 1 for species in listIncludedSpecies):
f.write('a\n')
f.writelines(writeLines)
f.write('\n\n')
#FIXME _ to -
segmentCount+=1 # add count to number of segments to generate multiple alignment segment IDs
#, lineList[4],''.join(speciesName2 + ',' for speciesName2 in speciesList)[:-1]))
#FIXME '_' or '-' ^^^^^
#for infoTuple in outputInfo:
#write --> (infoTuple+(''.join(speciesName + ',' for speciesName in speciesList)[:-1],)))#str(Pvir[infoTuple[0]][infoTuple[1]:infoTuple[2]]))))
a=1
inputMAF.close()
print 'End..','time=',time.clock()-start
|
|
#!/usr/bin/python
# Copyright: (c) 2017, VEXXHOST, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_endpoint
short_description: Manage OpenStack Identity service endpoints
extends_documentation_fragment: openstack
author:
- Mohammed Naser (@mnaser)
- Alberto Murillo (@albertomurillo)
version_added: "2.5"
description:
- Create, update, or delete OpenStack Identity service endpoints. If a
service with the same combination of I(service), I(interface) and I(region)
      exists, the I(url) and I(state) (C(present) or C(absent)) will be updated.
options:
service:
description:
- Name or id of the service.
required: true
endpoint_interface:
description:
- Interface of the service.
choices: [admin, public, internal]
required: true
url:
description:
- URL of the service.
required: true
region:
description:
- Region that the service belongs to. Note that I(region_name) is used for authentication.
enabled:
description:
- Whether the endpoint is enabled.
default: True
type: bool
state:
description:
- Should the resource be C(present) or C(absent).
choices: [present, absent]
default: present
requirements:
- openstacksdk >= 0.13.0
'''
EXAMPLES = '''
- name: Create a public endpoint for the glance service
os_keystone_endpoint:
cloud: mycloud
service: glance
endpoint_interface: public
url: http://controller:9292
region: RegionOne
state: present
- name: Delete the public endpoint for the nova service
os_keystone_endpoint:
cloud: mycloud
service: nova
endpoint_interface: public
region: RegionOne
state: absent
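# Illustrative (assumed) example using only the options documented above;
# updating an existing endpoint re-uses the same identifying parameters and
# only changes I(url).
- name: Update the public endpoint URL for the glance service
  os_keystone_endpoint:
    cloud: mycloud
    service: glance
    endpoint_interface: public
    url: http://controller:9292/v2
    region: RegionOne
    state: present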
'''
RETURN = '''
endpoint:
description: Dictionary describing the endpoint.
returned: On success when I(state) is C(present)
type: complex
contains:
id:
description: Endpoint ID.
type: str
sample: 3292f020780b4d5baf27ff7e1d224c44
region:
description: Region Name.
type: str
sample: RegionOne
service_id:
description: Service ID.
type: str
sample: b91f1318f735494a825a55388ee118f3
interface:
description: Endpoint Interface.
type: str
sample: public
url:
description: Service URL.
type: str
sample: http://controller:9292
enabled:
description: Service status.
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, endpoint):
if endpoint.enabled != module.params['enabled']:
return True
if endpoint.url != module.params['url']:
return True
return False
def _system_state_change(module, endpoint):
state = module.params['state']
if state == 'absent' and endpoint:
return True
if state == 'present':
if endpoint is None:
return True
return _needs_update(module, endpoint)
return False
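# _needs_update() reports whether an existing endpoint differs from the
# requested 'enabled' or 'url' values; _system_state_change() uses it to
# predict, for check mode, whether a create, update or delete would occur.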
def main():
argument_spec = openstack_full_argument_spec(
service=dict(type='str', required=True),
endpoint_interface=dict(type='str', required=True, choices=['admin', 'public', 'internal']),
url=dict(type='str', required=True),
region=dict(type='str'),
enabled=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
service_name_or_id = module.params['service']
interface = module.params['endpoint_interface']
url = module.params['url']
region = module.params['region']
enabled = module.params['enabled']
state = module.params['state']
sdk, cloud = openstack_cloud_from_module(module)
try:
service = cloud.get_service(service_name_or_id)
if service is None:
module.fail_json(msg='Service %s does not exist' % service_name_or_id)
filters = dict(service_id=service.id, interface=interface)
if region is not None:
filters['region'] = region
endpoints = cloud.search_endpoints(filters=filters)
if len(endpoints) > 1:
module.fail_json(msg='Service %s, interface %s and region %s are '
'not unique' %
(service_name_or_id, interface, region))
elif len(endpoints) == 1:
endpoint = endpoints[0]
else:
endpoint = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, endpoint))
if state == 'present':
if endpoint is None:
result = cloud.create_endpoint(service_name_or_id=service,
url=url, interface=interface,
region=region, enabled=enabled)
endpoint = result[0]
changed = True
else:
if _needs_update(module, endpoint):
endpoint = cloud.update_endpoint(
endpoint.id, url=url, enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, endpoint=endpoint)
elif state == 'absent':
if endpoint is None:
changed = False
else:
cloud.delete_endpoint(endpoint.id)
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
|
import json
from django.core import mail
import six
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.amo.tests import (
APITestClient, TestCase, addon_factory, reverse_ns, user_factory)
class AddonAbuseViewSetTestBase(object):
client_class = APITestClient
def setUp(self):
self.url = reverse_ns('abusereportaddon-list')
def check_reporter(self, report):
raise NotImplementedError
def check_report(self, report, text):
assert six.text_type(report) == text
assert report.ip_address == '123.45.67.89'
assert mail.outbox[0].subject == text
self.check_reporter(report)
def test_report_addon_by_id(self):
addon = addon_factory()
response = self.client.post(
self.url,
data={'addon': six.text_type(addon.id), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(addon_id=addon.id).exists()
report = AbuseReport.objects.get(addon_id=addon.id)
self.check_report(report,
u'[Extension] Abuse Report for %s' % addon.name)
def test_report_addon_by_slug(self):
addon = addon_factory()
response = self.client.post(
self.url,
data={'addon': addon.slug, 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(addon_id=addon.id).exists()
report = AbuseReport.objects.get(addon_id=addon.id)
self.check_report(report,
u'[Extension] Abuse Report for %s' % addon.name)
def test_report_addon_by_guid(self):
addon = addon_factory(guid='@badman')
response = self.client.post(
self.url,
data={'addon': addon.guid, 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(addon_id=addon.id).exists()
report = AbuseReport.objects.get(addon_id=addon.id)
self.check_report(report,
u'[Extension] Abuse Report for %s' % addon.name)
def test_report_addon_guid_not_on_amo(self):
guid = '@mysteryman'
response = self.client.post(
self.url,
data={'addon': guid, 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(guid=guid).exists()
report = AbuseReport.objects.get(guid=guid)
self.check_report(report,
u'[Addon] Abuse Report for %s' % guid)
def test_report_addon_invalid_identifier(self):
response = self.client.post(
self.url,
data={'addon': 'randomnotguid', 'message': 'abuse!'})
assert response.status_code == 404
def test_addon_not_public(self):
addon = addon_factory(status=amo.STATUS_NULL)
response = self.client.post(
self.url,
data={'addon': six.text_type(addon.id), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(addon_id=addon.id).exists()
report = AbuseReport.objects.get(addon_id=addon.id)
self.check_report(report,
u'[Extension] Abuse Report for %s' % addon.name)
def test_no_addon_fails(self):
response = self.client.post(
self.url,
data={'message': 'abuse!'})
assert response.status_code == 400
assert json.loads(response.content) == {
'detail': 'Need an addon parameter'}
def test_message_required_empty(self):
addon = addon_factory()
response = self.client.post(
self.url,
data={'addon': six.text_type(addon.id),
'message': ''})
assert response.status_code == 400
assert json.loads(response.content) == {
'detail': 'Abuse reports need a message'}
def test_message_required_missing(self):
addon = addon_factory()
response = self.client.post(
self.url,
data={'addon': six.text_type(addon.id)})
assert response.status_code == 400
assert json.loads(response.content) == {
'detail': 'Abuse reports need a message'}
def test_throttle(self):
addon = addon_factory()
for x in range(20):
response = self.client.post(
self.url,
data={'addon': six.text_type(addon.id), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201, x
response = self.client.post(
self.url,
data={'addon': six.text_type(addon.id), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 429
class TestAddonAbuseViewSetLoggedOut(AddonAbuseViewSetTestBase, TestCase):
def check_reporter(self, report):
assert not report.reporter
class TestAddonAbuseViewSetLoggedIn(AddonAbuseViewSetTestBase, TestCase):
def setUp(self):
super(TestAddonAbuseViewSetLoggedIn, self).setUp()
self.user = user_factory()
self.client.login_api(self.user)
def check_reporter(self, report):
assert report.reporter == self.user
class UserAbuseViewSetTestBase(object):
client_class = APITestClient
def setUp(self):
self.url = reverse_ns('abusereportuser-list')
def check_reporter(self, report):
raise NotImplementedError
def check_report(self, report, text):
assert six.text_type(report) == text
assert report.ip_address == '123.45.67.89'
assert mail.outbox[0].subject == text
self.check_reporter(report)
def test_report_user_id(self):
user = user_factory()
response = self.client.post(
self.url,
data={'user': six.text_type(user.id), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(user_id=user.id).exists()
report = AbuseReport.objects.get(user_id=user.id)
self.check_report(report,
u'[User] Abuse Report for %s' % user.name)
def test_report_user_username(self):
user = user_factory()
response = self.client.post(
self.url,
data={'user': six.text_type(user.username), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201
assert AbuseReport.objects.filter(user_id=user.id).exists()
report = AbuseReport.objects.get(user_id=user.id)
self.check_report(report,
u'[User] Abuse Report for %s' % user.name)
def test_no_user_fails(self):
response = self.client.post(
self.url,
data={'message': 'abuse!'})
assert response.status_code == 400
assert json.loads(response.content) == {
'detail': 'Need a user parameter'}
def test_message_required_empty(self):
user = user_factory()
response = self.client.post(
self.url,
data={'user': six.text_type(user.username), 'message': ''})
assert response.status_code == 400
assert json.loads(response.content) == {
'detail': 'Abuse reports need a message'}
def test_message_required_missing(self):
user = user_factory()
response = self.client.post(
self.url,
data={'user': six.text_type(user.username)})
assert response.status_code == 400
assert json.loads(response.content) == {
'detail': 'Abuse reports need a message'}
def test_throttle(self):
user = user_factory()
for x in range(20):
response = self.client.post(
self.url,
data={'user': six.text_type(
user.username), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 201, x
response = self.client.post(
self.url,
data={'user': six.text_type(user.username), 'message': 'abuse!'},
REMOTE_ADDR='123.45.67.89')
assert response.status_code == 429
class TestUserAbuseViewSetLoggedOut(UserAbuseViewSetTestBase, TestCase):
def check_reporter(self, report):
assert not report.reporter
class TestUserAbuseViewSetLoggedIn(UserAbuseViewSetTestBase, TestCase):
def setUp(self):
super(TestUserAbuseViewSetLoggedIn, self).setUp()
self.user = user_factory()
self.client.login_api(self.user)
def check_reporter(self, report):
assert report.reporter == self.user
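# The *ViewSetTestBase mixins above intentionally leave check_reporter()
# unimplemented; each LoggedOut/LoggedIn subclass pairs a mixin with TestCase
# and asserts, respectively, that no reporter or the logged-in user is
# attached to the stored AbuseReport.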
|
|
"""
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
:codeauthor: Alexandru Bleotu (alexandru.bleotu@morganstanley.com)
salt.utils.schema
~~~~~~~~~~~~~~~~~
Object Oriented Configuration - JSON Schema compatible generator
This code was inspired by `jsl`__, "A Python DSL for describing JSON
schemas".
.. __: https://jsl.readthedocs.io/
A configuration document or configuration document section is defined using
py:class:`Schema`; the configuration items are defined by any of the
subclasses of py:class:`BaseSchemaItem`, set as attributes of a subclass of
the py:class:`Schema` class.
A more complex configuration document (containing a definitions section)
is defined using py:class:`DefinitionsSchema`. This type of
schema supports having complex configuration items as attributes (defined by
extending py:class:`ComplexSchemaItem`). These items have other
configuration items (complex or not) as attributes, allowing more complex
JSON data structures to be verified.
As an example:
.. code-block:: python
class HostConfig(Schema):
title = 'Host Configuration'
description = 'This is the host configuration'
host = StringItem(
'Host',
'The looong host description',
default=None,
minimum=1
)
port = NumberItem(
description='The port number',
default=80,
required=False,
minimum=0,
inclusiveMinimum=False,
maximum=65535
)
The serialized version of the above configuration definition is:
.. code-block:: python
>>> print(HostConfig.serialize())
OrderedDict([
('$schema', 'http://json-schema.org/draft-04/schema#'),
('title', 'Host Configuration'),
('description', 'This is the host configuration'),
('type', 'object'),
('properties', OrderedDict([
('host', {'minimum': 1,
'type': 'string',
'description': 'The looong host description',
'title': 'Host'}),
('port', {'description': 'The port number',
'default': 80,
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'})
])),
('required', ['host']),
('x-ordering', ['host', 'port']),
('additionalProperties', False)]
)
>>> print(salt.utils.json.dumps(HostConfig.serialize(), indent=2))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Host Configuration",
"description": "This is the host configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
}
},
"required": [
"host"
],
"x-ordering": [
"host",
"port"
],
"additionalProperties": false
}
The serialized version of the configuration block can be used to validate a
configuration dictionary using the `python jsonschema library`__.
.. __: https://pypi.python.org/pypi/jsonschema
.. code-block:: python
>>> import jsonschema
>>> jsonschema.validate({'host': 'localhost', 'port': 80}, HostConfig.serialize())
>>> jsonschema.validate({'host': 'localhost', 'port': -1}, HostConfig.serialize())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 478, in validate
cls(schema, *args, **kwargs).validate(instance)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 123, in validate
raise error
jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0
Failed validating 'minimum' in schema['properties']['port']:
{'default': 80,
'description': 'The port number',
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'}
On instance['port']:
-1
>>>
A configuration document can even be split into configuration sections. Let's reuse the above
``HostConfig`` class and include it in a configuration block:
.. code-block:: python
class LoggingConfig(Schema):
title = 'Logging Configuration'
description = 'This is the logging configuration'
log_level = StringItem(
'Logging Level',
'The logging level',
default='debug',
minimum=1
)
class MyConfig(Schema):
title = 'My Config'
description = 'This is my configuration'
hostconfig = HostConfig()
logconfig = LoggingConfig()
The JSON Schema string version of the above is:
.. code-block:: python
>>> print(salt.utils.json.dumps(MyConfig.serialize(), indent=4))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "My Config",
"description": "This my configuration",
"type": "object",
"properties": {
"hostconfig": {
"id": "https://non-existing.saltstack.com/schemas/hostconfig.json#",
"title": "Host Configuration",
"description": "This is the host configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
}
},
"required": [
"host"
],
"x-ordering": [
"host",
"port"
],
"additionalProperties": false
},
"logconfig": {
"id": "https://non-existing.saltstack.com/schemas/logconfig.json#",
"title": "Logging Configuration",
"description": "This is the logging configuration",
"type": "object",
"properties": {
"log_level": {
"default": "debug",
"minimum": 1,
"type": "string",
"description": "The logging level",
"title": "Logging Level"
}
},
"required": [
"log_level"
],
"x-ordering": [
"log_level"
],
"additionalProperties": false
}
},
"additionalProperties": false
}
>>> import jsonschema
>>> jsonschema.validate(
{'hostconfig': {'host': 'localhost', 'port': 80},
'logconfig': {'log_level': 'debug'}},
MyConfig.serialize())
>>> jsonschema.validate(
{'hostconfig': {'host': 'localhost', 'port': -1},
'logconfig': {'log_level': 'debug'}},
MyConfig.serialize())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 478, in validate
cls(schema, *args, **kwargs).validate(instance)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 123, in validate
raise error
jsonschema.exceptions.ValidationError: -1 is less than the minimum of 0
Failed validating 'minimum' in schema['properties']['hostconfig']['properties']['port']:
{'default': 80,
'description': 'The port number',
'inclusiveMinimum': False,
'maximum': 65535,
'minimum': 0,
'type': 'number'}
On instance['hostconfig']['port']:
-1
>>>
If however, you just want to use the configuration blocks for readability
and do not desire the nested dictionaries serialization, you can pass
``flatten=True`` when defining a configuration section as a configuration
subclass attribute:
.. code-block:: python
class MyConfig(Schema):
title = 'My Config'
description = 'This is my configuration'
hostconfig = HostConfig(flatten=True)
logconfig = LoggingConfig(flatten=True)
The JSON Schema string version of the above is:
.. code-block:: python
>>> print(salt.utils.json.dumps(MyConfig.serialize(), indent=4))
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "My Config",
"description": "This my configuration",
"type": "object",
"properties": {
"host": {
"minimum": 1,
"type": "string",
"description": "The looong host description",
"title": "Host"
},
"port": {
"description": "The port number",
"default": 80,
"inclusiveMinimum": false,
"maximum": 65535,
"minimum": 0,
"type": "number"
},
"log_level": {
"default": "debug",
"minimum": 1,
"type": "string",
"description": "The logging level",
"title": "Logging Level"
}
},
"x-ordering": [
"host",
"port",
"log_level"
],
"additionalProperties": false
}
"""
import inspect
import textwrap
import salt.utils.args
# import salt.utils.yaml
from salt.utils.odict import OrderedDict
BASE_SCHEMA_URL = "https://non-existing.saltstack.com/schemas"
RENDER_COMMENT_YAML_MAX_LINE_LENGTH = 80
class NullSentinel:
"""
A class whose instance represents a null value.
Allows specifying fields with a default value of null.
"""
def __bool__(self):
return False
__nonzero__ = __bool__
Null = NullSentinel()
"""
A special value that can be used to set the default value
of a field to null.
"""
# make sure nobody creates another Null value
def _failing_new(*args, **kwargs):
raise TypeError("Can't create another NullSentinel instance")
NullSentinel.__new__ = staticmethod(_failing_new)
del _failing_new
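# A short illustrative sketch of how ``Null`` is meant to be used (this sketch
# assumes the item classes defined further below, e.g. ``StringItem``):
#
#     token = StringItem(title='Token', default=Null)
#     token.serialize()
#     # -> {'type': 'string', 'title': 'Token', 'default': None}
#
# ``Null`` is falsy but distinct from ``None``, so an item can explicitly
# declare a serialized default of ``null``.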
class SchemaMeta(type):
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
# Mark the instance as a configuration document/section
attrs["__config__"] = True
attrs["__flatten__"] = False
attrs["__config_name__"] = None
# Let's record the configuration items/sections
items = {}
sections = {}
order = []
# items from parent classes
for base in reversed(bases):
if hasattr(base, "_items"):
items.update(base._items)
if hasattr(base, "_sections"):
sections.update(base._sections)
if hasattr(base, "_order"):
order.extend(base._order)
# Iterate through attrs to discover items/config sections
for key, value in attrs.items():
entry_name = None
if not hasattr(value, "__item__") and not hasattr(value, "__config__"):
continue
if hasattr(value, "__item__"):
# the value is an item instance
if hasattr(value, "title") and value.title is None:
# It's an item instance without a title, make the title
# its name
value.title = key
entry_name = value.__item_name__ or key
items[entry_name] = value
if hasattr(value, "__config__"):
entry_name = value.__config_name__ or key
sections[entry_name] = value
order.append(entry_name)
attrs["_order"] = order
attrs["_items"] = items
attrs["_sections"] = sections
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, flatten=False, allow_additional_items=False, **kwargs):
instance = object.__new__(cls)
instance.__config_name__ = kwargs.pop("name", None)
if flatten is True:
# This configuration block is to be treated as a part of the
# configuration for which it was defined as an attribute, not as
# its own sub configuration
instance.__flatten__ = True
if allow_additional_items is True:
# The configuration block also accepts configuration items which
# are not defined on the class; jsonschema validation will not
# fail on additional items
instance.__allow_additional_items__ = True
instance.__init__(**kwargs)
return instance
class BaseSchemaItemMeta(type):
"""
Config item metaclass to "tag" the class as a configuration item
"""
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
# Register the class as an item class
attrs["__item__"] = True
attrs["__item_name__"] = None
# Instantiate an empty list to store the config item attribute names
attributes = []
for base in reversed(bases):
try:
base_attributes = getattr(base, "_attributes", [])
if base_attributes:
attributes.extend(base_attributes)
# Extend the attributes with the base argspec argument names
# but skip "self"
for argname in salt.utils.args.get_function_argspec(base.__init__).args:
if argname == "self" or argname in attributes:
continue
if argname == "name":
continue
attributes.append(argname)
except TypeError:
# On the base object type, __init__ is just a wrapper which
# triggers a TypeError when we're trying to find out its
# argspec
continue
attrs["_attributes"] = attributes
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
# Create the instance class
instance = object.__new__(cls)
if args:
raise RuntimeError(
"Please pass all arguments as named arguments. Un-named "
"arguments are not supported"
)
for key in kwargs.copy():
# Store the kwarg keys as the instance attributes for the
# serialization step
if key == "name":
# This is the item name to override the class attribute name
instance.__item_name__ = kwargs.pop(key)
continue
if key not in instance._attributes:
instance._attributes.append(key)
# Init the class
instance.__init__(*args, **kwargs)
# Validate the instance after initialization
for base in reversed(inspect.getmro(cls)):
validate_attributes = getattr(base, "__validate_attributes__", None)
if validate_attributes:
if (
instance.__validate_attributes__.__func__.__code__
is not validate_attributes.__code__
):
# The method was overridden, run base.__validate_attributes__ function
base.__validate_attributes__(instance)
# Finally, run the instance __validate_attributes__ function
instance.__validate_attributes__()
# Return the initialized class
return instance
class Schema(metaclass=SchemaMeta):
"""
Configuration definition class
"""
# Define some class level attributes to make PyLint happier
title = None
description = None
_items = _sections = _order = None
__flatten__ = False
__allow_additional_items__ = False
@classmethod
def serialize(cls, id_=None):
# The order matters
serialized = OrderedDict()
if id_ is not None:
# This is meant as a configuration section, sub json schema
serialized["id"] = "{}/{}.json#".format(BASE_SCHEMA_URL, id_)
else:
# Main configuration block, json schema
serialized["$schema"] = "http://json-schema.org/draft-04/schema#"
if cls.title is not None:
serialized["title"] = cls.title
if cls.description is not None:
if cls.description == cls.__doc__:
serialized["description"] = textwrap.dedent(cls.description).strip()
else:
serialized["description"] = cls.description
required = []
ordering = []
serialized["type"] = "object"
properties = OrderedDict()
cls.after_items_update = []
for name in cls._order: # pylint: disable=E1133
skip_order = False
item_name = None
if name in cls._sections: # pylint: disable=E1135
section = cls._sections[name]
serialized_section = section.serialize(
None if section.__flatten__ is True else name
)
if section.__flatten__ is True:
# Flatten the configuration section into the parent
# configuration
properties.update(serialized_section["properties"])
if "x-ordering" in serialized_section:
ordering.extend(serialized_section["x-ordering"])
if "required" in serialized_section:
required.extend(serialized_section["required"])
if hasattr(section, "after_items_update"):
cls.after_items_update.extend(section.after_items_update)
skip_order = True
else:
# Store it as a configuration section
properties[name] = serialized_section
if name in cls._items: # pylint: disable=E1135
config = cls._items[name]
item_name = config.__item_name__ or name
# Handle the configuration items defined in the class instance
if config.__flatten__ is True:
serialized_config = config.serialize()
cls.after_items_update.append(serialized_config)
skip_order = True
else:
properties[item_name] = config.serialize()
if config.required:
# If it's a required item, add it to the required list
required.append(item_name)
if skip_order is False:
# Store the order of the item
if item_name is not None:
if item_name not in ordering:
ordering.append(item_name)
else:
if name not in ordering:
ordering.append(name)
if properties:
serialized["properties"] = properties
# Update the serialized object with any items to include after properties.
# Do not overwrite properties already existing in the serialized dict.
if cls.after_items_update:
after_items_update = {}
for entry in cls.after_items_update:
for name, data in entry.items():
if name in after_items_update:
if isinstance(after_items_update[name], list):
after_items_update[name].extend(data)
else:
after_items_update[name] = data
if after_items_update:
after_items_update.update(serialized)
serialized = after_items_update
if required:
# Only include required if not empty
serialized["required"] = required
if ordering:
# Only include ordering if not empty
serialized["x-ordering"] = ordering
serialized["additionalProperties"] = cls.__allow_additional_items__
return serialized
@classmethod
def defaults(cls):
serialized = cls.serialize()
defaults = {}
for name, details in serialized["properties"].items():
if "default" in details:
defaults[name] = details["default"]
continue
if "properties" in details:
for sname, sdetails in details["properties"].items():
if "default" in sdetails:
defaults.setdefault(name, {})[sname] = sdetails["default"]
continue
return defaults
@classmethod
def as_requirements_item(cls):
serialized_schema = cls.serialize()
required = serialized_schema.get("required", [])
for name in serialized_schema["properties"]:
if name not in required:
required.append(name)
return RequirementsItem(requirements=required)
# @classmethod
# def render_as_rst(cls):
# '''
# Render the configuration block as a restructured text string
# '''
# # TODO: Implement RST rendering
# raise NotImplementedError
# @classmethod
# def render_as_yaml(cls):
# '''
# Render the configuration block as a parseable YAML string including comments
# '''
# # TODO: Implement YAML rendering
# raise NotImplementedError
class SchemaItem(metaclass=BaseSchemaItemMeta):
"""
Base configuration items class.
All configurations must subclass it
"""
# Define some class level attributes to make PyLint happier
__type__ = None
__format__ = None
_attributes = None
__flatten__ = False
__serialize_attr_aliases__ = None
required = False
def __init__(self, required=None, **extra):
"""
:param required: If the configuration item is required. Defaults to ``False``.
"""
if required is not None:
self.required = required
self.extra = extra
def __validate_attributes__(self):
"""
Run any validation checks you need on the instance attributes.
ATTENTION:
Don't call the parent class when overriding this
method because it will just duplicate the executions. This class's
metaclass will take care of that.
"""
if self.required not in (True, False):
raise RuntimeError("'required' can only be True/False")
def _get_argname_value(self, argname):
"""
Return the argname value looking up on all possible attributes
"""
# Let's see if there's a private function to get the value
argvalue = getattr(self, "__get_{}__".format(argname), None)
if argvalue is not None and callable(argvalue):
argvalue = argvalue() # pylint: disable=not-callable
if argvalue is None:
# Let's see if the value is defined as a public class variable
argvalue = getattr(self, argname, None)
if argvalue is None:
# Let's see if it's defined as a private class variable
argvalue = getattr(self, "__{}__".format(argname), None)
if argvalue is None:
# Let's look for it in the extra dictionary
argvalue = self.extra.get(argname, None)
return argvalue
def serialize(self):
"""
Return a serializable form of the config instance
"""
raise NotImplementedError
class BaseSchemaItem(SchemaItem):
"""
Base configuration items class.
All configurations must subclass it
"""
# Let's define description as a class attribute, this will allow a custom configuration
# item to do something like:
# class MyCustomConfig(StringItem):
# '''
# This is my custom config, blah, blah, blah
# '''
# description = __doc__
#
description = None
# The same for all other base arguments
title = None
default = None
enum = None
enumNames = None
def __init__(
self,
title=None,
description=None,
default=None,
enum=None,
enumNames=None,
**kwargs
):
"""
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list, tuple, or set of valid choices.
"""
if title is not None:
self.title = title
if description is not None:
self.description = description
if default is not None:
self.default = default
if enum is not None:
self.enum = enum
if enumNames is not None:
self.enumNames = enumNames
super().__init__(**kwargs)
def __validate_attributes__(self):
if self.enum is not None:
if not isinstance(self.enum, (list, tuple, set)):
raise RuntimeError(
"Only the 'list', 'tuple' and 'set' python types can be used "
"to define 'enum'"
)
if not isinstance(self.enum, list):
self.enum = list(self.enum)
if self.enumNames is not None:
if not isinstance(self.enumNames, (list, tuple, set)):
raise RuntimeError(
"Only the 'list', 'tuple' and 'set' python types can be used "
"to define 'enumNames'"
)
if len(self.enum) != len(self.enumNames):
raise RuntimeError(
"The size of 'enumNames' must match the size of 'enum'"
)
if not isinstance(self.enumNames, list):
self.enumNames = list(self.enumNames)
def serialize(self):
"""
Return a serializable form of the config instance
"""
serialized = {"type": self.__type__}
for argname in self._attributes:
if argname == "required":
# This is handled elsewhere
continue
argvalue = self._get_argname_value(argname)
if argvalue is not None:
if argvalue is Null:
argvalue = None
# None values are not meant to be included in the
# serialization, since this is not None...
if (
self.__serialize_attr_aliases__
and argname in self.__serialize_attr_aliases__
):
argname = self.__serialize_attr_aliases__[argname]
serialized[argname] = argvalue
return serialized
def __get_description__(self):
if self.description is not None:
if self.description == self.__doc__:
return textwrap.dedent(self.description).strip()
return self.description
# def render_as_rst(self, name):
# '''
# Render the configuration item as a restructured text string
# '''
# # TODO: Implement YAML rendering
# raise NotImplementedError
# def render_as_yaml(self, name):
# '''
# Render the configuration item as a parseable YAML string including comments
# '''
# # TODO: Include the item rules in the output, minimum, maximum, etc...
# output = '# ----- '
# output += self.title
# output += ' '
# output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 2)
# output += '>\n'
# if self.description:
# output += '\n'.join(textwrap.wrap(self.description,
# width=RENDER_COMMENT_YAML_MAX_LINE_LENGTH,
# initial_indent='# '))
# output += '\n'
# yamled_default_value = salt.utils.yaml.safe_dump(self.default, default_flow_style=False).split('\n...', 1)[0]
# output += '# Default: {0}\n'.format(yamled_default_value)
# output += '#{0}: {1}\n'.format(name, yamled_default_value)
# output += '# <---- '
# output += self.title
# output += ' '
# output += '-' * (RENDER_COMMENT_YAML_MAX_LINE_LENGTH - 7 - len(self.title) - 1)
# return output + '\n'
class NullItem(BaseSchemaItem):
__type__ = "null"
class BooleanItem(BaseSchemaItem):
__type__ = "boolean"
class StringItem(BaseSchemaItem):
"""
A string configuration field
"""
__type__ = "string"
__serialize_attr_aliases__ = {"min_length": "minLength", "max_length": "maxLength"}
format = None
pattern = None
min_length = None
max_length = None
def __init__(
self,
format=None, # pylint: disable=redefined-builtin
pattern=None,
min_length=None,
max_length=None,
**kwargs
):
"""
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list, tuple, or set of valid choices.
:param format:
A semantic format of the string (for example, ``"date-time"``, ``"email"``, or ``"uri"``).
:param pattern:
A regular expression (ECMA 262) that a string value must match.
:param min_length:
The minimum length
:param max_length:
The maximum length
"""
if format is not None: # pylint: disable=redefined-builtin
self.format = format
if pattern is not None:
self.pattern = pattern
if min_length is not None:
self.min_length = min_length
if max_length is not None:
self.max_length = max_length
super().__init__(**kwargs)
def __validate_attributes__(self):
if self.format is None and self.__format__ is not None:
self.format = self.__format__
class EMailItem(StringItem):
"""
An internet email address, see `RFC 5322, section 3.4.1`__.
.. __: http://tools.ietf.org/html/rfc5322
"""
__format__ = "email"
class IPv4Item(StringItem):
"""
An IPv4 address configuration field, according to dotted-quad ABNF syntax as defined in
`RFC 2673, section 3.2`__.
.. __: http://tools.ietf.org/html/rfc2673
"""
__format__ = "ipv4"
class IPv6Item(StringItem):
"""
An IPv6 address configuration field, as defined in `RFC 2373, section 2.2`__.
.. __: http://tools.ietf.org/html/rfc2373
"""
__format__ = "ipv6"
class HostnameItem(StringItem):
"""
An Internet host name configuration field, see `RFC 1034, section 3.1`__.
.. __: http://tools.ietf.org/html/rfc1034
"""
__format__ = "hostname"
class DateTimeItem(StringItem):
"""
An ISO 8601 formatted date-time configuration field, as defined by `RFC 3339, section 5.6`__.
.. __: http://tools.ietf.org/html/rfc3339
"""
__format__ = "date-time"
class UriItem(StringItem):
"""
A universal resource identifier (URI) configuration field, according to `RFC3986`__.
.. __: http://tools.ietf.org/html/rfc3986
"""
__format__ = "uri"
class SecretItem(StringItem):
"""
A string configuration field containing a secret, for example, passwords, API keys, etc
"""
__format__ = "secret"
class NumberItem(BaseSchemaItem):
__type__ = "number"
__serialize_attr_aliases__ = {
"multiple_of": "multipleOf",
"exclusive_minimum": "exclusiveMinimum",
"exclusive_maximum": "exclusiveMaximum",
}
multiple_of = None
minimum = None
exclusive_minimum = None
maximum = None
exclusive_maximum = None
def __init__(
self,
multiple_of=None,
minimum=None,
exclusive_minimum=None,
maximum=None,
exclusive_maximum=None,
**kwargs
):
"""
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list, tuple, or set of valid choices.
:param multiple_of:
A value must be a multiple of this factor.
:param minimum:
The minimum allowed value
:param exclusive_minimum:
Whether the minimum is excluded, i.e. the value must be strictly greater than the minimum
:param maximum:
The maximum allowed value
:param exclusive_maximum:
Whether the maximum is excluded, i.e. the value must be strictly less than the maximum
"""
if multiple_of is not None:
self.multiple_of = multiple_of
if minimum is not None:
self.minimum = minimum
if exclusive_minimum is not None:
self.exclusive_minimum = exclusive_minimum
if maximum is not None:
self.maximum = maximum
if exclusive_maximum is not None:
self.exclusive_maximum = exclusive_maximum
super().__init__(**kwargs)
class IntegerItem(NumberItem):
__type__ = "integer"
class ArrayItem(BaseSchemaItem):
__type__ = "array"
__serialize_attr_aliases__ = {
"min_items": "minItems",
"max_items": "maxItems",
"unique_items": "uniqueItems",
"additional_items": "additionalItems",
}
items = None
min_items = None
max_items = None
unique_items = None
additional_items = None
def __init__(
self,
items=None,
min_items=None,
max_items=None,
unique_items=None,
additional_items=None,
**kwargs
):
"""
:param required:
If the configuration item is required. Defaults to ``False``.
:param title:
A short explanation about the purpose of the data described by this item.
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list, tuple, or set of valid choices.
:param items:
Either of the following:
* :class:`BaseSchemaItem` -- all items of the array must match the field schema;
* a list or a tuple of :class:`fields <.BaseSchemaItem>` -- all items of the array must be
valid according to the field schema at the corresponding index (tuple typing);
:param min_items:
Minimum length of the array
:param max_items:
Maximum length of the array
:param unique_items:
Whether all the values in the array must be distinct.
:param additional_items:
If the value of ``items`` is a list or a tuple, and the array length is larger than
the number of fields in ``items``, then the additional items are described
by the :class:`.BaseSchemaItem` passed using this argument.
:type additional_items: bool or :class:`.BaseSchemaItem`
"""
if items is not None:
self.items = items
if min_items is not None:
self.min_items = min_items
if max_items is not None:
self.max_items = max_items
if unique_items is not None:
self.unique_items = unique_items
if additional_items is not None:
self.additional_items = additional_items
super().__init__(**kwargs)
def __validate_attributes__(self):
if not self.items and not self.additional_items:
raise RuntimeError("One of items or additional_items must be passed.")
if self.items is not None:
if isinstance(self.items, (list, tuple)):
for item in self.items:
if not isinstance(item, (Schema, SchemaItem)):
raise RuntimeError(
"All items passed in the item argument tuple/list must be "
"a subclass of Schema, SchemaItem or BaseSchemaItem, "
"not {}".format(type(item))
)
elif not isinstance(self.items, (Schema, SchemaItem)):
raise RuntimeError(
"The items argument passed must be a subclass of "
"Schema, SchemaItem or BaseSchemaItem, not "
"{}".format(type(self.items))
)
def __get_items__(self):
if isinstance(self.items, (Schema, SchemaItem)):
# This is either a Schema or a SchemaItem; return it in its
# serialized form
return self.items.serialize()
if isinstance(self.items, (tuple, list)):
items = []
for item in self.items:
items.append(item.serialize())
return items
class DictItem(BaseSchemaItem):
__type__ = "object"
__serialize_attr_aliases__ = {
"min_properties": "minProperties",
"max_properties": "maxProperties",
"pattern_properties": "patternProperties",
"additional_properties": "additionalProperties",
}
properties = None
pattern_properties = None
additional_properties = None
min_properties = None
max_properties = None
def __init__(
self,
properties=None,
pattern_properties=None,
additional_properties=None,
min_properties=None,
max_properties=None,
**kwargs
):
"""
:param required:
If the configuration item is required. Defaults to ``False``.
:type required:
boolean
:param title:
A short explanation about the purpose of the data described by this item.
:type title:
str
:param description:
A detailed explanation about the purpose of the data described by this item.
:param default:
The default value for this configuration item. May be :data:`.Null` (a special value
to set the default value to null).
:param enum:
A list, tuple, or set of valid choices.
:param properties:
A dictionary containing fields
:param pattern_properties:
A dictionary whose keys are regular expressions (ECMA 262).
Properties match against these regular expressions, and for any that match,
the property is described by the corresponding field schema.
:type pattern_properties: dict[str -> :class:`.Schema` or
:class:`.SchemaItem` or :class:`.BaseSchemaItem`]
:param additional_properties:
Describes properties that are not described by the ``properties`` or ``pattern_properties``.
:type additional_properties: bool or :class:`.Schema` or :class:`.SchemaItem`
or :class:`.BaseSchemaItem`
:param min_properties:
A minimum number of properties.
:type min_properties: int
:param max_properties:
A maximum number of properties
:type max_properties: int
"""
if properties is not None:
self.properties = properties
if pattern_properties is not None:
self.pattern_properties = pattern_properties
if additional_properties is not None:
self.additional_properties = additional_properties
if min_properties is not None:
self.min_properties = min_properties
if max_properties is not None:
self.max_properties = max_properties
super().__init__(**kwargs)
def __validate_attributes__(self):
if (
not self.properties
and not self.pattern_properties
and not self.additional_properties
):
raise RuntimeError(
"One of properties, pattern_properties or additional_properties must be"
" passed"
)
if self.properties is not None:
if not isinstance(self.properties, (Schema, dict)):
raise RuntimeError(
"The passed properties must be passed as a dict or "
" a Schema not '{}'".format(type(self.properties))
)
if not isinstance(self.properties, Schema):
for key, prop in self.properties.items():
if not isinstance(prop, (Schema, SchemaItem)):
raise RuntimeError(
"The passed property who's key is '{}' must be of type "
"Schema, SchemaItem or BaseSchemaItem, not "
"'{}'".format(key, type(prop))
)
if self.pattern_properties is not None:
if not isinstance(self.pattern_properties, dict):
raise RuntimeError(
"The passed pattern_properties must be passed as a dict "
"not '{}'".format(type(self.pattern_properties))
)
for key, prop in self.pattern_properties.items():
if not isinstance(prop, (Schema, SchemaItem)):
raise RuntimeError(
"The passed pattern_property who's key is '{}' must "
"be of type Schema, SchemaItem or BaseSchemaItem, "
"not '{}'".format(key, type(prop))
)
if self.additional_properties is not None:
if not isinstance(self.additional_properties, (bool, Schema, SchemaItem)):
raise RuntimeError(
"The passed additional_properties must be of type bool, "
"Schema, SchemaItem or BaseSchemaItem, not '{}'".format(
type(self.additional_properties)
)
)
def __get_properties__(self):
if self.properties is None:
return
if isinstance(self.properties, Schema):
return self.properties.serialize()["properties"]
properties = OrderedDict()
for key, prop in self.properties.items():
properties[key] = prop.serialize()
return properties
def __get_pattern_properties__(self):
if self.pattern_properties is None:
return
pattern_properties = OrderedDict()
for key, prop in self.pattern_properties.items():
pattern_properties[key] = prop.serialize()
return pattern_properties
def __get_additional_properties__(self):
if self.additional_properties is None:
return
if isinstance(self.additional_properties, bool):
return self.additional_properties
return self.additional_properties.serialize()
def __call__(self, flatten=False):
self.__flatten__ = flatten
return self
def serialize(self):
result = super().serialize()
required = []
if self.properties is not None:
if isinstance(self.properties, Schema):
serialized = self.properties.serialize()
if "required" in serialized:
required.extend(serialized["required"])
else:
for key, prop in self.properties.items():
if prop.required:
required.append(key)
if required:
result["required"] = required
return result
class RequirementsItem(SchemaItem):
__type__ = "object"
requirements = None
def __init__(self, requirements=None):
if requirements is not None:
self.requirements = requirements
super().__init__()
def __validate_attributes__(self):
if self.requirements is None:
raise RuntimeError("The passed requirements must not be empty")
if not isinstance(self.requirements, (SchemaItem, list, tuple, set)):
raise RuntimeError(
"The passed requirements must be passed as a list, tuple, "
"set SchemaItem or BaseSchemaItem, not '{}'".format(self.requirements)
)
if not isinstance(self.requirements, SchemaItem):
if not isinstance(self.requirements, list):
self.requirements = list(self.requirements)
for idx, item in enumerate(self.requirements):
if not isinstance(item, ((str,), SchemaItem)):
raise RuntimeError(
"The passed requirement at the {} index must be of type "
"str or SchemaItem, not '{}'".format(idx, type(item))
)
def serialize(self):
if isinstance(self.requirements, SchemaItem):
requirements = self.requirements.serialize()
else:
requirements = []
for requirement in self.requirements:
if isinstance(requirement, SchemaItem):
requirements.append(requirement.serialize())
continue
requirements.append(requirement)
return {"required": requirements}
class OneOfItem(SchemaItem):
__type__ = "oneOf"
items = None
def __init__(self, items=None, required=None):
if items is not None:
self.items = items
super().__init__(required=required)
def __validate_attributes__(self):
if not self.items:
raise RuntimeError("The passed items must not be empty")
if not isinstance(self.items, (list, tuple)):
raise RuntimeError(
"The passed items must be passed as a list/tuple not '{}'".format(
type(self.items)
)
)
for idx, item in enumerate(self.items):
if not isinstance(item, (Schema, SchemaItem)):
raise RuntimeError(
"The passed item at the {} index must be of type "
"Schema, SchemaItem or BaseSchemaItem, not "
"'{}'".format(idx, type(item))
)
if not isinstance(self.items, list):
self.items = list(self.items)
def __call__(self, flatten=False):
self.__flatten__ = flatten
return self
def serialize(self):
return {self.__type__: [i.serialize() for i in self.items]}
class AnyOfItem(OneOfItem):
__type__ = "anyOf"
class AllOfItem(OneOfItem):
__type__ = "allOf"
class NotItem(SchemaItem):
__type__ = "not"
item = None
def __init__(self, item=None):
if item is not None:
self.item = item
super().__init__()
def __validate_attributes__(self):
if not self.item:
raise RuntimeError("An item must be passed")
if not isinstance(self.item, (Schema, SchemaItem)):
raise RuntimeError(
"The passed item be of type Schema, SchemaItem or "
"BaseSchemaItem, not '{}'".format(type(self.item))
)
def serialize(self):
return {self.__type__: self.item.serialize()}
# ----- Custom Preconfigured Configs -------------------------------------------------------------------------------->
class PortItem(IntegerItem):
minimum = 0 # yes, 0 is a valid port number
maximum = 65535
# <---- Custom Preconfigured Configs ---------------------------------------------------------------------------------
class ComplexSchemaItem(BaseSchemaItem):
"""
.. versionadded:: 2016.11.0
Complex Schema Item
"""
# This attribute is populated by the metaclass, but pylint fails to see it
# and assumes it's not an iterable
_attributes = []
_definition_name = None
def __init__(self, definition_name=None, required=None):
super().__init__(required=required)
self.__type__ = "object"
self._definition_name = (
definition_name if definition_name else self.__class__.__name__
)
# Schema attributes might have been added as class attributes, so
# they must be added to the _attributes list as well
self._add_missing_schema_attributes()
def _add_missing_schema_attributes(self):
"""
Adds any missed schema attributes to the _attributes list
The attributes can be class attributes and they won't be
included in the _attributes list automatically
"""
for attr in [attr for attr in dir(self) if not attr.startswith("__")]:
attr_val = getattr(self, attr)
if (
isinstance(attr_val, SchemaItem)
and attr not in self._attributes
):
self._attributes.append(attr)
@property
def definition_name(self):
return self._definition_name
def serialize(self):
"""
The serialization of the complex item is a pointer to the item
definition
"""
return {"$ref": "#/definitions/{}".format(self.definition_name)}
def get_definition(self):
"""Returns the definition of the complex item"""
serialized = super().serialize()
# Adjust entries in the serialization
del serialized["definition_name"]
serialized["title"] = self.definition_name
properties = {}
required_attr_names = []
for attr_name in self._attributes:
attr = getattr(self, attr_name)
if attr and isinstance(attr, BaseSchemaItem):
# Remove the attribute entry added by the base serialization
del serialized[attr_name]
properties[attr_name] = attr.serialize()
properties[attr_name]["type"] = attr.__type__
if attr.required:
required_attr_names.append(attr_name)
if serialized.get("properties") is None:
serialized["properties"] = {}
serialized["properties"].update(properties)
# Assign the required array
if required_attr_names:
serialized["required"] = required_attr_names
return serialized
def get_complex_attrs(self):
"""Returns a dictionary of the complex attributes"""
return [
getattr(self, attr_name)
for attr_name in self._attributes
if isinstance(getattr(self, attr_name), ComplexSchemaItem)
]
class DefinitionsSchema(Schema):
"""
.. versionadded:: 2016.11.0
JSON schema class that supports ComplexSchemaItem objects by adding
a definitions section to the JSON schema, containing the item definitions.
All references to ComplexSchemaItems are built using schema inline
dereferencing.
"""
@classmethod
def serialize(cls, id_=None):
# Get the initial serialization
serialized = super().serialize(id_)
complex_items = []
# Augment the serializations with the definitions of all complex items
aux_items = cls._items.values()
# Convert dict_view object to a list on Python 3
aux_items = list(aux_items)
while aux_items:
item = aux_items.pop(0)
# Add complex attributes
if isinstance(item, ComplexSchemaItem):
complex_items.append(item)
aux_items.extend(item.get_complex_attrs())
# Handle container items
if isinstance(item, OneOfItem):
aux_items.extend(item.items)
elif isinstance(item, ArrayItem):
aux_items.append(item.items)
elif isinstance(item, DictItem):
if item.properties:
aux_items.extend(item.properties.values())
if item.additional_properties and isinstance(
item.additional_properties, SchemaItem
):
aux_items.append(item.additional_properties)
definitions = OrderedDict()
for config in complex_items:
if isinstance(config, ComplexSchemaItem):
definitions[config.definition_name] = config.get_definition()
serialized["definitions"] = definitions
return serialized
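# ----- Illustrative usage (sketch) ----------------------------------------------------------------------------------->
# A minimal, self-contained sketch of declaring and serializing a schema with
# the classes above; the class and attribute names are examples only.
def _example_schema_usage():
    """Build a tiny schema and return its serialized form and its defaults."""

    class WebServerConfig(Schema):
        title = "Web Server Configuration"
        description = "Example configuration block"

        listen_port = PortItem(
            title="Listen Port",
            description="TCP port the server binds to",
            default=8080,
            required=True,
        )
        hostname = HostnameItem(
            title="Hostname",
            description="Public host name",
        )

    # serialize() yields a JSON Schema (draft-04) compatible OrderedDict;
    # defaults() extracts the declared default values, e.g. {'listen_port': 8080}.
    return WebServerConfig.serialize(), WebServerConfig.defaults()
# <---- Illustrative usage (sketch) ------------------------------------------------------------------------------------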
|
|
import os
import ImagingReso._utilities as reso_util
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import ResoFit._utilities as fit_util
from ResoFit._utilities import load_txt_csv
class Experiment(object):
def __init__(self,
spectra_file: str,
data_file: str,
folder: str,
source_to_detector_m,
offset_us,
baseline: bool,
baseline_deg: int):
"""
Load experiment data from 'YOUR_FILE_NAME.csv' or 'YOUR_FILE_NAME.txt' files
:param spectra_file: data file that stores the time-of-flight spectrum
:param data_file: data file of the neutron transmission signal
:param folder: folder name (str) under the /ResoFit directory
"""
_file_path = os.path.abspath(os.path.dirname(__file__))
self.folder_path = os.path.join(_file_path, folder)
# Error for 'folder' existence
if os.path.isdir(self.folder_path) is False:
raise ValueError("Folder '{}' specified does not exist".format(folder))
self.spectra_file = spectra_file
self.data_file = data_file
self.source_to_detector_m = source_to_detector_m
self.offset_us = offset_us
self.spectra_path = os.path.join(self.folder_path, spectra_file)
self.data_path = os.path.join(self.folder_path, data_file)
# Load spectrum and data
self.spectra = load_txt_csv(self.spectra_path)
self.data = load_txt_csv(self.data_path)
self.t_unit = 'us'
self.img_start = 0
# Default slice parameter
self.slice_start = None
self.slice_end = None
# Class to store peak info
self.o_peak = None
# Error loading data and spectra
if type(self.spectra[0][0]) is str:
if self.spectra[0][0].islower() or self.spectra[0][0].isupper():
raise ValueError("Remove the axis descriptions in '{}' before loading ".format(spectra_file))
else:
raise ValueError("The file '{}' columns must be separated with 'tab' or ',' ".format(spectra_file))
if type(self.data[0][0]) is str:
if self.data[0][0].islower() or self.data[0][0].isupper():
raise ValueError("Remove the axis descriptions in '{}' before loading ".format(data_file))
else:
raise ValueError("The file '{}' columns must be separated with 'tab' or ',' ".format(data_file))
if list(self.data[0][:4]) == [1, 2, 3, 4]:
raise ValueError(
"Duplicated index column was found in '{}', please remove duplicated column".format(data_file))
# store raw data (df)
self.data_raw = self.data[:]
self.spectra_raw = self.spectra[:]
self.t_start_us = fit_util.convert_s_to(self.spectra[0][0], t_unit='us')
self.time_resolution_us = fit_util.convert_s_to(self.spectra[0][2] - self.spectra[0][1], t_unit='us')
# raw image number saved
self.img_num = self.data.index.values
# Baseline_rmv
self.baseline = baseline
self.baseline_deg = baseline_deg
def get_x(self, x_type, offset_us=None, source_to_detector_m=None, t_unit='us'):
"""
Get the 'x' in eV or angstrom with experimental parameters
:param t_unit:
:type t_unit:
:param x_type:
:type x_type:
:param offset_us:
:type offset_us:
:param source_to_detector_m:
:type source_to_detector_m:
:return:
:rtype:
"""
fit_util.check_if_in_list(x_type, fit_util.x_type_list)
if offset_us is not None:
self.offset_us = offset_us
if source_to_detector_m is not None:
self.source_to_detector_m = source_to_detector_m
if t_unit != self.t_unit:
self.t_unit = t_unit
_x_exp_raw = np.array(self.spectra[0][:]) # Default x_type == 'time' (x in seconds)
x_e = np.array(reso_util.s_to_ev(array=_x_exp_raw,
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m))
x_exp_raw = fit_util.convert_energy_to(x=x_e,
x_type=x_type,
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m,
t_unit=self.t_unit,
num_offset=self.img_start)
return x_exp_raw
def get_y(self, y_type, disable_rmv=False):
"""
Get the 'y' (transmission or attenuation) from the experimental data
:param y_type: 'transmission' or 'attenuation'
:return: array
"""
fit_util.check_if_in_list(y_type, fit_util.y_type_list)
y_exp_raw = np.array(self.data[0][:])
if not disable_rmv:
if self.baseline:
y_exp_raw = fit_util.rm_envelope(y_exp_raw, deg=self.baseline_deg)
if y_type == 'attenuation':
y_exp_raw = 1 - y_exp_raw
return y_exp_raw
def xy_scaled(self, energy_min, energy_max, energy_step,
x_type, y_type, t_unit='us',
offset_us=None, source_to_detector_m=None, disable_rmv=False):
"""
Get interpolated x & y within the scaled range same as simulation
:param energy_min:
:type energy_min:
:param energy_max:
:type energy_max:
:param energy_step:
:type energy_step:
:param x_type:
:type x_type:
:param y_type:
:type y_type:
:param t_unit:
:type t_unit:
:param offset_us:
:type offset_us:
:param source_to_detector_m:
:type source_to_detector_m:
:return:
:rtype:
"""
if offset_us is not None:
self.offset_us = offset_us
if source_to_detector_m is not None:
self.source_to_detector_m = source_to_detector_m
if t_unit != self.t_unit:
self.t_unit = t_unit
x_exp_raw = self.get_x(x_type='energy',
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m)
_x_max_energy = x_exp_raw[0]
_x_min_energy = x_exp_raw[-1]
if energy_min < _x_min_energy:
raise ValueError(
"'Energy min' ({} eV) used for interpolation is beyond 'data min' ({} eV) ".format(energy_min,
_x_min_energy))
if energy_max > _x_max_energy:
raise ValueError(
"'Energy max' ({} eV) used for interpolation is beyond 'data max' ({} eV) ".format(energy_max,
_x_max_energy))
y_exp_raw = self.get_y(y_type=y_type, disable_rmv=disable_rmv)
nbr_point = int((energy_max - energy_min) / energy_step + 1)
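        # e.g. energy_min=1 eV, energy_max=100 eV, energy_step=1 eV -> int((100 - 1) / 1 + 1) = 100 interpolation points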
_x_interp = np.linspace(energy_min, energy_max, nbr_point)
# y_interp_function = interp1d(x=x_exp_raw, y=y_exp_raw, kind='slinear')
y_interp_function = interp1d(x=x_exp_raw, y=y_exp_raw, kind='cubic')
y_interp = y_interp_function(_x_interp)
x_interp = fit_util.convert_energy_to(x_type=x_type,
x=_x_interp,
t_unit=self.t_unit,
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m,
num_offset=self.img_start,
)
return x_interp, y_interp
def norm_to(self, file, norm_factor=1, reset_index=False):
"""
Use specified file for normalization and save normalized data signal in self.data
:param file: string. filename with suffix. ex: 'your_data.csv' inside the folder specified in __init__
        :param norm_factor: additional normalization factor; the normalized signal is divided by this value
        :param reset_index: True -> reset pd.DataFrame indexes after slicing
        :return: None. The normalized data signal is stored in place in self.data
"""
if file is not None:
# Load file
_full_path = os.path.join(self.folder_path, file)
df = load_txt_csv(_full_path)
# Resize length
if len(self.data) != len(df):
if self.slice_start is None and self.slice_end is None:
raise ValueError("The length of the 'norm_to_file' is not equal to the length of the data file.")
else:
if self.slice_end is not None:
df.drop(df.index[self.slice_end:], inplace=True)
if self.slice_start is not None:
df.drop(df.index[:self.slice_start], inplace=True)
if reset_index is True:
df.reset_index(drop=True, inplace=True)
self.data[0] = self.data[0] / df[0]
# Apply norm_factor
self.data[0] = self.data[0] / norm_factor
def slice(self, start=None, end=None):
"""
Slice the signal by image number
:param start: start image
:param end: end image
:return: pd.Dataframe. sliced self.spectra and self.data
"""
        if start is not None and end is not None:
if start > end:
raise ValueError(
"The image number of 'start' ({}) can NOT be greater than 'end' ({}).".format(start,
end))
if end == start:
raise ValueError(
"The image number of 'start' ({}) and 'end' ({}) can not be the same.".format(start,
end))
if end is not None:
self.data.drop(self.data.index[end:], inplace=True)
self.spectra.drop(self.spectra.index[end:], inplace=True)
            # No index reset needed after dropping trailing rows
self.slice_end = end
# raw image number saved
self.img_num = self.data.index.values
if start is not None:
self.img_start = start
self.data.drop(self.data.index[:start], inplace=True)
self.spectra.drop(self.spectra.index[:start], inplace=True)
self.slice_start = start
# raw image number saved
self.img_num = self.data.index.values
# Disabled reset_index #
# if reset_index is True:
# self.spectra.reset_index(drop=True, inplace=True)
# self.data.reset_index(drop=True, inplace=True)
# self.img_start = 0
def find_peak(self, x_type, y_type, thres, min_dist, imprv_reso=False):
"""
        Find peaks in the experimental signal and return the peak dict (pd.DataFrame based).
        The 'y' used for peak detection is always the attenuation signal.
        :param x_type: x-axis type, must be one of fit_util.x_type_list
        :param y_type: y-axis type, must be one of fit_util.y_type_list
        :param thres: threshold used for peak detection
        :param min_dist: minimum distance between detected peaks
        :param imprv_reso: True -> refine the peak positions beyond the raw point grid
        :return: peak dict (also stored in self.o_peak.peak_dict)
        :rtype: dict
"""
_x = self.get_x(
x_type=x_type,
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m,
t_unit=self.t_unit
)
_y = self.get_y(
y_type='attenuation',
)
self.o_peak = fit_util.ResoPeak(x=_x, y=_y, x_type=x_type, y_type=y_type, img_num=self.img_num)
self.o_peak.find_peak(thres=thres, min_dist=min_dist, imprv_reso=imprv_reso)
if len(self.o_peak.peak_dict['df']) < 1:
raise ValueError("No peak has been detected.")
# if y_type == 'transmission':
# self.o_peak.peak_dict['df']['y'] = 1 - self.o_peak.peak_dict['df']['y']
return self.o_peak.peak_dict
def plot(self, x_type, y_type,
t_unit='us', offset_us=None, source_to_detector_m=None,
logx=False, logy=False, ax_mpl=None, fmt='.', ms=2, lw=1.5, alpha=1,
grid=False, label=None, plot_with_baseline=False):
"""
Display the loaded signal from data and spectra files.
"""
self.__check_in_list(x_type=x_type, y_type=y_type, t_unit=t_unit)
if offset_us is not None:
self.offset_us = offset_us
if source_to_detector_m is not None:
self.source_to_detector_m = source_to_detector_m
if t_unit != self.t_unit:
self.t_unit = t_unit
fig_title = 'Experimental data'
if label is None:
_label = self.data_file.split('.')[0] + '_data'
else:
_label = label
if ax_mpl is None:
fig, ax_mpl = plt.subplots()
"""X-axis"""
x_exp_raw = self.get_x(x_type=x_type,
t_unit=self.t_unit,
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m)
"""Y-axis"""
if self.baseline:
if plot_with_baseline:
y_exp_raw_before = self.get_y(y_type=y_type, disable_rmv=True)
ax_mpl.plot(x_exp_raw, y_exp_raw_before, '--',
label='Before baseline removal (deg={})'.format(self.baseline_deg),
ms=ms, lw=lw, alpha=alpha)
y_exp_raw = self.get_y(y_type=y_type)
# Plot
if len(y_exp_raw) - len(x_exp_raw) == 1:
y_exp_raw = y_exp_raw[:-1]
assert y_exp_raw.shape == x_exp_raw.shape
ax_mpl.plot(x_exp_raw, y_exp_raw, fmt, label=_label, ms=ms, lw=lw, alpha=alpha)
if self.o_peak is not None:
if len(self.o_peak.peak_dict) != 0:
# _x_tag = fit_util.get_peak_tag(x_type=x_type)
ax_mpl.scatter(self.o_peak.peak_dict['x'],
self.o_peak.peak_dict['y'],
c='r',
marker='x',
# s=30,
# marker='o',
# facecolors='none',
# edgecolors='k',
label='_nolegend_')
ax_mpl = fit_util.set_plt(ax=ax_mpl, fig_title=fig_title, grid=grid,
x_type=x_type, y_type=y_type, t_unit=t_unit, logx=logx, logy=logy)
return ax_mpl
def export(self, x_type, y_type,
t_unit='us', offset_us=None, source_to_detector_m=None):
self.__check_in_list(x_type=x_type, y_type=y_type, t_unit=t_unit)
if offset_us is not None:
self.offset_us = offset_us
if source_to_detector_m is not None:
self.source_to_detector_m = source_to_detector_m
if t_unit != self.t_unit:
self.t_unit = t_unit
_df = pd.DataFrame()
"""X-axis"""
x_exp_raw = self.get_x(x_type=x_type,
t_unit=t_unit,
offset_us=self.offset_us,
source_to_detector_m=self.source_to_detector_m)
"""Y-axis"""
y_exp_raw = self.get_y(y_type=y_type)
_df['x'] = x_exp_raw
_df['y'] = y_exp_raw
_df.to_clipboard(index=False)
return _df
def __check_in_list(self, x_type, y_type, t_unit):
fit_util.check_if_in_list(x_type, fit_util.x_type_list)
fit_util.check_if_in_list(y_type, fit_util.y_type_list)
fit_util.check_if_in_list(t_unit, fit_util.t_unit_list)
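# Illustrative usage sketch (not part of this module). The experiment class defined
# above is referred to here as `Experiment`; the constructor and its exact arguments
# appear earlier in this file, and every numeric value below is a placeholder.
#
#   o_exp = Experiment(spectra_file='spectra.txt', data_file='data.csv', folder='data_folder')
#   o_exp.slice(start=300, end=2200)          # keep images 300..2199
#   o_exp.norm_to('ob.csv')                   # normalize against an open-beam measurement
#   x = o_exp.get_x(x_type='energy', offset_us=2.7, source_to_detector_m=16.45)
#   y = o_exp.get_y(y_type='attenuation')
#   o_exp.find_peak(x_type='energy', y_type='attenuation', thres=0.15, min_dist=20)
#   o_exp.plot(x_type='energy', y_type='attenuation')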
|
|
""" Associative and Commutative unification
This module provides goals for associative and commutative unification. It
accomplishes this through naively trying all possibilities. This was built to
be used in the computer algebra systems SymPy and Theano.
>>> from logpy import run, var, fact
>>> from logpy.assoccomm import eq_assoccomm as eq
>>> from logpy.assoccomm import commutative, associative
>>> # Define some dummy Ops
>>> add = 'add'
>>> mul = 'mul'
>>> # Declare that these ops are commutative using the facts system
>>> fact(commutative, mul)
>>> fact(commutative, add)
>>> fact(associative, mul)
>>> fact(associative, add)
>>> # Define some wild variables
>>> x, y = var('x'), var('y')
>>> # Two expressions to match
>>> pattern = (mul, (add, 1, x), y) # (1 + x) * y
>>> expr = (mul, 2, (add, 3, 1)) # 2 * (3 + 1)
>>> print(run(0, (x,y), eq(pattern, expr)))
((3, 2),)
"""
from logpy.core import (isvar, assoc, unify,
                        conde, var, eq, fail, goaleval, lall, EarlyGoalError,
                        condeseq)
from .goals import heado, permuteq, conso, tailo
from .facts import Relation
from logpy import core
from .util import groupsizes, index
from .util import transitive_get as walk
from .term import term, arguments, operator
associative = Relation('associative')
commutative = Relation('commutative')
def assocunify(u, v, s, eq=core.eq, n=None):
""" Associative Unification
See Also:
eq_assoccomm
"""
uop, uargs = op_args(u)
vop, vargs = op_args(v)
    if not uop and not vop:
        res = unify(u, v, s)
        if res is not False:
            return (res,)  # TODO: iterate through all possibilities
        return ()
if uop and vop:
s = unify(uop, vop, s)
        if s is False:
            return ()
op = walk(uop, s)
sm, lg = (uargs, vargs) if len(uargs) <= len(vargs) else (vargs, uargs)
ops = assocsized(op, lg, len(sm))
        goal = condeseq([(eq, a, b) for a, b in zip(sm, lg2)] for lg2 in ops)
return goaleval(goal)(s)
if uop:
op, tail = uop, uargs
b = v
if vop:
op, tail = vop, vargs
b = u
ns = [n] if n else range(2, len(tail)+1)
knowns = (build(op, x) for n in ns for x in assocsized(op, tail, n))
goal = condeseq([(core.eq, b, k)] for k in knowns)
return goaleval(goal)(s)
def assocsized(op, tail, n):
""" All associative combinations of x in n groups """
gsizess = groupsizes(len(tail), n)
partitions = (groupsizes_to_partition(*gsizes) for gsizes in gsizess)
return (makeops(op, partition(tail, part)) for part in partitions)
def makeops(op, lists):
""" Construct operations from an op and parition lists
>>> from logpy.assoccomm import makeops
>>> makeops('add', [(1, 2), (3, 4, 5)])
(('add', 1, 2), ('add', 3, 4, 5))
"""
return tuple(l[0] if len(l) == 1 else build(op, l) for l in lists)
def partition(tup, part):
""" Partition a tuple
>>> from logpy.assoccomm import partition
>>> partition("abcde", [[0,1], [4,3,2]])
[('a', 'b'), ('e', 'd', 'c')]
"""
return [index(tup, ind) for ind in part]
def groupsizes_to_partition(*gsizes):
"""
>>> from logpy.assoccomm import groupsizes_to_partition
>>> groupsizes_to_partition(2, 3)
[[0, 1], [2, 3, 4]]
"""
idx = 0
part = []
for gs in gsizes:
l = []
for i in range(gs):
l.append(idx)
idx += 1
part.append(l)
return part
def eq_assoc(u, v, eq=core.eq, n=None):
""" Goal for associative equality
>>> from logpy import run, var, fact
    >>> from logpy.assoccomm import eq_assoc as eq
    >>> from logpy.assoccomm import commutative, associative
>>> fact(commutative, 'add') # declare that 'add' is commutative
>>> fact(associative, 'add') # declare that 'add' is associative
>>> x = var()
>>> run(0, x, eq(('add', 1, 2, 3), ('add', 1, x)))
(('add', 2, 3),)
"""
uop, uargs = op_args(u)
vop, vargs = op_args(v)
if uop and vop:
return conde([(core.eq, u, v)],
[(eq, uop, vop), (associative, uop),
lambda s: assocunify(u, v, s, eq, n)])
if uop or vop:
if vop:
uop, vop = vop, uop
uargs, vargs = vargs, uargs
v, u = u, v
return conde([(core.eq, u, v)],
[(associative, uop),
lambda s: assocunify(u, v, s, eq, n)])
return (core.eq, u, v)
def eq_comm(u, v, eq=None):
""" Goal for commutative equality
>>> from logpy import run, var, fact
>>> from logpy.assoccomm import eq_comm as eq
>>> from logpy.assoccomm import commutative, associative
>>> fact(commutative, 'add') # declare that 'add' is commutative
>>> fact(associative, 'add') # declare that 'add' is associative
>>> x = var()
>>> run(0, x, eq(('add', 1, 2, 3), ('add', 2, x, 1)))
(3,)
"""
eq = eq or eq_comm
op = var()
utail = var()
vtail = var()
if isvar(u) and isvar(v):
return (core.eq, u, v)
uop, uargs = op_args(u)
vop, vargs = op_args(v)
if not uop and not vop:
return (core.eq, u, v)
if vop and not uop:
uop, uargs = vop, vargs
v, u = u, v
return (conde, ((core.eq, u, v),),
((commutative, uop),
(buildo, uop, vtail, v),
(permuteq, uargs, vtail, eq)))
def build_tuple(op, args):
try:
return term(op, args)
except TypeError:
raise EarlyGoalError()
def buildo(op, args, obj):
""" obj is composed of op on args
Example: in add(1,2,3) ``add`` is the op and (1,2,3) are the args
    Checks the op registry for functions that define op/arg relationships
"""
if not isvar(obj):
oop, oargs = op_args(obj)
return lall((eq, op, oop), (eq, args, oargs))
else:
try:
return eq(obj, build(op, args))
except TypeError:
raise EarlyGoalError()
raise EarlyGoalError()
def build(op, args):
try:
return term(op, args)
except NotImplementedError:
raise EarlyGoalError()
def op_args(x):
""" Break apart x into an operation and tuple of args """
if isvar(x):
return None, None
try:
return operator(x), arguments(x)
except NotImplementedError:
return None, None
def eq_assoccomm(u, v):
""" Associative/Commutative eq
Works like logic.core.eq but supports associative/commutative expr trees
tree-format: (op, *args)
example: (add, 1, 2, 3)
State that operations are associative or commutative with relations
>>> from logpy.assoccomm import eq_assoccomm as eq
>>> from logpy.assoccomm import commutative, associative
>>> from logpy import fact, run, var
>>> fact(commutative, 'add') # declare that 'add' is commutative
>>> fact(associative, 'add') # declare that 'add' is associative
>>> x = var()
>>> e1 = ('add', 1, 2, 3)
>>> e2 = ('add', 1, x)
>>> run(0, x, eq(e1, e2))
(('add', 2, 3), ('add', 3, 2))
"""
try:
uop, uargs = op_args(u)
vop, vargs = op_args(v)
except ValueError:
return (eq, u, v)
if uop and not vop and not isvar(v):
return fail
if vop and not uop and not isvar(u):
return fail
    if uop and vop and uop != vop:
        return fail
    if uop and (uop,) not in associative.facts:
        return (eq, u, v)
    if vop and (vop,) not in associative.facts:
        return (eq, u, v)
if uop and vop:
u, v = (u, v) if len(uargs) >= len(vargs) else (v, u)
n = min(map(len, (uargs, vargs))) # length of shorter tail
else:
n = None
if vop and not uop:
u, v = v, u
w = var()
return (lall, (eq_assoc, u, w, eq_assoccomm, n),
(eq_comm, v, w, eq_assoccomm))
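# Illustrative note (not part of the library): `assocsized` enumerates associative
# regroupings by composing the helpers above. For a 5-element tail split into
# 2 groups, one of the size tuples is (2, 3):
#
#   groupsizes_to_partition(2, 3)                      -> [[0, 1], [2, 3, 4]]
#   partition((1, 2, 3, 4, 5), [[0, 1], [2, 3, 4]])    -> [(1, 2), (3, 4, 5)]
#   makeops('add', [(1, 2), (3, 4, 5)])                -> (('add', 1, 2), ('add', 3, 4, 5))
#
# `assocunify` then unifies the shorter argument list element-wise against each
# such regrouping of the longer one.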
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import (
ad_group_criterion_simulation,
)
from google.ads.googleads.v8.services.types import (
ad_group_criterion_simulation_service,
)
from .transports.base import (
AdGroupCriterionSimulationServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import AdGroupCriterionSimulationServiceGrpcTransport
class AdGroupCriterionSimulationServiceClientMeta(type):
"""Metaclass for the AdGroupCriterionSimulationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdGroupCriterionSimulationServiceTransport]]
_transport_registry["grpc"] = AdGroupCriterionSimulationServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdGroupCriterionSimulationServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupCriterionSimulationServiceClient(
metaclass=AdGroupCriterionSimulationServiceClientMeta
):
"""Service to fetch ad group criterion simulations."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupCriterionSimulationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupCriterionSimulationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdGroupCriterionSimulationServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupCriterionSimulationServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_group_criterion_simulation_path(
customer_id: str,
ad_group_id: str,
criterion_id: str,
type: str,
modification_method: str,
start_date: str,
end_date: str,
) -> str:
"""Return a fully-qualified ad_group_criterion_simulation string."""
return "customers/{customer_id}/adGroupCriterionSimulations/{ad_group_id}~{criterion_id}~{type}~{modification_method}~{start_date}~{end_date}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
type=type,
modification_method=modification_method,
start_date=start_date,
end_date=end_date,
)
@staticmethod
def parse_ad_group_criterion_simulation_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_criterion_simulation path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupCriterionSimulations/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)~(?P<type>.+?)~(?P<modification_method>.+?)~(?P<start_date>.+?)~(?P<end_date>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, AdGroupCriterionSimulationServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group criterion simulation service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupCriterionSimulationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupCriterionSimulationServiceTransport):
            # transport is an AdGroupCriterionSimulationServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupCriterionSimulationServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_criterion_simulation(
self,
request: ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_criterion_simulation.AdGroupCriterionSimulation:
r"""Returns the requested ad group criterion simulation in full
detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdGroupCriterionSimulationRequest`):
The request object. Request message for
[AdGroupCriterionSimulationService.GetAdGroupCriterionSimulation][google.ads.googleads.v8.services.AdGroupCriterionSimulationService.GetAdGroupCriterionSimulation].
resource_name (:class:`str`):
Required. The resource name of the ad
group criterion simulation to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AdGroupCriterionSimulation:
An ad group criterion simulation. Supported combinations of advertising
channel type, criterion type, simulation type, and
simulation modification method are detailed below
respectively. Hotel AdGroupCriterion simulation
operations starting in V5.
1. DISPLAY - KEYWORD - CPC_BID - UNIFORM
2. SEARCH - KEYWORD - CPC_BID - UNIFORM
3. SHOPPING - LISTING_GROUP - CPC_BID - UNIFORM
4. HOTEL - LISTING_GROUP - CPC_BID - UNIFORM
5. HOTEL - LISTING_GROUP - PERCENT_CPC_BID - UNIFORM
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest,
):
request = ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_ad_group_criterion_simulation
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AdGroupCriterionSimulationServiceClient",)
|
|
import doctest
import random
import unittest
import gc
from sys import getsizeof
from pympler import muppy
class MuppyTest(unittest.TestCase):
def test_objects(self):
"""Test that objects returns a non-empty list."""
self.assertTrue(len(muppy.get_objects()) > 0)
def test_diff(self):
"""Test if the diff of to object lists is correct.
The diff has to work in both directions, that is it has to show
newly created objects, as well as removed objects.
The sorting is irrelevant.
"""
(o1, o2, o3, o4, o5, o6) = (1, 'a', 'b', 4, 5, (1,))
list1 = [o1, o2, o3, o4]
list2 = [o1, o2, o3, o4, o5]
list3 = [o5, o3, o1, o4, o2]
list4 = [o1, o2, o3, o4, o6]
# empty lists
expected = {'+': [], '-': []}
self.assertEqual(muppy.get_diff([], []), expected)
# one more entry
expected = {'+': [o5], '-': []}
self.assertEqual(muppy.get_diff(list1, list2), expected)
# one more entry, but different order
self.assertEqual(muppy.get_diff(list1, list3), expected)
# one entry removed
expected = {'+': [], '-': [5]}
self.assertEqual(muppy.get_diff(list2, list1), expected)
# one entry removed, but different order
self.assertEqual(muppy.get_diff(list3, list1), expected)
# one more entry of different type
expected = {'+': [o6], '-': []}
self.assertEqual(muppy.get_diff(list1, list4), expected)
def test_filter_by_type(self):
"""Test that only elements of a certain type are included,
no elements are removed which belong to this type and
no elements are added."""
s = (s1, s2, s3, s4) = ('', 'a', 'b', 'a')
t = (t1, t2) = (dict, str)
i1 = 1
l1 = []
objects = [s1, s2, i1, l1, t1, t2, s3, s4]
objects = muppy.filter(objects, Type=str)
self.assertEqual(len(objects), len(s))
for element in s:
self.assertEqual(element in objects, True)
def test_filter_by_size(self):
"""Test that only elements within the specified size boundaries
are returned.
Also verify that if minimum is larger than maximum an exception is
raised."""
minimum = 42
maximum = 958
objects = []
for i in range(1000):
objects.append(' ' * i)
objects = muppy.filter(objects, min=minimum, max=maximum)
self.assertTrue(len(objects) != 0)
for o in objects:
self.assertTrue(minimum <= getsizeof(o) <= maximum)
self.assertRaises(ValueError, muppy.filter, objects, min=17, max=16)
def test_get_referents(self):
"""Test that referents are included in return value.
Per default, only first level referents should be returned.
If specified otherwise referents from even more levels are included
in the result set.
Duplicates have to be removed."""
(o1, o2, o3, o4, o5) = (1, 'a', 'b', 4, 5)
l0 = [o1, o2]
l1 = [10, 11, 12, 13, l0]
l2 = [o1, o2, o3, o4, o5, l1]
#return all objects from first level
res = muppy.get_referents(l2, level=1)
self.assertEqual(len(l2), len(res))
for o in res:
self.assertTrue(o in l2)
# return all objects from first and second level
res = muppy.get_referents(l2, level=2)
self.assertEqual(len(l1) + len(l2), len(res))
for o in res:
self.assertTrue((o in l1) or (o in l2))
# return all objects from all levels, but with duplicates removed
res = muppy.get_referents(l2, level=4242)
self.assertEqual(len(l1) + len(l2), len(res))
for o in res:
self.assertTrue((o in l0) or (o in l1) or (o in l2))
def test_get_size(self):
"""Test that the return value is the sum of the size of all objects."""
(o1, o2, o3, o4, o5) = (1, 'a', 'b', 4, 5)
list = [o1, o2, o3, o4, o5]
expected = 0
for o in list:
expected += getsizeof(o)
self.assertEqual(muppy.get_size(list), expected)
    # due to the poor performance, excluded from tests for now
# def test_get_usage(self):
# """Test that the return value reflects changes to the memory usage.
#
# For functions which leave the memory unchanged a None should be
# returned.
#
# Parameters of the function should be forwarded correctly.
# """
#
# # we need to pull some tricks here, since parsing the code, static
# # objects are already created, e.g. parsing "a = 'some text'" will
# # already create a string object for 'some text'. So we compute the
# # values to use dynamically.
#
# # check that no increase in memory usage returns None
# a = 1
# b = 2
# c = 3
# d = 4
# e = 1
# def function(): pass
# expected = None
# # XXX: Fix get_usage tests.
# res = muppy._get_usage(function)
# print res
# self.assertEqual(res, expected)
# # passing of parameter should also work
# def function(arg):
# a = arg
# expected = None
# res = muppy._get_usage(function, 42)
# self.assertEqual(res, expected)
# # memory leaks should be indicated
# def function():
# try:
# muppy.extra_var.append(1)
# except AttributeError:
# muppy.extra_var = []
# res = muppy._get_usage(function)
# self.assertTrue(res is not None)
def test_is_containerobject(self):
"""Test that (non-)container objects are identified correctly."""
self.assertTrue(muppy._is_containerobject([]))
self.assertTrue(muppy._is_containerobject((1,)))
self.assertTrue(muppy._is_containerobject({}))
self.assertTrue(muppy._is_containerobject(int))
self.assertTrue(muppy._is_containerobject(type))
self.assertFalse(muppy._is_containerobject(1))
self.assertFalse(muppy._is_containerobject(''))
def test_remove_duplicates(self):
"""Test that this operations returns a duplicate-free lists.
That, is no objects are listed twice. This does not apply to objects
with same values."""
(o1, o2, o3, o4, o5) = (1, 'a', 'b', 'c', 5)
objects = [o1, o2, o3, o4, o5, o5, o4, o3, o2, o1]
expected = set(objects)
res = muppy._remove_duplicates(objects)
self.assertEqual(len(expected), len(res))
for o in res:
self.assertTrue(o in expected)
def test_sort(self):
"""Test that objects are sorted by size."""
objects = ['', 'a', 'ab', 'ab', 'abc', '0']
objects = muppy.sort(objects)
while len(objects) > 1:
prev_o = objects.pop(0)
self.assertTrue(getsizeof(objects[0]) >= getsizeof(prev_o),\
"The previous element appears to be larger than the " +\
"current: %s<%s" % (prev_o, objects[0]))
def test_ignore_frame(self):
"""Test whether reference cycles are created
"""
gc.collect()
gc.disable()
objs = muppy.get_objects()
del objs
self.assertEqual(gc.collect(), 0)
objs = muppy.get_objects(include_frames=True)
del objs
self.assertEqual(gc.collect(), 0)
gc.enable()
def test_untracked_containers(self):
"""Test whether untracked container objects are detected.
"""
untracked = {}
tracked = {'untracked': untracked}
self.assertTrue(gc.is_tracked(tracked))
self.assertFalse(gc.is_tracked(untracked))
objects = [id(o) for o in muppy.get_objects()]
self.assertTrue(id(untracked) in objects)
def suite():
suite = unittest.makeSuite(MuppyTest,'test')
suite.addTest(doctest.DocTestSuite())
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
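# Illustrative sketch (not part of the original test suite): the muppy calls
# exercised above compose roughly like this. Nothing is asserted here because
# the numbers depend entirely on the running interpreter.
def _muppy_usage_sketch():
    before = muppy.get_objects()
    leaked = [' ' * i for i in range(100)]       # allocate some throw-away strings
    after = muppy.get_objects()
    diff = muppy.get_diff(before, after)         # {'+': [new objects], '-': [removed objects]}
    strings = muppy.filter(after, Type=str)      # keep only str objects
    total = muppy.get_size(strings)              # sum of getsizeof() over the list
    return diff, total, leaked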
|
|
from discord.ext import commands, tasks, menus
from .utils import checks, db, cache
from .utils.formats import plural, human_join
from .utils.paginator import SimplePages
from collections import Counter, defaultdict
import discord
import datetime
import time
import json
import random
import asyncio
import asyncpg
import logging
import weakref
import re
log = logging.getLogger(__name__)
class StarError(commands.CheckFailure):
pass
def requires_starboard():
async def predicate(ctx):
if ctx.guild is None:
return False
cog = ctx.bot.get_cog('Stars')
ctx.starboard = await cog.get_starboard(ctx.guild.id, connection=ctx.db)
if ctx.starboard.channel is None:
raise StarError('\N{WARNING SIGN} Starboard channel not found.')
return True
return commands.check(predicate)
def MessageID(argument):
try:
return int(argument, base=10)
except ValueError:
raise StarError(f'"{argument}" is not a valid message ID. Use Developer Mode to get the Copy ID option.')
class Starboard(db.Table):
id = db.Column(db.Integer(big=True), primary_key=True)
channel_id = db.Column(db.Integer(big=True))
threshold = db.Column(db.Integer, default=1, nullable=False)
locked = db.Column(db.Boolean, default=False)
max_age = db.Column(db.Interval, default="'7 days'::interval", nullable=False)
class StarboardEntry(db.Table, table_name='starboard_entries'):
id = db.PrimaryKeyColumn()
bot_message_id = db.Column(db.Integer(big=True), index=True)
message_id = db.Column(db.Integer(big=True), index=True, unique=True, nullable=False)
channel_id = db.Column(db.Integer(big=True))
author_id = db.Column(db.Integer(big=True))
guild_id = db.Column(db.ForeignKey('starboard', 'id', sql_type=db.Integer(big=True)), index=True, nullable=False)
class Starrers(db.Table):
id = db.PrimaryKeyColumn()
author_id = db.Column(db.Integer(big=True), nullable=False)
entry_id = db.Column(db.ForeignKey('starboard_entries', 'id'), index=True, nullable=False)
@classmethod
def create_table(cls, *, exists_ok=True):
statement = super().create_table(exists_ok=exists_ok)
sql = "CREATE UNIQUE INDEX IF NOT EXISTS starrers_uniq_idx ON starrers (author_id, entry_id);"
return statement + '\n' + sql
class StarboardConfig:
__slots__ = ('bot', 'id', 'channel_id', 'threshold', 'locked', 'needs_migration', 'max_age')
def __init__(self, *, guild_id, bot, record=None):
self.id = guild_id
self.bot = bot
if record:
self.channel_id = record['channel_id']
self.threshold = record['threshold']
self.locked = record['locked']
self.needs_migration = self.locked is None
if self.needs_migration:
self.locked = True
self.max_age = record['max_age']
else:
self.channel_id = None
@property
def channel(self):
guild = self.bot.get_guild(self.id)
return guild and guild.get_channel(self.channel_id)
class Stars(commands.Cog):
"""A starboard to upvote posts obviously.
There are two ways to make use of this feature, the first is
via reactions, react to a message with \N{WHITE MEDIUM STAR} and
the bot will automatically add (or remove) it to the starboard.
The second way is via Developer Mode. Enable it under Settings >
Appearance > Developer Mode and then you get access to Copy ID
and using the star/unstar commands.
"""
def __init__(self, bot):
self.bot = bot
# cache message objects to save Discord some HTTP requests.
self._message_cache = {}
self.clean_message_cache.start()
        # if a message ID is in this set, we are the ones deleting that
        # starboard message, so on_raw_message_delete should ignore it
self._about_to_be_deleted = set()
self._locks = weakref.WeakValueDictionary()
self.spoilers = re.compile(r'\|\|(.+?)\|\|')
@property
def display_emoji(self) -> discord.PartialEmoji:
return discord.PartialEmoji(name='\N{WHITE MEDIUM STAR}')
def cog_unload(self):
self.clean_message_cache.cancel()
async def cog_command_error(self, ctx, error):
if isinstance(error, StarError):
await ctx.send(error)
@tasks.loop(hours=1.0)
async def clean_message_cache(self):
self._message_cache.clear()
@cache.cache()
async def get_starboard(self, guild_id, *, connection=None):
connection = connection or self.bot.pool
query = "SELECT * FROM starboard WHERE id=$1;"
record = await connection.fetchrow(query, guild_id)
return StarboardConfig(guild_id=guild_id, bot=self.bot, record=record)
def star_emoji(self, stars):
if 5 > stars >= 0:
return '\N{WHITE MEDIUM STAR}'
elif 10 > stars >= 5:
return '\N{GLOWING STAR}'
elif 25 > stars >= 10:
return '\N{DIZZY SYMBOL}'
else:
return '\N{SPARKLES}'
def star_gradient_colour(self, stars):
        # We define 13 stars to be 100% of the star gradient (half of the 26 emoji threshold)
# So X / 13 will clamp to our percentage,
# We start out with 0xfffdf7 for the beginning colour
# Gradually evolving into 0xffc20c
# rgb values are (255, 253, 247) -> (255, 194, 12)
# To create the gradient, we use a linear interpolation formula
# Which for reference is X = X_1 * p + X_2 * (1 - p)
p = stars / 13
if p > 1.0:
p = 1.0
red = 255
green = int((194 * p) + (253 * (1 - p)))
blue = int((12 * p) + (247 * (1 - p)))
return (red << 16) + (green << 8) + blue
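    # Worked example of the gradient above: 0 stars -> p = 0 -> rgb(255, 253, 247)
    # = 0xFFFDF7, while 13 or more stars clamp to p = 1.0 -> rgb(255, 194, 12)
    # = 0xFFC20C; intermediate counts blend linearly between the two colours.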
def is_url_spoiler(self, text, url):
spoilers = self.spoilers.findall(text)
for spoiler in spoilers:
if url in spoiler:
return True
return False
def get_emoji_message(self, message, stars):
emoji = self.star_emoji(stars)
if stars > 1:
content = f'{emoji} **{stars}** {message.channel.mention} ID: {message.id}'
else:
content = f'{emoji} {message.channel.mention} ID: {message.id}'
embed = discord.Embed(description=message.content)
if message.embeds:
data = message.embeds[0]
if data.type == 'image' and not self.is_url_spoiler(message.content, data.url):
embed.set_image(url=data.url)
if message.attachments:
file = message.attachments[0]
spoiler = file.is_spoiler()
if not spoiler and file.url.lower().endswith(('png', 'jpeg', 'jpg', 'gif', 'webp')):
embed.set_image(url=file.url)
elif spoiler:
embed.add_field(name='Attachment', value=f'||[{file.filename}]({file.url})||', inline=False)
else:
embed.add_field(name='Attachment', value=f'[{file.filename}]({file.url})', inline=False)
ref = message.reference
if ref and isinstance(ref.resolved, discord.Message):
embed.add_field(name='Replying to...', value=f'[{ref.resolved.author}]({ref.resolved.jump_url})', inline=False)
embed.add_field(name='Original', value=f'[Jump!]({message.jump_url})', inline=False)
embed.set_author(name=message.author.display_name, icon_url=message.author.display_avatar.url)
embed.timestamp = message.created_at
embed.colour = self.star_gradient_colour(stars)
return content, embed
async def get_message(self, channel, message_id):
try:
return self._message_cache[message_id]
except KeyError:
try:
o = discord.Object(id=message_id + 1)
pred = lambda m: m.id == message_id
# don't wanna use get_message due to poor rate limit (1/1s) vs (50/1s)
msg = await channel.history(limit=1, before=o).next()
if msg.id != message_id:
return None
self._message_cache[message_id] = msg
return msg
except Exception:
return None
async def reaction_action(self, fmt, payload):
if str(payload.emoji) != '\N{WHITE MEDIUM STAR}':
return
guild = self.bot.get_guild(payload.guild_id)
if guild is None:
return
channel = guild.get_channel_or_thread(payload.channel_id)
if not isinstance(channel, (discord.Thread, discord.TextChannel)):
return
method = getattr(self, f'{fmt}_message')
user = payload.member or (await self.bot.get_or_fetch_member(guild, payload.user_id))
if user is None or user.bot:
return
try:
await method(channel, payload.message_id, payload.user_id, verify=True)
except StarError:
pass
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
if not isinstance(channel, discord.TextChannel):
return
starboard = await self.get_starboard(channel.guild.id)
if starboard.channel is None or starboard.channel.id != channel.id:
return
# the starboard channel got deleted, so let's clear it from the database.
async with self.bot.pool.acquire(timeout=300.0) as con:
query = "DELETE FROM starboard WHERE id=$1;"
await con.execute(query, channel.guild.id)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
await self.reaction_action('star', payload)
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
await self.reaction_action('unstar', payload)
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
if payload.message_id in self._about_to_be_deleted:
# we triggered this deletion ourselves and
# we don't need to drop it from the database
self._about_to_be_deleted.discard(payload.message_id)
return
starboard = await self.get_starboard(payload.guild_id)
if starboard.channel is None or starboard.channel.id != payload.channel_id:
return
# at this point a message got deleted in the starboard
# so just delete it from the database
async with self.bot.pool.acquire(timeout=300.0) as con:
query = "DELETE FROM starboard_entries WHERE bot_message_id=$1;"
await con.execute(query, payload.message_id)
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, payload):
if payload.message_ids <= self._about_to_be_deleted:
# see comment above
self._about_to_be_deleted.difference_update(payload.message_ids)
return
starboard = await self.get_starboard(payload.guild_id)
if starboard.channel is None or starboard.channel.id != payload.channel_id:
return
async with self.bot.pool.acquire(timeout=300.0) as con:
query = "DELETE FROM starboard_entries WHERE bot_message_id=ANY($1::bigint[]);"
await con.execute(query, list(payload.message_ids))
@commands.Cog.listener()
async def on_raw_reaction_clear(self, payload):
guild = self.bot.get_guild(payload.guild_id)
if guild is None:
return
channel = guild.get_channel_or_thread(payload.channel_id)
if channel is None or not isinstance(channel, (discord.Thread, discord.TextChannel)):
return
async with self.bot.pool.acquire(timeout=300.0) as con:
starboard = await self.get_starboard(channel.guild.id, connection=con)
if starboard.channel is None:
return
query = "DELETE FROM starboard_entries WHERE message_id=$1 RETURNING bot_message_id;"
bot_message_id = await con.fetchrow(query, payload.message_id)
if bot_message_id is None:
return
bot_message_id = bot_message_id[0]
msg = await self.get_message(starboard.channel, bot_message_id)
if msg is not None:
await msg.delete()
async def star_message(self, channel, message_id, starrer_id, *, verify=False):
guild_id = channel.guild.id
lock = self._locks.get(guild_id)
if lock is None:
self._locks[guild_id] = lock = asyncio.Lock(loop=self.bot.loop)
async with lock:
async with self.bot.pool.acquire(timeout=300.0) as con:
if verify:
config = self.bot.get_cog('Config')
if config:
plonked = await config.is_plonked(guild_id, starrer_id, channel=channel, connection=con)
if plonked:
return
perms = await config.get_command_permissions(guild_id, connection=con)
if perms.is_command_blocked('star', channel.id):
return
await self._star_message(channel, message_id, starrer_id, connection=con)
async def _star_message(self, channel, message_id, starrer_id, *, connection):
"""Stars a message.
Parameters
------------
channel: :class:`TextChannel`
The channel that the starred message belongs to.
message_id: int
The message ID of the message being starred.
starrer_id: int
The ID of the person who starred this message.
connection: asyncpg.Connection
The connection to use.
"""
guild_id = channel.guild.id
starboard = await self.get_starboard(guild_id)
starboard_channel = starboard.channel
if starboard_channel is None:
raise StarError('\N{WARNING SIGN} Starboard channel not found.')
if starboard.locked:
raise StarError('\N{NO ENTRY SIGN} Starboard is locked.')
if channel.is_nsfw() and not starboard_channel.is_nsfw():
raise StarError('\N{NO ENTRY SIGN} Cannot star NSFW in non-NSFW starboard channel.')
if channel.id == starboard_channel.id:
# special case redirection code goes here
# ergo, when we add a reaction from starboard we want it to star
# the original message
query = "SELECT channel_id, message_id FROM starboard_entries WHERE bot_message_id=$1;"
record = await connection.fetchrow(query, message_id)
if record is None:
raise StarError('Could not find message in the starboard.')
ch = channel.guild.get_channel_or_thread(record['channel_id'])
if ch is None:
raise StarError('Could not find original channel.')
return await self._star_message(ch, record['message_id'], starrer_id, connection=connection)
if not starboard_channel.permissions_for(starboard_channel.guild.me).send_messages:
raise StarError('\N{NO ENTRY SIGN} Cannot post messages in starboard channel.')
msg = await self.get_message(channel, message_id)
if msg is None:
raise StarError('\N{BLACK QUESTION MARK ORNAMENT} This message could not be found.')
if msg.author.id == starrer_id:
raise StarError('\N{NO ENTRY SIGN} You cannot star your own message.')
empty_message = len(msg.content) == 0 and len(msg.attachments) == 0
if empty_message or msg.type not in (discord.MessageType.default, discord.MessageType.reply):
raise StarError('\N{NO ENTRY SIGN} This message cannot be starred.')
oldest_allowed = discord.utils.utcnow() - starboard.max_age
if msg.created_at < oldest_allowed:
raise StarError('\N{NO ENTRY SIGN} This message is too old.')
# check if this is freshly starred
# originally this was a single query but it seems
# WHERE ... = (SELECT ... in some_cte) is bugged
# so I'm going to do two queries instead
query = """WITH to_insert AS (
INSERT INTO starboard_entries AS entries (message_id, channel_id, guild_id, author_id)
VALUES ($1, $2, $3, $4)
ON CONFLICT (message_id) DO NOTHING
RETURNING entries.id
)
INSERT INTO starrers (author_id, entry_id)
SELECT $5, entry.id
FROM (
SELECT id FROM to_insert
UNION ALL
SELECT id FROM starboard_entries WHERE message_id=$1
LIMIT 1
) AS entry
RETURNING entry_id;
"""
try:
record = await connection.fetchrow(query, message_id, channel.id, guild_id, msg.author.id, starrer_id)
except asyncpg.UniqueViolationError:
raise StarError('\N{NO ENTRY SIGN} You already starred this message.')
entry_id = record[0]
query = "SELECT COUNT(*) FROM starrers WHERE entry_id=$1;"
record = await connection.fetchrow(query, entry_id)
count = record[0]
if count < starboard.threshold:
return
# at this point, we either edit the message or we create a message
# with our star info
content, embed = self.get_emoji_message(msg, count)
# get the message ID to edit:
query = "SELECT bot_message_id FROM starboard_entries WHERE message_id=$1;"
record = await connection.fetchrow(query, message_id)
bot_message_id = record[0]
if bot_message_id is None:
new_msg = await starboard_channel.send(content, embed=embed)
query = "UPDATE starboard_entries SET bot_message_id=$1 WHERE message_id=$2;"
await connection.execute(query, new_msg.id, message_id)
else:
new_msg = await self.get_message(starboard_channel, bot_message_id)
if new_msg is None:
# deleted? might as well purge the data
query = "DELETE FROM starboard_entries WHERE message_id=$1;"
await connection.execute(query, message_id)
else:
await new_msg.edit(content=content, embed=embed)
async def unstar_message(self, channel, message_id, starrer_id, *, verify=False):
guild_id = channel.guild.id
lock = self._locks.get(guild_id)
if lock is None:
self._locks[guild_id] = lock = asyncio.Lock(loop=self.bot.loop)
async with lock:
async with self.bot.pool.acquire(timeout=300.0) as con:
if verify:
config = self.bot.get_cog('Config')
if config:
plonked = await config.is_plonked(guild_id, starrer_id, channel=channel, connection=con)
if plonked:
return
perms = await config.get_command_permissions(guild_id, connection=con)
if perms.is_command_blocked('star', channel.id):
return
await self._unstar_message(channel, message_id, starrer_id, connection=con)
async def _unstar_message(self, channel, message_id, starrer_id, *, connection):
"""Unstars a message.
Parameters
------------
channel: :class:`TextChannel`
The channel that the starred message belongs to.
message_id: int
The message ID of the message being unstarred.
starrer_id: int
The ID of the person who unstarred this message.
connection: asyncpg.Connection
The connection to use.
"""
guild_id = channel.guild.id
starboard = await self.get_starboard(guild_id)
starboard_channel = starboard.channel
if starboard_channel is None:
raise StarError('\N{WARNING SIGN} Starboard channel not found.')
if starboard.locked:
raise StarError('\N{NO ENTRY SIGN} Starboard is locked.')
if channel.id == starboard_channel.id:
query = "SELECT channel_id, message_id FROM starboard_entries WHERE bot_message_id=$1;"
record = await connection.fetchrow(query, message_id)
if record is None:
raise StarError('Could not find message in the starboard.')
ch = channel.guild.get_channel_or_thread(record['channel_id'])
if ch is None:
raise StarError('Could not find original channel.')
return await self._unstar_message(ch, record['message_id'], starrer_id, connection=connection)
if not starboard_channel.permissions_for(starboard_channel.guild.me).send_messages:
raise StarError('\N{NO ENTRY SIGN} Cannot edit messages in starboard channel.')
query = """DELETE FROM starrers USING starboard_entries entry
WHERE entry.message_id=$1
AND entry.id=starrers.entry_id
AND starrers.author_id=$2
RETURNING starrers.entry_id, entry.bot_message_id
"""
record = await connection.fetchrow(query, message_id, starrer_id)
if record is None:
raise StarError('\N{NO ENTRY SIGN} You have not starred this message.')
entry_id = record[0]
bot_message_id = record[1]
query = "SELECT COUNT(*) FROM starrers WHERE entry_id=$1;"
count = await connection.fetchrow(query, entry_id)
count = count[0]
if count == 0:
# delete the entry if we have no more stars
query = "DELETE FROM starboard_entries WHERE id=$1;"
await connection.execute(query, entry_id)
if bot_message_id is None:
return
bot_message = await self.get_message(starboard_channel, bot_message_id)
if bot_message is None:
return
if count < starboard.threshold:
self._about_to_be_deleted.add(bot_message_id)
if count:
# update the bot_message_id to be NULL in the table since we're deleting it
query = "UPDATE starboard_entries SET bot_message_id=NULL WHERE id=$1;"
await connection.execute(query, entry_id)
await bot_message.delete()
else:
msg = await self.get_message(channel, message_id)
if msg is None:
raise StarError('\N{BLACK QUESTION MARK ORNAMENT} This message could not be found.')
content, embed = self.get_emoji_message(msg, count)
await bot_message.edit(content=content, embed=embed)
@commands.group(invoke_without_command=True)
@checks.is_mod()
async def starboard(self, ctx, *, name='starboard'):
"""Sets up the starboard for this server.
This creates a new channel with the specified name
and makes it into the server's "starboard". If no
name is passed in then it defaults to "starboard".
You must have Manage Server permission to use this.
"""
# bypass the cache just in case someone used the star
# reaction earlier before having it set up, or they
# decided to use the ?star command
self.get_starboard.invalidate(self, ctx.guild.id)
starboard = await self.get_starboard(ctx.guild.id, connection=ctx.db)
if starboard.channel is not None:
return await ctx.send(f'This server already has a starboard ({starboard.channel.mention}).')
if hasattr(starboard, 'locked'):
try:
confirm = await ctx.prompt('Apparently, a previously configured starboard channel was deleted. Is this true?')
except RuntimeError as e:
await ctx.send(e)
else:
if confirm:
await ctx.db.execute('DELETE FROM starboard WHERE id=$1;', ctx.guild.id)
else:
return await ctx.send('Aborting starboard creation. Join the bot support server for more questions.')
perms = ctx.channel.permissions_for(ctx.me)
if not perms.manage_roles or not perms.manage_channels:
return await ctx.send('\N{NO ENTRY SIGN} I do not have proper permissions (Manage Roles and Manage Channel)')
overwrites = {
ctx.me: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_messages=True,
embed_links=True, read_message_history=True),
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=True, send_messages=False,
read_message_history=True)
}
reason = f'{ctx.author} (ID: {ctx.author.id}) has created the starboard channel.'
try:
channel = await ctx.guild.create_text_channel(name=name, overwrites=overwrites, reason=reason)
except discord.Forbidden:
return await ctx.send('\N{NO ENTRY SIGN} I do not have permissions to create a channel.')
except discord.HTTPException:
return await ctx.send('\N{NO ENTRY SIGN} This channel name is bad or an unknown error happened.')
query = "INSERT INTO starboard (id, channel_id) VALUES ($1, $2);"
try:
await ctx.db.execute(query, ctx.guild.id, channel.id)
        except Exception:
            await channel.delete(reason='Failed to commit the starboard creation to the database.')
await ctx.send('Could not create the channel due to an internal error. Join the bot support server for help.')
else:
self.get_starboard.invalidate(self, ctx.guild.id)
await ctx.send(f'\N{GLOWING STAR} Starboard created at {channel.mention}.')
@starboard.command(name='info')
@requires_starboard()
async def starboard_info(self, ctx):
"""Shows meta information about the starboard."""
starboard = ctx.starboard
channel = starboard.channel
data = []
if channel is None:
data.append('Channel: #deleted-channel')
else:
data.append(f'Channel: {channel.mention}')
data.append(f'NSFW: {channel.is_nsfw()}')
data.append(f'Locked: {starboard.locked}')
data.append(f'Limit: {plural(starboard.threshold):star}')
data.append(f'Max Age: {plural(starboard.max_age.days):day}')
await ctx.send('\n'.join(data))
@commands.group(invoke_without_command=True, ignore_extra=False)
@commands.guild_only()
async def star(self, ctx, message: MessageID):
"""Stars a message via message ID.
        To star a message you should right click on the message and then
click "Copy ID". You must have Developer Mode enabled to get that
functionality.
It is recommended that you react to a message with \N{WHITE MEDIUM STAR} instead.
You can only star a message once.
"""
try:
await self.star_message(ctx.channel, message, ctx.author.id)
except StarError as e:
await ctx.send(e)
else:
await ctx.message.delete()
@commands.command()
@commands.guild_only()
async def unstar(self, ctx, message: MessageID):
"""Unstars a message via message ID.
        To unstar a message you should right click on the message and then
click "Copy ID". You must have Developer Mode enabled to get that
functionality.
"""
try:
await self.unstar_message(ctx.channel, message, ctx.author.id, verify=True)
except StarError as e:
return await ctx.send(e)
else:
await ctx.message.delete()
@star.command(name='clean')
@checks.is_mod()
@requires_starboard()
async def star_clean(self, ctx, stars=1):
"""Cleans the starboard
        This removes messages in the starboard that have a number of stars
        less than or equal to the specified amount. This defaults to 1.
Note that this only checks the last 100 messages in the starboard.
This command requires the Manage Server permission.
"""
stars = max(stars, 1)
channel = ctx.starboard.channel
last_messages = await channel.history(limit=100).map(lambda m: m.id).flatten()
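        # The CTE below collects starboard entries whose bot messages are
        # among those last 100 messages and have at most `stars` starrers;
        # they are deleted and their bot_message_id is returned so the
        # matching starboard posts can be bulk deleted afterwards.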
query = """WITH bad_entries AS (
SELECT entry_id
FROM starrers
INNER JOIN starboard_entries
ON starboard_entries.id = starrers.entry_id
WHERE starboard_entries.guild_id=$1
AND starboard_entries.bot_message_id = ANY($2::bigint[])
GROUP BY entry_id
HAVING COUNT(*) <= $3
)
DELETE FROM starboard_entries USING bad_entries
WHERE starboard_entries.id = bad_entries.entry_id
RETURNING starboard_entries.bot_message_id
"""
to_delete = await ctx.db.fetch(query, ctx.guild.id, last_messages, stars)
# we cannot bulk delete entries over 14 days old
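        # Discord snowflakes store a millisecond timestamp relative to the
        # Discord epoch (2015-01-01 UTC, 1420070400000 ms) in the bits above
        # bit 22, so this computes the smallest possible ID of a message
        # created exactly 14 days ago; anything smaller is too old to bulk delete.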
min_snowflake = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
to_delete = [discord.Object(id=r[0]) for r in to_delete if r[0] > min_snowflake]
try:
self._about_to_be_deleted.update(o.id for o in to_delete)
await channel.delete_messages(to_delete)
except discord.HTTPException:
await ctx.send('Could not delete messages.')
else:
await ctx.send(f'\N{PUT LITTER IN ITS PLACE SYMBOL} Deleted {plural(len(to_delete)):message}.')
@star.command(name='show')
@requires_starboard()
async def star_show(self, ctx, message: MessageID):
"""Shows a starred message via its ID.
To get the ID of a message you should right click on the
message and then click "Copy ID". You must have
Developer Mode enabled to get that functionality.
You can only use this command once per 10 seconds.
"""
query = """SELECT entry.channel_id,
entry.message_id,
entry.bot_message_id,
COUNT(*) OVER(PARTITION BY entry_id) AS "Stars"
FROM starrers
INNER JOIN starboard_entries entry
ON entry.id = starrers.entry_id
WHERE entry.guild_id=$1
AND (entry.message_id=$2 OR entry.bot_message_id=$2)
LIMIT 1
"""
record = await ctx.db.fetchrow(query, ctx.guild.id, message)
if record is None:
return await ctx.send('This message has not been starred.')
bot_message_id = record['bot_message_id']
if bot_message_id is not None:
# "fast" path, just redirect the message
msg = await self.get_message(ctx.starboard.channel, bot_message_id)
if msg is not None:
embed = msg.embeds[0] if msg.embeds else None
return await ctx.send(msg.content, embed=embed)
else:
# somehow it got deleted, so just delete the entry
query = "DELETE FROM starboard_entries WHERE message_id=$1;"
await ctx.db.execute(query, record['message_id'])
return
# slow path, try to fetch the content
channel = ctx.guild.get_channel_or_thread(record['channel_id'])
if channel is None:
return await ctx.send("The message's channel has been deleted.")
msg = await self.get_message(channel, record['message_id'])
if msg is None:
return await ctx.send('The message has been deleted.')
content, embed = self.get_emoji_message(msg, record['Stars'])
await ctx.send(content, embed=embed)
@star.command(name='who')
@requires_starboard()
async def star_who(self, ctx, message: MessageID):
"""Show who starred a message.
The ID can either be the starred message ID
or the message ID in the starboard channel.
"""
query = """SELECT starrers.author_id
FROM starrers
INNER JOIN starboard_entries entry
ON entry.id = starrers.entry_id
WHERE entry.message_id = $1 OR entry.bot_message_id = $1
"""
records = await ctx.db.fetch(query, message)
if records is None or len(records) == 0:
return await ctx.send('No one starred this message or this is an invalid message ID.')
records = [r[0] for r in records]
members = [str(member) async for member in self.bot.resolve_member_ids(ctx.guild, records)]
p = SimplePages(entries=members, per_page=20, ctx=ctx)
base = format(plural(len(records)), 'star')
if len(records) > len(members):
p.embed.title = f'{base} ({len(records) - len(members)} left server)'
else:
p.embed.title = base
await p.start()
@star.command(name='migrate')
@requires_starboard()
@checks.is_mod()
async def star_migrate(self, ctx):
"""Migrates the starboard to the newest version.
While doing this, the starboard is locked.
Note: This is an **incredibly expensive operation**.
It will take a very long time.
You must have Manage Server permissions to use this.
"""
perms = ctx.starboard.channel.permissions_for(ctx.me)
if not perms.read_message_history:
return await ctx.send(f'Bot does not have Read Message History in {ctx.starboard.channel.mention}.')
if ctx.starboard.locked:
return await ctx.send('Starboard must be unlocked to migrate. It will be locked during the migration.')
stats = self.bot.get_cog('Stats')
if stats is None:
return await ctx.send('Internal error occurred: Stats cog not loaded')
webhook = stats.webhook
start = time.time()
guild_id = ctx.guild.id
query = "UPDATE starboard SET locked=TRUE WHERE id=$1;"
await ctx.db.execute(query, guild_id)
self.get_starboard.invalidate(self, guild_id)
await ctx.send('Starboard is now locked and migration will now begin.')
valid_msg = re.compile(r'.+?<#(?P<channel_id>[0-9]{17,21})>\s*ID\:\s*(?P<message_id>[0-9]{17,21})')
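        # Old-format starboard posts embedded the source channel mention and
        # "ID: <message id>" in the message content; this pattern recovers
        # both IDs so a jump URL can be built for the new embed format.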
async with ctx.typing():
fetched = 0
updated = 0
failed = 0
# At the time of writing, the average server only had ~256 entries.
async for message in ctx.starboard.channel.history(limit=1000):
fetched += 1
match = valid_msg.match(message.content)
if match is None:
continue
groups = match.groupdict()
groups['guild_id'] = guild_id
fmt = 'https://discord.com/channels/{guild_id}/{channel_id}/{message_id}'.format(**groups)
if len(message.embeds) == 0:
continue
embed = message.embeds[0]
if len(embed.fields) == 0 or embed.fields[0].name == 'Attachments':
embed.add_field(name='Original', value=f'[Jump!]({fmt})', inline=False)
try:
await message.edit(embed=embed)
except discord.HTTPException:
failed += 1
else:
updated += 1
delta = time.time() - start
query = "UPDATE starboard SET locked = FALSE WHERE id=$1;"
await ctx.db.execute(query, guild_id)
self.get_starboard.invalidate(self, guild_id)
m = await ctx.send(f'{ctx.author.mention}, we are done migrating!\n' \
'The starboard has been unlocked.\n' \
f'Updated {updated}/{fetched} entries to the new format.\n' \
f'Took {delta:.2f}s.')
e = discord.Embed(title='Starboard Migration', colour=discord.Colour.gold())
e.add_field(name='Updated', value=updated)
e.add_field(name='Fetched', value=fetched)
e.add_field(name='Failed', value=failed)
e.add_field(name='Name', value=ctx.guild.name)
e.add_field(name='ID', value=guild_id)
e.set_footer(text=f'Took {delta:.2f}s to migrate')
e.timestamp = m.created_at
await webhook.send(embed=e)
def records_to_value(self, records, fmt=None, default='None!'):
if not records:
return default
emoji = 0x1f947 # :first_place:
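        # U+1F947..U+1F949 are the first/second/third place medal emoji, so
        # chr(emoji + i) labels entry i with the matching medal.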
fmt = fmt or (lambda o: o)
return '\n'.join(f'{chr(emoji + i)}: {fmt(r["ID"])} ({plural(r["Stars"]):star})'
for i, r in enumerate(records))
async def star_guild_stats(self, ctx):
e = discord.Embed(title='Server Starboard Stats')
e.timestamp = ctx.starboard.channel.created_at
e.set_footer(text='Adding stars since')
# messages starred
query = "SELECT COUNT(*) FROM starboard_entries WHERE guild_id=$1;"
record = await ctx.db.fetchrow(query, ctx.guild.id)
total_messages = record[0]
# total stars given
query = """SELECT COUNT(*)
FROM starrers
INNER JOIN starboard_entries entry
ON entry.id = starrers.entry_id
WHERE entry.guild_id=$1;
"""
record = await ctx.db.fetchrow(query, ctx.guild.id)
total_stars = record[0]
e.description = f'{plural(total_messages):message} starred with a total of {total_stars} stars.'
e.colour = discord.Colour.gold()
# this big query fetches 3 things:
# top 3 starred posts (Type 3)
# top 3 most starred authors (Type 1)
# top 3 star givers (Type 2)
query = """WITH t AS (
SELECT
entry.author_id AS entry_author_id,
starrers.author_id,
entry.bot_message_id
FROM starrers
INNER JOIN starboard_entries entry
ON entry.id = starrers.entry_id
WHERE entry.guild_id=$1
)
(
SELECT t.entry_author_id AS "ID", 1 AS "Type", COUNT(*) AS "Stars"
FROM t
WHERE t.entry_author_id IS NOT NULL
GROUP BY t.entry_author_id
ORDER BY "Stars" DESC
LIMIT 3
)
UNION ALL
(
SELECT t.author_id AS "ID", 2 AS "Type", COUNT(*) AS "Stars"
FROM t
GROUP BY t.author_id
ORDER BY "Stars" DESC
LIMIT 3
)
UNION ALL
(
SELECT t.bot_message_id AS "ID", 3 AS "Type", COUNT(*) AS "Stars"
FROM t
WHERE t.bot_message_id IS NOT NULL
GROUP BY t.bot_message_id
ORDER BY "Stars" DESC
LIMIT 3
);
"""
records = await ctx.db.fetch(query, ctx.guild.id)
starred_posts = [r for r in records if r['Type'] == 3]
e.add_field(name='Top Starred Posts', value=self.records_to_value(starred_posts), inline=False)
to_mention = lambda o: f'<@{o}>'
star_receivers = [r for r in records if r['Type'] == 1]
value = self.records_to_value(star_receivers, to_mention, default='No one!')
e.add_field(name='Top Star Receivers', value=value, inline=False)
star_givers = [r for r in records if r['Type'] == 2]
value = self.records_to_value(star_givers, to_mention, default='No one!')
e.add_field(name='Top Star Givers', value=value, inline=False)
await ctx.send(embed=e)
async def star_member_stats(self, ctx, member):
e = discord.Embed(colour=discord.Colour.gold())
e.set_author(name=member.display_name, icon_url=member.display_avatar.url)
# this query calculates
# 1 - stars received,
# 2 - stars given
# The rest are the top 3 starred posts
query = """WITH t AS (
SELECT entry.author_id AS entry_author_id,
starrers.author_id,
entry.message_id
FROM starrers
INNER JOIN starboard_entries entry
ON entry.id=starrers.entry_id
WHERE entry.guild_id=$1
)
(
SELECT '0'::bigint AS "ID", COUNT(*) AS "Stars"
FROM t
WHERE t.entry_author_id=$2
)
UNION ALL
(
SELECT '0'::bigint AS "ID", COUNT(*) AS "Stars"
FROM t
WHERE t.author_id=$2
)
UNION ALL
(
SELECT t.message_id AS "ID", COUNT(*) AS "Stars"
FROM t
WHERE t.entry_author_id=$2
GROUP BY t.message_id
ORDER BY "Stars" DESC
LIMIT 3
)
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
received = records[0]['Stars']
given = records[1]['Stars']
top_three = records[2:]
# this query calculates how many of our messages were starred
query = """SELECT COUNT(*) FROM starboard_entries WHERE guild_id=$1 AND author_id=$2;"""
record = await ctx.db.fetchrow(query, ctx.guild.id, member.id)
messages_starred = record[0]
e.add_field(name='Messages Starred', value=messages_starred)
e.add_field(name='Stars Received', value=received)
e.add_field(name='Stars Given', value=given)
e.add_field(name='Top Starred Posts', value=self.records_to_value(top_three), inline=False)
await ctx.send(embed=e)
@star.command(name='stats')
@requires_starboard()
async def star_stats(self, ctx, *, member: discord.Member = None):
"""Shows statistics on the starboard usage of the server or a member."""
if member is None:
await self.star_guild_stats(ctx)
else:
await self.star_member_stats(ctx, member)
@star.command(name='random')
@requires_starboard()
async def star_random(self, ctx):
"""Shows a random starred message."""
query = """SELECT bot_message_id
FROM starboard_entries
WHERE guild_id=$1
AND bot_message_id IS NOT NULL
OFFSET FLOOR(RANDOM() * (
SELECT COUNT(*)
FROM starboard_entries
WHERE guild_id=$1
AND bot_message_id IS NOT NULL
))
LIMIT 1
"""
record = await ctx.db.fetchrow(query, ctx.guild.id)
if record is None:
return await ctx.send('Could not find anything.')
message_id = record[0]
message = await self.get_message(ctx.starboard.channel, message_id)
if message is None:
return await ctx.send(f'Message {message_id} has been deleted somehow.')
if message.embeds:
await ctx.send(message.content, embed=message.embeds[0])
else:
await ctx.send(message.content)
@star.command(name='lock')
@checks.is_mod()
@requires_starboard()
async def star_lock(self, ctx):
"""Locks the starboard from being processed.
This is a moderation tool that allows you to temporarily
disable the starboard to aid in dealing with star spam.
When the starboard is locked, no new entries are added to
the starboard as the bot will no longer listen to reactions or
star/unstar commands.
To unlock the starboard, use the unlock subcommand.
To use this command you need Manage Server permission.
"""
if ctx.starboard.needs_migration:
return await ctx.send('Your starboard requires migration!')
query = "UPDATE starboard SET locked=TRUE WHERE id=$1;"
await ctx.db.execute(query, ctx.guild.id)
self.get_starboard.invalidate(self, ctx.guild.id)
await ctx.send('Starboard is now locked.')
@star.command(name='unlock')
@checks.is_mod()
@requires_starboard()
async def star_unlock(self, ctx):
"""Unlocks the starboard for re-processing.
To use this command you need Manage Server permission.
"""
if ctx.starboard.needs_migration:
return await ctx.send('Your starboard requires migration!')
query = "UPDATE starboard SET locked=FALSE WHERE id=$1;"
await ctx.db.execute(query, ctx.guild.id)
self.get_starboard.invalidate(self, ctx.guild.id)
await ctx.send('Starboard is now unlocked.')
@star.command(name='limit', aliases=['threshold'])
@checks.is_mod()
@requires_starboard()
async def star_limit(self, ctx, stars: int):
"""Sets the minimum number of stars required to show up.
When this limit is set, messages must have this number
or more to show up in the starboard channel.
You cannot have a negative number and the maximum
star limit you can set is 100.
Note that messages that previously did not meet the
limit but now do will still not show up in the starboard
until starred again.
You must have Manage Server permissions to use this.
"""
if ctx.starboard.needs_migration:
return await ctx.send('Your starboard requires migration!')
stars = min(max(stars, 1), 100)
query = "UPDATE starboard SET threshold=$2 WHERE id=$1;"
await ctx.db.execute(query, ctx.guild.id, stars)
self.get_starboard.invalidate(self, ctx.guild.id)
await ctx.send(f'Messages now require {plural(stars):star} to show up in the starboard.')
@star.command(name='age')
@checks.is_mod()
@requires_starboard()
async def star_age(self, ctx, number: int, units='days'):
"""Sets the maximum age of a message valid for starring.
By default, the maximum age is 7 days. Any message older
        than this specified age can no longer be starred.
To set the limit you must specify a number followed by
a unit. The valid units are "days", "weeks", "months",
or "years". They do not have to be pluralized. The
default unit is "days".
The number cannot be negative, and it must be a maximum
of 35. If the unit is years then the cap is 10 years.
You cannot mix and match units.
You must have Manage Server permissions to use this.
"""
valid_units = ('days', 'weeks', 'months', 'years')
if units[-1] != 's':
units = units + 's'
if units not in valid_units:
return await ctx.send(f'Not a valid unit! I expect only {human_join(valid_units)}.')
number = min(max(number, 1), 35)
if units == 'years' and number > 10:
return await ctx.send('The maximum is 10 years!')
# the input is sanitised so this should be ok
# only doing this because asyncpg requires a timedelta object but
# generating that with these clamp units is overkill
query = f"UPDATE starboard SET max_age='{number} {units}'::interval WHERE id=$1;"
await ctx.db.execute(query, ctx.guild.id)
self.get_starboard.invalidate(self, ctx.guild.id)
if number == 1:
age = f'1 {units[:-1]}'
else:
age = f'{number} {units}'
await ctx.send(f'Messages must now be less than {age} old to be starred.')
@commands.command(hidden=True)
@commands.is_owner()
async def star_announce(self, ctx, *, message):
"""Announce stuff to every starboard."""
query = "SELECT id, channel_id FROM starboard;"
records = await ctx.db.fetch(query)
await ctx.release()
to_send = []
for guild_id, channel_id in records:
guild = self.bot.get_guild(guild_id)
if guild:
channel = guild.get_channel(channel_id)
if channel and channel.permissions_for(guild.me).send_messages:
to_send.append(channel)
await ctx.send(f'Preparing to send to {len(to_send)} channels (out of {len(records)}).')
success = 0
start = time.time()
for index, channel in enumerate(to_send):
if index % 5 == 0:
await asyncio.sleep(1)
try:
await channel.send(message)
except:
pass
else:
success += 1
delta = time.time() - start
await ctx.send(f'Successfully sent to {success} channels (out of {len(to_send)}) in {delta:.2f}s.')
def setup(bot):
bot.add_cog(Stars(bot))
|
|
from __future__ import unicode_literals
import transaction
from pyramid.view import view_config
from pyramid.security import Allow
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPNotFound
from ez2pay.i18n import LocalizerFactory
from ez2pay.models.user import UserModel
from ez2pay.models.group import GroupModel
from ez2pay.models.permission import PermissionModel
from ez2pay.utils import check_csrf_token
from .forms import FormFactory
get_localizer = LocalizerFactory()
class AdminContext(object):
"""Context for providing ACL attribute of administrator
"""
# only users who have manage permission can access
__acl__ = [
(Allow, 'permission:admin', 'admin'),
]
def __init__(self, request):
pass
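# Illustrative sketch (not part of this module): a context factory such as
# AdminContext is typically attached to routes at configuration time so the
# 'admin' permission declared in __acl__ guards the views below, e.g.
#
#   config.add_route('admin.home', '/admin', factory=AdminContext)
#
# The route name and pattern shown here are assumptions for illustration only.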
@view_config(route_name='admin.home',
renderer='templates/home.genshi',
permission='admin')
def home(request):
return dict()
@view_config(route_name='admin.user_list',
renderer='templates/user_list.genshi',
permission='admin')
def user_list(request):
user_model = UserModel(request.db_session)
users = user_model.get_list()
return dict(users=users)
@view_config(route_name='admin.user_create',
renderer='templates/user_create.genshi',
permission='admin')
def user_create(request):
_ = get_localizer(request)
user_model = UserModel(request.db_session)
group_model = GroupModel(request.db_session)
factory = FormFactory(_)
UserCreateForm = factory.make_user_create_form()
form = UserCreateForm(request.params)
groups = group_model.get_list()
form.groups.choices = [
(str(g.group_id), '%s - %s' % (g.group_name, g.display_name))
for g in groups
]
if request.method == 'POST':
check_csrf_token(request)
validate_result = form.validate()
user_name = request.params['user_name']
display_name = request.params['display_name']
password = request.params['password']
email = request.params['email']
groups = request.params.getall('groups')
by_name = user_model.get_by_name(user_name)
if by_name is not None:
msg = _(u'Username %s already exists') % user_name
form.user_name.errors.append(msg)
validate_result = False
by_email = user_model.get_by_email(email)
if by_email is not None:
msg = _(u'Email %s already exists') % email
form.email.errors.append(msg)
validate_result = False
if validate_result:
with transaction.manager:
user_id = user_model.create(
user_name=user_name,
display_name=display_name,
password=password,
email=email,
)
user_model.update_groups(user_id, map(int, groups))
msg = _(u"User ${user_name} has been created",
mapping=dict(user_name=user_name))
request.add_flash(msg, 'success')
return HTTPFound(location=request.route_url('admin.user_list'))
return dict(form=form)
@view_config(route_name='admin.user_edit',
renderer='templates/user_edit.genshi',
permission='admin')
def user_edit(request):
_ = get_localizer(request)
user_model = UserModel(request.db_session)
group_model = GroupModel(request.db_session)
user_name = request.matchdict['user_name']
user = user_model.get_by_name(user_name)
if user is None:
        msg = _(u'User %s does not exist') % user_name
return HTTPNotFound(msg)
user_groups = [str(g.group_id) for g in user.groups]
factory = FormFactory(_)
UserEditForm = factory.make_user_edit_form()
form = UserEditForm(
request.params,
display_name=user.display_name,
email=user.email,
groups=user_groups
)
groups = group_model.get_list()
form.groups.choices = [
(str(g.group_id), '%s - %s' % (g.group_name, g.display_name), )
for g in groups
]
if request.method == 'POST':
check_csrf_token(request)
validate_result = form.validate()
display_name = request.params['display_name']
password = request.params['password']
email = request.params['email']
groups = request.params.getall('groups')
by_email = user_model.get_by_email(email)
if by_email is not None and email != user.email:
msg = _(u'Email %s already exists') % email
form.email.errors.append(msg)
validate_result = False
if validate_result:
with transaction.manager:
user_model.update_user(
user_id=user.user_id,
display_name=display_name,
email=email,
)
if password:
user_model.update_password(user.user_id, password)
user_model.update_groups(user.user_id, map(int, groups))
msg = _(u"User ${user_name} has been updated",
mapping=dict(user_name=user_name))
request.add_flash(msg, 'success')
url = request.route_url('admin.user_edit',
user_name=user.user_name)
return HTTPFound(location=url)
return dict(form=form, user=user)
@view_config(route_name='admin.group_list',
renderer='templates/group_list.genshi',
permission='admin')
def group_list(request):
group_model = GroupModel(request.db_session)
groups = group_model.get_list()
return dict(groups=groups)
@view_config(route_name='admin.group_create',
renderer='templates/group_create.genshi',
permission='admin')
def group_create(request):
_ = get_localizer(request)
group_model = GroupModel(request.db_session)
permission_model = PermissionModel(request.db_session)
factory = FormFactory(_)
GroupCreateForm = factory.make_group_create_form()
form = GroupCreateForm(request.params)
permissions = permission_model.get_list()
form.permissions.choices = [
(str(p.permission_id), p.permission_name)
for p in permissions
]
if request.method == 'POST':
check_csrf_token(request)
validate_result = form.validate()
group_name = request.params['group_name']
display_name = request.params['display_name']
permissions = request.params.getall('permissions')
by_name = group_model.get_by_name(group_name)
if by_name is not None:
msg = _(u'Group name %s already exists') % group_name
form.group_name.errors.append(msg)
validate_result = False
if validate_result:
with transaction.manager:
group_id = group_model.create(
group_name=group_name,
display_name=display_name,
)
group_model.update_permissions(
group_id=group_id,
permission_ids=permissions,
)
msg = _(u"Group ${group_name} has been created",
mapping=dict(group_name=group_name))
request.add_flash(msg, 'success')
return HTTPFound(location=request.route_url('admin.group_list'))
return dict(form=form)
@view_config(route_name='admin.group_edit',
renderer='templates/group_edit.genshi',
permission='admin')
def group_edit(request):
_ = get_localizer(request)
group_model = GroupModel(request.db_session)
permission_model = PermissionModel(request.db_session)
group_name = request.matchdict['group_name']
group = group_model.get_by_name(group_name)
if group is None:
msg = _(u'Group %s does not exist') % group_name
return HTTPNotFound(msg)
group_permissions = [str(p.permission_id) for p in group.permissions]
factory = FormFactory(_)
GroupEditForm = factory.make_group_edit_form()
form = GroupEditForm(
request.params,
permissions=group_permissions,
group_name=group.group_name,
display_name=group.display_name
)
permissions = permission_model.get_list()
form.permissions.choices = [
(str(p.permission_id), p.permission_name)
for p in permissions
]
if request.method == 'POST':
check_csrf_token(request)
validate_result = form.validate()
group_name = request.params['group_name']
display_name = request.params['display_name']
permissions = request.params.getall('permissions')
by_name = group_model.get_by_name(group_name)
if by_name is not None and group_name != group.group_name:
msg = _(u'Group name %s already exists') % group_name
form.group_name.errors.append(msg)
validate_result = False
if validate_result:
with transaction.manager:
group_model.update_group(
group_id=group.group_id,
group_name=group_name,
display_name=display_name,
)
group_model.update_permissions(
group_id=group.group_id,
permission_ids=permissions,
)
group = group_model.get(group.group_id)
msg = _(u"Group ${group_name} has been updated",
mapping=dict(group_name=group.group_name))
request.add_flash(msg, 'success')
url = request.route_url('admin.group_edit',
group_name=group.group_name)
return HTTPFound(location=url)
return dict(form=form, group=group)
@view_config(route_name='admin.permission_list',
renderer='templates/permission_list.genshi',
permission='admin')
def permission_list(request):
permission_model = PermissionModel(request.db_session)
permissions = permission_model.get_list()
return dict(permissions=permissions)
@view_config(route_name='admin.permission_create',
renderer='templates/permission_create.genshi',
permission='admin')
def permission_create(request):
_ = get_localizer(request)
permission_model = PermissionModel(request.db_session)
factory = FormFactory(_)
PermissionCreateForm = factory.make_permission_create_form()
form = PermissionCreateForm(request.params)
if request.method == 'POST':
check_csrf_token(request)
validate_result = form.validate()
permission_name = request.params['permission_name']
description = request.params['description']
by_name = permission_model.get_by_name(permission_name)
if by_name is not None:
msg = _(u'Permission name %s already exists') % permission_name
form.permission_name.errors.append(msg)
validate_result = False
if validate_result:
with transaction.manager:
permission_model.create(
permission_name=permission_name,
description=description,
)
msg = _(u"Permission ${permission_name} has been created",
mapping=dict(permission_name=permission_name))
request.add_flash(msg, 'success')
return HTTPFound(location=request.route_url('admin.permission_list'))
return dict(form=form)
@view_config(route_name='admin.permission_edit',
renderer='templates/permission_edit.genshi',
permission='admin')
def permission_edit(request):
_ = get_localizer(request)
permission_model = PermissionModel(request.db_session)
permission_name = request.matchdict['permission_name']
permission = permission_model.get_by_name(permission_name)
if permission is None:
msg = _(u'Permission %s does not exist') % permission_name
return HTTPNotFound(msg)
factory = FormFactory(_)
PermissionEditForm = factory.make_permission_edit_form()
form = PermissionEditForm(request.params, permission)
if request.method == 'POST':
check_csrf_token(request)
validate_result = form.validate()
permission_name = request.params['permission_name']
description = request.params['description']
by_name = permission_model.get_by_name(permission_name)
if (
by_name is not None and
permission_name != permission.permission_name
):
msg = _(u'Permission name %s already exists') % permission_name
form.permission_name.errors.append(msg)
validate_result = False
if validate_result:
with transaction.manager:
permission_model.update_permission(
permission_id=permission.permission_id,
permission_name=permission_name,
description=description,
)
msg = _(u"Permission ${permission_name} has been updated",
mapping=dict(permission_name=permission_name))
request.add_flash(msg, 'success')
url = request.route_url('admin.permission_edit',
permission_name=permission_name)
return HTTPFound(location=url)
return dict(form=form, permission=permission)
|
|
'''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
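# CERT_REQUIRED maps to VERIFY_PEER combined with VERIFY_FAIL_IF_NO_PEER_CERT;
# the two OpenSSL flags are distinct bits, so adding them here is equivalent
# to a bitwise OR.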
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN certificates a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
_, wlist, _ = select.select([], [self.socket], [],
self.socket.gettimeout())
if not wlist:
raise timeout()
continue
def sendall(self, data):
while len(data):
sent = self._send_until_done(data)
data = data[sent:]
def close(self):
if self._makefile_refs < 1:
return self.connection.shutdown()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
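    # PyOpenSSL invokes this callback once per certificate in the chain; a
    # truthy return value accepts that certificate, so verification succeeds
    # only when OpenSSL reports no error (err_no == 0) at every depth.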
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
    # Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
rd, _, _ = select.select([sock], [], [], sock.gettimeout())
if not rd:
raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
|
|
import datetime
import json
import furl
import responses
from django.utils import timezone
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from addons.github.models import GithubFolder
from addons.github.tests.factories import GitHubAccountFactory
from api.base.settings.defaults import API_BASE
from api.base.utils import waterbutler_api_url_for
from api_tests import utils as api_utils
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
PrivateLinkFactory
)
def prepare_mock_wb_response(
node=None,
provider='github',
files=None,
folder=True,
path='/',
method=responses.GET,
status_code=200
):
"""Prepare a mock Waterbutler response with responses library.
:param Node node: Target node.
:param str provider: Addon provider
:param list files: Optional list of files. You can specify partial data; missing values
will have defaults.
:param folder: True if mocking out a folder response, False if a file response.
:param path: Waterbutler path, passed to waterbutler_api_url_for.
:param str method: HTTP method.
:param int status_code: HTTP status.
"""
node = node
files = files or []
wb_url = waterbutler_api_url_for(node._id, provider=provider, _internal=True, path=path, meta=True, view_only=None, base_url=node.osfstorage_region.waterbutler_url)
default_file = {
u'contentType': None,
u'extra': {u'downloads': 0, u'version': 1},
u'kind': u'file',
u'modified': None,
u'name': u'NewFile',
u'path': u'/NewFile',
u'provider': provider,
u'size': None,
u'materialized': '/',
}
if len(files):
data = [dict(default_file, **each) for each in files]
else:
data = [default_file]
jsonapi_data = []
for datum in data:
jsonapi_data.append({'attributes': datum})
if not folder:
jsonapi_data = jsonapi_data[0]
responses.add(
responses.Response(
method,
wb_url,
json={u'data': jsonapi_data},
status=status_code,
content_type='application/json'
)
)
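# Usage sketch (illustrative only): inside a test decorated with
# @responses.activate, register the Waterbutler mock before issuing the API
# request, e.g.
#
#   @responses.activate
#   def test_example(self):
#       prepare_mock_wb_response(
#           node=self.project, provider='github',
#           files=[{'name': 'NewFile'}])
#       res = self.app.get(url, auth=self.user.auth)
#
# The node, url and auth names above are placeholders borrowed from the
# surrounding tests, not part of this helper.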
class TestNodeFilesList(ApiTestCase):
def setUp(self):
super(TestNodeFilesList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.private_url = '/{}nodes/{}/files/'.format(
API_BASE, self.project._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.user, is_public=True)
self.public_url = '/{}nodes/{}/files/'.format(API_BASE, self.public_project._id)
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
def view_only_link(self):
private_link = PrivateLinkFactory(creator=self.user)
private_link.nodes.add(self.project)
private_link.save()
return private_link
def _prepare_mock_wb_response(self, node=None, **kwargs):
prepare_mock_wb_response(node=node or self.project, **kwargs)
def test_returns_public_files_logged_out(self):
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_returns_public_files_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
def test_returns_storage_addons_link(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_in('storage_addons', res.json['data'][0]['links'])
def test_returns_file_data(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_file('NewFile')
fobj.save()
res = self.app.get(
'{}osfstorage/{}'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(isinstance(res.json['data'], dict))
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['kind'], 'file')
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
def test_returns_osfstorage_folder_version_two(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/'.format(self.private_url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_returns_osf_storage_folder_version_two_point_two(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/?version=2.2'.format(self.private_url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_list_returns_folder_data(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFolder')
def test_returns_folder_data(self):
fobj = self.project.get_addon(
'osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get(
'{}osfstorage/{}/'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 0)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_returns_private_files_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_returns_private_files_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
def test_returns_private_files_logged_in_non_contributor(self):
res = self.app.get(
self.private_url,
auth=self.user_two.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_returns_addon_folders(self):
user_auth = Auth(self.user)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
assert_equal(
res.json['data'][0]['attributes']['provider'],
'osfstorage'
)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
providers = [item['attributes']['provider'] for item in data]
assert_equal(len(data), 2)
assert_in('github', providers)
assert_in('osfstorage', providers)
@responses.activate
def test_vol_node_files_list(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}])
self.add_github()
vol = self.view_only_link()
url = '/{}nodes/{}/files/github/?view_only={}'.format(
API_BASE, self.project._id, vol.key)
res = self.app.get(url, auth=self.user_two.auth)
wb_request = responses.calls[-1].request
url = furl.furl(wb_request.url)
assert_equal(url.query, 'meta=True&view_only={}'.format(unicode(vol.key, 'utf-8')))
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
assert_in(vol.key, res.json['data'][0]['links']['info'])
assert_in(vol.key, res.json['data'][0]['links']['move'])
assert_in(vol.key, res.json['data'][0]['links']['upload'])
assert_in(vol.key, res.json['data'][0]['links']['download'])
assert_in(vol.key, res.json['data'][0]['links']['delete'])
@responses.activate
def test_returns_node_files_list(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}])
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
@responses.activate
def test_returns_folder_metadata_not_children(self):
folder = GithubFolder(
name='Folder',
target=self.project,
path='/Folder/'
)
folder.save()
self._prepare_mock_wb_response(provider='github', files=[{'name': 'Folder'}], path='/Folder/')
self.add_github()
url = '/{}nodes/{}/files/github/Folder/'.format(API_BASE, self.project._id)
res = self.app.get(url, params={'info': ''}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['attributes']['kind'], 'folder')
assert_equal(res.json['data'][0]['attributes']['name'], 'Folder')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
@responses.activate
def test_returns_node_file(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}],
folder=False, path='/file')
self.add_github()
url = '/{}nodes/{}/files/github/file'.format(
API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, headers={
            'COOKIE': 'foo=bar;'  # WebTest doesn't support cookies?
})
# test create
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
assert_equal(res.json['data']['attributes']['provider'], 'github')
# test get
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
assert_equal(res.json['data']['attributes']['provider'], 'github')
@responses.activate
def test_notfound_node_file_returns_folder(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}],
path='/file')
url = '/{}nodes/{}/files/github/file'.format(
API_BASE, self.project._id)
res = self.app.get(
url, auth=self.user.auth,
expect_errors=True,
            headers={'COOKIE': 'foo=bar;'}  # WebTest doesn't support cookies?
)
assert_equal(res.status_code, 404)
@responses.activate
def test_notfound_node_folder_returns_file(self):
self._prepare_mock_wb_response(
provider='github', files=[{'name': 'NewFile'}],
folder=False, path='/')
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(
url, auth=self.user.auth,
expect_errors=True,
            headers={'COOKIE': 'foo=bar;'}  # WebTest doesn't support cookies?
)
assert_equal(res.status_code, 404)
@responses.activate
def test_waterbutler_server_error_returns_503(self):
self._prepare_mock_wb_response(status_code=500)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(
url, auth=self.user.auth,
expect_errors=True,
            headers={'COOKIE': 'foo=bar;'}  # WebTest doesn't support cookies?
)
assert_equal(res.status_code, 503)
@responses.activate
def test_waterbutler_invalid_data_returns_503(self):
wb_url = waterbutler_api_url_for(self.project._id, _internal=True, provider='github', path='/', meta=True, base_url=self.project.osfstorage_region.waterbutler_url)
self.add_github()
responses.add(
responses.Response(
responses.GET,
wb_url,
body=json.dumps({}),
status=400
)
)
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 503)
@responses.activate
def test_handles_unauthenticated_waterbutler_request(self):
self._prepare_mock_wb_response(status_code=401)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@responses.activate
def test_handles_notfound_waterbutler_request(self):
invalid_provider = 'gilkjadsflhub'
self._prepare_mock_wb_response(
status_code=404, provider=invalid_provider)
url = '/{}nodes/{}/files/{}/'.format(API_BASE,
self.project._id, invalid_provider)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
def test_handles_request_to_provider_not_configured_on_project(self):
provider = 'box'
url = '/{}nodes/{}/files/{}/'.format(
API_BASE, self.project._id, provider)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_false(self.project.get_addon(provider))
assert_equal(res.status_code, 404)
assert_equal(
res.json['errors'][0]['detail'],
'The {} provider is not configured for this project.'.format(provider))
@responses.activate
def test_handles_bad_waterbutler_request(self):
wb_url = waterbutler_api_url_for(self.project._id, _internal=True, provider='github', path='/', meta=True, base_url=self.project.osfstorage_region.waterbutler_url)
responses.add(
responses.Response(
responses.GET,
wb_url,
json={'bad' : 'json'},
status=418
)
)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 503)
assert_in('detail', res.json['errors'][0])
def test_files_list_contains_relationships_object(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert 'relationships' in res.json['data'][0]
class TestNodeFilesListFiltering(ApiTestCase):
def setUp(self):
super(TestNodeFilesListFiltering, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
# Prep HTTP mocks
prepare_mock_wb_response(
node=self.project, provider='github',
files=[
{'name': 'abc', 'path': '/abc/', 'materialized': '/abc/', 'kind': 'folder'},
{'name': 'xyz', 'path': '/xyz', 'materialized': '/xyz', 'kind': 'file'},
]
)
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
@responses.activate
def test_node_files_are_filterable_by_name(self):
url = '/{}nodes/{}/files/github/?filter[name]=xyz'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'abc'
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'abc'
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
@responses.activate
def test_node_files_filter_by_name_case_insensitive(self):
url = '/{}nodes/{}/files/github/?filter[name]=XYZ'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# filters out 'abc', but finds 'xyz'
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# filters out 'abc', but finds 'xyz'
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
@responses.activate
def test_node_files_are_filterable_by_path(self):
url = '/{}nodes/{}/files/github/?filter[path]=abc'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
@responses.activate
def test_node_files_are_filterable_by_kind(self):
url = '/{}nodes/{}/files/github/?filter[kind]=folder'.format(
API_BASE, self.project._id)
self.add_github()
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
@responses.activate
def test_node_files_external_provider_can_filter_by_last_touched(self):
yesterday_stamp = timezone.now() - datetime.timedelta(days=1)
self.add_github()
url = '/{}nodes/{}/files/github/?filter[last_touched][gt]={}'.format(
API_BASE, self.project._id, yesterday_stamp.isoformat())
# test create
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
# test get
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
def test_node_files_osfstorage_cannot_filter_by_last_touched(self):
yesterday_stamp = timezone.now() - datetime.timedelta(days=1)
self.file = api_utils.create_test_file(self.project, self.user)
url = '/{}nodes/{}/files/osfstorage/?filter[last_touched][gt]={}'.format(
API_BASE, self.project._id, yesterday_stamp.isoformat())
# test create
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
# test get
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
class TestNodeFilesListPagination(ApiTestCase):
def setUp(self):
super(TestNodeFilesListPagination, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {
oauth_settings._id: []}
addon.user_settings.save()
def check_file_order(self, resp):
previous_file_name = 0
for file in resp.json['data']:
int_file_name = int(file['attributes']['name'])
assert int_file_name > previous_file_name, 'Files were not in order'
previous_file_name = int_file_name
@responses.activate
def test_node_files_are_sorted_correctly(self):
prepare_mock_wb_response(
node=self.project, provider='github',
files=[
{'name': '01', 'path': '/01/', 'materialized': '/01/', 'kind': 'folder'},
{'name': '02', 'path': '/02', 'materialized': '/02', 'kind': 'file'},
{'name': '03', 'path': '/03/', 'materialized': '/03/', 'kind': 'folder'},
{'name': '04', 'path': '/04', 'materialized': '/04', 'kind': 'file'},
{'name': '05', 'path': '/05/', 'materialized': '/05/', 'kind': 'folder'},
{'name': '06', 'path': '/06', 'materialized': '/06', 'kind': 'file'},
{'name': '07', 'path': '/07/', 'materialized': '/07/', 'kind': 'folder'},
{'name': '08', 'path': '/08', 'materialized': '/08', 'kind': 'file'},
{'name': '09', 'path': '/09/', 'materialized': '/09/', 'kind': 'folder'},
{'name': '10', 'path': '/10', 'materialized': '/10', 'kind': 'file'},
{'name': '11', 'path': '/11/', 'materialized': '/11/', 'kind': 'folder'},
{'name': '12', 'path': '/12', 'materialized': '/12', 'kind': 'file'},
{'name': '13', 'path': '/13/', 'materialized': '/13/', 'kind': 'folder'},
{'name': '14', 'path': '/14', 'materialized': '/14', 'kind': 'file'},
{'name': '15', 'path': '/15/', 'materialized': '/15/', 'kind': 'folder'},
{'name': '16', 'path': '/16', 'materialized': '/16', 'kind': 'file'},
{'name': '17', 'path': '/17/', 'materialized': '/17/', 'kind': 'folder'},
{'name': '18', 'path': '/18', 'materialized': '/18', 'kind': 'file'},
{'name': '19', 'path': '/19/', 'materialized': '/19/', 'kind': 'folder'},
{'name': '20', 'path': '/20', 'materialized': '/20', 'kind': 'file'},
{'name': '21', 'path': '/21/', 'materialized': '/21/', 'kind': 'folder'},
{'name': '22', 'path': '/22', 'materialized': '/22', 'kind': 'file'},
{'name': '23', 'path': '/23/', 'materialized': '/23/', 'kind': 'folder'},
{'name': '24', 'path': '/24', 'materialized': '/24', 'kind': 'file'},
]
)
self.add_github()
url = '/{}nodes/{}/files/github/?page[size]=100'.format(
API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth)
self.check_file_order(res)
class TestNodeStorageProviderDetail(ApiTestCase):
def setUp(self):
super(TestNodeStorageProviderDetail, self).setUp()
self.user = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True)
self.private_project = ProjectFactory(creator=self.user)
self.public_url = '/{}nodes/{}/files/providers/osfstorage/'.format(
API_BASE, self.public_project._id)
self.private_url = '/{}nodes/{}/files/providers/osfstorage/'.format(
API_BASE, self.private_project._id)
def test_can_view_if_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data']['id'],
'{}:osfstorage'.format(self.private_project._id)
)
def test_can_view_if_public(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(
res.json['data']['id'],
'{}:osfstorage'.format(self.public_project._id)
)
def test_cannot_view_if_private(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import xmpp
from confs import getConfs, getConfByTarget, getConfByName, getConfsCount
import utils
from utils import sEq, answerPrivate, answerConf, getConferenceJid, normalize, protect
from registry import Registry
from logic import *
import time
import logging
import config
import cooldown
import background
import subscribe
registry = Registry()
subscribe = subscribe.Subscribe()
def onPrivateMessage(conn, msg):
answerPrivate(conn, msg, u"I'm not a bot!")
@protect
def onMessage(conn, msg):
if msg.getFrom().getDomain() != config.conference:
onPrivateMessage(conn, msg)
raise xmpp.NodeProcessed
return
if msg.getFrom().getNode() != config.mainChannel:
onBattleMessage(conn, msg, msg.getFrom().getNode())
raise xmpp.NodeProcessed
return
processCommand(conn, msg, msg.getBody())
raise xmpp.NodeProcessed
@protect
def onPresence(conn, msg):
if msg.getFrom().getDomain() == config.conference:
conf, nickname = msg.getFrom().getNode(), msg.getFrom().getResource()
role = msg.getTag("x", namespace = xmpp.NS_MUC_USER)
if role and role.getTag("item").getAttr("jid"):
person = xmpp.JID(role.getTag("item").getAttr("jid"))
alive = role.getTag("item").getAttr("role") != "none" and msg.getAttr("type") != "unavailable"
#print "%s: %s is %s and is he alive: %s" % (conf, person, nickname, alive)
if conf == config.mainChannel:
registry.onPresence(person, nickname, alive)
else:
confObj = getConfByName(conf)
if confObj:
confObj.onPresence(conn, person, nickname, alive)
raise xmpp.NodeProcessed
def onBattleMessage(conn, msg, confName):
conf = getConfByName(confName)
if conf:
conf.onMessage(conn, msg)
@protect
def sendInvite(conn, to, conf):
logging.info("Invite: %s tg: %s ",to,conf)
invite = xmpp.Message(to = xmpp.JID(node = conf, domain = config.conference))
invite.setTag('x', namespace = xmpp.NS_MUC_USER).setTag('invite', {'to': to})
conn.send(invite)
@protect
def sendSubscription(conn, to_jid, target):
msg = xmpp.Message(to = to_jid, body = u"Subscribed target %s is under attack!" % (target))
conn.send(msg)
def processCommand(conn, msg, msgText):
if not msgText: return
parts = [normalize(x) for x in msgText.strip().split(" ")]
name = msg.getFrom().getResource()
if not name:
return
if name not in registry:
return
logging.info("Command from %s: %s",name,msgText)
person = registry[name]
jid = person.getStripped()
hackername = utils.jidStrip(jid)
if len(parts) == 3:
cmd, target, action = parts
is_defend = sEq(action, u"defend")
is_offend = sEq(action, u"attack")
if sEq(cmd, "connect") and (is_offend or is_defend):
hour = time.localtime().tm_hour
if hour >= config.nightSleepHour and hour < config.morningStartHour:
answerPrivate(conn, msg, u"Cool down, guy! Time to sleep for a while... Back to work at 9 a.m.")
return
if isValidTarget(name, target) and getConfsCount(person) < allowedChannelsCount(jid):
if not cooldown.canEnter(person):
answerPrivate(conn, msg, u"Cool down, guy! Take a beer...")
logging.info( u"Cool down, guy! Take a beer... %s", hackername)
return
if is_offend and (not cooldown.canAttack(target)):
answerPrivate(conn, msg, u"Target was broken recently. It's impossible to hack it for now.")
logging.info(u"Target was broken recently. It's impossible to hack it for now. %s", hackername)
return
conf = getConfByTarget(target)
if not conf.defeated():
joinConference(conn, config.conference, conf.name, config.nickname)
sendInvite(conn, person, conf.name)
if is_offend:
conf.newOffender(person)
# cooldown.startedAttack(person)
subs = subscribe.subscribersFor(target)
for p in subs:
if registry.containsHacker(p) and p != hackername:
# print "SUBS %s"%p
sendSubscription(conn, registry.getHacker(p), target)
else:
conf.newDefender(person)
logging.info(u"CONF STARTED: %s" ,conf)
else:
answerPrivate(conn, msg, u"Access denied.")
elif len(parts) == 2:
cmd, target = parts
if sEq(cmd, "subscribe"):
if subscribe.canSubscribe(hackername, target):
subscribe.add(hackername, target)
answerPrivate(conn, msg, u"Target %s subscribed." % (target))
else:
answerPrivate(conn, msg, u"Too many subscriptions.")
elif sEq(cmd, "unsubscribe"):
if subscribe.remove(hackername, target):
answerPrivate(conn, msg, u"Target %s unsubscribed." % (target))
else:
answerPrivate(conn, msg, u"Target %s isn't subscribed." % (target))
elif len(parts) == 1:
cmd = parts[0]
if sEq(cmd, "subscriptions"):
txt = u"List of subscriptions:\n"
for s in subscribe.subscriptionsOf(hackername):
txt += u"%s\n" % (s)
answerPrivate(conn, msg, txt)
elif sEq(cmd, "status"):
txt = u"Current personal status: %d" % cooldown.getVitality(hackername)
answerPrivate(conn, msg, txt)
def doStep(conn):
try:
return conn.Process(10)
except KeyboardInterrupt:
return 0
def doIdle(conn):
try:
toDelete = []
for target, conf in getConfs().iteritems():
if conf.idle(conn):
toDelete.append(target)
cooldown.updateCooldowns()
for t in toDelete:
del getConfs()[t]
except KeyboardInterrupt:
return 0
return 1
def joinConference(conn, server, room, nickname, password = "", public = False):
p = xmpp.Presence(to = xmpp.JID(node = room, domain = server, resource = nickname))
p.setTag('x',namespace=xmpp.NS_MUC).setTagData('password', password)
p.setTag('x',namespace=xmpp.NS_MUC).setTagData('anonymous', False)
p.setTag('x',namespace=xmpp.NS_MUC).setTagData('public', public)
p.setTag('x',namespace=xmpp.NS_MUC).setTagData('public_list', public)
p.setTag('x',namespace=xmpp.NS_MUC).setTagData('allow_visitor_nickchange', False)
p.getTag('x').addChild('history',{'maxchars':'0','maxstanzas':'0'})
conn.send(p)
def main(name):
if len(sys.argv)<3:
print u"Usage: bot.py username@server.net password [logfile]"
else:
background.start()
jid = xmpp.JID(node = sys.argv[1], domain = config.server, resource = "LC")
user, server, password = jid.getNode(), jid.getDomain(),sys.argv[2]
conn = xmpp.Client(server)
conres = conn.connect()
if not conres:
logging.error(u"Unable to connect to server %s!",server)
return 1
        if conres != 'tls':
            logging.warning(u"Warning: unable to establish secure connection - TLS failed!")
authres = conn.auth(user,password)
if not authres:
logging.error(u"Unable to authorize on %s - check login/password.",server)
return 1
if authres != 'sasl':
logging.error(u"Warning: unable to perform SASL auth os %s. Old authentication method used!",server)
conn.RegisterHandler('message', onMessage)
conn.RegisterHandler('presence', onPresence)
conn.sendInitPresence()
joinConference(conn, config.conference, room = config.mainChannel, nickname = name, password = "", public = True)
logging.info("Bot started.")
counter = 1
while True:
# counter = 20
if not conn.isConnected(): conn.reconnectAndReauth()
result = doStep(conn)
if result == 0:
                break
if result == '0':
if doIdle(conn) == 0:
                    break
# counter = 20
# else:
# counter -= 1
# print "Iteration: %s %s"%(repr(result),time.strftime('%H:%M:%S'))
# print "Idle iter: %s "%(time.strftime('%H:%M:%S'))
if __name__ == "__main__":
if len(sys.argv) > 3:
logging.basicConfig(filename=sys.argv[3],format= "%(asctime)s %(levelname)s:%(message)s", datefmt="%D %H:%M:%S")
print "logging to ",sys.argv[3]
else:
logging.basicConfig(format= "%(asctime)s %(levelname)s:%(message)s", datefmt="%D %H:%M:%S")
logging.getLogger().setLevel(logging.DEBUG)
sys.exit(main(config.nickname))
|
|
from hazelcast.exception import HazelcastSerializationError
from hazelcast.serialization import bits
from hazelcast.serialization.api import PortableReader
from hazelcast.serialization.portable.classdef import FieldType
class DefaultPortableReader(PortableReader):
def __init__(self, portable_serializer, data_input, class_def):
self._portable_serializer = portable_serializer
self._in = data_input
self._class_def = class_def
try:
# final position after portable is read
self._final_pos = data_input.read_int()
# field count
field_count = data_input.read_int()
except Exception:
raise HazelcastSerializationError()
if field_count != class_def.get_field_count():
raise ValueError("Field count({}) in stream does not match! {}".format(field_count, class_def))
self._offset = data_input.position()
self._raw = False
def get_version(self):
return self._class_def.version
def has_field(self, field_name):
return self._class_def.has_field(field_name)
def get_field_names(self):
return self._class_def.get_field_names()
def get_field_type(self, field_name):
return self._class_def.get_field_type(field_name)
def get_field_class_id(self, field_name):
return self._class_def.get_field_class_id(field_name)
def read_boolean(self, field_name):
pos = self._read_position(field_name, FieldType.BOOLEAN)
return self._in.read_boolean(pos)
def read_byte(self, field_name):
pos = self._read_position(field_name, FieldType.BYTE)
return self._in.read_byte(pos)
def read_char(self, field_name):
pos = self._read_position(field_name, FieldType.CHAR)
return self._in.read_char(pos)
def read_short(self, field_name):
pos = self._read_position(field_name, FieldType.SHORT)
return self._in.read_short(pos)
def read_int(self, field_name):
pos = self._read_position(field_name, FieldType.INT)
return self._in.read_int(pos)
def read_long(self, field_name):
pos = self._read_position(field_name, FieldType.LONG)
return self._in.read_long(pos)
def read_float(self, field_name):
pos = self._read_position(field_name, FieldType.FLOAT)
return self._in.read_float(pos)
def read_double(self, field_name):
pos = self._read_position(field_name, FieldType.DOUBLE)
return self._in.read_double(pos)
def read_utf(self, field_name):
cur_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.UTF)
self._in.set_position(pos)
return self._in.read_utf()
finally:
self._in.set_position(cur_pos)
def read_portable(self, field_name):
cur_pos = self._in.position()
try:
fd = self._class_def.get_field(field_name)
if fd is None:
raise self._create_unknown_field_exception(field_name)
if fd.field_type != FieldType.PORTABLE:
raise HazelcastSerializationError("Not a Portable field: {}".format(field_name))
pos = self._read_position_by_field_def(fd)
self._in.set_position(pos)
is_none = self._in.read_boolean()
factory_id = self._in.read_int()
class_id = self._in.read_int()
_check_factory_and_class(fd, factory_id, class_id)
if is_none:
return None
return self._portable_serializer.read_internal(self._in, factory_id, class_id)
finally:
self._in.set_position(cur_pos)
def read_boolean_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.BOOLEAN_ARRAY)
self._in.set_position(pos)
return self._in.read_boolean_array()
finally:
self._in.set_position(current_pos)
def read_byte_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.BYTE_ARRAY)
self._in.set_position(pos)
return self._in.read_byte_array()
finally:
self._in.set_position(current_pos)
def read_char_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.CHAR_ARRAY)
self._in.set_position(pos)
return self._in.read_char_array()
finally:
self._in.set_position(current_pos)
def read_short_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.SHORT_ARRAY)
self._in.set_position(pos)
return self._in.read_short_array()
finally:
self._in.set_position(current_pos)
def read_int_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.INT_ARRAY)
self._in.set_position(pos)
return self._in.read_int_array()
finally:
self._in.set_position(current_pos)
def read_long_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.LONG_ARRAY)
self._in.set_position(pos)
return self._in.read_long_array()
finally:
self._in.set_position(current_pos)
def read_float_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.FLOAT_ARRAY)
self._in.set_position(pos)
return self._in.read_float_array()
finally:
self._in.set_position(current_pos)
def read_double_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.DOUBLE_ARRAY)
self._in.set_position(pos)
return self._in.read_double_array()
finally:
self._in.set_position(current_pos)
def read_utf_array(self, field_name):
current_pos = self._in.position()
try:
pos = self._read_position(field_name, FieldType.UTF_ARRAY)
self._in.set_position(pos)
return self._in.read_utf_array()
finally:
self._in.set_position(current_pos)
def read_portable_array(self, field_name):
current_pos = self._in.position()
try:
fd = self._class_def.get_field(field_name)
if fd is None:
raise self._create_unknown_field_exception(field_name)
if fd.field_type != FieldType.PORTABLE_ARRAY:
raise HazelcastSerializationError("Not a portable array field: {}".format(field_name))
pos = self._read_position_by_field_def(fd)
self._in.set_position(pos)
length = self._in.read_int()
factory_id = self._in.read_int()
class_id = self._in.read_int()
if length == bits.NULL_ARRAY_LENGTH:
return None
_check_factory_and_class(fd, factory_id, class_id)
portables = [None] * length
if length > 0:
offset = self._in.position()
for i in xrange(0, length):
start = self._in.read_int(offset + i * bits.INT_SIZE_IN_BYTES)
self._in.set_position(start)
portables[i] = self._portable_serializer.read_internal(self._in, factory_id, class_id)
return portables
finally:
self._in.set_position(current_pos)
def get_raw_data_input(self):
if not self._raw:
pos = self._in.read_int(self._offset + self._class_def.get_field_count() * bits.INT_SIZE_IN_BYTES)
self._in.set_position(pos)
self._raw = True
return self._in
def end(self):
self._in.set_position(self._final_pos)
def _read_position(self, field_name, field_type):
if self._raw:
raise HazelcastSerializationError("Cannot read Portable fields after get_raw_data_input() is called!")
fd = self._class_def.get_field(field_name)
if fd is None:
return self._read_nested_position(field_name, field_type)
if fd.field_type != field_type:
raise HazelcastSerializationError("Not a '{}' field: {}".format(field_type, field_name))
return self._read_position_by_field_def(fd)
def _read_nested_position(self, field_name, field_type):
field_names = field_name.split(".")
if len(field_names) > 1:
fd = None
_reader = self
for i in xrange(0, len(field_names)):
fd = _reader._class_def.get_field(field_names[i])
if fd is None:
break
if i == len(field_names) - 1:
break
                pos = _reader._read_position_by_field_def(fd)
self._in.set_position(pos)
is_none = self._in.read_boolean()
if is_none:
raise ValueError("Parent field is null: ".format(field_names[i]))
_reader = self._portable_serializer.create_reader(self._in)
if fd is None:
raise self._create_unknown_field_exception(field_name)
if fd.field_type != field_type:
raise HazelcastSerializationError("Not a '{}' field: {}".format(field_type, field_name))
            return _reader._read_position_by_field_def(fd)
raise self._create_unknown_field_exception(field_name)
def _create_unknown_field_exception(self, field_name):
return HazelcastSerializationError("Unknown field name: '{}' for ClassDefinition[ id: {}, version: {} ]"
.format(field_name, self._class_def.class_id, self._class_def.version))
def _read_position_by_field_def(self, fd):
pos = self._in.read_int(self._offset + fd.index * bits.INT_SIZE_IN_BYTES)
_len = self._in.read_short(pos)
# name + len + type
return pos + bits.SHORT_SIZE_IN_BYTES + _len + 1
def _check_factory_and_class(field_def, factory_id, class_id):
    if factory_id != field_def.factory_id:
        raise ValueError("Invalid factoryId! Expected: {}, Current: {}".format(field_def.factory_id, factory_id))
    if class_id != field_def.class_id:
        raise ValueError("Invalid classId! Expected: {}, Current: {}".format(field_def.class_id, class_id))
class MorphingPortableReader(DefaultPortableReader):
pass
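# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how a user-defined
# Portable typically reads its fields back through a PortableReader such as
# DefaultPortableReader above. The Employee class, its field names and the
# factory/class ids are assumptions chosen for the example.
from hazelcast.serialization.api import Portable

class Employee(Portable):
    FACTORY_ID = 1
    CLASS_ID = 2

    def __init__(self, name=None, age=None):
        self.name = name
        self.age = age

    def write_portable(self, writer):
        writer.write_utf("name", self.name)
        writer.write_int("age", self.age)

    def read_portable(self, reader):
        # each read resolves the field position from the class definition,
        # as implemented in DefaultPortableReader._read_position() above
        self.name = reader.read_utf("name")
        self.age = reader.read_int("age")

    def get_factory_id(self):
        return self.FACTORY_ID

    def get_class_id(self):
        return self.CLASS_ID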
|
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from tempest.api.network import base
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class PortsTestJSON(base.BaseNetworkTest):
_interface = 'json'
"""
Test the following operations for ports:
port create
port delete
port list
port show
port update
"""
@classmethod
@test.safe_setup
def setUpClass(cls):
super(PortsTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.port = cls.create_port(cls.network)
def _delete_port(self, port_id):
self.client.delete_port(port_id)
_, body = self.client.list_ports()
ports_list = body['ports']
self.assertFalse(port_id in [n['id'] for n in ports_list])
@test.attr(type='smoke')
def test_create_update_delete_port(self):
# Verify port creation
_, body = self.client.create_port(network_id=self.network['id'])
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
self.assertTrue(port['admin_state_up'])
# Verify port update
new_name = "New_Port"
_, body = self.client.update_port(port['id'],
name=new_name,
admin_state_up=False)
updated_port = body['port']
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
@test.attr(type='smoke')
def test_show_port(self):
# Verify the details of port
_, body = self.client.show_port(self.port['id'])
port = body['port']
self.assertIn('id', port)
# TODO(Santosh)- This is a temporary workaround to compare create_port
# and show_port dict elements.Remove this once extra_dhcp_opts issue
# gets fixed in neutron.( bug - 1365341.)
self.assertThat(self.port,
custom_matchers.MatchesDictExceptForKeys
(port, excluded_keys=['extra_dhcp_opts']))
@test.attr(type='smoke')
def test_show_port_fields(self):
# Verify specific fields of a port
fields = ['id', 'mac_address']
_, body = self.client.show_port(self.port['id'],
fields=fields)
port = body['port']
self.assertEqual(sorted(port.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(port[field_name], self.port[field_name])
@test.attr(type='smoke')
def test_list_ports(self):
# Verify the port exists in the list of all ports
_, body = self.client.list_ports()
ports = [port['id'] for port in body['ports']
if port['id'] == self.port['id']]
self.assertNotEmpty(ports, "Created port not found in the list")
@test.attr(type='smoke')
def test_port_list_filter_by_router_id(self):
# Create a router
network = self.create_network()
self.create_subnet(network)
router = self.create_router(data_utils.rand_name('router-'))
resp, port = self.client.create_port(network_id=network['id'])
# Add router interface to port created above
resp, interface = self.client.add_router_interface_with_port_id(
router['id'], port['port']['id'])
self.addCleanup(self.client.remove_router_interface_with_port_id,
router['id'], port['port']['id'])
# List ports filtered by router_id
_, port_list = self.client.list_ports(device_id=router['id'])
ports = port_list['ports']
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['id'], port['port']['id'])
self.assertEqual(ports[0]['device_id'], router['id'])
@test.attr(type='smoke')
def test_list_ports_fields(self):
# Verify specific fields of ports
fields = ['id', 'mac_address']
_, body = self.client.list_ports(fields=fields)
ports = body['ports']
self.assertNotEmpty(ports, "Port list returned is empty")
# Asserting the fields returned are correct
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
@test.attr(type='smoke')
def test_update_port_with_second_ip(self):
# Create a network with two subnets
network = self.create_network()
subnet_1 = self.create_subnet(network)
subnet_2 = self.create_subnet(network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
# Create a port with a single IP address from first subnet
port = self.create_port(network,
fixed_ips=fixed_ip_1)
self.assertEqual(1, len(port['fixed_ips']))
# Update the port with a second IP address from second subnet
fixed_ips = fixed_ip_1 + fixed_ip_2
port = self.update_port(port, fixed_ips=fixed_ips)
self.assertEqual(2, len(port['fixed_ips']))
# Update the port to return to a single IP address
port = self.update_port(port, fixed_ips=fixed_ip_1)
self.assertEqual(1, len(port['fixed_ips']))
class PortsTestXML(PortsTestJSON):
_interface = 'xml'
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
_interface = 'json'
@classmethod
@test.safe_setup
def setUpClass(cls):
super(PortsAdminExtendedAttrsTestJSON, cls).setUpClass()
cls.identity_client = cls._get_identity_admin_client()
cls.tenant = cls.identity_client.get_tenant_by_name(
CONF.identity.tenant_name)
cls.network = cls.create_network()
cls.host_id = socket.gethostname()
@test.attr(type='smoke')
def test_create_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id'],
"binding:host_id": self.host_id}
_, body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
host_id = port['binding:host_id']
self.assertIsNotNone(host_id)
self.assertEqual(self.host_id, host_id)
@test.attr(type='smoke')
def test_update_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id']}
_, body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
update_body = {"binding:host_id": self.host_id}
_, body = self.admin_client.update_port(port['id'], **update_body)
updated_port = body['port']
host_id = updated_port['binding:host_id']
self.assertIsNotNone(host_id)
self.assertEqual(self.host_id, host_id)
@test.attr(type='smoke')
def test_list_ports_binding_ext_attr(self):
# Create a new port
post_body = {"network_id": self.network['id']}
_, body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
# Update the port's binding attributes so that is now 'bound'
# to a host
update_body = {"binding:host_id": self.host_id}
self.admin_client.update_port(port['id'], **update_body)
# List all ports, ensure new port is part of list and its binding
# attributes are set and accurate
_, body = self.admin_client.list_ports()
ports_list = body['ports']
pids_list = [p['id'] for p in ports_list]
self.assertIn(port['id'], pids_list)
listed_port = [p for p in ports_list if p['id'] == port['id']]
self.assertEqual(1, len(listed_port),
'Multiple ports listed with id %s in ports listing: '
'%s' % (port['id'], ports_list))
self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
@test.attr(type='smoke')
def test_show_port_binding_ext_attr(self):
_, body = self.admin_client.create_port(network_id=self.network['id'])
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
_, body = self.admin_client.show_port(port['id'])
show_port = body['port']
self.assertEqual(port['binding:host_id'],
show_port['binding:host_id'])
self.assertEqual(port['binding:vif_type'],
show_port['binding:vif_type'])
self.assertEqual(port['binding:vif_details'],
show_port['binding:vif_details'])
class PortsAdminExtendedAttrsTestXML(PortsAdminExtendedAttrsTestJSON):
_interface = 'xml'
class PortsIpV6TestJSON(PortsTestJSON):
_ip_version = 6
_tenant_network_cidr = CONF.network.tenant_network_v6_cidr
_tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
@classmethod
@test.safe_setup
def setUpClass(cls):
super(PortsIpV6TestJSON, cls).setUpClass()
if not CONF.network_feature_enabled.ipv6:
skip_msg = "IPv6 Tests are disabled."
raise cls.skipException(skip_msg)
class PortsIpV6TestXML(PortsIpV6TestJSON):
_interface = 'xml'
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
_ip_version = 6
_tenant_network_cidr = CONF.network.tenant_network_v6_cidr
_tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
@classmethod
def setUpClass(cls):
if not CONF.network_feature_enabled.ipv6:
skip_msg = "IPv6 Tests are disabled."
raise cls.skipException(skip_msg)
super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
class PortsAdminExtendedAttrsIpV6TestXML(PortsAdminExtendedAttrsIpV6TestJSON):
_interface = 'xml'
|
|
from inspect import isclass
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Escalated')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {review} for {addon} deleted.')
editor_format = _(u'{user} deleted {review} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class PAYPAL_FAILED(_LOG):
id = 51
format = _(u'{addon} failed checks with PayPal.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} disabled.')
short = _(u'App disabled')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation..
class VIDEO_ERROR(_LOG):
id = 74
format = _(u'Video removed from {addon} because of a problem with '
u'the video. ')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
format = _(u'{addon} re-review because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
class ADDON_UNLISTED(_LOG):
id = 128
format = _(u'{addon} unlisted.')
keep = True
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False)
or l.id in LOG_ADMINS)]
def log(action, *args, **kw):
"""
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
"""
from access.models import Group
from addons.models import Addon
from amo import get_user, logger_log
from devhub.models import (ActivityLog, AddonLog, CommentLog, GroupLog,
UserLog, VersionLog)
from users.models import UserProfile
from versions.models import Version
user = kw.get('user', get_user())
if not user:
logger_log.warning('Activity log called with no user: %s' % action.id)
return
al = ActivityLog(user=user, action=action.id)
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog(comments=al.details['comments'], activity_log=al).save()
# TODO(davedash): post-remora this may not be necessary.
if 'created' in kw:
al.created = kw['created']
# Double save necessary since django resets the created date on save.
al.save()
for arg in args:
if isinstance(arg, tuple):
if arg[0] == Addon:
AddonLog(addon_id=arg[1], activity_log=al).save()
elif arg[0] == Version:
VersionLog(version_id=arg[1], activity_log=al).save()
elif arg[0] == UserProfile:
UserLog(user_id=arg[1], activity_log=al).save()
elif arg[0] == Group:
GroupLog(group_id=arg[1], activity_log=al).save()
elif isinstance(arg, Addon):
AddonLog(addon=arg, activity_log=al).save()
elif isinstance(arg, Version):
VersionLog(version=arg, activity_log=al).save()
elif isinstance(arg, UserProfile):
# Index by any user who is mentioned as an argument.
UserLog(activity_log=al, user=arg).save()
elif isinstance(arg, Group):
GroupLog(group=arg, activity_log=al).save()
# Index by every user
UserLog(activity_log=al, user=user).save()
return al
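# Illustrative usage (not part of the original module): how callers typically combine
# the LOG constants above with log(); the addon, version and user objects are assumed
# to exist in the caller's scope.
#
#   amo.log(amo.LOG.ADD_VERSION, version, addon)            # indexed via VersionLog and AddonLog
#   amo.log(amo.LOG.COMMENT_VERSION, addon, version,
#           details={'comments': 'Needs more testing.'})    # also writes a CommentLog
#   assert LOG.COMMENT_VERSION.id in LOG_HIDE_DEVELOPER     # hide_developer entries stay off developer pages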
|
|
"""
Created on 28 Sep 2020
@author: Jade Page (jade.page@southcoastscience.com)
"""
from botocore.exceptions import ClientError
from collections import OrderedDict
from scs_core.data.datetime import LocalizedDatetime
from scs_core.data.json import JSONable
from scs_core.data.tokens import Tokens
from scs_core.sys.persistence_manager import PersistenceManager
# --------------------------------------------------------------------------------------------------------------------
class S3Manager(object):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, client, resource_client):
"""
Constructor
"""
self.__client = client
self.__resource_client = resource_client
# ----------------------------------------------------------------------------------------------------------------
def list_buckets(self, full_details):
response = self.__client.list_buckets()
if 'Buckets' not in response:
return
for bucket in response['Buckets']:
yield Bucket.construct(bucket) if full_details else bucket['Name']
def list_objects(self, bucket, depth, full_details, prefix=None):
prefix_tokens = None if prefix is None else Tokens.construct(prefix, '/')
next_token = None
summary = Summary.none()
while True:
response = self.__retrieve_objects(bucket, prefix, next_token)
if not response or 'Contents' not in response:
return
for item in response['Contents']:
key_tokens = Tokens.construct(item['Key'], '/')
if prefix is None or key_tokens.startswith(prefix_tokens):
# full...
if full_details:
yield Object.construct(item)
continue
# minimal...
if depth is None:
yield item['Key']
continue
# summary...
path = key_tokens.path(depth=depth)
obj = Object.construct(item)
if path == summary.path:
summary.add(obj)
continue
if not summary.is_none():
yield summary
summary = Summary.new(path, obj)
if 'NextContinuationToken' not in response:
break
next_token = response.get('NextContinuationToken')
if summary.path is not None:
yield summary
def retrieve_from_bucket(self, bucket, key):
response = self.__client.get_object(Bucket=bucket, Key=key)
meta = response.get('ResponseMetadata')
header = meta.get('HTTPHeaders')
last_modified = LocalizedDatetime.construct_from_s3(header.get('last-modified'))
content_body = response.get("Body")
data = content_body.read().decode()
return data, last_modified
def upload_file_to_bucket(self, filepath, bucket, key):
self.__resource_client.Bucket(bucket).upload_file(filepath, key)
return self.head(bucket, key)
def upload_bytes_to_bucket(self, body, bucket, key):
self.__resource_client.Bucket(bucket).put_object(Body=body, Key=key)
return self.head(bucket, key)
def put_object(self, body, bucket, key):
self.__client.put_object(Body=body, Bucket=bucket, Key=key)
return self.head(bucket, key)
def move_object(self, bucket, key, new_key):
source = '/'.join((bucket, key))
self.__client.copy_object(Bucket=bucket, CopySource=source, Key=new_key)
self.__client.delete_object(Bucket=bucket, Key=key)
return self.head(bucket, new_key)
def delete_objects(self, bucket, prefix, excluded=None):
excluded_tokens = Tokens.construct(excluded, '/')
for key in self.list_objects(bucket, None, False, prefix=prefix):
if excluded and Tokens.construct(key, '/').startswith(excluded_tokens):
continue
self.delete_object(bucket, key)
yield key
def delete_object(self, bucket, key):
self.__client.delete_object(Bucket=bucket, Key=key)
def exists(self, bucket, key):
try:
self.head(bucket, key)
return True
except ClientError as ex:
if ex.response['Error']['Code'] == "404":
return False
raise
def head(self, bucket, key):
response = self.__client.head_object(Bucket=bucket, Key=key)
return Head.construct(key, response)
# ----------------------------------------------------------------------------------------------------------------
def __retrieve_objects(self, bucket, prefix, next_token):
if prefix:
if next_token:
response = self.__client.list_objects_v2(
Bucket=bucket,
Prefix=prefix,
ContinuationToken=next_token,
Delimiter=",",
)
else:
response = self.__client.list_objects_v2(
Bucket=bucket,
Prefix=prefix,
Delimiter=",",
)
else:
if next_token:
response = self.__client.list_objects_v2(
Bucket=bucket,
ContinuationToken=next_token,
Delimiter=",",
)
else:
response = self.__client.list_objects_v2(
Bucket=bucket,
Delimiter=",",
)
return response
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "S3Manager:{client:%s, resource_client:%s}" % (self.__client, self.__resource_client)
# --------------------------------------------------------------------------------------------------------------------
class S3PersistenceManager(PersistenceManager):
"""
classdocs
"""
__BUCKET = 'scs-persistence'
# ----------------------------------------------------------------------------------------------------------------
@staticmethod
def __key(dirname, filename):
return '/'.join((dirname, filename))
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, client, resource_client):
"""
Constructor
"""
self.__manager = S3Manager(client, resource_client)
# ----------------------------------------------------------------------------------------------------------------
def list(self, container, dirname):
prefix_len = len(dirname) + 1
objects = self.__manager.list_objects(container, 1, True, prefix=dirname)
return [obj.key[prefix_len:] for obj in objects]
def exists(self, dirname, filename):
key = self.__key(dirname, filename)
return self.__manager.exists(self.__BUCKET, key)
def load(self, dirname, filename, encryption_key=None):
key = self.__key(dirname, filename)
text, last_modified = self.__manager.retrieve_from_bucket(self.__BUCKET, key)
if encryption_key:
from scs_core.data.crypt import Crypt # late import
jstr = Crypt.decrypt(encryption_key, text)
else:
jstr = text
return jstr, last_modified
def save(self, jstr, dirname, filename, encryption_key=None):
key = self.__key(dirname, filename)
if encryption_key:
from scs_core.data.crypt import Crypt # late import
text = Crypt.encrypt(encryption_key, jstr)
else:
text = jstr + '\n'
self.__manager.put_object(text, self.__BUCKET, key)
def remove(self, dirname, filename):
key = self.__key(dirname, filename)
self.__manager.delete_object(self.__BUCKET, key)
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def scs_path(cls):
return cls.__BUCKET
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "S3PersistenceManager:{manager:%s}" % self.__manager
# --------------------------------------------------------------------------------------------------------------------
class Bucket(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, response):
name = response.get('Name')
creation_date = LocalizedDatetime(response.get('CreationDate'))
return cls(name, creation_date)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, name, creation_date):
"""
Constructor
"""
self.__name = name # string
self.__creation_date = creation_date # LocalizedDatetime
def __lt__(self, other):
return self.name < other.name
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['name'] = self.name
jdict['creation-date'] = self.creation_date.as_iso8601()
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def name(self):
return self.__name
@property
def creation_date(self):
return self.__creation_date
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "Bucket:{name:%s, creation_date:%s}" % (self.name, self.creation_date)
# --------------------------------------------------------------------------------------------------------------------
class Head(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, key, response):
last_modified = LocalizedDatetime(response.get('LastModified'))
e_tag = response.get('ETag')
size = response.get('ContentLength')
content_type = response.get('ContentType')
return cls(key, last_modified, e_tag, size, content_type)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, key, last_modified, e_tag, size, content_type):
"""
Constructor
"""
self.__key = key # string
self.__last_modified = last_modified # LocalizedDatetime
self.__e_tag = e_tag # string
self.__size = int(size) # int
self.__content_type = content_type # string
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['key'] = self.key
jdict['last-modified'] = self.last_modified.as_iso8601()
jdict['e-tag'] = self.e_tag
jdict['size'] = self.size
        jdict['content-type'] = self.content_type
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def key(self):
return self.__key
@property
def last_modified(self):
return self.__last_modified
@property
def e_tag(self):
return self.__e_tag
@property
def size(self):
return self.__size
@property
def content_type(self):
return self.__content_type
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "Head:{key:%s, last_modified:%s, e_tag:%s, size:%s, content_type:%s}" % \
(self.key, self.last_modified, self.e_tag, self.size, self.content_type)
# --------------------------------------------------------------------------------------------------------------------
class Object(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, response):
key = response.get('Key')
last_modified = LocalizedDatetime(response.get('LastModified'))
e_tag = response.get('ETag')
size = response.get('Size')
storage_class = response.get('StorageClass')
return cls(key, last_modified, e_tag, size, storage_class)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, key, last_modified, e_tag, size, storage_class):
"""
Constructor
"""
self.__key = key # string
self.__last_modified = last_modified # LocalizedDatetime
self.__e_tag = e_tag # string
self.__size = int(size) # int
self.__storage_class = storage_class # string
def __lt__(self, other):
if self.key < other.key:
return True
return self.e_tag < other.e_tag
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['key'] = self.key
jdict['last-modified'] = self.last_modified.as_iso8601()
jdict['e-tag'] = self.e_tag
jdict['size'] = self.size
jdict['storage-class'] = self.storage_class
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def key(self):
return self.__key
@property
def last_modified(self):
return self.__last_modified
@property
def e_tag(self):
return self.__e_tag
@property
def size(self):
return self.__size
@property
def storage_class(self):
return self.__storage_class
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "Object:{key:%s, last_modified:%s, e_tag:%s, size:%s, storage_class:%s}" % \
(self.key, self.last_modified, self.e_tag, self.size, self.storage_class)
# --------------------------------------------------------------------------------------------------------------------
class Summary(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def none(cls):
return cls(None, 0, None, 0)
@classmethod
def new(cls, path, obj: Object):
return cls(path, 1, obj.last_modified, obj.size)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, path, objects, last_modified, size):
"""
Constructor
"""
self.__path = path # string
self.__objects = int(objects) # int
self.__last_modified = last_modified # LocalizedDatetime
self.__size = int(size) # int
def __lt__(self, other):
return self.path < other.path
# ----------------------------------------------------------------------------------------------------------------
def is_none(self):
return self.path is None
def add(self, obj: Object):
self.__objects += 1
if self.last_modified is None or obj.last_modified > self.last_modified:
self.__last_modified = obj.last_modified
self.__size += obj.size
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['path'] = self.path
jdict['objects'] = self.objects
jdict['last-modified'] = self.last_modified.as_iso8601()
jdict['size'] = self.size
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def path(self):
return self.__path
@property
def objects(self):
return self.__objects
@property
def last_modified(self):
return self.__last_modified
@property
def size(self):
return self.__size
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "Summary:{path:%s, objects:%s, last_modified:%s, size:%s}" % \
(self.path, self.objects, self.last_modified, self.size)
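# --------------------------------------------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes boto3 is installed and AWS credentials are
# configured; 'example-bucket' is a placeholder bucket name.

def example_list_bucket_keys(bucket='example-bucket'):
    import boto3                                            # late import, following the style used elsewhere here

    manager = S3Manager(boto3.client('s3'), boto3.resource('s3'))

    # with depth=None and full_details=False, list_objects(..) yields plain key strings
    return list(manager.list_objects(bucket, None, False))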
|
|
# -*- coding: utf-8 -*-
"""The source file generator for include source files."""
import logging
import os
from yaldevtools.source_generators import interface
class IncludeSourceFileGenerator(interface.SourceFileGenerator):
"""Include source file generator."""
def _GenerateFeaturesHeader(
self, project_configuration, template_mappings, include_header_file,
makefile_am_file, output_writer, output_filename):
"""Generates a features header file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
include_header_file (LibraryIncludeHeaderFile): library include header
file.
makefile_am_file (MainMakefileAMFile): project main Makefile.am file.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(
self._template_directory, 'libyal', 'features.h.in')
template_filename = os.path.join(template_directory, 'header.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
# TODO: fix check for libsigscan.
if include_header_file.have_wide_character_type:
template_filename = os.path.join(
template_directory, 'wide_character_type.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
# TODO: improve detection if include is needed.
if 'libcthreads' in makefile_am_file.libraries:
template_filename = os.path.join(template_directory, 'multi_thread.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
if include_header_file.have_bfio:
template_filename = os.path.join(template_directory, 'bfio.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
template_filename = os.path.join(template_directory, 'footer.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
def _GenerateMakefileAM(
self, project_configuration, template_mappings, include_header_file,
makefile_am_file, output_writer, output_filename):
"""Generates a tests Makefile.am file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
include_header_file (LibraryIncludeHeaderFile): library include header
file.
makefile_am_file (LibraryMakefileAMFile): library Makefile.am file.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
library_name = project_configuration.library_name
pkginclude_headers = [
'\t{0:s}/definitions.h \\'.format(library_name),
'\t{0:s}/extern.h \\'.format(library_name),
'\t{0:s}/features.h \\'.format(library_name),
'\t{0:s}/types.h'.format(library_name)]
    # TODO: detect if header file exists.
if library_name != 'libcerror':
pkginclude_header = '\t{0:s}/error.h \\'.format(library_name)
pkginclude_headers.append(pkginclude_header)
if include_header_file.HasFunction('get_codepage'):
pkginclude_header = '\t{0:s}/codepage.h \\'.format(library_name)
pkginclude_headers.append(pkginclude_header)
    # TODO: detect if header file exists.
if library_name in ('libnk2', 'libpff'):
pkginclude_header = '\t{0:s}/mapi.h \\'.format(library_name)
pkginclude_headers.append(pkginclude_header)
    # TODO: detect if header file exists.
if library_name == 'libolecf':
pkginclude_header = '\t{0:s}/ole.h \\'.format(library_name)
pkginclude_headers.append(pkginclude_header)
pkginclude_headers = sorted(pkginclude_headers)
template_mappings['pkginclude_headers'] = '\n'.join(pkginclude_headers)
template_filename = os.path.join(self._template_directory, 'Makefile.am')
output_filename = os.path.join('include', 'Makefile.am')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
def _GenerateTypesHeader(
self, project_configuration, template_mappings, include_header_file,
output_writer, output_filename):
"""Generates a types header file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
include_header_file (LibraryIncludeHeaderFile): library include header
file.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(
self._template_directory, 'libyal', 'types.h.in')
type_definitions = []
# TODO: deprecate project_configuration.library_public_types ?
for type_name in sorted(project_configuration.library_public_types):
type_definition = 'typedef intptr_t {0:s}_{1:s}_t;'.format(
project_configuration.library_name, type_name)
type_definitions.append(type_definition)
template_mappings['library_type_definitions'] = '\n'.join(
type_definitions)
template_filename = os.path.join(template_directory, 'header.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
if type_definitions:
template_filename = os.path.join(template_directory, 'public_types.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
template_filename = os.path.join(template_directory, 'footer.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
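  # Illustrative note (not part of the original generator): for a hypothetical
  # library "libfoo" with public types "file" and "volume", the loop above
  # produces type definitions of the form:
  #
  #   typedef intptr_t libfoo_file_t;
  #   typedef intptr_t libfoo_volume_t;
  #
  # which are injected into the types.h.in templates through the
  # 'library_type_definitions' template mapping.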
def Generate(self, project_configuration, output_writer):
"""Generates include source files.
Args:
project_configuration (ProjectConfiguration): project configuration.
output_writer (OutputWriter): output writer.
"""
include_header_file = self._GetLibraryIncludeHeaderFile(
project_configuration)
if not include_header_file:
logging.warning(
        'Missing: {0:s}, skipping generation of include source files.'.format(
self._library_include_header_path))
return
makefile_am_file = self._GetMainMakefileAM(project_configuration)
template_mappings = self._GetTemplateMappings(
project_configuration,
authors_separator=',\n * ')
output_filename = os.path.join('include', 'Makefile.am')
self._GenerateMakefileAM(
project_configuration, template_mappings, include_header_file,
makefile_am_file, output_writer, output_filename)
output_directory = os.path.join(
'include', project_configuration.library_name)
template_directory = os.path.join(self._template_directory, 'libyal')
for directory_entry in os.listdir(template_directory):
template_filename = os.path.join(template_directory, directory_entry)
if not os.path.isfile(template_filename):
continue
output_filename = os.path.join(output_directory, directory_entry)
if (directory_entry not in ('definitions.h.in', 'extern.h') and
not os.path.exists(output_filename)):
continue
      # Do not overwrite definitions.h.in when it exists.
      if (directory_entry != 'definitions.h.in' or
          not os.path.exists(output_filename)):
self._GenerateSection(
template_filename, template_mappings, output_writer,
output_filename)
if directory_entry in ('codepage.h', 'definitions.h.in', 'error.h'):
self._VerticalAlignTabs(output_filename)
output_filename = os.path.join(output_directory, 'features.h.in')
self._GenerateFeaturesHeader(
project_configuration, template_mappings, include_header_file,
makefile_am_file, output_writer, output_filename)
output_filename = os.path.join(output_directory, 'types.h.in')
self._GenerateTypesHeader(
project_configuration, template_mappings, include_header_file,
output_writer, output_filename)
|
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'ff_count_contigs'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from ff_count_contigs.ff_count_contigsImpl import ff_count_contigs
impl_ff_count_contigs = ff_count_contigs(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['ff_count_contigs.count_contigs_async'] = ['ff_count_contigs', 'count_contigs']
async_check_methods['ff_count_contigs.count_contigs_check'] = ['ff_count_contigs', 'count_contigs']
sync_methods['ff_count_contigs.count_contigs'] = True
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
            raise ValueError('Neither the \'job-service-url\' configuration parameter ' +
                'nor the \'KB_JOB_SERVICE_URL\' environment variable is defined')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
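# Example sketch (not part of the generated service code): how the
# AsyncJobServiceClient above is typically used. The workspace and contig set
# names below are placeholders, the caller must hold a valid auth token, and
# KB_JOB_SERVICE_URL (or 'job-service-url' in the deployment config) must point
# at a reachable job service.
def _example_async_job_usage(auth_token):
    client = AsyncJobServiceClient(timeout=10 * 60, token=auth_token)
    job_id = client.run_job({'method': 'ff_count_contigs.count_contigs',
                             'params': ['example_workspace', 'example_contigset']})
    # check_job returns a job state dict (see the 'finished'/'error' handling
    # in the request dispatcher below)
    return client.check_job(job_id)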
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
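# For example, with HTTP_X_FORWARDED_FOR set to '203.0.113.7, 10.0.0.2' and
# x-headers trusted (the default), getIPAddress returns '203.0.113.7'; when
# the config sets 'dont_trust_x_ip_headers' to 'true' it falls back to
# REMOTE_ADDR.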
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'ff_count_contigs'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_ff_count_contigs.count_contigs,
name='ff_count_contigs.count_contigs',
types=[basestring, basestring])
self.method_authentication['ff_count_contigs.count_contigs'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"ff_count_contigs but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            if 'error' not in error['error'] or error['error']['error'] is None:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service listening
# on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True.
    This also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
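# Usage sketch (not part of the generated service): start the service on an
# OS-assigned port in a background process from an interactive session, then
# stop it again. This only applies with newprocess=True; otherwise
# start_server() blocks in serve_forever().
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... issue JSON-RPC POST requests against http://localhost:<port>/ ...
#     stop_server()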
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=g-classes-have-attributes
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import warnings
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.legacy_tf_layers import variable_scope_shim
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = base_layer.InputSpec # pylint: disable=invalid-name
_KERAS_STYLE_SCOPE = False
@keras_export(
v1=['keras.__internal__.legacy.layers.experimental.keras_style_scope'])
@tf_export(v1=['layers.experimental.keras_style_scope'])
@tf_contextlib.contextmanager
def keras_style_scope():
"""Use Keras-style variable management.
All tf.layers and tf RNN cells created in this scope use Keras-style
variable management. Creating such layers with a scope= argument is
disallowed, and reuse=True is disallowed.
The purpose of this scope is to allow users of existing layers to
slowly transition to a Keras layers API without breaking existing
functionality.
One example of this is when using TensorFlow's RNN classes with Keras
Models or Networks. Because Keras models do not properly set variable
scopes, users of RNNs may either accidentally share scopes between two
different models, or get errors about variables that already exist.
Example:
```python
class RNNModel(tf.keras.Model):
def __init__(self, name):
super(RNNModel, self).__init__(name=name)
self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
[tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])
def call(self, input, state):
return self.rnn(input, state)
model_1 = RNNModel("model_1")
model_2 = RNNModel("model_2")
# OK
output_1, next_state_1 = model_1(input, state)
# Raises an error about trying to create an already existing variable.
output_2, next_state_2 = model_2(input, state)
```
The solution is to wrap the model construction and execution in a keras-style
scope:
```python
with keras_style_scope():
model_1 = RNNModel("model_1")
model_2 = RNNModel("model_2")
# model_1 and model_2 are guaranteed to create their own variables.
output_1, next_state_1 = model_1(input, state)
output_2, next_state_2 = model_2(input, state)
assert len(model_1.weights) > 0
assert len(model_2.weights) > 0
assert(model_1.weights != model_2.weights)
```
Yields:
A keras layer style scope.
"""
global _KERAS_STYLE_SCOPE
stack = _KERAS_STYLE_SCOPE
_KERAS_STYLE_SCOPE = True
try:
yield
finally:
_KERAS_STYLE_SCOPE = stack
@keras_export(
v1=['keras.__internal__.legacy.layers.experimental.set_keras_style'])
@tf_export(v1=['layers.experimental.set_keras_style'])
def set_keras_style():
"""Use Keras-style variable management.
  All tf.layers and tf RNN cells created after keras style has been enabled
use Keras-style variable management. Creating such layers with a
scope= argument is disallowed, and reuse=True is disallowed.
The purpose of this function is to allow users of existing layers to
slowly transition to Keras layers API without breaking existing
functionality.
For more details, see the documentation for `keras_style_scope`.
Note, once keras style has been set, it is set globally for the entire
program and cannot be unset.
Example:
```python
set_keras_style()
model_1 = RNNModel(name="model_1")
model_2 = RNNModel(name="model_2")
# model_1 and model_2 are guaranteed to create their own variables.
output_1, next_state_1 = model_1(input, state)
output_2, next_state_2 = model_2(input, state)
assert len(model_1.weights) > 0
assert len(model_2.weights) > 0
assert(model_1.weights != model_2.weights)
```
"""
global _KERAS_STYLE_SCOPE
_KERAS_STYLE_SCOPE = True
def _is_in_keras_style_scope():
global _KERAS_STYLE_SCOPE
return _KERAS_STYLE_SCOPE
@keras_export(v1=['keras.__internal__.legacy.layers.Layer'])
@tf_export(v1=['layers.Layer'])
class Layer(base_layer.Layer):
"""Base layer class.
It is considered legacy, and we recommend the use of `tf.keras.layers.Layer`
instead.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and
non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
def __init__(self, trainable=True, name=None, dtype=None,
**kwargs):
# For backwards compatibility, legacy layers do not use `ResourceVariable`
# by default.
self._use_resource_variables = False
scope = kwargs.pop('_scope', None)
self._reuse = kwargs.pop('_reuse', None)
# Avoid an incorrect lint error
self._trainable_weights = []
self.built = False
if dtype is None:
# Indicates to infer dtype from inputs. When the V2 dtype behavior is
# enabled, Keras layers default their dtype to floatx instead, so we pass
# an "_infer" policy to keep the old V1 behavior.
dtype = policy.Policy('_infer')
if 'autocast' not in kwargs:
kwargs['autocast'] = False
# Mark that legacy layers should not be instrumented as Keras usage
self._disable_keras_instrumentation = True
super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype,
**kwargs)
if _is_in_keras_style_scope():
if scope is not None:
raise ValueError(
'scope argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(scope))
if self._reuse is not None:
raise ValueError(
'reuse argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(self._reuse))
self._keras_style = True
else:
self._keras_style = False
self._call_has_scope_arg = 'scope' in self._call_fn_args
if scope:
with vs.variable_scope(scope) as captured_scope:
self._scope = captured_scope
else:
self._scope = None
self._current_scope = None
# We no longer track graph in tf.layers layers. This property is only kept to
# maintain API backward compatibility.
@property
def graph(self):
warnings.warn('`Layer.graph` is deprecated and '
'will be removed in a future version. '
'Please stop using this property because tf.layers layers no '
'longer track their graph.')
if context.executing_eagerly():
raise RuntimeError('Layer.graph not supported when executing eagerly.')
return None
def _init_set_name(self, name):
# Determine layer name (non-unique).
if isinstance(name, vs.VariableScope):
base_name = name.name
self._name, _ = self._make_unique_name()
else:
base_name = name
self._name = name
if not name:
self._name, base_name = self._make_unique_name()
self._base_name = base_name
def _make_unique_name(self, name_uid_map=None, avoid_names=None,
namespace='', zero_based=False):
base_name = base_layer.to_snake_case(self.__class__.__name__)
name = backend.unique_object_name(
base_name,
name_uid_map=name_uid_map,
avoid_names=avoid_names,
namespace=namespace,
zero_based=zero_based)
return (name, base_name)
@property
def scope_name(self):
if not self._scope:
raise ValueError('No name available for layer scope because the layer "' +
self._name + '" has not been used yet. The scope name ' +
' is determined the first time the layer instance is ' +
'called. You must therefore call the layer before ' +
'querying `scope_name`.')
return self._scope.name
def add_loss(self, losses, inputs=None):
previous_losses_length = len(self._losses)
previous_callable_losses_length = len(self._callable_losses)
super(Layer, self).add_loss(losses, inputs=inputs)
if not context.executing_eagerly():
# TODO(fchollet): deprecate collection below.
new_losses = self._losses[previous_losses_length:]
new_callable_losses = self._callable_losses[
previous_callable_losses_length:]
for regularizer in new_callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
new_losses.append(loss_tensor)
_add_elements_to_collection(
new_losses,
ops.GraphKeys.REGULARIZATION_LOSSES)
def _name_scope(self): # pylint: disable=method-hidden
"""Determines op naming for the Layer."""
if self._keras_style:
return super(Layer, self)._name_scope()
return self._current_scope.original_name_scope
def _set_scope(self, scope=None):
if self._scope is None:
# If constructed with _scope=None, lazy setting of scope.
if self._reuse:
with vs.variable_scope(
scope if scope is not None else self._base_name) as captured_scope:
self._scope = captured_scope
else:
with vs.variable_scope(
scope, default_name=self._base_name) as captured_scope:
self._scope = captured_scope
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
use_resource=None,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE,
partitioner=None,
**kwargs):
"""Adds a new variable to the layer, or gets an existing one; returns it.
Args:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
constraint: constraint instance (callable).
use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
an instance of `PartitionedVariable` is returned. Available
partitioners include `tf.compat.v1.fixed_size_partitioner` and
`tf.compat.v1.variable_axis_size_partitioner`. For more details, see
the documentation of `tf.compat.v1.get_variable` and the "Variable
Partitioners and Sharding" section of the API guide.
**kwargs: Additional keyword arguments.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When trainable has been set to True with synchronization
set as `ON_READ`.
"""
for kwarg in kwargs:
if kwarg != 'experimental_autocast':
raise TypeError('Unknown keyword argument:', kwarg)
if self._keras_style:
return super(Layer, self).add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable and self.trainable,
constraint=constraint,
use_resource=use_resource,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE,
partitioner=partitioner,
**kwargs)
if synchronization == vs.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
def _should_add_regularizer(variable, existing_variable_set):
if base_layer_utils.is_split_variable(variable):
for var in variable:
if var in existing_variable_set:
return False
return True
else:
return variable not in existing_variable_set
init_graph = None
if not context.executing_eagerly():
default_graph = ops.get_default_graph()
if default_graph.building_function:
with ops.init_scope():
# Retrieve the variables from the graph into which variables
# will be lifted; if initialization ops will be lifted into
# the eager context, then there is nothing to retrieve, since variable
# collections are not supported when eager execution is enabled.
if not context.executing_eagerly():
init_graph = ops.get_default_graph()
existing_variables = set(tf_variables.global_variables())
else:
# Initialization ops will not be lifted out of the default graph.
init_graph = default_graph
existing_variables = set(tf_variables.global_variables())
if dtype is None:
dtype = self.dtype or dtypes.float32
self._set_scope(None)
reuse = self.built or self._reuse
prev_len_trainable = len(self._trainable_weights)
with vs.variable_scope(
self._scope, reuse=reuse, auxiliary_name_scope=False) as scope:
self._current_scope = scope
with backend.name_scope(self._name_scope()): # pylint: disable=not-callable
use_resource = (use_resource or
self._use_resource_variables or
scope.use_resource)
if initializer is None:
initializer = scope.initializer
variable = super(Layer, self).add_weight(
name,
shape,
dtype=dtypes.as_dtype(dtype),
initializer=initializer,
trainable=trainable and self.trainable,
constraint=constraint,
partitioner=partitioner,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
getter=vs.get_variable,
**kwargs)
if regularizer:
if (ops.executing_eagerly_outside_functions()
or _should_add_regularizer(variable, existing_variables)):
self._handle_weight_regularization(name, variable, regularizer)
var_store = vs._get_default_variable_store() # pylint: disable=protected-access
# When the shim to get variable scope working in TF2 is used,
# We need to explicitly make the shim track the regularization
# losses as the collections will not be accessible.
if hasattr(var_store, 'add_regularizer'):
var_store.add_regularizer(variable, regularizer)
if init_graph is not None:
# Handle edge case where a custom getter has overridden `trainable`.
# There is one known occurrence of this, in unit test
# testBasicRNNCellNotTrainable in
# contrib.rnn.python.kernel_tests.core_rnn_cell_test
with init_graph.as_default():
trainable_variables = tf_variables.trainable_variables()
if (trainable and self.trainable and
variable not in trainable_variables):
# A custom getter / variable scope overrode the trainable flag.
extra_trainable_vars = self._trainable_weights[prev_len_trainable:]
self._trainable_weights = self._trainable_weights[
:prev_len_trainable]
self._non_trainable_weights += extra_trainable_vars
return variable
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Args:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
**Note**: kwarg `scope` is reserved for use by the layer.
Returns:
Output tensor(s).
Note:
- If the layer's `call` method takes a `scope` keyword argument,
this argument will be automatically set to the current variable scope.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `inputs` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
"""
scope = kwargs.pop('scope', None)
if self._keras_style:
if scope is not None:
raise ValueError(
'scope argument not allowed when keras style layers are enabled, '
'but saw: {}'.format(scope))
return super(Layer, self).__call__(inputs, *args, **kwargs)
self._set_scope(scope)
if self.built:
try:
# Some classes which inherit from Layer do not use its constructor, so
# rather than initializing to None we check for an AttributeError.
scope_context_manager = self._always_reuse_variable_scope # pylint: disable=access-member-before-definition
except AttributeError:
scope_context_manager = None
if scope_context_manager is None:
# From this point we will always set reuse=True, so create a "final"
# variable scope with this setting. We avoid re-creating variable scopes
# after this point as an optimization.
scope_context_manager = vs.variable_scope(
self._scope, reuse=True, auxiliary_name_scope=False)
# Do not cache variable scopes if Eager mode is enabled. If Eager mode
# is enabled then we don't want to reuse scopes because the cached scope
# might be from a FuncGraph or Eager scope we are no longer in.
if not ops.executing_eagerly_outside_functions():
self._always_reuse_variable_scope = scope_context_manager
else:
scope_context_manager = vs.variable_scope(
self._scope, reuse=self._reuse, auxiliary_name_scope=False)
with scope_context_manager as scope:
self._current_scope = scope
try:
call_has_scope_arg = self._call_has_scope_arg
except AttributeError:
self._call_fn_args = variable_scope_shim.fn_args(self.call)
self._call_has_scope_arg = 'scope' in self._call_fn_args
call_has_scope_arg = self._call_has_scope_arg
if call_has_scope_arg:
kwargs['scope'] = scope
# Actually call layer
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
if not context.executing_eagerly():
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
def __deepcopy__(self, memo):
no_copy = set(['_graph', '_thread_local', '_metrics_lock'])
shallow_copy = set(['_scope', '_always_reuse_variable_scope'])
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in no_copy:
setattr(result, k, v)
elif k in shallow_copy:
setattr(result, k, copy.copy(v))
elif base_layer.is_tensor_or_tensor_list(v):
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
  def __setattr__(self, name, value):
    # By-pass the automatic dependency tracking performed by the parent Layer.
    super(trackable.Trackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
@property
def _is_legacy_layer(self):
"""Used by keras to check compatibility. This should not be overridden."""
return True
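# Illustrative sketch (not part of TensorFlow): a minimal subclass of the
# legacy Layer above. The class name `MyDense`, the `units` argument and the
# use of `math_ops.matmul` (from tensorflow.python.ops) are assumptions made
# for this example only.
#
#   class MyDense(Layer):
#
#     def __init__(self, units, **kwargs):
#       super(MyDense, self).__init__(**kwargs)
#       self.units = units
#
#     def build(self, input_shape):
#       self.kernel = self.add_weight(
#           'kernel', shape=[int(input_shape[-1]), self.units])
#
#     def call(self, inputs):
#       return math_ops.matmul(inputs, self.kernel)
#
# Outside a keras_style_scope, a second call to the same MyDense instance
# re-enters its variable scope with reuse=True (see __call__ above) instead of
# creating new variables.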
def _add_elements_to_collection(elements, collection_list):
if context.executing_eagerly():
raise RuntimeError('Using collections from Layers not supported in Eager '
'mode. Tried to add %s to %s' % (elements,
collection_list))
elements = nest.flatten(elements)
collection_list = nest.flatten(collection_list)
for name in collection_list:
collection = ops.get_collection_ref(name)
collection_set = {id(e) for e in collection}
for element in elements:
if id(element) not in collection_set:
collection.append(element)
|
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm import const as pvm_const
from pypowervm.tasks import hdisk
from pypowervm.tasks import partition as pvm_tpar
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six
from taskflow import task
from nova import conf as cfg
from nova import exception as exc
from nova.i18n import _
from nova.virt import block_device
from nova.virt.powervm import vm
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
LOCAL_FEED_TASK = 'local_feed_task'
UDID_KEY = 'target_UDID'
# A global variable that will cache the physical WWPNs on the system.
_vscsi_pfc_wwpns = None
@lockutils.synchronized('vscsi_wwpns')
def wwpns(adapter):
"""Builds the WWPNs of the adapters that will connect the ports.
:return: The list of WWPNs that need to be included in the zone set.
"""
return pvm_tpar.get_physical_wwpns(adapter, force_refresh=False)
class FCVscsiVolumeAdapter(object):
def __init__(self, adapter, instance, connection_info, stg_ftsk=None):
"""Initialize the PowerVMVolumeAdapter
:param adapter: The pypowervm adapter.
:param instance: The nova instance that the volume should attach to.
:param connection_info: The volume connection info generated from the
BDM. Used to determine how to attach the
volume to the VM.
:param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
I/O Operations. If provided, the Virtual I/O Server
mapping updates will be added to the FeedTask. This
defers the updates to some later point in time. If
the FeedTask is not provided, the updates will be run
immediately when the respective method is executed.
"""
self.adapter = adapter
self.instance = instance
self.connection_info = connection_info
self.vm_uuid = vm.get_pvm_uuid(instance)
self.reset_stg_ftsk(stg_ftsk=stg_ftsk)
self._pfc_wwpns = None
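    # Usage sketch (illustrative, not part of the driver): a typical attach
    # and detach flow. Here `adapter`, `instance` and `connection_info` stand
    # in for the real pypowervm adapter, nova instance and FC connection info.
    #
    #     vol_adpt = FCVscsiVolumeAdapter(adapter, instance, connection_info)
    #     vol_adpt.attach_volume()    # discover hdisks and map them to the VM
    #     ...
    #     vol_adpt.detach_volume()    # remove the mappings and the hdisk
    #
    # A shared FeedTask can be supplied via stg_ftsk to defer the Virtual I/O
    # Server mapping updates, as described in the docstring above.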
@property
def volume_id(self):
"""Method to return the volume id.
Every driver must implement this method if the default impl will
not work for their data.
"""
return block_device.get_volume_id(self.connection_info)
def reset_stg_ftsk(self, stg_ftsk=None):
"""Resets the pypowervm transaction FeedTask to a new value.
The previous updates from the original FeedTask WILL NOT be migrated
to this new FeedTask.
:param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
I/O Operations. If provided, the Virtual I/O Server
mapping updates will be added to the FeedTask. This
defers the updates to some later point in time. If
the FeedTask is not provided, the updates will be run
immediately when this method is executed.
"""
if stg_ftsk is None:
getter = pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
self.stg_ftsk = pvm_tx.FeedTask(LOCAL_FEED_TASK, getter)
else:
self.stg_ftsk = stg_ftsk
def _set_udid(self, udid):
"""This method will set the hdisk udid in the connection_info.
:param udid: The hdisk target_udid to be stored in system_metadata
"""
self.connection_info['data'][UDID_KEY] = udid
def _get_udid(self):
"""This method will return the hdisk udid stored in connection_info.
:return: The target_udid associated with the hdisk
"""
try:
return self.connection_info['data'][UDID_KEY]
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like live migrate
# and resize
LOG.info('Failed to retrieve target_UDID key from BDM for volume '
'id %s', self.volume_id, instance=self.instance)
return None
def attach_volume(self):
"""Attaches the volume."""
# Check if the VM is in a state where the attach is acceptable.
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
capable, reason = lpar_w.can_modify_io()
if not capable:
raise exc.VolumeAttachFailed(
volume_id=self.volume_id, reason=reason)
        # It's about to get weird. The transaction manager has a list of
# VIOSes. We could use those, but they only have SCSI mappings (by
# design). They do not have storage (super expensive).
#
# We need the storage xag when we are determining which mappings to
# add to the system. But we don't want to tie it to the stg_ftsk. If
# we do, every retry, every etag gather, etc... takes MUCH longer.
#
# So we get the VIOSes with the storage xag here, separately, to save
# the stg_ftsk from potentially having to run it multiple times.
attach_ftsk = pvm_tx.FeedTask(
'attach_volume_to_vio', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR,
pvm_const.XAG.VIO_SMAP]))
# Find valid hdisks and map to VM.
attach_ftsk.add_functor_subtask(
self._attach_volume_to_vio, provides='vio_modified',
flag_update=False)
ret = attach_ftsk.execute()
# Check the number of VIOSes
vioses_modified = 0
for result in ret['wrapper_task_rets'].values():
if result['vio_modified']:
vioses_modified += 1
# Validate that a vios was found
if vioses_modified == 0:
msg = (_('Failed to discover valid hdisk on any Virtual I/O '
'Server for volume %(volume_id)s.') %
{'volume_id': self.volume_id})
ex_args = {'volume_id': self.volume_id, 'reason': msg}
raise exc.VolumeAttachFailed(**ex_args)
self.stg_ftsk.execute()
def _attach_volume_to_vio(self, vios_w):
"""Attempts to attach a volume to a given VIO.
:param vios_w: The Virtual I/O Server wrapper to attach to.
:return: True if the volume was attached. False if the volume was
not (could be the Virtual I/O Server does not have
connectivity to the hdisk).
"""
status, device_name, udid = self._discover_volume_on_vios(vios_w)
if hdisk.good_discovery(status, device_name):
# Found a hdisk on this Virtual I/O Server. Add the action to
# map it to the VM when the stg_ftsk is executed.
with lockutils.lock(self.volume_id):
self._add_append_mapping(vios_w.uuid, device_name,
tag=self.volume_id)
# Save the UDID for the disk in the connection info. It is
# used for the detach.
self._set_udid(udid)
LOG.debug('Added deferred task to attach device %(device_name)s '
'to vios %(vios_name)s.',
{'device_name': device_name, 'vios_name': vios_w.name},
instance=self.instance)
# Valid attachment
return True
return False
def extend_volume(self):
# The compute node does not need to take any additional steps for the
# client to see the extended volume.
pass
def _discover_volume_on_vios(self, vios_w):
"""Discovers an hdisk on a single vios for the volume.
:param vios_w: VIOS wrapper to process
:returns: Status of the volume or None
:returns: Device name or None
:returns: UDID or None
"""
        # Get the initiator WWPNs, targets and Lun for the given VIOS.
vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)
# Build the ITL map and discover the hdisks on the Virtual I/O
# Server (if any).
itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
if len(itls) == 0:
LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
{'vios': vios_w.name, 'volume_id': self.volume_id},
instance=self.instance)
return None, None, None
status, device_name, udid = hdisk.discover_hdisk(self.adapter,
vios_w.uuid, itls)
if hdisk.good_discovery(status, device_name):
LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
'%(volume_id)s. Status code: %(status)s.',
{'hdisk': device_name, 'vios': vios_w.name,
'volume_id': self.volume_id, 'status': status},
instance=self.instance)
elif status == hdisk.LUAStatus.DEVICE_IN_USE:
LOG.warning('Discovered device %(dev)s for volume %(volume)s '
'on %(vios)s is in use. Error code: %(status)s.',
{'dev': device_name, 'volume': self.volume_id,
'vios': vios_w.name, 'status': status},
instance=self.instance)
return status, device_name, udid
def _get_hdisk_itls(self, vios_w):
"""Returns the mapped ITLs for the hdisk for the given VIOS.
A PowerVM system may have multiple Virtual I/O Servers to virtualize
        the I/O to the virtual machines. Each Virtual I/O Server may have its
        own set of initiator WWPNs, target WWPNs and Lun on which the hdisk is
        mapped. This method determines and returns the ITLs for the given VIOS.
:param vios_w: A virtual I/O Server wrapper.
:return: List of the i_wwpns that are part of the vios_w,
:return: List of the t_wwpns that are part of the vios_w,
:return: Target lun id of the hdisk for the vios_w.
"""
it_map = self.connection_info['data']['initiator_target_map']
i_wwpns = it_map.keys()
active_wwpns = vios_w.get_active_pfc_wwpns()
vio_wwpns = [x for x in i_wwpns if x in active_wwpns]
t_wwpns = []
for it_key in vio_wwpns:
t_wwpns.extend(it_map[it_key])
lun = self.connection_info['data']['target_lun']
return vio_wwpns, t_wwpns, lun
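    # Illustrative shape (values made up) of the FC connection info consumed
    # above -- an initiator_target_map keyed by initiator WWPN plus the LUN:
    #
    #     connection_info['data'] = {
    #         'initiator_target_map': {
    #             'c05076079cff0e56': ['500507680510f3e5', '500507680510f3e6'],
    #         },
    #         'target_lun': 1,
    #         ...
    #     }
    #
    # Only initiator WWPNs active on the given VIOS are kept; their mapped
    # target WWPNs plus the LUN form the ITLs used for hdisk discovery.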
def _add_append_mapping(self, vios_uuid, device_name, tag=None):
"""Update the stg_ftsk to append the mapping to the VIOS.
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
:param device_name: The hdisk device name.
:param tag: String tag to set on the physical volume.
"""
def add_func(vios_w):
LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s on "
"vios %(vios)s.",
{'dev': device_name, 'vios': vios_w.name},
instance=self.instance)
pv = pvm_stor.PV.bld(self.adapter, device_name, tag=tag)
v_map = tsk_map.build_vscsi_mapping(None, vios_w, self.vm_uuid, pv)
return tsk_map.add_map(vios_w, v_map)
self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
def detach_volume(self):
"""Detach the volume."""
# Check if the VM is in a state where the detach is acceptable.
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
capable, reason = lpar_w.can_modify_io()
if not capable:
raise exc.VolumeDetachFailed(
volume_id=self.volume_id, reason=reason)
# Run the detach
try:
# See logic in attach_volume for why this new FeedTask is here.
detach_ftsk = pvm_tx.FeedTask(
'detach_volume_from_vio', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR,
pvm_const.XAG.VIO_SMAP]))
# Find hdisks to detach
detach_ftsk.add_functor_subtask(
self._detach_vol_for_vio, provides='vio_modified',
flag_update=False)
ret = detach_ftsk.execute()
# Warn if no hdisks detached.
if not any([result['vio_modified']
for result in ret['wrapper_task_rets'].values()]):
LOG.warning("Detach Volume: Failed to detach the "
"volume %(volume_id)s on ANY of the Virtual "
"I/O Servers.", {'volume_id': self.volume_id},
instance=self.instance)
except Exception as e:
LOG.exception('PowerVM error detaching volume from virtual '
'machine.', instance=self.instance)
ex_args = {'volume_id': self.volume_id, 'reason': six.text_type(e)}
raise exc.VolumeDetachFailed(**ex_args)
self.stg_ftsk.execute()
def _detach_vol_for_vio(self, vios_w):
"""Removes the volume from a specific Virtual I/O Server.
:param vios_w: The VIOS wrapper.
:return: True if a remove action was done against this VIOS. False
otherwise.
"""
LOG.debug("Detach volume %(vol)s from vios %(vios)s",
dict(vol=self.volume_id, vios=vios_w.name),
instance=self.instance)
device_name = None
udid = self._get_udid()
try:
if udid:
# This will only work if vios_w has the Storage XAG.
device_name = vios_w.hdisk_from_uuid(udid)
if not udid or not device_name:
# We lost our bdm data. We'll need to discover it.
status, device_name, udid = self._discover_volume_on_vios(
vios_w)
# Check if the hdisk is in a bad state in the I/O Server.
# Subsequent scrub code on future deploys will clean this up.
if not hdisk.good_discovery(status, device_name):
LOG.warning(
"Detach Volume: The backing hdisk for volume "
"%(volume_id)s on Virtual I/O Server %(vios)s is "
"not in a valid state. This may be the result of "
"an evacuate.",
{'volume_id': self.volume_id, 'vios': vios_w.name},
instance=self.instance)
return False
except Exception:
LOG.exception(
"Detach Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s. Volume "
"UDID: %(volume_uid)s.",
{'vios_name': vios_w.name, 'volume_id': self.volume_id,
'volume_uid': udid, }, instance=self.instance)
return False
# We have found the device name
LOG.info("Detach Volume: Discovered the device %(hdisk)s "
"on Virtual I/O Server %(vios_name)s for volume "
"%(volume_id)s. Volume UDID: %(volume_uid)s.",
{'hdisk': device_name, 'vios_name': vios_w.name,
'volume_id': self.volume_id, 'volume_uid': udid},
instance=self.instance)
# Add the action to remove the mapping when the stg_ftsk is run.
partition_id = vm.get_vm_qp(self.adapter, self.vm_uuid,
qprop='PartitionID')
with lockutils.lock(self.volume_id):
self._add_remove_mapping(partition_id, vios_w.uuid,
device_name)
# Add a step to also remove the hdisk
self._add_remove_hdisk(vios_w, device_name)
# Found a valid element to remove
return True
def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name):
"""Adds a subtask to remove the storage mapping.
:param vm_uuid: The UUID of the VM instance
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
:param device_name: The hdisk device name.
"""
def rm_func(vios_w):
LOG.info("Removing vSCSI mapping from physical volume %(dev)s "
"on vios %(vios)s",
{'dev': device_name, 'vios': vios_w.name},
instance=self.instance)
removed_maps = tsk_map.remove_maps(
vios_w, vm_uuid,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
return removed_maps
self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
def _add_remove_hdisk(self, vio_wrap, device_name):
"""Adds a post-mapping task to remove the hdisk from the VIOS.
This removal is only done after the mapping updates have completed.
:param vio_wrap: The Virtual I/O Server wrapper to remove the disk
from.
:param device_name: The hdisk name to remove.
"""
def rm_hdisk():
LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
"%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
instance=self.instance)
try:
# Attempt to remove the hDisk
hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
vio_wrap.uuid)
except Exception:
# If there is a failure, log it, but don't stop the process
LOG.exception("There was an error removing the hdisk "
"%(disk)s from Virtual I/O Server %(vios)s.",
{'disk': device_name, 'vios': vio_wrap.name},
instance=self.instance)
        # Only remove the hdisk if there are no other mappings for the device
if not self._check_host_mappings(vio_wrap, device_name):
name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
self.stg_ftsk.add_post_execute(task.FunctorTask(
rm_hdisk, name=name))
else:
LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
"%(vios)s because it has existing storage mappings",
{'disk': device_name, 'vios': vio_wrap.name},
instance=self.instance)
def _check_host_mappings(self, vios_wrap, device_name):
"""Checks if the given hdisk has multiple mappings
:param vio_wrap: The Virtual I/O Server wrapper to remove the disk
from.
:param device_name: The hdisk name to remove.
:return: True if there are multiple instances using the given hdisk
"""
vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
if v.uuid == vios_wrap.uuid)
mappings = tsk_map.find_maps(
vios_scsi_mappings, None,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS "
"%(vios)s", {'num': len(mappings), 'dev': device_name,
'vios': vios_wrap.name}, instance=self.instance)
# The mapping is still present as the task feed removes it later.
return len(mappings) > 1
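# A hedged, standalone illustration (not used by the adapter above) of the
# ITL filtering that _get_hdisk_itls performs: keep only the initiator WWPNs
# owned by a given VIOS and gather the target WWPNs mapped to them. The WWPN
# strings and LUN id below are fabricated example values.
def _example_itl_filter(initiator_target_map, active_vios_wwpns, target_lun):
    # Initiators that the VIOS actually owns.
    vio_wwpns = [i for i in initiator_target_map if i in active_vios_wwpns]
    # Every target WWPN reachable from those initiators.
    t_wwpns = []
    for i in vio_wwpns:
        t_wwpns.extend(initiator_target_map[i])
    return vio_wwpns, t_wwpns, target_lun
# _example_itl_filter({'c05076079cff0e56': ['5005076802232ade']},
#                     {'c05076079cff0e56'}, 2)
# -> (['c05076079cff0e56'], ['5005076802232ade'], 2)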
|
|
"""
SoftLayer.file
~~~~~~~~~~~~~~~
File Storage Manager
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import exceptions
from SoftLayer.managers import storage_utils
from SoftLayer import utils
# pylint: disable=too-many-public-methods
class FileStorageManager(utils.IdentifierMixin, object):
"""Manages file Storage volumes."""
def __init__(self, client):
self.configuration = {}
self.client = client
def list_file_volumes(self, datacenter=None, username=None,
storage_type=None, **kwargs):
"""Returns a list of file volumes.
:param datacenter: Datacenter short name (e.g.: dal09)
:param username: Name of volume.
:param storage_type: Type of volume: Endurance or Performance
:param kwargs:
:return: Returns a list of file volumes.
"""
if 'mask' not in kwargs:
items = [
'id',
'username',
'capacityGb',
'bytesUsed',
'serviceResource.datacenter[name]',
'serviceResourceBackendIpAddress',
'activeTransactionCount',
'fileNetworkMountAddress',
'replicationPartnerCount'
]
kwargs['mask'] = ','.join(items)
_filter = utils.NestedDict(kwargs.get('filter') or {})
_filter['nasNetworkStorage']['serviceResource']['type']['type'] = \
(utils.query_filter('!~ NAS'))
_filter['nasNetworkStorage']['storageType']['keyName'] = (
utils.query_filter('*FILE_STORAGE*'))
if storage_type:
_filter['nasNetworkStorage']['storageType']['keyName'] = (
utils.query_filter('%s_FILE_STORAGE*' % storage_type.upper()))
if datacenter:
_filter['nasNetworkStorage']['serviceResource']['datacenter'][
'name'] = (utils.query_filter(datacenter))
if username:
_filter['nasNetworkStorage']['username'] = \
(utils.query_filter(username))
kwargs['filter'] = _filter.to_dict()
return self.client.call('Account', 'getNasNetworkStorage', **kwargs)
def get_file_volume_details(self, volume_id, **kwargs):
"""Returns details about the specified volume.
:param volume_id: ID of volume.
:param kwargs:
:return: Returns details about the specified volume.
"""
if 'mask' not in kwargs:
items = [
'id',
'username',
'password',
'capacityGb',
'bytesUsed',
'snapshotCapacityGb',
'parentVolume.snapshotSizeBytes',
'storageType.keyName',
'serviceResource.datacenter[name]',
'serviceResourceBackendIpAddress',
'fileNetworkMountAddress',
'storageTierLevel',
'provisionedIops',
'lunId',
'originalVolumeName',
'originalSnapshotName',
'originalVolumeSize',
'activeTransactionCount',
'activeTransactions.transactionStatus[friendlyName]',
'replicationPartnerCount',
'replicationStatus',
'replicationPartners[id,username,'
'serviceResourceBackendIpAddress,'
'serviceResource[datacenter[name]],'
'replicationSchedule[type[keyname]]]',
]
kwargs['mask'] = ','.join(items)
return self.client.call('Network_Storage', 'getObject',
id=volume_id, **kwargs)
def get_file_volume_access_list(self, volume_id, **kwargs):
"""Returns a list of authorized hosts for a specified volume.
:param volume_id: ID of volume.
:param kwargs:
:return: Returns a list of authorized hosts for a specified volume.
"""
if 'mask' not in kwargs:
items = [
'id',
'allowedVirtualGuests[allowedHost[credential, sourceSubnet]]',
'allowedHardware[allowedHost[credential]]',
'allowedSubnets[allowedHost[credential]]',
'allowedIpAddresses[allowedHost[credential]]',
]
kwargs['mask'] = ','.join(items)
return self.client.call('Network_Storage', 'getObject',
id=volume_id, **kwargs)
def get_file_volume_snapshot_list(self, volume_id, **kwargs):
"""Returns a list of snapshots for the specified volume.
:param volume_id: ID of volume.
:param kwargs:
:return: Returns a list of snapshots for the specified volume.
"""
if 'mask' not in kwargs:
items = [
'id',
'notes',
'snapshotSizeBytes',
'storageType[keyName]',
'snapshotCreationTimestamp',
'intervalSchedule',
'hourlySchedule',
'dailySchedule',
'weeklySchedule'
]
kwargs['mask'] = ','.join(items)
return self.client.call('Network_Storage', 'getSnapshots',
id=volume_id, **kwargs)
def authorize_host_to_volume(self, volume_id,
hardware_ids=None,
virtual_guest_ids=None,
ip_address_ids=None,
subnet_ids=None,
**kwargs):
"""Authorizes hosts to File Storage Volumes
:param volume_id: The File volume to authorize hosts to
:param hardware_ids: A List of SoftLayer_Hardware ids
:param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
:param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
:param subnet_ids: A List of SoftLayer_Network_Subnet ids
:return: Returns an array of
SoftLayer_Network_Storage_Allowed_Host objects
which now have access to the given File volume
"""
host_templates = []
storage_utils.populate_host_templates(host_templates,
hardware_ids,
virtual_guest_ids,
ip_address_ids,
subnet_ids)
return self.client.call('Network_Storage', 'allowAccessFromHostList',
host_templates, id=volume_id, **kwargs)
def deauthorize_host_to_volume(self, volume_id,
hardware_ids=None,
virtual_guest_ids=None,
ip_address_ids=None,
subnet_ids=None,
**kwargs):
"""Revokes authorization of hosts to File Storage Volumes
        :param volume_id: The File volume to deauthorize hosts from
:param hardware_ids: A List of SoftLayer_Hardware ids
:param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
:param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
:param subnet_ids: A List of SoftLayer_Network_Subnet ids
:return: Returns an array of
SoftLayer_Network_Storage_Allowed_Host objects
which have access to the given File volume
"""
host_templates = []
storage_utils.populate_host_templates(host_templates,
hardware_ids,
virtual_guest_ids,
ip_address_ids,
subnet_ids)
return self.client.call('Network_Storage', 'removeAccessFromHostList',
host_templates, id=volume_id, **kwargs)
def order_replicant_volume(self, volume_id, snapshot_schedule,
location, tier=None):
"""Places an order for a replicant file volume.
:param volume_id: The ID of the primary volume to be replicated
:param snapshot_schedule: The primary volume's snapshot
schedule to use for replication
:param location: The location for the ordered replicant volume
:param tier: The tier (IOPS per GB) of the primary volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
file_mask = 'billingItem[activeChildren,hourlyFlag],'\
'storageTierLevel,osType,staasVersion,'\
'hasEncryptionAtRest,snapshotCapacityGb,schedules,'\
'intervalSchedule,hourlySchedule,dailySchedule,'\
'weeklySchedule,storageType[keyName],provisionedIops'
file_volume = self.get_file_volume_details(volume_id,
mask=file_mask)
order = storage_utils.prepare_replicant_order_object(
self, snapshot_schedule, location, tier, file_volume, 'file'
)
return self.client.call('Product_Order', 'placeOrder', order)
def get_replication_partners(self, volume_id):
"""Acquires list of replicant volumes pertaining to the given volume.
:param volume_id: The ID of the primary volume to be replicated
:return: Returns an array of SoftLayer_Location objects
"""
return self.client.call('Network_Storage',
'getReplicationPartners',
id=volume_id)
def get_replication_locations(self, volume_id):
"""Acquires list of the datacenters to which a volume can be replicated.
:param volume_id: The ID of the primary volume to be replicated
:return: Returns an array of SoftLayer_Network_Storage objects
"""
return self.client.call('Network_Storage',
'getValidReplicationTargetDatacenterLocations',
id=volume_id)
def order_duplicate_volume(self, origin_volume_id, origin_snapshot_id=None,
duplicate_size=None, duplicate_iops=None,
duplicate_tier_level=None,
duplicate_snapshot_size=None,
hourly_billing_flag=False):
"""Places an order for a duplicate file volume.
:param origin_volume_id: The ID of the origin volume to be duplicated
:param origin_snapshot_id: Origin snapshot ID to use for duplication
:param duplicate_size: Size/capacity for the duplicate volume
:param duplicate_iops: The IOPS per GB for the duplicate volume
:param duplicate_tier_level: Tier level for the duplicate volume
:param duplicate_snapshot_size: Snapshot space size for the duplicate
:param hourly_billing_flag: Billing type, monthly (False)
            or hourly (True); defaults to monthly.
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
file_mask = 'id,billingItem[location,hourlyFlag],snapshotCapacityGb,'\
'storageType[keyName],capacityGb,originalVolumeSize,'\
'provisionedIops,storageTierLevel,'\
'staasVersion,hasEncryptionAtRest'
origin_volume = self.get_file_volume_details(origin_volume_id,
mask=file_mask)
order = storage_utils.prepare_duplicate_order_object(
self, origin_volume, duplicate_iops, duplicate_tier_level,
duplicate_size, duplicate_snapshot_size, 'file',
hourly_billing_flag
)
if origin_snapshot_id is not None:
order['duplicateOriginSnapshotId'] = origin_snapshot_id
return self.client.call('Product_Order', 'placeOrder', order)
def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None):
"""Places an order for modifying an existing file volume.
:param volume_id: The ID of the volume to be modified
:param new_size: The new size/capacity for the volume
:param new_iops: The new IOPS for the volume
:param new_tier_level: The new tier level for the volume
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
mask_items = [
'id',
'billingItem',
'storageType[keyName]',
'capacityGb',
'provisionedIops',
'storageTierLevel',
'staasVersion',
'hasEncryptionAtRest',
]
file_mask = ','.join(mask_items)
volume = self.get_file_volume_details(volume_id, mask=file_mask)
order = storage_utils.prepare_modify_order_object(
self, volume, new_iops, new_tier_level, new_size
)
return self.client.call('Product_Order', 'placeOrder', order)
def delete_snapshot(self, snapshot_id):
"""Deletes the specified snapshot object.
:param snapshot_id: The ID of the snapshot object to delete.
"""
return self.client.call('Network_Storage', 'deleteObject',
id=snapshot_id)
def order_file_volume(self, storage_type, location, size,
iops=None, tier_level=None, snapshot_size=None,
service_offering='storage_as_a_service',
hourly_billing_flag=False):
"""Places an order for a file volume.
:param storage_type: 'performance' or 'endurance'
:param location: Name of the datacenter in which to order the volume
:param size: Size of the desired volume, in GB
:param iops: Number of IOPs for a "Performance" order
:param tier_level: Tier level to use for an "Endurance" order
:param snapshot_size: The size of optional snapshot space,
if snapshot space should also be ordered (None if not ordered)
:param service_offering: Requested offering package to use in the order
('storage_as_a_service', 'enterprise', or 'performance')
:param hourly_billing_flag: Billing type, monthly (False)
            or hourly (True); defaults to monthly.
"""
order = storage_utils.prepare_volume_order_object(
self, storage_type, location, size, iops, tier_level,
snapshot_size, service_offering, 'file', hourly_billing_flag
)
return self.client.call('Product_Order', 'placeOrder', order)
def create_snapshot(self, volume_id, notes='', **kwargs):
"""Creates a snapshot on the given file volume.
:param integer volume_id: The id of the volume
:param string notes: The notes or "name" to assign the snapshot
:return: Returns the id of the new snapshot
"""
return self.client.call('Network_Storage', 'createSnapshot',
notes, id=volume_id, **kwargs)
def enable_snapshots(self, volume_id, schedule_type, retention_count, minute, hour, day_of_week, **kwargs):
"""Enables snapshots for a specific file volume at a given schedule
:param integer volume_id: The id of the volume
:param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY'
:param integer retention_count: The number of snapshots to attempt to retain in this schedule
:param integer minute: The minute of the hour at which HOURLY, DAILY, and WEEKLY snapshots should be taken
:param integer hour: The hour of the day at which DAILY and WEEKLY snapshots should be taken
:param string|integer day_of_week: The day of the week on which WEEKLY snapshots should be taken,
either as a string ('SUNDAY') or integer ('0' is Sunday)
:return: Returns whether successfully scheduled or not
"""
return self.client.call('Network_Storage', 'enableSnapshots',
schedule_type,
retention_count,
minute,
hour,
day_of_week,
id=volume_id,
**kwargs)
def disable_snapshots(self, volume_id, schedule_type):
"""Disables snapshots for a specific file volume at a given schedule
:param integer volume_id: The id of the volume
:param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY'
:return: Returns whether successfully disabled or not
"""
return self.client.call('Network_Storage', 'disableSnapshots', schedule_type, id=volume_id)
def list_volume_schedules(self, volume_id):
"""Lists schedules for a given volume
:param integer volume_id: The id of the volume
:return: Returns list of schedules assigned to a given volume
"""
volume_detail = self.client.call(
'Network_Storage',
'getObject',
id=volume_id,
mask='schedules[type,properties[type]]')
return utils.lookup(volume_detail, 'schedules')
def order_snapshot_space(self, volume_id, capacity, tier, upgrade, **kwargs):
"""Orders snapshot space for the given file volume.
:param integer volume_id: The ID of the volume
:param integer capacity: The capacity to order, in GB
:param float tier: The tier level of the file volume, in IOPS per GB
:param boolean upgrade: Flag to indicate if this order is an upgrade
:return: Returns a SoftLayer_Container_Product_Order_Receipt
"""
file_mask = 'id,billingItem[location,hourlyFlag],'\
'storageType[keyName],storageTierLevel,provisionedIops,'\
'staasVersion,hasEncryptionAtRest'
file_volume = self.get_file_volume_details(volume_id,
mask=file_mask,
**kwargs)
order = storage_utils.prepare_snapshot_order_object(
self, file_volume, capacity, tier, upgrade)
return self.client.call('Product_Order', 'placeOrder', order)
def cancel_snapshot_space(self, volume_id, reason='No longer needed', immediate=False):
"""Cancels snapshot space for a given volume.
:param integer volume_id: The volume ID
:param string reason: The reason for cancellation
:param boolean immediate: Cancel immediately or on anniversary date
"""
file_volume = self.get_file_volume_details(
volume_id,
mask='mask[id,billingItem[activeChildren,hourlyFlag]]')
if 'activeChildren' not in file_volume['billingItem']:
raise exceptions.SoftLayerError(
'No snapshot space found to cancel')
children_array = file_volume['billingItem']['activeChildren']
billing_item_id = None
for child in children_array:
if child['categoryCode'] == 'storage_snapshot_space':
billing_item_id = child['id']
break
if not billing_item_id:
raise exceptions.SoftLayerError(
'No snapshot space found to cancel')
if utils.lookup(file_volume, 'billingItem', 'hourlyFlag'):
immediate = True
return self.client['Billing_Item'].cancelItem(
immediate,
True,
reason,
id=billing_item_id)
def restore_from_snapshot(self, volume_id, snapshot_id):
"""Restores a specific volume from a snapshot
:param integer volume_id: The ID of the volume
:param integer snapshot_id: The id of the restore point
:return: Returns whether successfully restored or not
"""
return self.client.call('Network_Storage', 'restoreFromSnapshot',
snapshot_id, id=volume_id)
def cancel_file_volume(self, volume_id, reason='No longer needed', immediate=False):
"""Cancels the given file storage volume.
:param integer volume_id: The volume ID
:param string reason: The reason for cancellation
:param boolean immediate: Cancel immediately or on anniversary date
"""
file_volume = self.get_file_volume_details(
volume_id,
mask='mask[id,billingItem[id,hourlyFlag]]')
if 'billingItem' not in file_volume:
raise exceptions.SoftLayerError('The volume has already been canceled')
billing_item_id = file_volume['billingItem']['id']
if utils.lookup(file_volume, 'billingItem', 'hourlyFlag'):
immediate = True
return self.client['Billing_Item'].cancelItem(
immediate,
True,
reason,
id=billing_item_id)
def failover_to_replicant(self, volume_id, replicant_id, immediate=False):
"""Failover to a volume replicant.
:param integer volume_id: The ID of the volume
:param integer replicant_id: ID of replicant to failover to
:param boolean immediate: Flag indicating if failover is immediate
:return: Returns whether failover was successful or not
"""
return self.client.call('Network_Storage', 'failoverToReplicant',
replicant_id, immediate, id=volume_id)
def failback_from_replicant(self, volume_id, replicant_id):
"""Failback from a volume replicant.
:param integer volume_id: The ID of the volume
:param integer replicant_id: ID of replicant to failback from
:return: Returns whether failback was successful or not
"""
return self.client.call('Network_Storage', 'failbackFromReplicant',
replicant_id, id=volume_id)
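# Hedged usage sketch, not part of the manager above: driving
# FileStorageManager against a live account. create_client_from_env() is the
# SoftLayer SDK's standard client factory; the datacenter and storage type
# below are illustrative assumptions, not values taken from this module.
if __name__ == '__main__':
    import SoftLayer
    _client = SoftLayer.create_client_from_env()  # credentials from env/~/.softlayer
    _file_mgr = FileStorageManager(_client)
    # List endurance file volumes in one datacenter and print a few fields
    # from the default object mask used by list_file_volumes().
    for _vol in _file_mgr.list_file_volumes(datacenter='dal09',
                                            storage_type='endurance'):
        print(_vol['id'], _vol['username'], _vol.get('capacityGb'))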
|
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" GlusterFS native protocol (glusterfs) driver for shares.
Test cases for GlusterFS native protocol driver.
"""
import ddt
import mock
from oslo_config import cfg
from manila.common import constants
from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share.drivers.glusterfs import common
from manila.share.drivers import glusterfs_native
from manila import test
from manila.tests import fake_utils
CONF = cfg.CONF
def new_share(**kwargs):
share = {
'id': 'fakeid',
'name': 'fakename',
'size': 1,
'share_proto': 'glusterfs',
}
share.update(kwargs)
return share
@ddt.ddt
class GlusterfsNativeShareDriverTestCase(test.TestCase):
"""Tests GlusterfsNativeShareDriver."""
def setUp(self):
super(GlusterfsNativeShareDriverTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self._execute = fake_utils.fake_execute
self._context = context.get_admin_context()
self.glusterfs_target1 = 'root@host1:/gv1'
self.glusterfs_target2 = 'root@host2:/gv2'
self.glusterfs_server1 = 'root@host1'
self.glusterfs_server2 = 'root@host2'
self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1'
self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2'
self.share1 = new_share(
export_location=self.glusterfs_target1,
status=constants.STATUS_AVAILABLE)
self.share2 = new_share(
export_location=self.glusterfs_target2,
status=constants.STATUS_AVAILABLE)
self.gmgr1 = common.GlusterManager(self.glusterfs_server1,
self._execute, None, None,
requires={'volume': False})
self.gmgr2 = common.GlusterManager(self.glusterfs_server2,
self._execute, None, None,
requires={'volume': False})
self.glusterfs_volumes_dict = (
{'root@host1:/manila-share-1-1G': {'size': 1},
'root@host2:/manila-share-2-2G': {'size': 2}})
self.glusterfs_used_vols = set([
'root@host1:/manila-share-1-1G',
'root@host2:/manila-share-2-2G'])
CONF.set_default('glusterfs_volume_pattern',
                         r'manila-share-\d+-#{size}G$')
CONF.set_default('driver_handles_share_servers', False)
self.fake_conf = config.Configuration(None)
self.mock_object(common.GlusterManager, 'make_gluster_call')
self._driver = glusterfs_native.GlusterfsNativeShareDriver(
execute=self._execute,
configuration=self.fake_conf)
self.addCleanup(fake_utils.fake_execute_set_repliers, [])
self.addCleanup(fake_utils.fake_execute_clear_log)
def test_supported_protocols(self):
self.assertEqual(('GLUSTERFS', ),
self._driver.supported_protocols)
def test_setup_via_manager(self):
gmgr = mock.Mock()
gmgr.gluster_call = mock.Mock()
gmgr.volume = 'fakevol'
gmgr.export = 'fakehost:/fakevol'
gmgr.get_gluster_vol_option = mock.Mock(
return_value='glusterfs-server-name,some-other-name')
share = mock.Mock()
ret = self._driver._setup_via_manager({'manager': gmgr,
'share': share})
gmgr.get_gluster_vol_option.assert_called_once_with('auth.ssl-allow')
args = (
('volume', 'set', 'fakevol', 'nfs.export-volumes', 'off'),
('volume', 'set', 'fakevol', 'client.ssl', 'on'),
('volume', 'set', 'fakevol', 'server.ssl', 'on'),
('volume', 'set', 'fakevol', 'dynamic-auth', 'on'),
('volume', 'stop', 'fakevol', '--mode=script'),
('volume', 'start', 'fakevol'))
gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
self.assertEqual(ret, gmgr.export)
def test_setup_via_manager_with_parent(self):
gmgr = mock.Mock()
gmgr.gluster_call = mock.Mock()
gmgr.volume = 'fakevol'
gmgr.export = 'fakehost:/fakevol'
gmgr_parent = mock.Mock()
gmgr_parent.get_gluster_vol_option = mock.Mock(
return_value=(
'glusterfs-server-name,some-other-name,manila-host.com'))
share = mock.Mock()
share_parent = mock.Mock()
ret = self._driver._setup_via_manager(
{'manager': gmgr, 'share': share},
{'manager': gmgr_parent, 'share': share_parent})
gmgr_parent.get_gluster_vol_option.assert_called_once_with(
'auth.ssl-allow')
args = (
('volume', 'set', 'fakevol', 'auth.ssl-allow',
'glusterfs-server-name,manila-host.com'),
('volume', 'set', 'fakevol', 'nfs.export-volumes', 'off'),
('volume', 'set', 'fakevol', 'client.ssl', 'on'),
('volume', 'set', 'fakevol', 'server.ssl', 'on'),
('volume', 'set', 'fakevol', 'dynamic-auth', 'on'))
gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
self.assertEqual(ret, gmgr.export)
@ddt.data(True, False)
def test_setup_via_manager_no_option_data(self, has_parent):
share = mock.Mock()
gmgr = mock.Mock()
if has_parent:
share_parent = mock.Mock()
gmgr_parent = mock.Mock()
share_mgr_parent = {'share': share_parent, 'manager': gmgr_parent}
gmgr_queried = gmgr_parent
else:
share_mgr_parent = None
gmgr_queried = gmgr
gmgr_queried.get_gluster_vol_option = mock.Mock(return_value='')
self.assertRaises(exception.GlusterfsException,
self._driver._setup_via_manager,
{'share': share, 'manager': gmgr},
share_mgr_parent=share_mgr_parent)
gmgr_queried.get_gluster_vol_option.assert_called_once_with(
'auth.ssl-allow')
@ddt.data(exception.ProcessExecutionError, RuntimeError)
def test_setup_via_manager_exception(self, _exception):
share = mock.Mock()
gmgr = mock.Mock()
gmgr.gluster_call = mock.Mock(side_effect=_exception)
gmgr.get_gluster_vol_option = mock.Mock()
self.assertRaises(
{exception.ProcessExecutionError:
exception.GlusterfsException}.get(
_exception, _exception), self._driver._setup_via_manager,
{'share': share, 'manager': gmgr})
def test_snapshots_are_supported(self):
self.assertTrue(self._driver.snapshots_are_supported)
def test_allow_access_via_manager(self):
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
self.mock_object(gmgr1, 'get_gluster_vol_option',
mock.Mock(return_value='some.common.name'))
test_args = ('volume', 'set', 'gv1', 'auth.ssl-allow',
'some.common.name,' + access['access_to'])
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self._driver._allow_access_via_manager(gmgr1, self._context,
self.share1, access)
gmgr1.get_gluster_vol_option.assert_called_once_with('auth.ssl-allow')
gmgr1.gluster_call.assert_called_once_with(*test_args)
def test_allow_access_via_manager_with_share_having_access(self):
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
self.mock_object(
gmgr1, 'get_gluster_vol_option',
mock.Mock(return_value='some.common.name,' + access['access_to']))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self._driver._allow_access_via_manager(gmgr1, self._context,
self.share1, access)
gmgr1.get_gluster_vol_option.assert_called_once_with('auth.ssl-allow')
self.assertFalse(gmgr1.gluster_call.called)
def test_allow_access_via_manager_invalid_access_type(self):
access = {'access_type': 'invalid', 'access_to': 'client.example.com'}
expected_exec = []
self.assertRaises(exception.InvalidShareAccess,
self._driver._allow_access_via_manager,
self.gmgr1, self._context, self.share1, access)
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test_allow_access_via_manager_excp(self):
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
test_args = ('volume', 'set', 'gv1', 'auth.ssl-allow',
'some.common.name,' + access['access_to'])
def raise_exception(*args, **kwargs):
if (args == test_args):
raise exception.ProcessExecutionError()
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
self.mock_object(gmgr1, 'get_gluster_vol_option',
mock.Mock(return_value='some.common.name'))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self.mock_object(gmgr1, 'gluster_call',
mock.Mock(side_effect=raise_exception))
self.assertRaises(exception.GlusterfsException,
self._driver._allow_access_via_manager, gmgr1,
self._context, self.share1, access)
gmgr1.get_gluster_vol_option.assert_called_once_with('auth.ssl-allow')
gmgr1.gluster_call.assert_called_once_with(*test_args)
@ddt.data('on', '1', 'Yes', 'TRUE', 'enable')
def test_deny_access_via_manager(self, trueish):
self.mock_object(common, '_restart_gluster_vol', mock.Mock())
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
def _get_gluster_vol_option(opt):
if opt == 'auth.ssl-allow':
return('some.common.name,' + access['access_to'])
elif opt == 'dynamic-auth':
return trueish
self.mock_object(
gmgr1, 'get_gluster_vol_option',
mock.Mock(side_effect=_get_gluster_vol_option))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self._driver._deny_access_via_manager(gmgr1, self._context,
self.share1, access)
gmgr1.get_gluster_vol_option.assert_has_calls(
[mock.call(a) for a in ('auth.ssl-allow', 'dynamic-auth')])
test_args = ('volume', 'set', 'gv1', 'auth.ssl-allow',
'some.common.name')
gmgr1.gluster_call.assert_called_once_with(*test_args)
self.assertFalse(common._restart_gluster_vol.called)
@ddt.data('off', None, 'strangelove', '!')
def test_deny_access_via_manager_no_dyn_auth(self, falseish):
self.mock_object(common, '_restart_gluster_vol', mock.Mock())
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
def _get_gluster_vol_option(opt):
if opt == 'auth.ssl-allow':
return('some.common.name,' + access['access_to'])
elif opt == 'dynamic-auth':
if falseish == '!':
raise exception.ProcessExecutionError(exit_code=1)
return falseish
self.mock_object(
gmgr1, 'get_gluster_vol_option',
mock.Mock(side_effect=_get_gluster_vol_option))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self._driver._deny_access_via_manager(gmgr1, self._context,
self.share1, access)
gmgr1.get_gluster_vol_option.assert_has_calls(
[mock.call(a) for a in ('auth.ssl-allow', 'dynamic-auth')])
test_args = ('volume', 'set', 'gv1', 'auth.ssl-allow',
'some.common.name')
gmgr1.gluster_call.assert_called_once_with(*test_args)
common._restart_gluster_vol.assert_called_once_with(gmgr1)
def test_deny_access_via_manager_with_share_having_no_access(self):
self.mock_object(common, '_restart_gluster_vol', mock.Mock())
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
self.mock_object(gmgr1, 'get_gluster_vol_option',
mock.Mock(return_value='some.common.name'))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self._driver._deny_access_via_manager(gmgr1, self._context,
self.share1, access)
gmgr1.get_gluster_vol_option.assert_called_once_with('auth.ssl-allow')
self.assertFalse(gmgr1.gluster_call.called)
self.assertFalse(common._restart_gluster_vol.called)
def test_deny_access_via_manager_invalid_access_type(self):
self.mock_object(common, '_restart_gluster_vol', mock.Mock())
access = {'access_type': 'invalid', 'access_to': 'NotApplicable'}
self.assertRaises(exception.InvalidShareAccess,
self._driver._deny_access_via_manager, self.gmgr1,
self._context, self.share1, access)
self.assertFalse(common._restart_gluster_vol.called)
@ddt.data({'trouble': exception.ProcessExecutionError,
'_exception': exception.GlusterfsException},
{'trouble': RuntimeError, '_exception': RuntimeError})
@ddt.unpack
def test_deny_access_via_manager_excp(self, trouble, _exception):
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
test_args = ('volume', 'set', 'gv1', 'auth.ssl-allow',
'some.common.name')
def raise_exception(*args, **kwargs):
if (args == test_args):
raise trouble()
self.mock_object(common, '_restart_gluster_vol', mock.Mock())
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
self.mock_object(
gmgr1, 'get_gluster_vol_option',
mock.Mock(return_value='some.common.name,' + access['access_to']))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self.mock_object(gmgr1, 'gluster_call',
mock.Mock(side_effect=raise_exception))
self.assertRaises(_exception,
self._driver._deny_access_via_manager, gmgr1,
self._context, self.share1, access)
gmgr1.get_gluster_vol_option.assert_called_once_with('auth.ssl-allow')
gmgr1.gluster_call.assert_called_once_with(*test_args)
self.assertFalse(common._restart_gluster_vol.called)
@ddt.data({'trouble': (exception.ProcessExecutionError, {'exit_code': 2}),
'_exception': exception.GlusterfsException},
{'trouble': (RuntimeError, {}), '_exception': RuntimeError})
@ddt.unpack
def test_deny_access_via_manager_dyn_auth_fail(self, trouble, _exception):
self.mock_object(common, '_restart_gluster_vol', mock.Mock())
access = {'access_type': 'cert', 'access_to': 'client.example.com'}
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
None, None)
def _get_gluster_vol_option(opt):
if opt == 'auth.ssl-allow':
return('some.common.name,' + access['access_to'])
elif opt == 'dynamic-auth':
raise trouble[0](**trouble[1])
self.mock_object(
gmgr1, 'get_gluster_vol_option',
mock.Mock(side_effect=_get_gluster_vol_option))
self._driver.layout.gluster_used_vols = set([self.glusterfs_target1])
self.assertRaises(_exception,
self._driver._deny_access_via_manager, gmgr1,
self._context, self.share1, access)
gmgr1.get_gluster_vol_option.assert_has_calls(
[mock.call(a) for a in ('auth.ssl-allow', 'dynamic-auth')])
test_args = ('volume', 'set', 'gv1', 'auth.ssl-allow',
'some.common.name')
gmgr1.gluster_call.assert_called_once_with(*test_args)
self.assertFalse(common._restart_gluster_vol.called)
def test_update_share_stats(self):
self._driver._update_share_stats()
test_data = {
'share_backend_name': 'GlusterFS-Native',
'driver_handles_share_servers': False,
'vendor_name': 'Red Hat',
'driver_version': '1.1',
'storage_protocol': 'glusterfs',
'reserved_percentage': 0,
'QoS_support': False,
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'pools': None,
'snapshot_support': True,
}
self.assertEqual(test_data, self._driver._stats)
def test_get_network_allocations_number(self):
self.assertEqual(0, self._driver.get_network_allocations_number())
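# A hedged illustration, separate from the manila test case above, of how the
# ddt decorators used throughout this class expand a single test method into
# one test per datum. The class name and data below are made up for the
# example.
@ddt.ddt
class _DdtExpansionIllustration(test.TestCase):
    @ddt.data({'trouble': ValueError, '_exception': ValueError},
              {'trouble': KeyError, '_exception': KeyError})
    @ddt.unpack
    def test_expansion(self, trouble, _exception):
        # ddt generates one test per dict; @ddt.unpack maps the dict keys
        # onto keyword arguments of the test method.
        self.assertIs(trouble, _exception)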
|
|
"""Abstraction for Arcyd's conduit operations."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_conduit
#
# Public Classes:
# Conduit
# .describe
# .refresh_cache_on_cycle
# .create_comment
# .create_empty_revision_as_user
# .get_commit_message
# .create_revision_as_user
# .query_name_and_phid_from_email
# .query_users_from_emails
# .parse_commit_message
# .is_review_accepted
# .is_review_abandoned
# .is_review_recently_updated
# .update_revision
# .set_requires_revision
# .close_revision
# .abandon_revision
# .accept_revision_as_user
# .commandeer_revision_as_user
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import datetime
import phlcon_differential
import phlcon_reviewstatecache
import phlcon_user
import phlsys_conduit
import phlsys_textconvert
import abdt_exception
import abdt_logging
# TODO: re-order methods as (accessor, mutator)
class Conduit(object):
def __init__(self, conduit):
"""Initialise a new Conduit.
:conduit: a phlsys_conduit to delegate to
:returns: None
"""
super(Conduit, self).__init__()
self._conduit = conduit
self._reviewstate_cache = phlcon_reviewstatecache.ReviewStateCache(
conduit)
def describe(self):
"""Return a string description of this conduit for a human to read.
:returns: a string
"""
description = None
if self._conduit.conduit_uri:
description = self._conduit.conduit_uri
else:
description = 'conduit is None'
return description
def refresh_cache_on_cycle(self):
"""Refresh the stored state of revisions and users.
Note that this should be called once per 'cycle' of git
        repositories to avoid degradation of performance. This is
necessary because revisions that were not accessed since the
last refresh are evicted and will not benefit from the batching
of revision queries.
"""
self._reviewstate_cache.refresh_active_reviews()
def create_comment(self, revision, message, silent=False):
"""Make a comment on the specified 'revision'.
:revision: id of the revision to comment on
:message: the string message to leave as a comment, may be empty
        :silent: mail notifications won't be sent if True
:returns: None
"""
phlcon_differential.create_comment(
self._conduit, revision, message, silent=silent)
self._log('conduit-comment', 'commented on {}'.format(revision))
def create_empty_revision_as_user(self, username):
"""Return the id of a newly created empty revision as 'username'.
:username: username for the author of the revision
:returns: id of created revision
"""
with phlsys_conduit.act_as_user_context(self._conduit, username):
revision = phlcon_differential.create_empty_revision(self._conduit)
self._log(
'conduit-createemptyrev',
'created {} as {}'.format(revision, username))
return revision
def get_commit_message(self, revisionid):
"""Return the string commit message appropriate for supplied revision.
:revisionid: the id of the revision to create a message for
:returns: the string of the commit message
"""
msg = phlcon_differential.get_commit_message(self._conduit, revisionid)
return phlsys_textconvert.lossy_unicode_to_ascii(msg)
def create_revision_as_user(self, raw_diff, fields, username):
"""Return the id of a newly created revision based on specified args.
See phlcon_differential.MessageFields for some examples of valid input
for specified 'fields'.
:raw_diff: raw output string from e.g. 'git diff master...'
:fields: dict of string attributes, required: 'title' and 'testPlan'
:username: username for the author of the revision
:returns: id of created revision
"""
with phlsys_conduit.act_as_user_context(self._conduit, username):
diffid = phlcon_differential.create_raw_diff(
self._conduit, raw_diff).id
review = phlcon_differential.create_revision(
self._conduit, diffid, fields)
self._log(
'conduit-createrev',
'created {} as {}'.format(review.revisionid, username))
return review.revisionid
def query_name_and_phid_from_email(self, email):
"""Return a (username, phid) tuple based on the provided email.
If an email does not correspond to a user then None is returned.
        :email: a string of the user's email address
:returns: a (username, phid) tuple
"""
user = phlcon_user.query_user_from_email(self._conduit, email)
result = None
if user:
result = (user.userName, user.phid)
return result
def query_users_from_emails(self, emails):
"""Return a list of username strings based on the provided emails.
If an email does not correspond to a username then None is inserted in
its place.
:emails: a list of strings corresponding to user email addresses
:returns: a list of strings corresponding to Phabricator usernames
"""
return phlcon_user.query_users_from_emails(self._conduit, emails)
def parse_commit_message(self, message):
"""Return a ParseCommitMessageResponse based on 'message'.
:message: a string message to parse
:returns: a phlcon_differential.ParseCommitMessageResponse
"""
message = phlsys_textconvert.to_unicode(message)
return phlcon_differential.parse_commit_message(self._conduit, message)
def _get_author_user(self, revisionid):
# TODO: these queries are very expensive, cache them
revision = phlcon_differential.query(self._conduit, [revisionid])[0]
author_user = phlcon_user.query_usernames_from_phids(
self._conduit, [revision.authorPHID])[0]
return author_user
def is_review_accepted(self, revisionid):
"""Return True if the supplied 'revisionid' is in 'accepted' status.
:revisionid: id of the Differential revision to query
:returns: True if accepted
"""
state = self._reviewstate_cache.get_state(revisionid)
return int(state.status) == phlcon_differential.ReviewStates.accepted
def is_review_abandoned(self, revisionid):
"""Return True if the supplied 'revisionid' is in 'abandoned' status.
:revisionid: id of the Differential revision to query
:returns: True if abandoned
"""
state = self._reviewstate_cache.get_state(revisionid)
return int(state.status) == phlcon_differential.ReviewStates.abandoned
def _get_update_age(self, revisionid):
state = self._reviewstate_cache.get_state(revisionid)
date_modified = state.date_modified
update_time = datetime.datetime.fromtimestamp(float(date_modified))
return datetime.datetime.now() - update_time
def is_review_recently_updated(self, revisionid):
"""Return True if the supplied 'revisionid' was updated recently.
        'recently' is a subjective term; in the context of a review it seems
        reasonable to treat a review that has not been updated for at least a
        day as not recently updated.
:revisionid: id of the Differential revision to query
:returns: True if recently updated
"""
update_age = self._get_update_age(revisionid)
one_day = datetime.timedelta(days=1)
return update_age < one_day
def update_revision(self, revisionid, raw_diff, message):
"""Update an existing Differential revision with a new diff.
:revisionid: id of the Differential revision to update
:raw_diff: raw output string from e.g. 'git diff master...'
:message: string message to annotate the update event with
:returns: None
"""
# do some sanity checks before committing to the expensive operation
# of storing a diff in Differential
state = self._reviewstate_cache.get_state(revisionid)
if state.status == phlcon_differential.ReviewStates.closed:
raise abdt_exception.AbdUserException(
"can't update a closed revision")
author_user = self._get_author_user(revisionid)
with phlsys_conduit.act_as_user_context(self._conduit, author_user):
diffid = phlcon_differential.create_raw_diff(
self._conduit, raw_diff).id
try:
phlcon_differential.update_revision(
self._conduit, revisionid, diffid, [], message)
except phlcon_differential.UpdateClosedRevisionError:
raise abdt_exception.AbdUserException(
"CONDUIT: can't update a closed revision")
self._log(
'conduit-updaterev',
'updated {} as {}'.format(revisionid, author_user))
def set_requires_revision(self, revisionid):
"""Set an existing Differential revision to 'requires revision'.
:revisionid: id of the Differential revision to update
:returns: None
"""
author_user = self._get_author_user(revisionid)
with phlsys_conduit.act_as_user_context(self._conduit, author_user):
phlcon_differential.create_comment(
self._conduit,
revisionid,
action=phlcon_differential.Action.rethink)
self._log(
'conduit-setrequiresrev',
'updated {} as {}'.format(revisionid, author_user))
def close_revision(self, revisionid):
"""Set an existing Differential revision to 'closed'.
:revisionid: id of the Differential revision to close
:returns: None
"""
author_user = self._get_author_user(revisionid)
with phlsys_conduit.act_as_user_context(self._conduit, author_user):
phlcon_differential.close(self._conduit, revisionid)
self._log(
'conduit-close',
'closed {} as {}'.format(revisionid, author_user))
def abandon_revision(self, revisionid):
"""Set an existing Differential revision to 'abandoned'.
:revisionid: id of the Differential revision to close
:returns: None
"""
author_user = self._get_author_user(revisionid)
with phlsys_conduit.act_as_user_context(self._conduit, author_user):
phlcon_differential.create_comment(
self._conduit,
revisionid,
action=phlcon_differential.Action.abandon)
self._log(
'conduit-abandon',
'abandoned {} as {}'.format(revisionid, author_user))
# XXX: test function - will disappear when moved to new processrepo tests
def accept_revision_as_user(self, revisionid, username):
"""Set an existing Differential revision to 'accepted'.
:revisionid: id of the Differential revision to accept
:username: username for the reviewer of the revision
:returns: None
"""
with phlsys_conduit.act_as_user_context(self._conduit, username):
phlcon_differential.create_comment(
self._conduit,
revisionid,
action=phlcon_differential.Action.accept)
self._log(
'conduit-accept',
'accepted {} as {}'.format(revisionid, username))
# XXX: test function currently but needed for changing owner in the case
# where no valid author is detected on a branch at creation but is
# valid later, after the review has been created
def commandeer_revision_as_user(self, revisionid, username):
"""Change the author of a revision to the specified 'username'.
:revisionid: id of the Differential revision to claim
:username: username for the author of the revision
:returns: None
"""
with phlsys_conduit.act_as_user_context(self._conduit, username):
phlcon_differential.create_comment(
self._conduit,
revisionid,
action=phlcon_differential.Action.claim)
self._log(
'conduit-commandeer',
'commandeered {} as {}'.format(revisionid, username))
def _log(self, identifier, description):
abdt_logging.on_io_event(identifier, '{}:{}'.format(
self.describe(), description))
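# A hedged, standalone illustration (not used by the Conduit class above) of
# the recency rule behind is_review_recently_updated(): a review counts as
# recent if its last modification is less than one day old. It mirrors
# _get_update_age() and relies on the module-level 'import datetime' above;
# the timestamp argument is a fabricated example input.
def _example_is_recently_updated(date_modified_ts, now=None):
    now = now or datetime.datetime.now()
    update_age = now - datetime.datetime.fromtimestamp(float(date_modified_ts))
    return update_age < datetime.timedelta(days=1)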
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
"""
Database configuration functions.
Main class is DBConfig, which encapsulates a database configuration
passed in as a file or object. For example::
cfg1 = DBConfig() # use defaults
cfg2 = DBConfig("/path/to/myfile.json") # read from file
f = open("/other/file.json")
cfg3 = DBConfig(f) # read from file object
# access dict of parsed conf. settings
settings = cfg1.settings
"""
__author__ = "Dan Gunter <dkgunter@lbl.gov>"
__date__ = "4/25/14"
import os
import ruamel.yaml as yaml
# Constants for keys
HOST_KEY = "host"
PORT_KEY = "port"
DB_KEY = "database"
COLL_KEY = "collection"
USER_KEY = "user"
PASS_KEY = "password"
ALIASES_KEY = "aliases"
class ConfigurationFileError(Exception):
def __init__(self, filename, err):
msg = f"reading '{filename}': {err}"
Exception.__init__(self, msg)
class DBConfig:
"""Database configuration."""
DEFAULT_PORT = 27017
DEFAULT_FILE = "db.json"
ALL_SETTINGS = [
HOST_KEY,
PORT_KEY,
DB_KEY,
COLL_KEY,
ALIASES_KEY,
]
DEFAULT_SETTINGS = [
(HOST_KEY, "localhost"),
(PORT_KEY, DEFAULT_PORT),
(DB_KEY, "vasp"),
(ALIASES_KEY, {}),
]
def __init__(self, config_file=None, config_dict=None):
"""
Constructor.
Settings are created from config_dict, if given,
or parsed config_file, if given, otherwise
the DEFAULT_FILE is tried and if that is not present
the DEFAULT_SETTINGS are used without modification.
:param config_file: Read configuration from this file.
:type config_file: file or str path
:param config_dict: Set configuration from this dictionary.
:raises: ConfigurationFileError if cannot read/parse config_file
"""
self._cfg = dict(self.DEFAULT_SETTINGS)
settings = {}
if config_dict:
settings = config_dict.copy()
auth_aliases(settings)
else:
# Try to use DEFAULT_FILE if no config_file
if config_file is None:
if os.path.exists(self.DEFAULT_FILE):
config_file = self.DEFAULT_FILE
# If there was a config_file, parse it
if config_file is not None:
try:
settings = get_settings(config_file)
except Exception as err:
                    path = getattr(config_file, "name", config_file)
raise ConfigurationFileError(path, err)
self._cfg.update(settings)
normalize_auth(self._cfg)
def __str__(self):
return str(self._cfg)
def copy(self):
"""Return a copy of self (internal settings are copied)."""
return DBConfig(config_dict=self._cfg.copy())
@property
def settings(self):
return self._cfg
@property
def host(self):
return self._cfg.get(HOST_KEY, None)
@property
def port(self):
return self._cfg.get(PORT_KEY, self.DEFAULT_PORT)
@property
def dbname(self):
"""Name of the database."""
return self._cfg.get(DB_KEY, None)
@dbname.setter
def dbname(self, value):
self._cfg[DB_KEY] = value
@property
def collection(self):
return self._cfg.get(COLL_KEY, None)
@collection.setter
def collection(self, value):
self._cfg[COLL_KEY] = value
@property
def user(self):
return self._cfg.get(USER_KEY, None)
@property
def password(self):
return self._cfg.get(PASS_KEY, None)
def get_settings(infile):
"""Read settings from input file.
    :param infile: Input file with JSON or YAML settings.
:type infile: file or str path
:return: Settings parsed from file
:rtype: dict
"""
settings = yaml.safe_load(_as_file(infile))
if not hasattr(settings, "keys"):
raise ValueError(f"Settings not found in {infile}")
# Processing of namespaced parameters in .pmgrc.yaml.
processed_settings = {}
for k, v in settings.items():
if k.startswith("PMG_DB_"):
processed_settings[k[7:].lower()] = v
else:
processed_settings[k] = v
auth_aliases(processed_settings)
return processed_settings
def auth_aliases(d):
"""Interpret user/password aliases."""
for alias, real in ((USER_KEY, "readonly_user"), (PASS_KEY, "readonly_password")):
if alias in d:
d[real] = d[alias]
del d[alias]
def normalize_auth(settings, admin=True, readonly=True, readonly_first=False):
"""Transform the readonly/admin user and password to simple user/password,
as expected by QueryEngine. If return value is true, then
admin or readonly password will be in keys "user" and "password".
:param settings: Connection settings
:type settings: dict
:param admin: Check for admin password
:param readonly: Check for readonly password
:param readonly_first: Check for readonly password before admin
:return: Whether user/password were found
:rtype: bool
"""
U, P = USER_KEY, PASS_KEY
# If user/password, un-prefixed, exists, do nothing.
if U in settings and P in settings:
return True
# Set prefixes
prefixes = []
if readonly_first:
if readonly:
prefixes.append("readonly_")
if admin:
prefixes.append("admin_")
else:
if admin:
prefixes.append("admin_")
if readonly:
prefixes.append("readonly_")
# Look for first user/password matching.
found = False
for pfx in prefixes:
ukey, pkey = pfx + U, pfx + P
if ukey in settings and pkey in settings:
settings[U] = settings[ukey]
settings[P] = settings[pkey]
found = True
break
return found
def _as_file(f, mode="r"):
if isinstance(f, str):
return open(f, mode)
return f
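# Hedged illustration, not exercised by the module above: how normalize_auth()
# promotes prefixed credentials to the plain "user"/"password" keys. The
# credential values are fabricated.
if __name__ == "__main__":
    _cfg = {
        "admin_user": "alice",
        "admin_password": "s3cret",
        "readonly_user": "bob",
        "readonly_password": "guest",
    }
    # Admin credentials win by default; pass readonly_first=True to prefer
    # the readonly pair instead.
    _found = normalize_auth(_cfg)
    print(_found, _cfg["user"], _cfg["password"])  # True alice s3cret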
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo
import json
import getopt, sys
import os
import subprocess
try:
import matplotlib.pyplot as plt
is_plt = True
except ImportError:
is_plt = False
class http_stats:
def __init__(self, server, outdir):
self._con = pymongo.Connection(server)
self._outdir = outdir
self._soa_rname = {}
self._in_graph = {}
self._out_graph = {}
self._max_ref = 0.0
self._top_n = set()
self._total_ref = 0
self._dot_full = 'http_full_graph.dot'
self._dot_pruned = 'http_pruned_graph.dot'
self._sif_full = 'http_full_graph.sif'
self._sif_pruned = 'http_pruned_graph.sif'
self._html = """
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="ja" lang="ja">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style type="text/css">
body { margin: 20px; }
a:hover {color: red}
div.content { margin: 20px; margin-top: 0px; }
h3.soa { margin-bottom: 5px;
margin-top: 0px;
padding-left: 3px;
border-left: 7px solid #000000; }
div.refs { border-left: 1px solid #000000;
border-bottom: 1px solid #000000;
padding: 5px;
margin-bottom: 10px; }
span.dst { margin-right : 10px; }
div.small { font-size: small; margin-bottom: 5px; }
div.bold { font-weight: bold;
padding-left: 3px;
border-left: 4px solid #000000;
border-bottom: 1px solid #000000; }
span.src { margin-right: 8px; }
div.srcs { line-height: 1.5em; }
</style>
<script language="JavaScript">
<!--
function set_hosts(id, hosts) {
var div = document.getElementById(id);
while (div.firstChild) {
div.removeChild(div.firstChild);
}
for (var i = 0; i < hosts.length; i++) {
var text = document.createTextNode(hosts[i] + "\u00A0\u00A0 ");
var span = document.createElement('span');
span.appendChild(text)
div.appendChild(span);
}
}
function show_stats(uri, hosts, trds, id_host, id_refered, id_truncated) {
var div = document.getElementById(id_host);
var text = document.createTextNode(uri);
while (div.firstChild) {
div.removeChild(div.firstChild);
}
div.appendChild(text);
set_hosts(id_refered, hosts);
set_hosts(id_truncated, trds);
}
function show_stats_soa(uri, hosts, trds, soa, id_host, id_refered, id_truncated, id_soa) {
var div = document.getElementById(id_soa);
var text = document.createTextNode(soa);
while (div.firstChild) {
div.removeChild(div.firstChild);
}
div.appendChild(text);
show_stats(uri, hosts, trds, id_host, id_refered, id_truncated);
}
-->
</script>
<title>Cattenacio DPI: HTTP Statistics</title>
</head>
<body>
<h1>HTTP Statistics</h1>
    <h2>Most Referred URLs</h2>
<div class="content">
<table>
<tr valign="top">
<td width=65%%>%(top_n)s</td>
<td width=30%%>
<div class="bold">host</div>
<div id="host_dst" class="small"></div>
<div class="bold">refered by</div>
<div id="refered_dst" class="small"></div>
<div class="bold">truncated URLs</div>
<div id="truncated_dst" class="small"></div>
</td>
</tr>
</table>
</div>
<hr>
    <h2>Sites Referring Most Referred URLs</h2>
<div class="content">
<table>
<tr valign="top">
<td width=65%%>%(refs)s</td>
<td width=30%%>
<div class="bold">host</div>
<div id="host_src" class="small"></div>
<div class="bold">SOA RNAME</div>
<div id="soa_src" class="small"></div>
<div class="bold">refering</div>
<div id="refered_src" class="small"></div>
<div class="bold">truncated URLs</div>
<div id="truncated_src" class="small"></div>
</td>
</tr>
</table>
</div>
<hr>
    <h2>HTTP Referrer Graph</h2>
<div class="content">
<p>
<a href="http_full_graph.dot">download Graphviz dot file</a>
</p>
<p>
<a href="http_full_graph.sif">download SIF file</a>
</p>
</div>
</body>
</html>
"""
def _get_full_sif(self):
sif = ''
for dst, srcs in self._in_graph.items():
for src in srcs:
sif += '%s referer %s\n' % (src, dst)
return sif
def _get_full_dot(self):
        dot = 'digraph http_referer {\ngraph [rankdir = LR];\n'
for dst, srcs in self._in_graph.items():
for src in srcs:
dot += '"%s" -> "%s";\n' % (src, dst)
dot += '}'
return dot
def _get_graph(self):
db = self._con.HTTP
for i in db.graph_trunc_host.find():
dst = i['_id']
srcs = i['value']
self._in_graph[dst] = srcs
self._total_ref += len(srcs)
if self._max_ref < len(srcs):
self._max_ref = len(srcs)
for src in srcs:
if src in self._out_graph:
self._out_graph[src].append(dst)
else:
self._out_graph[src] = [dst]
def _get_soa_rname(self):
db = self._con.HTTP
for dst in self._in_graph.keys():
soa = db.soa.find_one({'_id': dst})
if soa == None:
continue
rname = soa['rname']
elm = {'dst': dst, 'srcs': self._in_graph[dst]}
if rname in self._soa_rname:
self._soa_rname[rname].append(elm)
else:
self._soa_rname[rname] = [elm]
def _get_top_n_refered(self, n):
soa = sorted(self._soa_rname.items(),
key = lambda x: sum([len(refs['srcs']) for refs in x[1]]),
reverse = True)
return soa[0:n]
def _get_top_n_leaking(self, n):
html = '<div class="srcs">'
uri = {}
for k, v in self._out_graph.items():
val = 0
for dst in v:
try:
score = (float(len(self._in_graph[dst])) / self._max_ref) ** 2
val += score
except (KeyError, ZeroDivisionError):
continue
uri[k] = val
sites = sorted(uri.items(), key = lambda x: x[1], reverse = True)[0:n]
if len(sites) == 0:
return ''
i = 0
for k, v in sites:
score = round(v, 2)
db = self._con.HTTP
trd_hosts = []
for trd in db.trunc_hosts.find({"value": k}):
trd_hosts.append(trd['_id'])
soa = db.soa.find_one({'_id': k})
rname = ''
if soa is not None:
rname = soa['rname']
params = {'src': k,
'weight': 125 - i * 1,
'score': score,
'uris': json.dumps(self._out_graph[k]),
'truncated': json.dumps(trd_hosts),
'soa': rname}
i += 1
html += '<span class="src"><a style="text-decoration: none; color: rgb(0, 0, 0);" href="javascript:void(0);" onclick=\'show_stats_soa("%(src)s", %(uris)s, %(truncated)s, "%(soa)s", "host_src", "refered_src", "truncated_src", "soa_src")\'>%(src)s(%(score)2.2f)</a></span> ' % params
html += '</div>'
return html
def _get_top_n_html(self, n):
html = ''
top_n = self._get_top_n_refered(n)
for (soa, refs) in top_n:
html += '<div class="refs"><h3 class="soa">' + soa + '</h3>'
for ref in refs:
len_src = len(ref['srcs'])
db = self._con.HTTP
trd_hosts = []
for trd in db.trunc_hosts.find({"value": ref['dst']}):
trd_hosts.append(trd['_id'])
params = {'dst': ref['dst'],
'len': len(ref['srcs']),
'uris': json.dumps(ref['srcs'].keys()),
'truncated': json.dumps(trd_hosts),
'color': 150 - int(150.0 * len_src / self._max_ref),
'weight': 80.0 + 150.0 * len_src / self._max_ref}
html += '<span class="dst" style="font-size: %(weight)d%%;"><a style="text-decoration: none; color: rgb(%(color)d, %(color)d, %(color)d);" href="javascript:void(0);" onclick=\'show_stats("%(dst)s", %(uris)s, %(truncated)s, "host_dst", "refered_dst", "truncated_dst")\'>%(dst)s(%(len)d)</a></span> ' % params
html += '</div>'
return html
def print_html(self):
self._get_graph()
self._get_soa_rname()
dot = self._get_full_dot()
sif = self._get_full_sif()
html = self._html % {'top_n': self._get_top_n_html(10),
'refs' : self._get_top_n_leaking(50) }
open(os.path.join(self._outdir, self._dot_full), 'w').write(dot)
open(os.path.join(self._outdir, self._sif_full), 'w').write(sif)
open(os.path.join(self._outdir, 'http_stats.html'),'w').write(html)
def usage():
print sys.argv[0], '-m mongo_server -o output_dir'
if __name__ == '__main__':
server = 'localhost:27017'
outdir = os.getcwd()
try:
opts, args = getopt.getopt(sys.argv[1:], "hm:o:", ["help", "mongo=", "output="])
except getopt.GetoptError:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-o", "--output"):
outdir = a
if o in ("-m", "--mongo"):
server = a
stats = http_stats(server, outdir)
stats.print_html()
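# Illustrative sketch (not part of the original script): the "leaking" score in
# _get_top_n_leaking weights each referring site by the squared, normalised
# in-degree of every destination it refers to. Assuming plain dict graphs, the
# same computation reduces to the helper below.
def _leak_score_sketch(out_edges, in_graph, max_ref):
    # out_edges: destinations referred to by one source host
    # in_graph:  destination host -> list of referring hosts
    # max_ref:   largest in-degree observed over all destinations
    score = 0.0
    for dst in out_edges:
        if dst in in_graph and max_ref:
            score += (float(len(in_graph[dst])) / max_ref) ** 2
    return score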
|
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import contextlib
import importlib
import os
from os import path
import pkgutil
import platform
import shutil
import sys
import tempfile
import unittest
import six
import grpc
from grpc_tools import protoc
from tests.unit import test_common
_MESSAGES_IMPORT = b'import "messages.proto";'
_SPLIT_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing.split;'
_COMMON_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing;'
_RELATIVE_PROTO_PATH = 'relative_proto_path'
_RELATIVE_PYTHON_OUT = 'relative_python_out'
@contextlib.contextmanager
def _system_path(path_insertion):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
try:
yield
finally:
sys.path = old_system_path
# NOTE(nathaniel): https://twitter.com/exoplaneteer/status/677259364256747520
# Life lesson "just always default to idempotence" reinforced.
def _create_directory_tree(root, path_components_sequence):
created = set()
for path_components in path_components_sequence:
thus_far = ''
for path_component in path_components:
relative_path = path.join(thus_far, path_component)
if relative_path not in created:
os.makedirs(path.join(root, relative_path))
created.add(relative_path)
thus_far = path.join(thus_far, path_component)
def _massage_proto_content(proto_content, test_name_bytes,
messages_proto_relative_file_name_bytes):
package_substitution = (b'package grpc_protoc_plugin.invocation_testing.' +
test_name_bytes + b';')
common_namespace_substituted = proto_content.replace(
_COMMON_NAMESPACE, package_substitution)
split_namespace_substituted = common_namespace_substituted.replace(
_SPLIT_NAMESPACE, package_substitution)
message_import_replaced = split_namespace_substituted.replace(
_MESSAGES_IMPORT,
b'import "' + messages_proto_relative_file_name_bytes + b'";')
return message_import_replaced
def _packagify(directory):
for subdirectory, _, _ in os.walk(directory):
init_file_name = path.join(subdirectory, '__init__.py')
with open(init_file_name, 'wb') as init_file:
init_file.write(b'')
class _Servicer(object):
def __init__(self, response_class):
self._response_class = response_class
def Call(self, request, context):
return self._response_class()
def _protoc(proto_path, python_out, grpc_python_out_flag, grpc_python_out,
absolute_proto_file_names):
args = [
'',
'--proto_path={}'.format(proto_path),
]
if python_out is not None:
args.append('--python_out={}'.format(python_out))
if grpc_python_out is not None:
args.append('--grpc_python_out={}:{}'.format(grpc_python_out_flag,
grpc_python_out))
args.extend(absolute_proto_file_names)
return protoc.main(args)
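# For illustration only (paths are hypothetical and this constant is not used
# by the tests): a single-pass "grpc_2_0" invocation of the helper above
# assembles an argument vector of roughly this shape before handing it to
# protoc.main.
_EXAMPLE_PROTOC_ARGS = [
    '',
    '--proto_path=/tmp/protos',
    '--python_out=/tmp/out',
    '--grpc_python_out=grpc_2_0:/tmp/out',
    '/tmp/protos/services.proto',
]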
class _Mid2016ProtocStyle(object):
def name(self):
return 'Mid2016ProtocStyle'
def grpc_in_pb2_expected(self):
return True
def protoc(self, proto_path, python_out, absolute_proto_file_names):
return (_protoc(proto_path, python_out, 'grpc_1_0', python_out,
absolute_proto_file_names),)
class _SingleProtocExecutionProtocStyle(object):
def name(self):
return 'SingleProtocExecutionProtocStyle'
def grpc_in_pb2_expected(self):
return False
def protoc(self, proto_path, python_out, absolute_proto_file_names):
return (_protoc(proto_path, python_out, 'grpc_2_0', python_out,
absolute_proto_file_names),)
class _ProtoBeforeGrpcProtocStyle(object):
def name(self):
return 'ProtoBeforeGrpcProtocStyle'
def grpc_in_pb2_expected(self):
return False
def protoc(self, proto_path, python_out, absolute_proto_file_names):
pb2_protoc_exit_code = _protoc(proto_path, python_out, None, None,
absolute_proto_file_names)
pb2_grpc_protoc_exit_code = _protoc(
proto_path, None, 'grpc_2_0', python_out, absolute_proto_file_names)
return pb2_protoc_exit_code, pb2_grpc_protoc_exit_code,
class _GrpcBeforeProtoProtocStyle(object):
def name(self):
return 'GrpcBeforeProtoProtocStyle'
def grpc_in_pb2_expected(self):
return False
def protoc(self, proto_path, python_out, absolute_proto_file_names):
pb2_grpc_protoc_exit_code = _protoc(
proto_path, None, 'grpc_2_0', python_out, absolute_proto_file_names)
pb2_protoc_exit_code = _protoc(proto_path, python_out, None, None,
absolute_proto_file_names)
return pb2_grpc_protoc_exit_code, pb2_protoc_exit_code,
_PROTOC_STYLES = (
_Mid2016ProtocStyle(),
_SingleProtocExecutionProtocStyle(),
_ProtoBeforeGrpcProtocStyle(),
_GrpcBeforeProtoProtocStyle(),
)
@unittest.skipIf(platform.python_implementation() == 'PyPy',
'Skip test if run with PyPy!')
class _Test(six.with_metaclass(abc.ABCMeta, unittest.TestCase)):
def setUp(self):
self._directory = tempfile.mkdtemp(suffix=self.NAME, dir='.')
self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
os.makedirs(self._proto_path)
os.makedirs(self._python_out)
proto_directories_and_names = {
(
self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES,
self.MESSAGES_PROTO_FILE_NAME,
),
(
self.SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES,
self.SERVICES_PROTO_FILE_NAME,
),
}
messages_proto_relative_file_name_forward_slashes = '/'.join(
self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES +
(self.MESSAGES_PROTO_FILE_NAME,))
_create_directory_tree(self._proto_path,
(relative_proto_directory_names
for relative_proto_directory_names, _ in
proto_directories_and_names))
self._absolute_proto_file_names = set()
for relative_directory_names, file_name in proto_directories_and_names:
absolute_proto_file_name = path.join(
self._proto_path, *relative_directory_names + (file_name,))
raw_proto_content = pkgutil.get_data(
'tests.protoc_plugin.protos.invocation_testing',
path.join(*relative_directory_names + (file_name,)))
massaged_proto_content = _massage_proto_content(
raw_proto_content, self.NAME.encode(),
messages_proto_relative_file_name_forward_slashes.encode())
with open(absolute_proto_file_name, 'wb') as proto_file:
proto_file.write(massaged_proto_content)
self._absolute_proto_file_names.add(absolute_proto_file_name)
def tearDown(self):
shutil.rmtree(self._directory)
def _protoc(self):
protoc_exit_codes = self.PROTOC_STYLE.protoc(
self._proto_path, self._python_out, self._absolute_proto_file_names)
for protoc_exit_code in protoc_exit_codes:
self.assertEqual(0, protoc_exit_code)
_packagify(self._python_out)
generated_modules = {}
expected_generated_full_module_names = {
self.EXPECTED_MESSAGES_PB2,
self.EXPECTED_SERVICES_PB2,
self.EXPECTED_SERVICES_PB2_GRPC,
}
with _system_path([self._python_out]):
for full_module_name in expected_generated_full_module_names:
module = importlib.import_module(full_module_name)
generated_modules[full_module_name] = module
self._messages_pb2 = generated_modules[self.EXPECTED_MESSAGES_PB2]
self._services_pb2 = generated_modules[self.EXPECTED_SERVICES_PB2]
self._services_pb2_grpc = generated_modules[
self.EXPECTED_SERVICES_PB2_GRPC]
def _services_modules(self):
if self.PROTOC_STYLE.grpc_in_pb2_expected():
return self._services_pb2, self._services_pb2_grpc,
else:
return self._services_pb2_grpc,
def test_imported_attributes(self):
self._protoc()
self._messages_pb2.Request
self._messages_pb2.Response
self._services_pb2.DESCRIPTOR.services_by_name['TestService']
for services_module in self._services_modules():
services_module.TestServiceStub
services_module.TestServiceServicer
services_module.add_TestServiceServicer_to_server
def test_call(self):
self._protoc()
for services_module in self._services_modules():
server = test_common.test_server()
services_module.add_TestServiceServicer_to_server(
_Servicer(self._messages_pb2.Response), server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = services_module.TestServiceStub(channel)
response = stub.Call(self._messages_pb2.Request())
self.assertEqual(self._messages_pb2.Response(), response)
server.stop(None)
def _create_test_case_class(split_proto, protoc_style):
attributes = {}
name = '{}{}'.format('SplitProto' if split_proto else 'SameProto',
protoc_style.name())
attributes['NAME'] = name
if split_proto:
attributes['MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES'] = (
'split_messages',
'sub',
)
attributes['MESSAGES_PROTO_FILE_NAME'] = 'messages.proto'
attributes['SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES'] = (
'split_services',)
attributes['SERVICES_PROTO_FILE_NAME'] = 'services.proto'
attributes['EXPECTED_MESSAGES_PB2'] = 'split_messages.sub.messages_pb2'
attributes['EXPECTED_SERVICES_PB2'] = 'split_services.services_pb2'
attributes['EXPECTED_SERVICES_PB2_GRPC'] = (
'split_services.services_pb2_grpc')
else:
attributes['MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES'] = ()
attributes['MESSAGES_PROTO_FILE_NAME'] = 'same.proto'
attributes['SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES'] = ()
attributes['SERVICES_PROTO_FILE_NAME'] = 'same.proto'
attributes['EXPECTED_MESSAGES_PB2'] = 'same_pb2'
attributes['EXPECTED_SERVICES_PB2'] = 'same_pb2'
attributes['EXPECTED_SERVICES_PB2_GRPC'] = 'same_pb2_grpc'
attributes['PROTOC_STYLE'] = protoc_style
attributes['__module__'] = _Test.__module__
return type('{}Test'.format(name), (_Test,), attributes)
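# For reference: the factory above produces one test class per
# (split_proto, protoc_style) combination, named e.g.
# SameProtoMid2016ProtocStyleTest or SplitProtoGrpcBeforeProtoProtocStyleTest.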
def _create_test_case_classes():
for split_proto in (
False,
True,
):
for protoc_style in _PROTOC_STYLES:
yield _create_test_case_class(split_proto, protoc_style)
def load_tests(loader, tests, pattern):
tests = tuple(
loader.loadTestsFromTestCase(test_case_class)
for test_case_class in _create_test_case_classes())
return unittest.TestSuite(tests=tests)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
from __future__ import absolute_import
from __future__ import print_function
import os
from pylab import *
from six.moves import map
from six.moves import range
def plot_mem(p, use_histogram=True):
with open(p, "r") as rfile:
ms = []
started = False
mx, mn = -Inf, Inf
for line in rfile:
# print line
msg, mem = list(map(str.strip, line.split(":")))
x = float(mem)
mn = min(mn, x)
mx = max(mx, x)
# print msg, mem
if not started and msg.startswith("<"):
started = True
s = x
elif started and msg.startswith(">"):
started = False
ms.append(x - s)
if use_histogram:
hist(ms, 30)
else:
plot(list(range(len(ms))), ms)
print(mn, mx, len(ms), (mx - mn) / float(len(ms)))
# try:
# yi = float(mem)
# continue
# except ValueError:
# continue
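# Example of the log format plot_mem/plot_file expect (values hypothetical):
# every line is "<message>: <memory>"; plot_mem pairs a line whose message
# starts with "<" (block start) with the next line starting with ">" (block
# end) and histograms/plots the per-block memory deltas, e.g.
#   <load spectrum: 101.4
#   run fits: 103.2
#   >load spectrum: 102.0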
def plot_file(p, normalize=False, stacked=False, use_gradient=False, memory=False):
x = []
y = []
ts = []
cnt = 0
n = 0
mi = Inf
ma = -Inf
if memory:
subplot(2, 1, 1)
mxs = []
mys = []
with open(p, "r") as rfile:
xi = 0
ticked = False
# for line in fp:
# msg, mem = map(str.strip, line.split(':'))
# if msg.startswith('exp start'):
# break
for line in rfile:
# print line
msg, mem = list(map(str.strip, line.split(":")))
# print msg, mem
# if msg.startswith('exp start'):
# continue
# if msg.startswith('collect'):
# continue
try:
yi = float(mem)
y.append(yi)
x.append(xi)
mi = min(mi, yi)
ma = max(ma, yi)
xi += 1
ts.append(msg)
except ValueError:
continue
if msg.startswith(">"):
n += 1
if not ticked and stacked:
xticks(x, ts, rotation=-90)
ticked = True
start_mem = y[0]
y = array(y)
end_mem = y[-1]
mxs.append(cnt)
mys.append((max(y) - min(y)))
if normalize:
y -= y[0]
if use_gradient:
x = x[1:]
y = diff(y)
# y = gradient(y)
plot(x, y, label=os.path.basename(p) + str(cnt))
if stacked:
xi = 0
x = []
y = []
ts = []
cnt += 1
if len(x) > 1:
end_mem = y[-1]
n += 1
y = array(y)
if normalize:
y -= y[0]
if use_gradient:
y = diff(y)
x = x[1:]
# y = gradient(y)
if not ticked and stacked:
xticks(x, ts, rotation=-90)
plot(x, y, label=os.path.basename(p) + str(cnt))
if memory:
subplot(2, 1, 2)
print(mxs)
plot(mxs, mys)
print("Min: {} Max: {} avg: {} n: {}".format(mi, ma, (ma - mi) / float(n), n))
# print 'start: {} end: {}'.format(start_mem, end_mem)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-n", "--normalize",
dest="normalize",
action="store_true",
default=False,
)
parser.add_argument(
"-s", "--stacked", dest="stacked", action="store_true", default=False
)
parser.add_argument(
"-g", "--gradient",
dest="gradient",
action="store_true",
default=False,
)
parser.add_argument(
"-m", dest="memory", action="store_true", default=False
)
parser.add_argument(
"-u", dest="usize", action="store_true", default=False
)
parser.add_argument(
"-U", dest="uhist", action="store_true", default=False
)
parser.add_argument("paths", metavar="p", nargs="+")
args = parser.parse_args()
print(args)
root = os.path.expanduser("~")
d = os.path.join(root, "Desktop", "memtest")
if args:
paths = args.paths
normalize = args.normalize
stacked = args.stacked
grad = args.gradient
mem = args.memory
usize = args.usize
uhist = args.uhist
if paths[0] == "last":
i = 1
while 1:
pa = os.path.join(d, "mem-{:03d}.txt".format(i))
if os.path.isfile(pa):
i += 1
else:
pa = os.path.join(d, "mem-{:03d}.txt".format(i - 1))
if os.path.isfile(pa):
break
else:
i += 1
if usize:
plot_mem(pa, use_histogram=False)
elif uhist:
plot_mem(pa, use_histogram=True)
else:
plot_file(
pa,
normalize=normalize,
stacked=stacked,
use_gradient=grad,
memory=mem,
)
tight_layout()
show()
else:
for ai in paths:
n = "mem-{:03d}.txt".format(int(ai))
p = os.path.join(d, n)
plot_file(
p,
normalize=normalize,
stacked=stacked,
use_gradient=grad,
)
# legend(loc='upper left')
tight_layout()
show()
|
|
"""Support for KNX/IP lights."""
from enum import Enum
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, Light)
from homeassistant.const import CONF_ADDRESS, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
CONF_STATE_ADDRESS = 'state_address'
CONF_BRIGHTNESS_ADDRESS = 'brightness_address'
CONF_BRIGHTNESS_STATE_ADDRESS = 'brightness_state_address'
CONF_COLOR_ADDRESS = 'color_address'
CONF_COLOR_STATE_ADDRESS = 'color_state_address'
CONF_COLOR_TEMP_ADDRESS = 'color_temperature_address'
CONF_COLOR_TEMP_STATE_ADDRESS = 'color_temperature_state_address'
CONF_COLOR_TEMP_MODE = 'color_temperature_mode'
CONF_MIN_KELVIN = 'min_kelvin'
CONF_MAX_KELVIN = 'max_kelvin'
DEFAULT_NAME = 'KNX Light'
DEFAULT_COLOR = [255, 255, 255]
DEFAULT_BRIGHTNESS = 255
DEFAULT_COLOR_TEMP_MODE = 'absolute'
DEFAULT_MIN_KELVIN = 2700 # 370 mireds
DEFAULT_MAX_KELVIN = 6000 # 166 mireds
DEPENDENCIES = ['knx']
class ColorTempModes(Enum):
"""Color temperature modes for config validation."""
absolute = "DPT-7.600"
relative = "DPT-5.001"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE):
cv.enum(ColorTempModes),
vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN):
vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN):
vol.All(vol.Coerce(int), vol.Range(min=1)),
})
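# Example configuration.yaml entry for this platform (group addresses are
# hypothetical, shown only to illustrate the schema above):
#
#   light:
#     - platform: knx
#       name: Kitchen
#       address: '1/0/9'
#       brightness_address: '1/0/11'
#       brightness_state_address: '1/0/12'
#       color_temperature_address: '1/0/13'
#       color_temperature_mode: absolute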
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up lights for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up lights for KNX platform configured via xknx.yaml."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXLight(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up light for KNX platform configured within platform."""
import xknx
group_address_tunable_white = None
group_address_tunable_white_state = None
group_address_color_temp = None
group_address_color_temp_state = None
if config[CONF_COLOR_TEMP_MODE] == ColorTempModes.absolute:
group_address_color_temp = config.get(CONF_COLOR_TEMP_ADDRESS)
group_address_color_temp_state = \
config.get(CONF_COLOR_TEMP_STATE_ADDRESS)
elif config[CONF_COLOR_TEMP_MODE] == ColorTempModes.relative:
group_address_tunable_white = config.get(CONF_COLOR_TEMP_ADDRESS)
group_address_tunable_white_state = \
config.get(CONF_COLOR_TEMP_STATE_ADDRESS)
light = xknx.devices.Light(
hass.data[DATA_KNX].xknx,
name=config[CONF_NAME],
group_address_switch=config[CONF_ADDRESS],
group_address_switch_state=config.get(CONF_STATE_ADDRESS),
group_address_brightness=config.get(CONF_BRIGHTNESS_ADDRESS),
group_address_brightness_state=config.get(
CONF_BRIGHTNESS_STATE_ADDRESS),
group_address_color=config.get(CONF_COLOR_ADDRESS),
group_address_color_state=config.get(CONF_COLOR_STATE_ADDRESS),
group_address_tunable_white=group_address_tunable_white,
group_address_tunable_white_state=group_address_tunable_white_state,
group_address_color_temperature=group_address_color_temp,
group_address_color_temperature_state=group_address_color_temp_state,
min_kelvin=config[CONF_MIN_KELVIN],
max_kelvin=config[CONF_MAX_KELVIN])
hass.data[DATA_KNX].xknx.devices.add(light)
async_add_entities([KNXLight(light)])
class KNXLight(Light):
"""Representation of a KNX light."""
def __init__(self, device):
"""Initialize of KNX light."""
self.device = device
self._min_kelvin = device.min_kelvin
self._max_kelvin = device.max_kelvin
self._min_mireds = \
color_util.color_temperature_kelvin_to_mired(self._max_kelvin)
self._max_mireds = \
color_util.color_temperature_kelvin_to_mired(self._min_kelvin)
@callback
def async_register_callbacks(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
async def async_added_to_hass(self):
"""Store register state change callback."""
self.async_register_callbacks()
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self.device.supports_color:
if self.device.current_color is None:
return None
return max(self.device.current_color)
if self.device.supports_brightness:
return self.device.current_brightness
return None
@property
def hs_color(self):
"""Return the HS color value."""
if self.device.supports_color:
rgb = self.device.current_color
if rgb is None:
return None
return color_util.color_RGB_to_hs(*rgb)
return None
@property
def color_temp(self):
"""Return the color temperature in mireds."""
if self.device.supports_color_temperature:
kelvin = self.device.current_color_temperature
if kelvin is not None:
return color_util.color_temperature_kelvin_to_mired(kelvin)
if self.device.supports_tunable_white:
relative_ct = self.device.current_tunable_white
if relative_ct is not None:
# KNX devices typically use Kelvin, so use it as the base for
# calculating the color temperature from the relative (percent) value
return color_util.color_temperature_kelvin_to_mired(
self._min_kelvin + (
(relative_ct / 255) *
(self._max_kelvin - self._min_kelvin)))
return None
@property
def min_mireds(self):
"""Return the coldest color temp this light supports in mireds."""
return self._min_mireds
@property
def max_mireds(self):
"""Return the warmest color temp this light supports in mireds."""
return self._max_mireds
@property
def effect_list(self):
"""Return the list of supported effects."""
return None
@property
def effect(self):
"""Return the current effect."""
return None
@property
def is_on(self):
"""Return true if light is on."""
return self.device.state
@property
def supported_features(self):
"""Flag supported features."""
flags = 0
if self.device.supports_brightness:
flags |= SUPPORT_BRIGHTNESS
if self.device.supports_color:
flags |= SUPPORT_COLOR | SUPPORT_BRIGHTNESS
if self.device.supports_color_temperature or \
self.device.supports_tunable_white:
flags |= SUPPORT_COLOR_TEMP
return flags
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
mireds = kwargs.get(ATTR_COLOR_TEMP, self.color_temp)
update_brightness = ATTR_BRIGHTNESS in kwargs
update_color = ATTR_HS_COLOR in kwargs
update_color_temp = ATTR_COLOR_TEMP in kwargs
# always only go one path for turning on (avoid conflicting changes
# and weird effects)
if self.device.supports_brightness and \
(update_brightness and not update_color):
# if we don't need to update the color, try updating brightness
# directly if supported; don't do it if color also has to be
# changed, as RGB color implicitly sets the brightness as well
await self.device.set_brightness(brightness)
elif self.device.supports_color and \
(update_brightness or update_color):
# change RGB color (includes brightness)
# if brightness or hs_color was not yet set use the default value
# to calculate RGB from as a fallback
if brightness is None:
brightness = DEFAULT_BRIGHTNESS
if hs_color is None:
hs_color = DEFAULT_COLOR
await self.device.set_color(
color_util.color_hsv_to_RGB(*hs_color, brightness * 100 / 255))
elif self.device.supports_color_temperature and \
update_color_temp:
# change color temperature without ON telegram
kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds))
if kelvin > self._max_kelvin:
kelvin = self._max_kelvin
elif kelvin < self._min_kelvin:
kelvin = self._min_kelvin
await self.device.set_color_temperature(kelvin)
elif self.device.supports_tunable_white and \
update_color_temp:
# calculate relative_ct from Kelvin to fit typical KNX devices
kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds))
relative_ct = int(255 * (kelvin - self._min_kelvin) /
(self._max_kelvin - self._min_kelvin))
await self.device.set_tunable_white(relative_ct)
else:
# no color/brightness change requested, so just turn it on
await self.device.set_on()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self.device.set_off()
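# Worked sketch (illustrative only, not part of the platform): the tunable
# white path above maps a relative value 0..255 onto the configured Kelvin
# range before converting to mireds. With the defaults (2700 K..6000 K) a
# relative value of 128 gives roughly 2700 + (128 / 255) * 3300 ~= 4357 K,
# i.e. about 230 mireds.
def _relative_ct_to_mireds_sketch(relative_ct,
                                  min_kelvin=DEFAULT_MIN_KELVIN,
                                  max_kelvin=DEFAULT_MAX_KELVIN):
    """Mirror of the conversion used in KNXLight.color_temp."""
    kelvin = min_kelvin + (relative_ct / 255) * (max_kelvin - min_kelvin)
    return color_util.color_temperature_kelvin_to_mired(kelvin)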
|
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List, cast
import re
import pytest
import sympy
import cirq
from cirq._compat import proper_repr
import numpy as np
class CustomXPowGate(cirq.EigenGate):
def _eigen_components(self) -> List[Tuple[float, np.ndarray]]:
return [
(0, np.array([[0.5, 0.5], [0.5, 0.5]])),
(1, np.array([[0.5, -0.5], [-0.5, 0.5]])),
]
def __str__(self) -> str:
if self._global_shift == 0:
if self._exponent == 1:
return 'CustomX'
return f'CustomX**{self._exponent}'
return f'CustomXPowGate(exponent={self._exponent}, global_shift={self._global_shift!r})'
def __repr__(self) -> str:
if self._global_shift == 0:
if self._exponent == 1:
return 'cirq.ops.gateset_test.CustomX'
return f'(cirq.ops.gateset_test.CustomX**{proper_repr(self._exponent)})'
return 'cirq.ops.gateset_test.CustomXPowGate(exponent={}, global_shift={!r})'.format(
proper_repr(self._exponent), self._global_shift
)
def _num_qubits_(self) -> int:
return 1
CustomX = CustomXPowGate()
@pytest.mark.parametrize('gate', [CustomX, CustomXPowGate])
def test_gate_family_init(gate):
name = 'test_name'
description = 'test_description'
g = cirq.GateFamily(gate=gate, name=name, description=description)
assert g.gate == gate
assert g.name == name
assert g.description == description
@pytest.mark.parametrize('gate', [CustomX, CustomXPowGate])
def test_gate_family_default_name_and_description(gate):
g = cirq.GateFamily(gate)
assert re.match('.*GateFamily.*CustomX.*', g.name)
assert re.match('Accepts.*instances.*CustomX.*', g.description)
def test_invalid_gate_family():
with pytest.raises(ValueError, match='instance or subclass of `cirq.Gate`'):
_ = cirq.GateFamily(gate=cirq.Operation)
with pytest.raises(ValueError, match='non-parameterized instance of `cirq.Gate`'):
_ = cirq.GateFamily(gate=CustomX ** sympy.Symbol('theta'))
def test_gate_family_immutable():
g = cirq.GateFamily(CustomX)
with pytest.raises(AttributeError, match="can't set attribute"):
g.gate = CustomXPowGate
with pytest.raises(AttributeError, match="can't set attribute"):
g.name = 'new name'
with pytest.raises(AttributeError, match="can't set attribute"):
g.description = 'new description'
@pytest.mark.parametrize(
'gate', [CustomX, CustomXPowGate(exponent=0.5, global_shift=0.1), CustomXPowGate]
)
@pytest.mark.parametrize('name, description', [(None, None), ('custom_name', 'custom_description')])
def test_gate_family_repr_and_str(gate, name, description):
g = cirq.GateFamily(gate, name=name, description=description)
cirq.testing.assert_equivalent_repr(g)
assert g.name in str(g)
assert g.description in str(g)
@pytest.mark.parametrize('gate', [cirq.X, cirq.XPowGate(), cirq.XPowGate])
@pytest.mark.parametrize('name, description', [(None, None), ('custom_name', 'custom_description')])
def test_gate_family_json(gate, name, description):
g = cirq.GateFamily(gate, name=name, description=description)
g_json = cirq.to_json(g)
assert cirq.read_json(json_text=g_json) == g
def test_gate_family_eq():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(cirq.GateFamily(CustomX))
eq.add_equality_group(cirq.GateFamily(CustomX ** 3))
eq.add_equality_group(
cirq.GateFamily(CustomX, name='custom_name', description='custom_description'),
cirq.GateFamily(CustomX ** 3, name='custom_name', description='custom_description'),
)
eq.add_equality_group(cirq.GateFamily(CustomXPowGate))
eq.add_equality_group(
cirq.GateFamily(CustomXPowGate, name='custom_name', description='custom_description')
)
@pytest.mark.parametrize(
'gate_family, gates_to_check',
[
(
cirq.GateFamily(CustomXPowGate),
[
(CustomX, True),
(CustomX ** 0.5, True),
(CustomX ** sympy.Symbol('theta'), True),
(CustomXPowGate(exponent=0.25, global_shift=0.15), True),
(cirq.SingleQubitGate(), False),
(cirq.X ** 0.5, False),
(None, False),
(cirq.global_phase_operation(1j), False),
],
),
(
cirq.GateFamily(CustomX),
[
(CustomX, True),
(CustomXPowGate(exponent=1, global_shift=0.15), True),
(CustomX ** 2, False),
(CustomX ** 3, True),
(CustomX ** sympy.Symbol('theta'), False),
(None, False),
(cirq.global_phase_operation(1j), False),
],
),
(
cirq.GateFamily(CustomX, ignore_global_phase=False),
[
(CustomX, True),
(CustomXPowGate(exponent=1, global_shift=0.15), False),
],
),
],
)
def test_gate_family_predicate_and_containment(gate_family, gates_to_check):
q = cirq.NamedQubit("q")
for gate, result in gates_to_check:
assert gate_family._predicate(gate) == result
assert (gate in gate_family) == result
if isinstance(gate, cirq.Gate):
assert (gate(q) in gate_family) == result
assert (gate(q).with_tags('tags') in gate_family) == result
class CustomXGateFamily(cirq.GateFamily):
"""Accepts all integer powers of CustomXPowGate"""
def __init__(self) -> None:
super().__init__(
gate=CustomXPowGate,
name='CustomXGateFamily',
description='Accepts all integer powers of CustomXPowGate',
)
def _predicate(self, g: cirq.Gate) -> bool:
"""Checks whether gate instance `g` belongs to this GateFamily."""
if not super()._predicate(g) or cirq.is_parameterized(g):
return False
exp = cast(CustomXPowGate, g).exponent
return int(exp) == exp
def __repr__(self):
return 'cirq.ops.gateset_test.CustomXGateFamily()'
gateset = cirq.Gateset(
CustomX ** 0.5, cirq.testing.TwoQubitGate, CustomXGateFamily(), name='custom gateset'
)
def test_gateset_init():
assert gateset.name == 'custom gateset'
assert gateset.gates == frozenset(
[
cirq.GateFamily(CustomX ** 0.5),
cirq.GateFamily(cirq.testing.TwoQubitGate),
CustomXGateFamily(),
]
)
def test_gateset_repr_and_str():
cirq.testing.assert_equivalent_repr(gateset)
assert gateset.name in str(gateset)
for gate_family in gateset.gates:
assert str(gate_family) in str(gateset)
@pytest.mark.parametrize(
'gate, result',
[
(CustomX, True),
(CustomX ** 2, True),
(CustomXPowGate(exponent=3, global_shift=0.5), True),
(CustomX ** 0.5, True),
(CustomXPowGate(exponent=0.5, global_shift=0.5), True),
(CustomX ** 0.25, False),
(CustomX ** sympy.Symbol('theta'), False),
(cirq.testing.TwoQubitGate(), True),
],
)
def test_gateset_contains(gate, result):
assert (gate in gateset) is result
op = gate(*cirq.LineQubit.range(gate.num_qubits()))
assert (op in gateset) is result
assert (op.with_tags('tags') in gateset) is result
circuit_op = cirq.CircuitOperation(cirq.FrozenCircuit([op] * 5), repetitions=5)
assert (circuit_op in gateset) is result
assert circuit_op not in gateset.with_params(unroll_circuit_op=False)
@pytest.mark.parametrize('use_circuit_op', [True, False])
@pytest.mark.parametrize('use_global_phase', [True, False])
def test_gateset_validate(use_circuit_op, use_global_phase):
def optree_and_circuit(optree):
yield optree
yield cirq.Circuit(optree)
def get_ops(use_circuit_op, use_global_phase):
q = cirq.LineQubit.range(3)
yield [CustomX(q[0]).with_tags('custom tags'), CustomX(q[1]) ** 2, CustomX(q[2]) ** 3]
yield [CustomX(q[0]) ** 0.5, cirq.testing.TwoQubitGate()(*q[:2])]
if use_circuit_op:
circuit_op = cirq.CircuitOperation(
cirq.FrozenCircuit(get_ops(False, False)), repetitions=10
).with_tags('circuit op tags')
recursive_circuit_op = cirq.CircuitOperation(
cirq.FrozenCircuit([circuit_op, CustomX(q[2]) ** 0.5]),
repetitions=10,
qubit_map={q[0]: q[1], q[1]: q[2], q[2]: q[0]},
)
yield [circuit_op, recursive_circuit_op]
if use_global_phase:
yield cirq.global_phase_operation(1j)
def assert_validate_and_contains_consistent(gateset, op_tree, result):
assert all(op in gateset for op in cirq.flatten_to_ops(op_tree)) is result
for item in optree_and_circuit(op_tree):
assert gateset.validate(item) is result
op_tree = [*get_ops(use_circuit_op, use_global_phase)]
assert_validate_and_contains_consistent(
gateset.with_params(
unroll_circuit_op=use_circuit_op,
accept_global_phase_op=use_global_phase,
),
op_tree,
True,
)
if use_circuit_op or use_global_phase:
assert_validate_and_contains_consistent(
gateset.with_params(
unroll_circuit_op=False,
accept_global_phase_op=False,
),
op_tree,
False,
)
def test_gateset_validate_circuit_op_negative_reps():
gate = CustomXPowGate(exponent=0.5)
op = cirq.CircuitOperation(cirq.FrozenCircuit(gate.on(cirq.LineQubit(0))), repetitions=-1)
assert op not in cirq.Gateset(gate)
assert op ** -1 in cirq.Gateset(gate)
def test_with_params():
assert gateset.with_params() is gateset
assert (
gateset.with_params(
name=gateset.name,
unroll_circuit_op=gateset._unroll_circuit_op,
accept_global_phase_op=gateset._accept_global_phase_op,
)
is gateset
)
gateset_with_params = gateset.with_params(
name='new name', unroll_circuit_op=False, accept_global_phase_op=False
)
assert gateset_with_params.name == 'new name'
assert gateset_with_params._unroll_circuit_op is False
assert gateset_with_params._accept_global_phase_op is False
def test_gateset_eq():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(cirq.Gateset(CustomX))
eq.add_equality_group(cirq.Gateset(CustomX ** 3))
eq.add_equality_group(cirq.Gateset(CustomX, name='Custom Gateset'))
eq.add_equality_group(cirq.Gateset(CustomX, name='Custom Gateset', unroll_circuit_op=False))
eq.add_equality_group(
cirq.Gateset(CustomX, name='Custom Gateset', accept_global_phase_op=False)
)
eq.add_equality_group(
cirq.Gateset(
cirq.GateFamily(CustomX, name='custom_name', description='custom_description'),
cirq.GateFamily(CustomX, name='custom_name', description='custom_description'),
),
cirq.Gateset(
cirq.GateFamily(CustomX ** 3, name='custom_name', description='custom_description'),
cirq.GateFamily(CustomX, name='custom_name', description='custom_description'),
),
)
eq.add_equality_group(
cirq.Gateset(CustomX, CustomXPowGate),
cirq.Gateset(CustomXPowGate, CustomX),
cirq.Gateset(CustomX, CustomX, CustomXPowGate),
cirq.Gateset(CustomXPowGate, CustomX, CustomXPowGate),
)
eq.add_equality_group(cirq.Gateset(CustomXGateFamily()))
eq.add_equality_group(
cirq.Gateset(
cirq.GateFamily(
gate=CustomXPowGate,
name='CustomXGateFamily',
description='Accepts all integer powers of CustomXPowGate',
)
)
)
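# Sanity sketch (not part of the test suite, never invoked): the eigen
# components defined at the top make CustomX act exactly like the Pauli-X
# gate, so its unitary should match cirq.X.
def _custom_x_matches_pauli_x_sketch() -> bool:
    return bool(np.allclose(cirq.unitary(CustomX), cirq.unitary(cirq.X)))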
|
|
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from xml.dom import minidom
from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.scheduler import api as scheduler_api
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
def shimmer(obj, do_raise=False):
# A bare list is passed in; we need to wrap it in a dict
return dict(hosts=obj)
root = xmlutil.TemplateElement('hosts', selector=shimmer)
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('host_name')
elem.set('service')
return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('status')
root.set('maintenance_mode')
return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('power_action')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = minidom.parseString(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
updates = {}
for child in node.childNodes[0].childNodes:
updates[child.tagName] = self.extract_text(child)
return dict(body=updates)
def _list_hosts(req, service=None):
"""Returns a summary list of hosts, optionally filtering
by service type.
"""
context = req.environ['nova.context']
hosts = scheduler_api.get_host_list(context)
if service:
hosts = [host for host in hosts
if host["service"] == service]
return hosts
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, service=None, *args, **kwargs):
listed_hosts = _list_hosts(req, service)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
raise exception.HostNotFound(host=id)
return wrapped
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
authorize(req.environ['nova.context'])
return {'hosts': _list_hosts(req)}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostDeserializer)
@check_host
def update(self, req, id, body):
authorize(req.environ['nova.context'])
update_values = {}
for raw_key, raw_val in body.iteritems():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
if val in ("enable", "disable"):
update_values['status'] = val.startswith("enable")
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
elif key == "maintenance_mode":
if val not in ['enable', 'disable']:
explanation = _("Invalid mode: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_values['maintenance_mode'] = val == 'enable'
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
# this is for handling multiple settings at the same time:
# the result dictionaries are merged in the first one.
# Note: the 'host' key will always be the same so it's
# okay that it gets overwritten.
update_setters = {'status': self._set_enabled_status,
'maintenance_mode': self._set_host_maintenance}
result = {}
for key, value in update_values.iteritems():
result.update(update_setters[key](req, id, value))
return result
def _set_host_maintenance(self, req, host, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
context = req.environ['nova.context']
LOG.audit(_("Putting host %(host)s in maintenance "
"mode %(mode)s.") % locals())
result = self.api.set_host_maintenance(context, host, mode)
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "maintenance_mode": result}
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new instances."""
context = req.environ['nova.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
result = self.api.set_host_enabled(context, host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "status": result}
def _host_power_action(self, req, host, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
result = self.api.host_power_action(context, host=host,
action=action)
except NotImplementedError as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
return {"host": host, "power_action": result}
@wsgi.serializers(xml=HostActionTemplate)
def startup(self, req, id):
return self._host_power_action(req, host=id, action="startup")
@wsgi.serializers(xml=HostActionTemplate)
def shutdown(self, req, id):
return self._host_power_action(req, host=id, action="shutdown")
@wsgi.serializers(xml=HostActionTemplate)
def reboot(self, req, id):
return self._host_power_action(req, host=id, action="reboot")
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
host = id
context = req.environ['nova.context']
if not context.is_admin:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
# Getting compute node info and related instances info
try:
compute_ref = db.service_get_all_compute_by_host(context, host)
compute_ref = compute_ref[0]
except exception.ComputeHostNotFound:
raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
instance_refs = db.instance_get_all_by_host(context,
compute_ref['host'])
# Getting total available/used resource
compute_ref = compute_ref['compute_node'][0]
resources = [{'resource': {'host': host, 'project': '(total)',
'cpu': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'disk_gb': compute_ref['local_gb']}},
{'resource': {'host': host, 'project': '(used_now)',
'cpu': compute_ref['vcpus_used'],
'memory_mb': compute_ref['memory_mb_used'],
'disk_gb': compute_ref['local_gb_used']}}]
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for i in instance_refs:
cpu_sum += i['vcpus']
mem_sum += i['memory_mb']
hdd_sum += i['root_gb'] + i['ephemeral_gb']
resources.append({'resource': {'host': host,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}})
# Getting usage resource per project
project_ids = [i['project_id'] for i in instance_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
vcpus = [i['vcpus'] for i in instance_refs
if i['project_id'] == project_id]
mem = [i['memory_mb'] for i in instance_refs
if i['project_id'] == project_id]
disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
if i['project_id'] == project_id]
resources.append({'resource': {'host': host,
'project': project_id,
'cpu': sum(vcpus),
'memory_mb': sum(mem),
'disk_gb': sum(disk)}})
return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration"""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/compute/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={'update': 'PUT'},
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
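# Illustrative only (not part of the extension): a PUT body such as
#   <host>
#     <status>enable</status>
#     <maintenance_mode>disable</maintenance_mode>
#   </host>
# is turned into {'status': 'enable', 'maintenance_mode': 'disable'} by
# HostDeserializer, and HostController.update then maps it to
# {'status': True, 'maintenance_mode': False} before dispatching to the
# corresponding setters.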
|