import glob
import gzip
import os
import sqlite3
import traceback
from opensextant.utility import ensure_dirs
CATALOGS = {"googlebooks": "G"}
def _ignoreable(text, mn, mx):
if len(text) < mn or len(text) > mx:
# print("Ignore short or long")
return True
if text[-1].isdigit():
# Ignore numeric nonsense
return True
return False
class WordStats:
def __init__(self, db, minlen=2, maxlen=30):
"""
:param db: DB path
:param minlen: min length of tracked words
:param maxlen: max length of tracked words
"""
self.dbpath = db
self.counter = 0
self.ignored = 0
self.minlen = minlen
self.maxlen = maxlen
self.conn = None
self.commit_rate = 100000
self.cache = set([])
self.cache_loaded = False
if self.createdb():
self.reopen()
def reopen(self):
if self.conn is not None:
return
# really close cleanly
self.close()
self.conn = sqlite3.connect(self.dbpath)
self.conn.execute('PRAGMA cache_size = 8092')
self.conn.execute("PRAGMA encoding = 'UTF-8'")
self.conn.execute('PRAGMA synchronous = OFF')
self.conn.execute('PRAGMA journal_mode = MEMORY')
self.conn.execute('PRAGMA temp_store = MEMORY')
self.conn.row_factory = sqlite3.Row
def save(self, rows):
try:
sql = """insert into wordstats (word, pos, count, catalog) values (:w, :pos, :cnt, :cat)"""
self.conn.executemany(sql, rows)
self.conn.commit()
except Exception:
print("Failed to save words")
print(traceback.format_exc(limit=5))
def createdb(self):
if os.path.exists(self.dbpath):
return True
ensure_dirs(self.dbpath)
self.reopen()
sql_script = """
create TABLE wordstats (
`word` TEXT NOT NULL,
`pos` TEXT NOT NULL,
`count` INTEGER DEFAULT 0,
`catalog` TEXT NOT NULL
);
create INDEX wd_idx on wordstats ("word");
create INDEX pos_idx on wordstats ("pos");
create INDEX cat_idx on wordstats ("catalog");
"""
self.conn.executescript(sql_script)
self.conn.commit()
return True
def purge(self, cat):
sql = "delete from wordstats where catalog = ?"
self.conn.execute(sql, (cat,))
self.conn.commit()
def close(self):
if self.conn is not None:
self.conn.close()
self.conn = None
def ingest(self, statsfile, cat):
files = []
if os.path.exists(statsfile) and os.path.isdir(statsfile):
files = glob.glob(f"{statsfile}/*.gz")
else:
files.append(statsfile)
for f in files:
print(f"INGEST WORDS from {cat}: FILE {f}")
with gzip.open(f, "rt", encoding="UTF-8") as fh:
linecount = 0
terms = {}
self.purge(cat)
for line in fh:
linecount += 1
term = line.strip().split("\t")
termtext = term[0].lower()
pos = ""
curr = termtext
if "_" in termtext:
curr, pos = termtext.rsplit("_", 1)
if not pos:
curr = termtext
if _ignoreable(curr, self.minlen, self.maxlen):
self.ignored += 1
continue
subcount = int(term[2])
key = f"{curr}#{pos}"
if key not in terms:
terms[key] = {"cnt": 0, "w": curr, "pos": pos, "cat": cat}
self.counter += 1
if self.counter % self.commit_rate == 0:
self.save(terms.values())
terms.clear()
terms[key] = {"cnt": 0, "w": curr, "pos": pos, "cat": cat}
terms[key]["cnt"] += subcount
# Flush last batch.
self.save(terms.values())
print(f"LINES {linecount} WORDS {self.counter} IGNORED {self.ignored}")
def find(self, word, threshold, catalog="googlebooks"):
"""
EXPERIMENTAL
Word lookup. The catalog key is the catalog prefix plus the word's initial letter, e.g. "Gp"
is the catalog ID stored in the database when looking up "philadelphia" in googlebooks.
Threshold is a cutoff -- only word counts above it are returned.
If "word" contains "%", we assume this is a wildcard search.
Word stats include:
WORD "_" PARTOFSPEECH
WORD -- This query only uses bare word counts.
The bare WORD counts appear to be a sum of all sub-counts for WORD+POS occurrences.
:param word:
:param threshold:
:param catalog:
:return:
"""
cat = CATALOGS.get(catalog)
if cat:
cat = f"{cat}{word[0]}"
else:
cat = ""
word_clause = " word = ?"
if "%" in word:
word_clause = "word like ?"
sql = f"""select word, count as CNT from wordstats where pos = '' and catalog = ? and CNT > ?
and {word_clause} order by CNT desc"""
# Avoid making the SQL summation too difficult. For some reason there are multiple entries for certain
# word patterns -- POS may be NULL or "" or something else. But here we sum all bare word patterns
wordstats = {}
for row in self.conn.execute(sql, (cat, threshold, word)):
wd = row["word"]
if wd not in wordstats:
wordstats[wd] = 0
wordstats[wd] += row["CNT"]
return wordstats
def load_common(self, threshold=10000000):
"""
Find all common words. The discrete counts per word may have to be summed,
because part-of-speech accounting confuses things a bit. GoogleBooks Ngrams provides
no ground-truth totals per word.
:param threshold:
:return:
"""
sql = """select word, count as CNT from wordstats where pos = '' and CNT > 1000000 order by CNT desc"""
wordstats = {}
# Sum by word (which has already been lowercased and normalized)
for row in self.conn.execute(sql):
wd = row["word"]
if wd not in wordstats:
wordstats[wd] = 0
wordstats[wd] += row["CNT"]
# Filter by count
for wd in wordstats:
if wordstats[wd] > threshold:
self.cache.add(wd)
self.cache_loaded = True
def is_common(self, word, threshold=10000000):
"""
Check if a word is common. The threshold is ignored if the cache was pre-loaded using load_common().
If not pre-loaded, a query is made for each term not already in the cache.
:param word: word lookup. Ideally caller has lowercased/normalized this
:param threshold: default 10mil or more occurrence is a common NGram in GoogleBooks
:return:
"""
if word in self.cache:
return True
if self.cache_loaded:
return False
found = False
# find() cursor returns a dict of found terms. Counts are not used here.
for wordnorm in self.find(word, threshold=threshold):
self.cache.add(wordnorm)
found = True
return found
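# Illustrative usage sketch (not part of the module); the paths below are
# hypothetical. Per find(), the catalog ID stored per row is the catalog prefix
# plus the word's first letter, e.g. "Gp" for a googlebooks 1-gram file of words
# starting with "p".
stats = WordStats("/tmp/wordstats.sqlite")
stats.ingest("/data/googlebooks-1gram-p.gz", cat="Gp")
stats.load_common(threshold=10000000)
print(stats.is_common("philadelphia"))
stats.close()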
|
|
"""Config flow to configure Xiaomi Aqara."""
import logging
from socket import gaierror
import voluptuous as vol
from xiaomi_gateway import MULTICAST_PORT, XiaomiGateway, XiaomiGatewayDiscovery
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME, CONF_PORT, CONF_PROTOCOL
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_INTERFACE,
CONF_KEY,
CONF_SID,
DEFAULT_DISCOVERY_RETRY,
DOMAIN,
ZEROCONF_ACPARTNER,
ZEROCONF_GATEWAY,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_GATEWAY_NAME = "Xiaomi Aqara Gateway"
DEFAULT_INTERFACE = "any"
GATEWAY_CONFIG = vol.Schema(
{vol.Optional(CONF_INTERFACE, default=DEFAULT_INTERFACE): str}
)
CONFIG_HOST = {
vol.Optional(CONF_HOST): str,
vol.Optional(CONF_MAC): str,
}
GATEWAY_CONFIG_HOST = GATEWAY_CONFIG.extend(CONFIG_HOST)
GATEWAY_SETTINGS = vol.Schema(
{
vol.Optional(CONF_KEY): vol.All(str, vol.Length(min=16, max=16)),
vol.Optional(CONF_NAME, default=DEFAULT_GATEWAY_NAME): str,
}
)
class XiaomiAqaraFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Xiaomi Aqara config flow."""
VERSION = 1
def __init__(self):
"""Initialize."""
self.host = None
self.interface = DEFAULT_INTERFACE
self.sid = None
self.gateways = None
self.selected_gateway = None
@callback
def async_show_form_step_user(self, errors):
"""Show the form belonging to the user step."""
schema = GATEWAY_CONFIG
if (self.host is None and self.sid is None) or errors:
schema = GATEWAY_CONFIG_HOST
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is None:
return self.async_show_form_step_user(errors)
self.interface = user_input[CONF_INTERFACE]
# allow optional manual setting of host and mac
if self.host is None:
self.host = user_input.get(CONF_HOST)
if self.sid is None:
# format sid from mac_address
if (mac_address := user_input.get(CONF_MAC)) is not None:
self.sid = format_mac(mac_address).replace(":", "")
# if host is already known by zeroconf discovery or manual optional settings
if self.host is not None and self.sid is not None:
# Connect to Xiaomi Aqara Gateway
self.selected_gateway = await self.hass.async_add_executor_job(
XiaomiGateway,
self.host,
self.sid,
None,
DEFAULT_DISCOVERY_RETRY,
self.interface,
MULTICAST_PORT,
None,
)
if self.selected_gateway.connection_error:
errors[CONF_HOST] = "invalid_host"
if self.selected_gateway.mac_error:
errors[CONF_MAC] = "invalid_mac"
if errors:
return self.async_show_form_step_user(errors)
return await self.async_step_settings()
# Discover Xiaomi Aqara Gateways in the network to get the required SIDs.
xiaomi = XiaomiGatewayDiscovery(self.hass.add_job, [], self.interface)
try:
await self.hass.async_add_executor_job(xiaomi.discover_gateways)
except gaierror:
errors[CONF_INTERFACE] = "invalid_interface"
return self.async_show_form_step_user(errors)
self.gateways = xiaomi.gateways
if len(self.gateways) == 1:
self.selected_gateway = list(self.gateways.values())[0]
self.sid = self.selected_gateway.sid
return await self.async_step_settings()
if len(self.gateways) > 1:
return await self.async_step_select()
errors["base"] = "discovery_error"
return self.async_show_form_step_user(errors)
async def async_step_select(self, user_input=None):
"""Handle multiple aqara gateways found."""
errors = {}
if user_input is not None:
ip_adress = user_input["select_ip"]
self.selected_gateway = self.gateways[ip_adress]
self.sid = self.selected_gateway.sid
return await self.async_step_settings()
select_schema = vol.Schema(
{
vol.Required("select_ip"): vol.In(
[gateway.ip_adress for gateway in self.gateways.values()]
)
}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_zeroconf(
self, discovery_info: zeroconf.ZeroconfServiceInfo
) -> FlowResult:
"""Handle zeroconf discovery."""
name = discovery_info.name
self.host = discovery_info.host
mac_address = discovery_info.properties.get("mac")
if not name or not self.host or not mac_address:
return self.async_abort(reason="not_xiaomi_aqara")
# Check if the discovered device is a Xiaomi Aqara gateway.
if not (
name.startswith(ZEROCONF_GATEWAY) or name.startswith(ZEROCONF_ACPARTNER)
):
_LOGGER.debug(
"Xiaomi device '%s' discovered with host %s, not identified as xiaomi aqara gateway",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_aqara")
# format mac (include colons and make lowercase)
mac_address = format_mac(mac_address)
# format sid from mac_address
self.sid = mac_address.replace(":", "")
unique_id = mac_address
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured(
{CONF_HOST: self.host, CONF_MAC: mac_address}
)
self.context.update({"title_placeholders": {"name": self.host}})
return await self.async_step_user()
async def async_step_settings(self, user_input=None):
"""Specify settings and connect aqara gateway."""
errors = {}
if user_input is not None:
# get all required data
name = user_input[CONF_NAME]
key = user_input.get(CONF_KEY)
ip_adress = self.selected_gateway.ip_adress
port = self.selected_gateway.port
protocol = self.selected_gateway.proto
if key is not None:
# validate key by issuing stop ringtone playback command.
self.selected_gateway.key = key
valid_key = self.selected_gateway.write_to_hub(self.sid, mid=10000)
else:
valid_key = True
if valid_key:
# format_mac, for a gateway the sid equals the mac address
mac_address = format_mac(self.sid)
# set unique_id
unique_id = mac_address
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=name,
data={
CONF_HOST: ip_adress,
CONF_PORT: port,
CONF_MAC: mac_address,
CONF_INTERFACE: self.interface,
CONF_PROTOCOL: protocol,
CONF_KEY: key,
CONF_SID: self.sid,
},
)
errors[CONF_KEY] = "invalid_key"
return self.async_show_form(
step_id="settings", data_schema=GATEWAY_SETTINGS, errors=errors
)
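# Illustrative sketch (not part of the flow): how a user-supplied MAC address is
# turned into the gateway "sid" used in async_step_user and async_step_zeroconf.
# format_mac (imported above) lowercases and colon-separates the address; the flow
# then strips the colons. The MAC value below is hypothetical.
example_mac = format_mac("28-6D-CD-11-22-33")   # -> "28:6d:cd:11:22:33"
example_sid = example_mac.replace(":", "")      # -> "286dcd112233"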
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from hashlib import md5
import itertools
import json
import os
from uuid import uuid4
from enum import Enum
from multipledispatch import dispatch
import networkx
from solar.computable_inputs import ComputablePassedTypes
from solar.core.resource.repository import read_meta
from solar.core.resource.repository import Repository
from solar.core.signals import get_mapping
from solar.core.tags_set_parser import Expression
from solar.core.tags_set_parser import get_string_tokens
from solar.core import validation
from solar.dblayer.model import NONE
from solar.dblayer.model import StrInt
from solar.dblayer.solar_models import CommitedResource
from solar.dblayer.solar_models import Resource as DBResource
from solar.events import api
from solar import utils
RESOURCE_STATE = Enum(
'ResourceState', 'created operational removed error updated')
class Resource(object):
_metadata = {}
# Create
@dispatch(basestring, basestring)
def __init__(self, name, spec, args=None, tags=None):
args = args or {}
self.name = name
if spec:
if spec.startswith('/'):
# it's a full path, don't use repo
self.base_path = spec
metadata = read_meta(spec)
else:
repo, spec = Repository.parse(spec)
metadata = repo.get_metadata(spec)
self.base_path = repo.get_path(spec)
else:
metadata = deepcopy(self._metadata)
self.base_path = spec # TODO: remove this old method?
if tags is None:
tags = []
m_tags = metadata.get('tags', [])
tags.extend(m_tags)
tags.append('resource={}'.format(name))
inputs = metadata.get('input', {})
self.auto_extend_inputs(inputs)
self.db_obj = DBResource.from_dict(
name,
{
'id': name,
'name': name,
'actions_path': metadata.get('actions_path', ''),
'actions': metadata.get('actions', {}),
'base_name': metadata.get('base_name', ''),
'base_path': metadata.get('base_path', ''),
'handler': metadata.get('handler', ''),
'version': metadata.get('version', ''),
'meta_inputs': inputs,
'tags': tags,
'state': RESOURCE_STATE.created.name,
'managers': metadata.get('managers', [])
})
self.create_inputs(args)
self.db_obj.save()
# Load
@dispatch(object) # noqa
def __init__(self, resource_db):
self.db_obj = resource_db
self.name = resource_db.name
self.base_path = resource_db.base_path
def auto_extend_inputs(self, inputs):
# XXX: we didn't agree on `location_id` and `transports_id`
# that are added automatically to all resources
# using inputs for something like that may not be the best idea
# maybe we need something like `internal_input`
inputs.setdefault('location_id', {'value': "",
'schema': 'str!'})
inputs.setdefault('transports_id', {'value': "",
'schema': 'str'})
for inp in ('transports_id', 'location_id'):
if inputs[inp].get('value') == '$uuid':
inputs[inp]['value'] = md5(self.name + uuid4().hex).hexdigest()
def transports(self):
db_obj = self.db_obj
return db_obj.inputs._get_field_val('transports_id',
other='transports')
def ip(self):
db_obj = self.db_obj
return db_obj.inputs._get_field_val('location_id', other='ip')
@property
def actions(self):
if self.db_obj.actions:
return {action: os.path.join(
self.db_obj.actions_path, name)
for action, name in self.db_obj.actions.items()}
# else
ret = {
os.path.splitext(p)[0]: os.path.join(
self.db_obj.actions_path, p
)
for p in os.listdir(self.db_obj.actions_path)
}
return {
k: v for k, v in ret.items() if os.path.isfile(v)
}
def create_inputs(self, args=None):
args = args or {}
for name, v in self.db_obj.meta_inputs.items():
value = args.get(name, v.get('value'))
self.db_obj.inputs[name] = value
@property
def args(self):
return self.db_obj.inputs.as_dict()
def input_add(self, name, value=NONE, schema=None):
v = self.db_obj.inputs.add_new(name, value, schema)
self.db_obj.save_lazy()
return v
def input_computable_change(self, name, *args, **kwargs):
if args:
order = ('func', 'type', 'lang')
kwargs.update(dict(zip(order, args)))
kwargs = dict((x, kwargs[x]) for x in kwargs if kwargs[x] is not None)
db_obj = self.db_obj
mi = db_obj.meta_inputs
try:
computable = mi[name]['computable']
except KeyError:
raise Exception("Can't change computable input properties "
"when input is not computable.")
computable.update(kwargs)
if not isinstance(computable['type'], ComputablePassedTypes):
type_ = ComputablePassedTypes[computable['type']].name
else:
type_ = computable['type'].name
computable['type'] = type_
# we don't track nested dicts, only setting full dict will trigger
# change
mi[name]['computable'] = computable
db_obj.meta_inputs = mi
db_obj.save_lazy()
return True
def input_delete(self, name):
self.db_obj.inputs.remove_existing(name)
self.db_obj.save_lazy()
return
def update(self, args):
# TODO: disconnect input when it is updated and end_node
# for some input_to_input relation
self.db_obj.state = RESOURCE_STATE.updated.name
for k, v in args.items():
self.db_obj.inputs[k] = v
self.db_obj.save_lazy()
def delete(self):
return self.db_obj.delete()
def remove(self, force=False):
if force:
self.delete()
else:
self.db_obj.state = RESOURCE_STATE.removed.name
self.db_obj.save_lazy()
def set_operational(self):
self.db_obj.state = RESOURCE_STATE.operational.name
self.db_obj.save_lazy()
def set_error(self):
self.db_obj.state = RESOURCE_STATE.error.name
self.db_obj.save_lazy()
def set_created(self):
self.db_obj.state = RESOURCE_STATE.created.name
self.db_obj.save_lazy()
def to_be_removed(self):
return self.db_obj.state == RESOURCE_STATE.removed.name
@property
def tags(self):
return self.db_obj.tags
def add_tags(self, *tags):
for tag in tags:
self.db_obj.tags.set(tag)
self.db_obj.save_lazy()
def remove_tags(self, *tags):
for tag in tags:
self.db_obj.tags.remove(tag)
self.db_obj.save_lazy()
@property
def connections(self):
"""Gives you all incoming/outgoing connections for current resource.
Stored as:
[(emitter, emitter_input, receiver, receiver_input), ...]
"""
rst = set()
for (emitter_resource, emitter_input), (receiver_resource, receiver_input), meta in self.graph().edges(data=True): # NOQA
if meta:
receiver_input = '{}:{}|{}'.format(receiver_input,
meta['destination_key'],
meta['tag'])
rst.add(
(emitter_resource, emitter_input,
receiver_resource, receiver_input))
return [list(i) for i in rst]
def graph(self):
mdg = networkx.MultiDiGraph()
for u, v, data in self.db_obj.inputs._edges():
mdg.add_edge(u, v, attr_dict=data)
return mdg
def resource_inputs(self):
return self.db_obj.inputs
def to_dict(self, inputs=False):
ret = self.db_obj.to_dict()
if inputs:
ret['inputs'] = self.db_obj.inputs.as_dict()
return ret
def color_repr(self, inputs=False):
import click
arg_color = 'yellow'
return ("{resource_s}({name_s}='{key}', {base_path_s}={base_path} "
"{args_s}={inputs}, {tags_s}={tags})").format(
resource_s=click.style('Resource', fg='white', bold=True),
name_s=click.style('name', fg=arg_color, bold=True),
base_path_s=click.style('base_path', fg=arg_color, bold=True),
args_s=click.style('args', fg=arg_color, bold=True),
tags_s=click.style('tags', fg=arg_color, bold=True),
**self.to_dict(inputs)
)
def load_commited(self):
return CommitedResource.get_or_create(self.name)
def _connect_inputs(self, receiver, mapping):
if isinstance(mapping, set):
mapping = dict((x, x) for x in mapping)
self.db_obj.connect(receiver.db_obj, mapping=mapping)
self.db_obj.save_lazy()
receiver.db_obj.save_lazy()
def connect_with_events(self, receiver, mapping=None, events=None,
use_defaults=False):
mapping = get_mapping(self, receiver, mapping)
self._connect_inputs(receiver, mapping)
# signals.connect(self, receiver, mapping=mapping)
# TODO: implement events
if use_defaults:
if self != receiver:
api.add_default_events(self, receiver)
if events:
api.add_events(self.name, events)
def connect(self, receiver, mapping=None, events=None):
return self.connect_with_events(
receiver, mapping=mapping, events=events, use_defaults=True)
def disconnect(self, receiver):
inputs = self.db_obj.inputs.keys()
self.db_obj.disconnect(other=receiver.db_obj, inputs=inputs)
receiver.db_obj.save_lazy()
self.db_obj.save_lazy()
def prefetch(self):
if not self.db_obj.managers:
return
for manager in self.db_obj.managers:
manager_path = os.path.join(self.db_obj.base_path, manager)
rst = utils.communicate([manager_path], json.dumps(self.args))
if rst:
self.update(json.loads(rst))
def load(name):
r = DBResource.get(name)
if not r:
raise Exception('Resource {} does not exist in DB'.format(name))
return Resource(r)
def load_updated(since=None, with_childs=True):
if since is None:
startkey = StrInt.p_min()
else:
startkey = since
candids = DBResource.updated.filter(startkey, StrInt.p_max())
if with_childs:
candids = DBResource.childs(candids)
return [Resource(r) for r in DBResource.multi_get(candids)]
# TODO
def load_all():
candids = DBResource.updated.filter(StrInt.p_min(), StrInt.p_max())
return [Resource(r) for r in DBResource.multi_get(candids)]
def load_by_tags(query):
if isinstance(query, (list, set, tuple)):
query = '|'.join(query)
parsed_tags = get_string_tokens(query)
r_with_tags = [DBResource.tags.filter(tag) for tag in parsed_tags]
r_with_tags = set(itertools.chain(*r_with_tags))
candids = [Resource(r) for r in DBResource.multi_get(r_with_tags)]
nodes = filter(
lambda n: Expression(query, n.tags).evaluate(), candids)
return nodes
def validate_resources():
resources = load_all()
ret = []
for r in resources:
e = validation.validate_resource(r)
if e:
ret.append((r, e))
return ret
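# Illustrative usage sketch only -- it assumes a configured solar dblayer backend
# and a repository that actually contains the referenced specs; the spec names and
# input values below are hypothetical. It exercises the create/connect/load APIs
# defined above.
node = Resource('node1', 'resources/ro_node', args={'ip': '10.0.0.3'})
service = Resource('mariadb1', 'resources/mariadb_service', args={'port': 3306})
node.connect(service, mapping={'ip': 'ip'})   # emitter input 'ip' feeds receiver input 'ip'
reloaded = load('mariadb1')                   # re-read the resource through the DB layer
print(reloaded.args.get('port'))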
|
|
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org/>
# Taken from https://github.com/zacharyvoase/strscan
import copy
import re
__all__ = ['Scanner', 'text_coords']
class Scanner(object):
"""
:class:`Scanner` is a near-direct port of Ruby's ``StringScanner``.
The aim is to provide for lexical scanning operations on strings::
>>> from strscan import Scanner
>>> s = Scanner("This is an example string")
>>> s.eos()
False
>>> s.scan(r'\w+')
'This'
>>> s.scan(r'\w+')
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\s+')
>>> s.scan(r'\w+')
'is'
>>> s.eos()
False
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\w+')
'an'
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\w+')
'example'
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\w+')
'string'
>>> s.eos()
True
>>> s.scan(r'\s+')
>>> s.scan(r'\w+')
Its mechanism of operation is similar to :class:`StringIO`, only instead of
reading by passing a number of bytes, you read by passing a regex. A scan
pointer tracks the current position through the string, and all scanning or
searching happens on the rest of the string after this pointer.
:meth:`scan` is the simple case of reading some text and advancing the scan
pointer, but there are several other related methods which fulfil different
requirements.
All the methods on :class:`Scanner` which take regexes will accept either
regex strings or compiled pattern objects (as would be returned from
``re.compile()``).
"""
def __init__(self, string):
self.string = string
self.pos_history, self._pos = [0], 0
self.match_history, self._match = [None], None
def __getitem__(self, index):
"""Proxy for ``self.match.group(index)``."""
if self.match:
return self.match.group(index)
raise IndexError("No match on this scanner")
def _get_pos(self):
return self._pos
def _set_pos(self, pos):
self._pos = pos
self.pos_history.append(pos)
pos = property(_get_pos, _set_pos,
doc="The current position of the scan pointer.")
prev = property(lambda self: self.pos_history[-2],
doc="The last position of the scan pointer.")
def coords(self):
r"""
Return the current scanner position as `(lineno, columnno, line)`.
This method is useful for displaying the scanner position in a human-
readable way. For example, you could use it to provide friendlier
debugging information when writing parsers.
>>> s = Scanner("abcdef\nghijkl\nmnopqr\nstuvwx\nyz")
>>> s.coords()
(0, 0, 'abcdef')
>>> s.pos += 4
>>> s.coords()
(0, 4, 'abcdef')
>>> s.pos += 2
>>> s.coords()
(0, 6, 'abcdef')
>>> s.pos += 1
>>> s.coords()
(1, 0, 'ghijkl')
>>> s.pos += 4
>>> s.coords()
(1, 4, 'ghijkl')
>>> s.pos += 4
>>> s.coords()
(2, 1, 'mnopqr')
"""
return text_coords(self.string, self.pos)
def _get_match(self):
return self._match
def _set_match(self, match):
self._match = match
self.match_history.append(match)
match = property(_get_match, _set_match,
doc="The latest scan match.")
def beginning_of_line(self):
r"""
Return true if the scan pointer is at the beginning of a line.
>>> s = Scanner("test\ntest\n")
>>> s.beginning_of_line()
True
>>> s.skip(r'te')
2
>>> s.beginning_of_line()
False
>>> s.skip(r'st\n')
3
>>> s.beginning_of_line()
True
>>> s.terminate()
>>> s.beginning_of_line()
True
"""
if self.pos > len(self.string):
return None
elif self.pos == 0:
return True
return self.string[self.pos - 1] == '\n'
def terminate(self):
"""Set the scan pointer to the end of the string; clear match data."""
self.pos = len(self.string)
self.match = None
def eos(self):
"""
Return true if the scan pointer is at the end of the string.
>>> s = Scanner("abc")
>>> s.eos()
False
>>> s.terminate()
>>> s.eos()
True
"""
return len(self.string) == self.pos
def getch(self):
"""
Get a single character and advance the scan pointer.
>>> s = Scanner("abc")
>>> s.getch()
'a'
>>> s.getch()
'b'
>>> s.getch()
'c'
>>> s.pos
3
"""
self.pos += 1
return self.string[self.pos - 1:self.pos]
def peek(self, length):
"""
Get a number of characters without advancing the scan pointer.
>>> s = Scanner("test string")
>>> s.peek(7)
'test st'
>>> s.peek(7)
'test st'
"""
return self.string[self.pos:self.pos + length]
def rest(self):
"""
Get the rest of the string that hasn't been scanned yet.
>>> s = Scanner("test string")
>>> s.scan(r'test')
'test'
>>> s.rest
' string'
"""
return self.string[self.pos:]
rest = property(rest)
def matched(self):
"""
Get the whole of the current match.
This method returns whatever would have been returned by the latest
:meth:`scan()` call.
>>> s = Scanner("test string")
>>> s.scan(r'test')
'test'
>>> s.matched()
'test'
"""
return self.match.group(0)
def pre_match(self):
r"""
Get whatever comes before the current match.
>>> s = Scanner('test string')
>>> s.skip(r'test')
4
>>> s.scan(r'\s')
' '
>>> s.pre_match()
'test'
"""
return self.string[:self.match.start()]
def post_match(self):
r"""
Get whatever comes after the current match.
>>> s = Scanner('test string')
>>> s.skip(r'test')
4
>>> s.scan(r'\s')
' '
>>> s.post_match()
'string'
"""
return self.string[self.match.end():]
def unscan(self):
"""
Undo the last scan, resetting the position and match registers.
>>> s = Scanner('test string')
>>> s.pos
0
>>> s.skip(r'te')
2
>>> s.rest
'st string'
>>> s.unscan()
>>> s.pos
0
>>> s.rest
'test string'
"""
self.pos_history.pop()
self._pos = self.pos_history[-1]
self.match_history.pop()
self._match = self.match_history[-1]
def scan_full(self, regex, return_string=True, advance_pointer=True):
"""
Match from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched.
>>> s = Scanner("test string")
>>> s.scan_full(r' ')
>>> s.scan_full(r'test ')
'test '
>>> s.pos
5
>>> s.scan_full(r'stri', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.scan_full(r'stri', return_string=False, advance_pointer=False)
4
>>> s.pos
5
"""
regex = get_regex(regex)
self.match = regex.match(self.string, self.pos)
if not self.match:
return
if advance_pointer:
self.pos = self.match.end()
if return_string:
return self.match.group(0)
return len(self.match.group(0))
def search_full(self, regex, return_string=True, advance_pointer=True):
"""
Search from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched (from the current position *up to* the end of the
match).
>>> s = Scanner("test string")
>>> s.search_full(r' ')
'test '
>>> s.pos
5
>>> s.search_full(r'i', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.search_full(r'i', return_string=False, advance_pointer=False)
4
>>> s.pos
5
"""
regex = get_regex(regex)
self.match = regex.search(self.string, self.pos)
if not self.match:
return
start_pos = self.pos
if advance_pointer:
self.pos = self.match.end()
if return_string:
return self.string[start_pos:self.match.end()]
return (self.match.end() - start_pos)
def scan(self, regex):
"""
Match a pattern from the current position.
If a match is found, advances the scan pointer and returns the matched
string. Otherwise returns ``None``.
>>> s = Scanner("test string")
>>> s.pos
0
>>> s.scan(r'foo')
>>> s.scan(r'bar')
>>> s.pos
0
>>> s.scan(r'test ')
'test '
>>> s.pos
5
"""
return self.scan_full(regex, return_string=True, advance_pointer=True)
def scan_until(self, regex):
"""
Search for a pattern from the current position.
If a match is found, advances the scan pointer and returns the matched
string, from the current position *up to* the end of the match.
Otherwise returns ``None``.
>>> s = Scanner("test string")
>>> s.pos
0
>>> s.scan_until(r'foo')
>>> s.scan_until(r'bar')
>>> s.pos
0
>>> s.scan_until(r' ')
'test '
>>> s.pos
5
"""
return self.search_full(regex, return_string=True, advance_pointer=True)
def scan_upto(self, regex):
"""
Scan up to, but not including, the given regex.
>>> s = Scanner("test string")
>>> s.scan('t')
't'
>>> s.scan_upto(r' ')
'est'
>>> s.pos
4
>>> s.pos_history
[0, 1, 4]
"""
pos = self.pos
if self.scan_until(regex) is not None:
self.pos -= len(self.matched())
# Remove the intermediate position history entry.
self.pos_history.pop(-2)
return self.pre_match()[pos:]
def skip(self, regex):
"""
Like :meth:`scan`, but return the number of characters matched.
>>> s = Scanner("test string")
>>> s.skip('test ')
5
"""
return self.scan_full(regex, return_string=False, advance_pointer=True)
def skip_until(self, regex):
"""
Like :meth:`scan_until`, but return the number of characters matched.
>>> s = Scanner("test string")
>>> s.skip_until(' ')
5
"""
return self.search_full(regex, return_string=False, advance_pointer=True)
def check(self, regex):
"""
See what :meth:`scan` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.check('test ')
'test '
>>> s.pos
0
"""
return self.scan_full(regex, return_string=True, advance_pointer=False)
def check_until(self, regex):
"""
See what :meth:`scan_until` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.check_until(' ')
'test '
>>> s.pos
0
"""
return self.search_full(regex, return_string=True, advance_pointer=False)
def exists(self, regex):
"""
See what :meth:`skip_until` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.exists(' ')
5
>>> s.pos
0
Returns the number of characters matched if it does exist, or ``None``
otherwise.
"""
return self.search_full(regex, return_string=False, advance_pointer=False)
def text_coords(string, position):
r"""
Transform a simple index into a human-readable position in a string.
This function accepts a string and an index, and will return a triple of
`(lineno, columnno, line)` representing the position through the text. It's
useful for displaying a string index in a human-readable way::
>>> s = "abcdef\nghijkl\nmnopqr\nstuvwx\nyz"
>>> text_coords(s, 0)
(0, 0, 'abcdef')
>>> text_coords(s, 4)
(0, 4, 'abcdef')
>>> text_coords(s, 6)
(0, 6, 'abcdef')
>>> text_coords(s, 7)
(1, 0, 'ghijkl')
>>> text_coords(s, 11)
(1, 4, 'ghijkl')
>>> text_coords(s, 15)
(2, 1, 'mnopqr')
"""
line_start = string.rfind('\n', 0, position) + 1
line_end = string.find('\n', position)
lineno = string.count('\n', 0, position)
columnno = position - line_start
line = string[line_start:line_end]
return (lineno, columnno, line)
def get_regex(regex):
"""
Ensure we have a compiled regular expression object.
>>> import re
>>> get_regex('string') # doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
>>> pattern = re.compile(r'string')
>>> get_regex(pattern) is pattern
True
>>> get_regex(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Invalid regex type: 3
"""
if isinstance(regex, basestring):
return re.compile(regex)
elif not isinstance(regex, re._pattern_type):
raise TypeError("Invalid regex type: %r" % (regex,))
return regex
def _get_tests():
"""Enables ``python setup.py test``."""
import doctest
return doctest.DocTestSuite()
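# Illustrative sketch (not part of the module): the documented Scanner methods
# compose into simple lexing loops; this one pulls the word tokens out of a string.
s = Scanner("This is an example string")
words = []
while not s.eos():
    token = s.scan(r'\w+')
    if token is not None:
        words.append(token)
    else:
        s.skip(r'\W+')
print(words)  # ['This', 'is', 'an', 'example', 'string']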
|
|
#
# The Python Imaging Library.
# $Id$
#
# SGI image file handling
#
# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli.
# <ftp://ftp.sgi.com/graphics/SGIIMAGESPEC>
#
#
# History:
# 2017-22-07 mb Add RLE decompression
# 2016-16-10 mb Add save method without compression
# 1995-09-10 fl Created
#
# Copyright (c) 2016 by Mickael Bonfill.
# Copyright (c) 2008 by Karsten Hiddemann.
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1995 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile
from ._binary import i8, o8, i16be as i16
import struct
import os
__version__ = "0.3"
def _accept(prefix):
return len(prefix) >= 2 and i16(prefix) == 474
MODES = {
(1, 1, 1): "L",
(1, 2, 1): "L",
(2, 1, 1): "L;16B",
(2, 2, 1): "L;16B",
(1, 3, 3): "RGB",
(2, 3, 3): "RGB;16B",
(1, 3, 4): "RGBA",
(2, 3, 4): "RGBA;16B"
}
##
# Image plugin for SGI images.
class SgiImageFile(ImageFile.ImageFile):
format = "SGI"
format_description = "SGI Image File Format"
def _open(self):
# HEAD
headlen = 512
s = self.fp.read(headlen)
# magic number : 474
if i16(s) != 474:
raise ValueError("Not an SGI image file")
# compression : verbatim or RLE
compression = i8(s[2])
# bpc : 1 or 2 bytes (8bits or 16bits)
bpc = i8(s[3])
# dimension : 1, 2 or 3 (depending on xsize, ysize and zsize)
dimension = i16(s[4:])
# xsize : width
xsize = i16(s[6:])
# ysize : height
ysize = i16(s[8:])
# zsize : channels count
zsize = i16(s[10:])
# layout
layout = bpc, dimension, zsize
# determine mode from bits/zsize
rawmode = ""
try:
rawmode = MODES[layout]
except KeyError:
pass
if rawmode == "":
raise ValueError("Unsupported SGI image mode")
self.size = xsize, ysize
self.mode = rawmode.split(";")[0]
# orientation -1 : scanlines begin at the bottom-left corner
orientation = -1
# decoder info
if compression == 0:
pagesize = xsize * ysize * bpc
if bpc == 2:
self.tile = [("SGI16", (0, 0) + self.size,
headlen, (self.mode, 0, orientation))]
else:
self.tile = []
offset = headlen
for layer in self.mode:
self.tile.append(
("raw", (0, 0) + self.size,
offset, (layer, 0, orientation)))
offset += pagesize
elif compression == 1:
self.tile = [("sgi_rle", (0, 0) + self.size,
headlen, (rawmode, orientation, bpc))]
def _save(im, fp, filename):
if im.mode not in ("RGB", "RGBA", "L"):
raise ValueError("Unsupported SGI image mode")
# Get the keyword arguments
info = im.encoderinfo
# Byte-per-pixel precision, 1 = 8bits per pixel
bpc = info.get("bpc", 1)
if bpc not in (1, 2):
raise ValueError("Unsupported number of bytes per pixel")
# Flip the image, since the origin of an SGI file is the bottom-left corner
orientation = -1
# Define the file as SGI File Format
magicNumber = 474
# Run-Length Encoding Compression - Unsupported at this time
rle = 0
# Number of dimensions (x,y,z)
dim = 3
# X Dimension = width / Y Dimension = height
x, y = im.size
if im.mode == "L" and y == 1:
dim = 1
elif im.mode == "L":
dim = 2
# Z Dimension: Number of channels
z = len(im.mode)
if dim == 1 or dim == 2:
z = 1
# assert we've got the right number of bands.
if len(im.getbands()) != z:
raise ValueError("incorrect number of bands in SGI write: %s vs %s" %
(z, len(im.getbands())))
# Minimum Byte value
pinmin = 0
# Maximum Byte value (255 = 8bits per pixel)
pinmax = 255
# Image name (79 characters max, truncated below in write)
imgName = os.path.splitext(os.path.basename(filename))[0]
if str is not bytes:
imgName = imgName.encode('ascii', 'ignore')
# Standard representation of pixel in the file
colormap = 0
fp.write(struct.pack('>h', magicNumber))
fp.write(o8(rle))
fp.write(o8(bpc))
fp.write(struct.pack('>H', dim))
fp.write(struct.pack('>H', x))
fp.write(struct.pack('>H', y))
fp.write(struct.pack('>H', z))
fp.write(struct.pack('>l', pinmin))
fp.write(struct.pack('>l', pinmax))
fp.write(struct.pack('4s', b'')) # dummy
fp.write(struct.pack('79s', imgName)) # truncates to 79 chars
fp.write(struct.pack('s', b'')) # force null byte after imgname
fp.write(struct.pack('>l', colormap))
fp.write(struct.pack('404s', b'')) # dummy
rawmode = 'L'
if bpc == 2:
rawmode = 'L;16B'
for channel in im.split():
fp.write(channel.tobytes('raw', rawmode, 0, orientation))
fp.close()
class SGI16Decoder(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer):
rawmode, stride, orientation = self.args
pagesize = self.state.xsize * self.state.ysize
zsize = len(self.mode)
self.fd.seek(512)
for band in range(zsize):
channel = Image.new('L', (self.state.xsize, self.state.ysize))
channel.frombytes(self.fd.read(2 * pagesize), 'raw',
'L;16B', stride, orientation)
self.im.putband(channel.im, band)
return -1, 0
#
# registry
Image.register_decoder("SGI16", SGI16Decoder)
Image.register_open(SgiImageFile.format, SgiImageFile, _accept)
Image.register_save(SgiImageFile.format, _save)
Image.register_mime(SgiImageFile.format, "image/sgi")
Image.register_mime(SgiImageFile.format, "image/rgb")
Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"])
# End of file
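# Illustrative round-trip sketch (separate script, hypothetical path): exercises the
# registered _save/_open handlers above via Pillow's normal API, writing an
# uncompressed 8-bit-per-channel file.
from PIL import Image

im = Image.new("RGB", (16, 16), (255, 0, 0))
im.save("/tmp/example.sgi")                 # dispatched to _save via register_save
reloaded = Image.open("/tmp/example.sgi")   # dispatched to _open via register_open
print(reloaded.format, reloaded.mode, reloaded.size)   # SGI RGB (16, 16)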
|
|
import datetime
import urllib
from django.forms import widgets
from django.forms.utils import flatatt
from django import forms
from django.utils.encoding import force_unicode, python_2_unicode_compatible, force_text
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape, html_safe, format_html
from django.core.urlresolvers import reverse
from django.utils.dateparse import parse_time
from django.template.loader import render_to_string
from django.utils import timezone, formats, translation
from django.contrib.admin.widgets import url_params_from_lookup_dict
from . import settings
# NOT EVERYTHING IS SUPPORTED, I DON'T CARE.
TRANSLATION_DICT = {
# Day
'd': 'dd',
'l': 'DD',
'j': 'oo',
# Month
'B': 'MM',
'm': 'mm',
'b': 'M',
# Year
'Y': 'yy',
'y': 'y',
# Time
'p': 'TT',
'I': 'hh',
'H': 'HH',
'M': 'mm',
'S': 'ss',
}
@html_safe
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
@html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return format_html(
'<label{}>{} {}</label>', label_for, self.tag(attrs), self.choice_label
)
def is_checked(self):
return self.value == self.choice_value
def tag(self, attrs=None):
attrs = attrs or self.attrs
final_attrs = dict(attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
@html_safe
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
outer_html = '<ul{id_attr}>{content}</ul>'
inner_html = '<li>{choice_value}{sub_widgets}</li>'
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
return list(self)[idx]
def __iter__(self):
for idx, choice in enumerate(self.choices):
yield self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id')
output = []
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple, list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{}'.format(i)
sub_ul_renderer = self.__class__(
name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label,
)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html(
self.inner_html, choice_value=choice_value,
sub_widgets=sub_ul_renderer.render(),
))
else:
w = self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, i)
output.append(format_html(self.inner_html, choice_value=force_text(w), sub_widgets=''))
return format_html(
self.outer_html,
id_attr=format_html(' id="{}"', id_) if id_ else '',
content=mark_safe('\n'.join(output)),
)
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
def translate_format(format_string):
for k, v in TRANSLATION_DICT.items():
format_string = format_string.replace('%{0}'.format(k), v)
return format_string
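# Illustrative check (not part of the module): strftime-style tokens are rewritten
# to the datepicker-style tokens in TRANSLATION_DICT, e.g. for a US-style date and
# a 24-hour time format.
assert translate_format('%m/%d/%Y') == 'mm/dd/yy'
assert translate_format('%H:%M:%S') == 'HH:mm:ss'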
class DateWidget(widgets.DateInput):
bc = 'date'
format_key = 'DATE_INPUT_FORMATS'
def __init__(self, *args, **kwargs):
super(DateWidget, self).__init__(*args, **kwargs)
self.attrs['class'] = self.bc
if 'format' not in kwargs:
self.format = None
def get_format(self):
if self.format:
return self.format
if settings.USE_SCARLET_DATE_FORMATS and hasattr(settings, self.format_key):
return getattr(settings, self.format_key)
return formats.get_format(self.format_key)[0]
def _format_value(self, value):
return formats.localize_input(value, self.get_format())
def build_attrs(self, *args, **kwargs):
args = super(DateWidget, self).build_attrs(*args, **kwargs)
args['data-date-format'] = translate_format(self.get_format())
args['data-timezone'] = timezone.get_current_timezone_name()
args['data-locale'] = translation.get_language()
return args
def value_from_datadict(self, data, files, name):
value = super(DateWidget, self).value_from_datadict(data, files, name)
df = self.get_format()
if isinstance(value, basestring):
try:
return datetime.datetime.strptime(value, df)
except ValueError:
pass
return value
class DateTimeWidget(DateWidget):
bc = 'datetime'
format_key = 'DATETIME_INPUT_FORMATS'
class TimeChoiceWidget(widgets.Select):
"""
Widget for time fields. A select widget that will have a 'now'
option plus an option for each block of time you want to
display. By default this will be one item in the drop down for
every 15 min block of a day.
:param attrs: HTML attributes for the widget; same as django's.
:param min_interval: Interval for minutes in your dropdown, \
should be a number between 1 and 60. Default is 15.
:param sec_interval: Interval for seconds in your dropdown, \
should be a number between 1 and 60. Default is 60.
:param twenty_four_hour: Display time in a 24hr format? \
Default is False.
"""
NOW = 'now'
def __init__(self, attrs=None, min_interval=15, sec_interval=60,
twenty_four_hour=False):
super(TimeChoiceWidget, self).__init__(attrs)
assert 60 >= min_interval > 0
assert 60 >= sec_interval > 0
self.twenty_four_hour = twenty_four_hour
self.choices = [(self.NOW, 'Now')]
self.choice_values = set()
self.repr_format = "%I:%M:%S %p"
if twenty_four_hour:
self.repr_format = "%H:%M:%S"
for hour in range(24):
for min_v in range(60 / min_interval):
min_v = min_v * min_interval
for sec in range(60 / sec_interval):
sec = sec * sec_interval
t = datetime.time(hour, min_v, sec)
self.choices.append((t.strftime("%H:%M:%S"),
t.strftime(self.repr_format)))
self.choice_values.add(t.strftime("%H:%M:%S"))
def value_from_datadict(self, *args):
data = super(TimeChoiceWidget, self).value_from_datadict(*args)
if data == self.NOW:
# Time should be naive, conversion happens later
data = datetime.datetime.now().strftime("%H:%M:%S")
return data
def render(self, name, value, attrs=None, choices=()):
if value:
if isinstance(value, basestring):
try:
value = parse_time(value)
except ValueError:
value = None
if value and isinstance(value, datetime.time):
value_str = value.strftime("%H:%M:%S")
if value_str not in self.choice_values:
choices = list(choices)
choices.append((value_str, value.strftime(self.repr_format)))
return super(TimeChoiceWidget, self).render(name, value, attrs=attrs,
choices=choices)
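# Illustrative sketch (under the module's Python 2 integer-division assumptions):
# a 24-hour dropdown with one entry per 30 minutes. The variable name is hypothetical.
_example_widget = TimeChoiceWidget(min_interval=30, sec_interval=60, twenty_four_hour=True)
assert len(_example_widget.choices) == 49                     # the 'Now' option plus 24 hours * 2 slots
assert _example_widget.choices[1] == ('00:00:00', '00:00:00')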
class SplitDateTime(widgets.SplitDateTimeWidget):
"""
Widget for datetime fields. Uses DateWidget, TimeChoiceWidget.
"""
def __init__(self, widgets=(DateWidget, TimeChoiceWidget), attrs=None):
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return mark_safe(u'%s %s' % (rendered_widgets[0], rendered_widgets[1]))
def value_from_datadict(self, data, files, name):
d = super(SplitDateTime, self).value_from_datadict(data, files, name)
if not self.is_required and len(d) and not d[0]:
return ['', '']
return d
class DateRadioInput(RadioChoiceInput):
label_text = "At a specific date and time"
def render(self, name=None, value=None, attrs=None, choices=()):
attrs = attrs or self.attrs
if 'id' in self.attrs:
label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
else:
label_for = ''
date_widget = attrs['date_widget']
return mark_safe(u'<label%s>%s %s: %s</label>' % (
label_for, self.tag(), self.label_text, date_widget))
def tag(self):
final_attrs = {
'type': 'radio',
'name': self.name,
'value': self.choice_value,
}
if self.is_checked():
final_attrs['checked'] = 'checked'
return mark_safe(u'<input%s />' % flatatt(final_attrs))
class DateRenderer(RadioFieldRenderer):
def __init__(self, *args, **kwargs):
self.date_widget = kwargs.pop('date_widget')
super(DateRenderer, self).__init__(*args, **kwargs)
def return_choice(self, choice, idx):
cls = RadioChoiceInput
attrs = self.attrs.copy()
if choice[0] == RadioDateTimeWidget.DATE:
cls = DateRadioInput
attrs['date_widget'] = self.date_widget
return cls(self.name, self.value, attrs, choice, idx)
def __iter__(self):
for i, choice in enumerate(self.choices):
yield self.return_choice(choice, i)
def __getitem__(self, idx):
choice = self.choices[idx]
return self.return_choice(choice, idx)
def render(self):
return mark_safe(u'<fieldset class="datetime">\n%s\n</fieldset>' % u'\n'.join([u'%s' % force_unicode(w) for w in self]))
class RadioDateTimeWidget(widgets.RadioSelect):
NOW = 'now'
DATE = 'date'
def __init__(self, *args, **kwargs):
self.date_class = kwargs.pop('date_class', DateTimeWidget)
self.choices = [(self.NOW, 'Now'), (self.DATE, 'Date',)]
kwargs['choices'] = self.choices
super(RadioDateTimeWidget, self).__init__(*args, **kwargs)
def get_radio_key(self, name):
return "{0}_rdi".format(name)
def get_renderer(self, date_widget, name, value, attrs=None):
return DateRenderer(name, value, attrs, self.choices,
date_widget=date_widget)
def render(self, name, value, attrs=None):
widget = self.date_class()
date_widget = widget.render(name, value, attrs=attrs)
return self.get_renderer(date_widget, self.get_radio_key(name),
self.DATE, {}).render()
def value_from_datadict(self, data, files, name):
radio_value = data.get(self.get_radio_key(name))
if radio_value == self.NOW:
return timezone.now()
else:
widget = self.date_class()
return widget.value_from_datadict(data, files, name)
class APIChoiceWidget(widgets.Input):
"""
Widget for selecting a related object. This is used
as the default widget for ForeignKey fields. Outputs
text input field that is wrapped in a <div> that contains
3 data attributes.
* data-api: The url that can be queried to get the options \
for this field in json format.
* data-add: The url that should be called in a popup to add \
a new item. If not present adding is not supported.
* data-title: The title of the field.
In order for this widget to work it needs to know where those urls are and
if the rendering user has the needed permissions. This is
accomplished by having the code that prepares the form
call the `update_links` method. See the method documentation for
what parameters are needed.
:param rel: The rel attribute of the foreign key field that \
this widget is for.
:param attrs: HTML attributes for this field, same as django's.
:param using: The database to use. Defaults to None.
:param view: The string name of the view that will be used for \
getting the api url. Defaults to 'main'.
:param api_url: The api url. This is only used if the automatic url \
discovery fails.
:param add_view: The string name of the view that will be used for \
getting the add url. Defaults to 'add'.
:param add_url: The url for adding a new item. This is only used \
if the automatic url discovery fails.
:param extra_query_kwargs: Keyword arguments that you would like \
passed as part of the query string.
"""
input_type = 'hidden'
template = u'<div class="api-select" data-title="%(value)s" data-api="%(link)s" data-add="%(add_link)s">%(input)s</div>'
def __init__(self, rel, attrs=None, using=None, view="main", api_url='',
add_view="add", add_url='', extra_query_kwargs=None):
super(APIChoiceWidget, self).__init__(attrs=attrs)
self.rel = rel
self.model = self.rel.to
self.db = using
self.extra_query_kwargs = extra_query_kwargs
self.view = view
self.add_view = add_view
self._api_link = api_url
self._add_link = add_url
def render(self, name, value, attrs=None, choices=()):
data = {
'input': super(APIChoiceWidget, self).render(name, value, attrs=attrs),
'value': conditional_escape(self.label_for_value(value)),
'link': self.get_api_link(),
'add_link': self.get_add_link()
}
return mark_safe(self.template % data)
def get_qs(self):
"""
Returns a mapping that will be used to generate
the query string for the api url. Any values
in the `limit_choices_to` specified on the
foreign key field and any arguments specified on
self.extra_query_kwargs are converted to a format
that can be used in a query string and returned as
a dictionary.
"""
qs = url_params_from_lookup_dict(self.rel.limit_choices_to)
if not qs:
qs = {}
if self.extra_query_kwargs:
qs.update(self.extra_query_kwargs)
return qs
def _get_bundle_link(self, bundle, view_name, user):
url = bundle.get_view_url(view_name, user)
if url:
return url
return ''
def _get_reverse(self, name, url_kwargs):
return reverse(name, kwargs=url_kwargs)
def update_links(self, request, admin_site=None):
"""
Called to update the widget's urls. Tries to find the
bundle for the model that this foreign key points to and then
asks it for the urls for adding and listing and sets them on
this widget instance. The urls are only set if request.user
has permissions on that url.
:param request: The request for which this widget is being rendered.
:param admin_site: If provided, the `admin_site` is used to lookup \
the bundle that is registered as the primary url for the model \
that this foreign key points to.
"""
if admin_site:
bundle = admin_site.get_bundle_for_model(self.model)
if bundle:
self._api_link = self._get_bundle_link(bundle, self.view,
request.user)
self._add_link = self._get_bundle_link(bundle, self.add_view,
request.user)
def get_api_link(self):
"""
Adds a query string to the api url. At minimum adds the type=choices
argument so that the return format is json. Any other filtering
arguments calculated by the `get_qs` method are then added to the
url. It is up to the destination url to respect them as filters.
"""
url = self._api_link
if url:
qs = self.get_qs()
url = "%s?type=choices" % url
if qs:
url = "%s&%s" % (url, u'&'.join([u'%s=%s' % (k, urllib.quote(unicode(v).encode('utf8'))) \
for k, v in qs.items()]))
url = "%s&%s" % (url, u'&'.join([u'exclude=%s' % x \
for x in qs.keys()]))
return url
def get_add_link(self):
"""
Appends the popup=1 query string to the url so the
destination url treats it as a popup.
"""
url = self._add_link
if url:
return "%s?popup=1" % url
return url
def label_for_value(self, value, key=None):
"""
Looks up the current value of the field and returns
a unicode representation. Default implementation does a lookup
on the target model and if a match is found calls force_unicode
on that object. Otherwise a blank string is returned.
"""
if not key:
key = self.rel.get_related_field().name
if value is not None:
try:
obj = self.model._default_manager.using(self.db).get(**{key: value})
return force_unicode(obj)
except (ValueError, self.model.DoesNotExist):
return ''
return ''
class APIModelChoiceWidget(APIChoiceWidget):
"""
Widget for selecting a related object. This is meant to
be used in forms that specify their own related fields.
Inherits from APIChoiceWidget but is based on a model
instead of a foreign key relation.
:param model: The model that this widget is for.
:param attrs: HTML attributes for this field, same as django's.
:param using: The database to use. Defaults to None.
:param limit_choices_to: Keyword arguments that you would like \
passed as part of the query string.
:param view: The string name of the view that will be used for \
getting the api url. Defaults to 'main'.
:param api_url: The api url. This is only used if the automatic url \
discovery fails.
:param add_view: The string name of the view that will be used for \
getting the add url. Defaults to 'add'.
:param add_url: The url for adding a new item. This is only used \
if the automatic url discovery fails.
"""
template = u'<div class="api-select" data-title="%(value)s" data-api="%(link)s" data-add="%(add_link)s">%(input)s</div>'
def __init__(self, model, attrs=None, using=None, limit_choices_to=None,
view="main", api_url='', add_view="add", add_url=''):
super(APIChoiceWidget, self).__init__(attrs=attrs)
self.limit_choices_to = limit_choices_to
self.model = model
self.db = using
self.view = view
self.add_view = add_view
self._api_link = api_url
self._add_link = add_url
def get_qs(self):
return url_params_from_lookup_dict(self.limit_choices_to)
def label_for_value(self, value):
return super(APIModelChoiceWidget, self).label_for_value(value, key='pk')
class APIManyChoiceWidget(APIChoiceWidget, widgets.SelectMultiple):
"""
Widget for selecting many related objects. This is meant to
be used in forms that specify their own related fields.
Inherits from APIChoiceWidget but is based on a model
instead of a foreign key relation.
:param model: The model that this widget is for.
:param attrs: HTML attributes for this field, same as django's.
:param using: The database to use. Defaults to None.
:param limit_choices_to: Keyword arguments that you would like \
passed as part of the query string.
:param view: The string name of the view that will be used for \
getting the api url. Defaults to 'main'.
:param api_url: The api url. This is only used if the automatic url \
discovery fails.
:param add_view: The string name of the view that will be used for \
getting the add url. Defaults to 'add'.
:param add_url: The url for adding a new item. This is only used \
if the automatic url discovery fails.
"""
template = u'<div class="api-select" data-api="%(api_link)s" data-add="%(add_link)s">%(options)s</div>'
allow_multiple_selected = True
def __init__(self, model, attrs=None, using=None, limit_choices_to=None,
view="main", api_url='', add_view="add", add_url=''):
super(APIChoiceWidget, self).__init__(attrs=attrs)
self.limit_choices_to = limit_choices_to
self.model = model
self.db = using
self.view = view
self.add_view = add_view
self._api_link = api_url
self._add_link = add_url
def get_qs(self):
return url_params_from_lookup_dict(self.limit_choices_to)
def update_links(self, request, admin_site=None):
"""
Called to update the widget's urls. Tries to find the
bundle for the model that this foreign key points to and then
asks it for the urls for adding and listing and sets them on
this widget instance. The urls are only set if request.user
has permissions on that url.
:param request: The request for which this widget is being rendered.
:param admin_site: If provided, the `admin_site` is used to lookup \
the bundle that is registered as the primary url for the model \
that this foreign key points to.
"""
if admin_site:
bundle = admin_site.get_bundle_for_model(self.model.to)
if bundle:
self._api_link = self._get_bundle_link(bundle, self.view,
request.user)
self._add_link = self._get_bundle_link(bundle, self.add_view,
request.user)
def render(self, name, value, attrs=None, choices=()):
final_attrs = self.build_attrs(attrs, name=name)
data = {
'api_link': self.get_api_link(),
'add_link': self.get_add_link(),
'options': self.get_options(value, name)
}
data.update(final_attrs)
return mark_safe(self.template % data)
def get_options(self, value, name, key=None):
if not key:
key = self.model.get_related_field().name
values = []
if value is not None:
try:
kwargs = {'{0}__in'.format(key): value}
if self.limit_choices_to:
kwargs.update(self.limit_choices_to)
objs = self.model.to._default_manager.using(self.db).filter(**kwargs)
for obj in objs:
d = {
'text': force_unicode(obj),
'value': getattr(obj, key),
'name': name
}
line = '<input type="hidden" data-multiple data-title="%(text)s" name="%(name)s" value="%(value)s" />' % d
values.append(line)
except ValueError:
pass
if not values:
values = ['<input type="hidden" data-multiple data-title="" name="{0}" value="" />'.format(name)]
return ''.join(values)
class HiddenTextInput(widgets.HiddenInput):
"""
Widget for order fields in lists. Inherits from HiddenInput
so it is marked as hidden in the form, but uses a 'text' input
type with *orderfield* as the class attribute in the rendered html.
"""
input_type = 'text'
def __init__(self, *args, **kwargs):
super(HiddenTextInput, self).__init__(*args, **kwargs)
self.attrs['class'] = 'orderfield'
def is_hidden(self):
return True
class HTMLWidget(widgets.Textarea):
"""
WYSIWYG Widget. Adds *widget-wysiwyg* to the class attribute
in the rendered html.
"""
template = "cms/toolbar.html"
def __init__(self, *args, **kwargs):
super(HTMLWidget, self).__init__(*args, **kwargs)
classes = ["wysiwyg-textarea"]
if self.attrs.get('class'):
classes.append(self.attrs.get('class'))
self.attrs['class'] = " ".join(classes)
def render(self, *args, **kwargs):
text = super(HTMLWidget, self).render(*args, **kwargs)
return mark_safe(u"<div class=\"widget-wysiwyg\">{1} {0}</div>".format(text, render_to_string(self.template)))
class AnnotatedHTMLWidget(widgets.MultiWidget):
"""
Combines WYSIWYG with a hidden widget for separating
annotation data from annotated text.
"""
template = "cms/toolbar_annotation.html"
START_HTML = '<div class="wysiwyg-annotation-data">'
END_HTML = '</div>'
def __init__(self, attrs=None):
_widgets = (
widgets.Textarea(attrs={'class': "wysiwyg-textarea"}),
widgets.Textarea(attrs={'class': "wysiwyg-annotations"}),
)
super(AnnotatedHTMLWidget, self).__init__(_widgets, attrs=attrs)
def decompress(self, value):
if value:
parts = value.rpartition(self.START_HTML)
if parts[1]:
annotation = parts[2]
if annotation.endswith(self.END_HTML):
annotation = annotation[:-len(self.END_HTML)]
return parts[0], annotation
return [value, ""]
return ["", ""]
def format_output(self, rendered_widgets):
return mark_safe(u"<div class=\"widget-wysiwyg annotation\">{0} {1} {2}</div>".format(
render_to_string(self.template), *rendered_widgets))
def value_from_datadict(self, data, files, name):
data = [
widget.value_from_datadict(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)]
if data and data[1]:
data[1] = self.START_HTML + data[1] + self.END_HTML
return data[0] + data[1]
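# Illustrative round trip for AnnotatedHTMLWidget (markup values are hypothetical):
# value_from_datadict() joins the two textareas into a single stored string:
#     '<p>Body</p>' + '<div class="wysiwyg-annotation-data">{"1": "note"}</div>'
# and decompress() later splits that string back into its two parts:
#     decompress('<p>Body</p><div class="wysiwyg-annotation-data">{"1": "note"}</div>')
#     -> ('<p>Body</p>', '{"1": "note"}')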
|
|
"""(disabled by default) support for testing pytest and pytest plugins."""
import gc
import importlib
import os
import platform
import re
import subprocess
import sys
import time
import traceback
from collections.abc import Sequence
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
import py
import pytest
from _pytest._code import Source
from _pytest._io.saferepr import saferepr
from _pytest.capture import MultiCapture
from _pytest.capture import SysCapture
from _pytest.main import ExitCode
from _pytest.main import Session
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pathlib import Path
IGNORE_PAM = [ # filenames added when obtaining details about the current user
"/var/lib/sss/mc/passwd"
]
def pytest_addoption(parser):
parser.addoption(
"--lsof",
action="store_true",
dest="lsof",
default=False,
help="run FD checks if lsof is available",
)
parser.addoption(
"--runpytest",
default="inprocess",
dest="runpytest",
choices=("inprocess", "subprocess"),
help=(
"run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"
),
)
parser.addini(
"pytester_example_dir", help="directory to take the pytester example files from"
)
def pytest_configure(config):
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
config.addinivalue_line(
"markers",
"pytester_example_path(*path_segments): join the given path "
"segments to `pytester_example_dir` for this test.",
)
class LsofFdLeakChecker:
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
# py3: use subprocess.DEVNULL directly.
with open(os.devnull, "wb") as devnull:
return subprocess.check_output(
("lsof", "-Ffn0", "-p", str(pid)), stderr=devnull
).decode()
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith("f") and (
"deleted" not in line
and "mem" not in line
and "txt" not in line
and "cwd" not in line
)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split("\0")
fd = fields[0][1:]
filename = fields[1][1:]
if filename in IGNORE_PAM:
continue
if filename.startswith("/"):
open_files.append((fd, filename))
return open_files
def matching_platform(self):
try:
subprocess.check_output(("lsof", "-v"))
except (OSError, subprocess.CalledProcessError):
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
error.append("See issue #2366")
item.warn(pytest.PytestWarning("\n".join(error)))
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
hooks.
"""
return PytestArg(request)
class PytestArg:
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values):
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
class ParsedCall:
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d["_name"]
return "<ParsedCall {!r}(**{!r})>".format(self._name, d)
class HookRecorder:
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording each call
before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find {!r} check {!r}".format(name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call {!r}, in:".format(name)]
lines.extend([" %s" % x for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
values = self.getcalls(name)
assert len(values) == 1, (name, values)
return values[0]
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(
self,
inamepart="",
names="pytest_runtest_logreport pytest_collectreport",
when=None,
):
"""return a testreport whose dotted import path matches"""
values = []
for rep in self.getreports(names=names):
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
if when and rep.when != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
values.append(rep)
if not values:
raise ValueError(
"could not find test report matching %r: "
"no test reports at all!" % (inamepart,)
)
if len(values) > 1:
raise ValueError(
"found 2 or more testreports matching {!r}: {}".format(
inamepart, values
)
)
return values[0]
def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures("pytest_collectreport")
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if rep.when == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
else:
assert rep.failed, "Unexpected outcome: {!r}".format(rep)
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
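# Hedged usage sketch (not part of the module): a typical way a plugin test consumes
# a HookRecorder, e.g. the one returned by Testdir.inline_run() further below.
#     reprec = testdir.inline_run("-q")
#     reprec.assertoutcome(passed=1)
#     reports = reprec.getreports("pytest_runtest_logreport")
#     call = reprec.getcall("pytest_collectstart")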
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
@pytest.fixture
def _sys_snapshot():
snappaths = SysPathsSnapshot()
snapmods = SysModulesSnapshot()
yield
snapmods.restore()
snappaths.restore()
@pytest.fixture
def _config_for_test():
from _pytest.config import get_config
config = get_config()
yield config
config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles.
# regex to match the session duration string in the summary: "74.34s"
rex_session_duration = re.compile(r"\d+\.\d\ds")
# regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped"
rex_outcome = re.compile(r"(\d+) (\w+)")
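# For illustration: on a summary line such as
#     "== 34 passed, 111 skipped in 74.34s =="
# rex_session_duration matches "74.34s", and rex_outcome.findall() yields
# [("34", "passed"), ("111", "skipped")], which RunResult.parseoutcomes() below
# turns into {"passed": 34, "skipped": 111}.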
class RunResult:
"""The result of running a command.
Attributes:
:ivar ret: the return value
:ivar outlines: list of lines captured from stdout
:ivar errlines: list of lines captured from stderr
:ivar stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
method
:ivar stderr: :py:class:`LineMatcher` of stderr
:ivar duration: duration in seconds
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def __repr__(self):
return (
"<RunResult ret=%r len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>"
% (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)
)
def parseoutcomes(self):
"""Return a dictionary of outcomestring->num from parsing the terminal
output that the test process produced.
"""
for line in reversed(self.outlines):
if rex_session_duration.search(line):
outcomes = rex_outcome.findall(line)
return {noun: int(count) for (count, noun) in outcomes}
raise ValueError("Pytest terminal summary report not found")
def assert_outcomes(
self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0
):
"""Assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run.
"""
d = self.parseoutcomes()
obtained = {
"passed": d.get("passed", 0),
"skipped": d.get("skipped", 0),
"failed": d.get("failed", 0),
"error": d.get("error", 0),
"xpassed": d.get("xpassed", 0),
"xfailed": d.get("xfailed", 0),
}
expected = {
"passed": passed,
"skipped": skipped,
"failed": failed,
"error": error,
"xpassed": xpassed,
"xfailed": xfailed,
}
assert obtained == expected
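# Hedged example (illustrative, not part of the module) of how a plugin test
# typically uses a RunResult via the `testdir` fixture defined above:
#     def test_example(testdir):
#         testdir.makepyfile("def test_ok(): assert True")
#         result = testdir.runpytest("-v")
#         result.assert_outcomes(passed=1)
#         result.stdout.fnmatch_lines(["*1 passed*"])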
class CwdSnapshot:
def __init__(self):
self.__saved = os.getcwd()
def restore(self):
os.chdir(self.__saved)
class SysModulesSnapshot:
def __init__(self, preserve=None):
self.__preserve = preserve
self.__saved = dict(sys.modules)
def restore(self):
if self.__preserve:
self.__saved.update(
(k, m) for k, m in sys.modules.items() if self.__preserve(k)
)
sys.modules.clear()
sys.modules.update(self.__saved)
class SysPathsSnapshot:
def __init__(self):
self.__saved = list(sys.path), list(sys.meta_path)
def restore(self):
sys.path[:], sys.meta_path[:] = self.__saved
class Testdir:
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
Attributes:
:ivar tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
:ivar plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
"""
CLOSE_STDIN = object()
class TimeoutExpired(Exception):
pass
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
name = request.function.__name__
self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True)
self.plugins = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
mp = self.monkeypatch = MonkeyPatch()
mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self.test_tmproot))
# Ensure no unexpected caching via tox.
mp.delenv("TOX_ENV_DIR", raising=False)
# Discard outer pytest options.
mp.delenv("PYTEST_ADDOPTS", raising=False)
# Environment (updates) for inner runs.
tmphome = str(self.tmpdir)
self._env_run_update = {"HOME": tmphome, "USERPROFILE": tmphome}
def __repr__(self):
return "<Testdir {!r}>".format(self.tmpdir)
def __str__(self):
return str(self.tmpdir)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
clean that up. It does not remove the temporary directory, however, so
it can be inspected after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
self.monkeypatch.undo()
def __take_sys_modules_snapshot(self):
# some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example
def preserve_module(name):
return name.startswith("zope")
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
self.tmpdir.chdir()
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
def to_text(s):
return s.decode(encoding) if isinstance(s, bytes) else str(s)
if args:
source = "\n".join(to_text(x) for x in args)
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for basename, value in items:
p = self.tmpdir.join(basename).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
source = "\n".join(to_text(line) for line in source.lines)
p.write(source.strip().encode(encoding), "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
r"""Create new file(s) in the testdir.
:param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.
:param list[str] args: All args will be treated as strings and joined using newlines.
The result will be written as contents to the file. The name of the
file will be based on the test function requesting this fixture.
:param kwargs: Each keyword is the name of a file, while the value of it will
be written as contents of the file.
Examples:
.. code-block:: python
testdir.makefile(".txt", "line1", "line2")
testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)["pytest"]
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile(".txt", args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
path = self.tmpdir
self.monkeypatch.syspath_prepend(str(path))
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
def copy_example(self, name=None):
"""Copy file from project's directory into the testdir.
:param str name: The name of the file to copy.
:return: path to the copied directory (inside ``self.tmpdir``).
"""
import warnings
from _pytest.warning_types import PYTESTER_COPY_EXAMPLE
warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2)
example_dir = self.request.config.getini("pytester_example_dir")
if example_dir is None:
raise ValueError("pytester_example_dir is unset, can't copy examples")
example_dir = self.request.config.rootdir.join(example_dir)
for extra_element in self.request.node.iter_markers("pytester_example_path"):
assert extra_element.args
example_dir = example_dir.join(*extra_element.args)
if name is None:
func_name = self.request.function.__name__
maybe_dir = example_dir / func_name
maybe_file = example_dir / (func_name + ".py")
if maybe_dir.isdir():
example_path = maybe_dir
elif maybe_file.isfile():
example_path = maybe_file
else:
raise LookupError(
"{} cant be found as module or package in {}".format(
func_name, example_dir.bestrelpath(self.request.config.rootdir)
)
)
else:
example_path = example_dir.join(name)
if example_path.isdir() and not example_path.join("__init__.py").isfile():
example_path.copy(self.tmpdir)
return self.tmpdir
elif example_path.isfile():
result = self.tmpdir.join(example_path.basename)
example_path.copy(result)
return result
else:
raise LookupError(
'example "{}" is not found as a file or directory'.format(example_path)
)
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
configuration
:param arg: a :py:class:`py.path.local` instance of the file
"""
session = Session(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param path: a :py:class:`py.path.local` instance of the file
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: the source code of the test module
:param cmdlineargs: any extra command line arguments to use
:return: :py:class:`HookRecorder` instance of the result
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, plugins=(), no_reraise_ctrlc=False):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args: command line arguments to pass to :py:func:`pytest.main`
:kwarg plugins: extra plugin instances the ``pytest.main()`` instance should use.
:kwarg no_reraise_ctrlc: typically we reraise keyboard interrupts from the child run. If
True, the KeyboardInterrupt exception is captured.
:return: a :py:class:`HookRecorder` instance
"""
# (maybe a cpython bug?) the importlib cache sometimes isn't updated
# properly between file creation and inline_run (especially if imports
# are interspersed with file creation)
importlib.invalidate_caches()
plugins = list(plugins)
finalizers = []
try:
# Do not load user config (during runs only).
mp_run = MonkeyPatch()
for k, v in self._env_run_update.items():
mp_run.setenv(k, v)
finalizers.append(mp_run.undo)
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect:
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec:
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
def runpytest_inprocess(self, *args, **kwargs):
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides.
"""
syspathinsert = kwargs.pop("syspathinsert", False)
if syspathinsert:
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec:
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec:
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = list(args)
for x in args:
if str(x).startswith("--basetemp"):
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source: the module source
:param funcname: the name of the test function for which to return a
test item
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "{!r} item not found in module:\n{}\nitems: {}".format(
funcname, source, items
)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source: the source code of the module to collect
:param configargs: any extra arguments to pass to
:py:meth:`parseconfigure`
:param withinit: whether to also write an ``__init__.py`` file to the
same directory to ensure it is a package
"""
if isinstance(source, Path):
path = self.tmpdir.join(str(source))
assert not withinit, "not supported for paths"
else:
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
return self.getnode(config, path)
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection node
matching the given name.
:param modcol: a module collection node; see :py:meth:`getmodulecol`
:param name: the name of the node to return
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(
self,
cmdargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=CLOSE_STDIN,
**kw
):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working directory
is in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
filter(None, [os.getcwd(), env.get("PYTHONPATH", "")])
)
env.update(self._env_run_update)
kw["env"] = env
if stdin is Testdir.CLOSE_STDIN:
kw["stdin"] = subprocess.PIPE
elif isinstance(stdin, bytes):
kw["stdin"] = subprocess.PIPE
else:
kw["stdin"] = stdin
popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
if stdin is Testdir.CLOSE_STDIN:
popen.stdin.close()
elif isinstance(stdin, bytes):
popen.stdin.write(stdin)
return popen
def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
:param args: the sequence of arguments to pass to `subprocess.Popen()`
:kwarg timeout: the period in seconds after which to timeout and raise
:py:class:`Testdir.TimeoutExpired`
:kwarg stdin: optional standard input. If bytes are given they are
    written to the child and the pipe is closed; otherwise the value is
    passed through to ``popen``. Defaults to ``CLOSE_STDIN``, which
    translates to using a pipe (``subprocess.PIPE``) that gets closed.
Returns a :py:class:`RunResult`.
"""
__tracebackhide__ = True
cmdargs = [
str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs
]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", *cmdargs)
print(" in:", py.path.local())
f1 = open(str(p1), "w", encoding="utf8")
f2 = open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(
cmdargs,
stdin=stdin,
stdout=f1,
stderr=f2,
close_fds=(sys.platform != "win32"),
)
if isinstance(stdin, bytes):
popen.stdin.close()
def handle_timeout():
__tracebackhide__ = True
timeout_message = (
"{seconds} second timeout expired running:"
" {command}".format(seconds=timeout, command=cmdargs)
)
popen.kill()
popen.wait()
raise self.TimeoutExpired(timeout_message)
if timeout is None:
ret = popen.wait()
else:
try:
ret = popen.wait(timeout)
except subprocess.TimeoutExpired:
handle_timeout()
finally:
f1.close()
f2.close()
f1 = open(str(p1), "r", encoding="utf8")
f2 = open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to {} because of encoding".format(fp))
def _getpytestargs(self):
return sys.executable, "-mpytest"
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, timeout=None):
"""Run pytest as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will be added using the
``-p`` command line option. Additionally ``--basetemp`` is used to put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" to not conflict with the normal numbered pytest
location for temporary files and directories.
:param args: the sequence of arguments to pass to the pytest subprocess
:param timeout: the period in seconds after which to timeout and raise
:py:class:`Testdir.TimeoutExpired`
Returns a :py:class:`RunResult`.
"""
__tracebackhide__ = True
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=self.tmpdir
)
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ("-p", plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args, timeout=timeout)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary
directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "{} --basetemp={} {}".format(invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
if not hasattr(pexpect, "spawn"):
pytest.skip("pexpect.spawn not available")
logfile = self.tmpdir.join("spawn.out").open("wb")
# Do not load user config.
env = os.environ.copy()
env.update(self._env_run_update)
child = pexpect.spawn(cmd, logfile=logfile, env=env)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
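# Hedged example (illustrative): running an external command through Testdir.run()
# and inspecting the resulting RunResult.
#     def test_run_example(testdir):
#         result = testdir.run(sys.executable, "-c", "print('hello')")
#         assert result.ret == 0
#         result.stdout.fnmatch_lines(["hello"])
# Bytes passed as ``stdin`` are written to the child and the pipe is closed;
# ``timeout`` raises Testdir.TimeoutExpired when exceeded.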
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n{}".format(
saferepr(out)
)
class LineComp:
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
"""Assert that lines2 are contained (linearly) in lines1.
Return a list of extra lines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher:
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing newlines, i.e.
``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output using in any order.
Lines are checked using ``fnmatch.fnmatch``. The argument is a list of
lines which have to occur in the output, in any order.
"""
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2):
"""Check lines exist in the output using ``re.match``, in any order.
The argument is a list of lines which have to occur in the output, in
any order.
"""
self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
def _match_lines_random(self, lines2, match_func):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the output, in
any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or match_func(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1 :]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(" ".join(str(x) for x in args))
@property
def _log_text(self):
return "\n".join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
The argument is a list of lines which have to match and can use glob
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also printed on stdout.
"""
__tracebackhide__ = True
self._match_lines(lines2, fnmatch, "fnmatch")
def re_match_lines(self, lines2):
"""Search captured text for matching lines using ``re.match``.
The argument is a list of lines which have to match using ``re.match``.
If they do not match a pytest.fail() is called.
The matches and non-matches are also printed on stdout.
"""
__tracebackhide__ = True
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
def _match_lines(self, lines2, match_func, match_nickname):
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
:param list[str] lines2: list of string patterns to match. The actual
format depends on ``match_func``
:param match_func: a callable ``match_func(line, pattern)`` where line
is the captured line from stdout/stderr and pattern is the matching
pattern
:param str match_nickname: the nickname for the match function that
will be logged to stdout when a match occurs
"""
assert isinstance(lines2, Sequence)
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif match_func(nextline, line):
self._log("%s:" % match_nickname, repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: {!r}".format(line))
pytest.fail(self._log_text)
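# Minimal illustration (values hypothetical) of LineMatcher: the constructor takes
# the captured lines, and fnmatch_lines() checks that the given glob patterns occur
# in that relative order, failing the test otherwise.
#     matcher = LineMatcher(["collected 2 items", "test_a.py ..", "== 2 passed =="])
#     matcher.fnmatch_lines(["collected * items", "*2 passed*"])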
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.utils import timeutils
import six
from keystone import assignment
from keystone.common import controller
from keystone.common import dependency
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
CONF = config.CONF
def _trustor_trustee_only(trust, user_id):
if (user_id != trust.get('trustee_user_id') and
user_id != trust.get('trustor_user_id')):
raise exception.Forbidden()
def _admin_trustor_only(context, trust, user_id):
if user_id != trust.get('trustor_user_id') and not context['is_admin']:
raise exception.Forbidden()
@dependency.requires('assignment_api', 'identity_api', 'trust_api',
'token_api')
class TrustV3(controller.V3Controller):
collection_name = "trusts"
member_name = "trust"
@classmethod
def base_url(cls, context, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
# NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
# V3Controller.base_url handles setting the self link correctly.
path = '/OS-TRUST/' + cls.collection_name
return super(TrustV3, cls).base_url(context, path=path)
def _get_user_id(self, context):
if 'token_id' in context:
token_id = context['token_id']
token = self.token_api.get_token(token_id)
user_id = token['user']['id']
return user_id
return None
def get_trust(self, context, trust_id):
user_id = self._get_user_id(context)
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
_trustor_trustee_only(trust, user_id)
self._fill_in_roles(context, trust,
self.assignment_api.list_roles())
return TrustV3.wrap_member(context, trust)
def _fill_in_roles(self, context, trust, all_roles):
if trust.get('expires_at') is not None:
trust['expires_at'] = (timeutils.isotime
(trust['expires_at'],
subsecond=True))
if 'roles' not in trust:
trust['roles'] = []
trust_full_roles = []
for trust_role in trust['roles']:
if isinstance(trust_role, six.string_types):
trust_role = {'id': trust_role}
matching_roles = [x for x in all_roles
if x['id'] == trust_role['id']]
if matching_roles:
full_role = assignment.controllers.RoleV3.wrap_member(
context, matching_roles[0])['role']
trust_full_roles.append(full_role)
trust['roles'] = trust_full_roles
trust['roles_links'] = {
'self': (self.base_url(context) + "/%s/roles" % trust['id']),
'next': None,
'previous': None}
def _clean_role_list(self, context, trust, all_roles):
trust_roles = []
all_role_names = dict((r['name'], r) for r in all_roles)
for role in trust.get('roles', []):
if 'id' in role:
trust_roles.append({'id': role['id']})
elif 'name' in role:
rolename = role['name']
if rolename in all_role_names:
trust_roles.append({'id':
all_role_names[rolename]['id']})
else:
raise exception.RoleNotFound("role %s is not defined" %
rolename)
else:
raise exception.ValidationError(attribute='id or name',
target='roles')
return trust_roles
@controller.protected()
def create_trust(self, context, trust=None):
"""Create a new trust.
The user creating the trust must be the trustor.
"""
# Explicitly prevent a trust token from creating a new trust.
auth_context = context.get('environment',
{}).get('KEYSTONE_AUTH_CONTEXT', {})
if auth_context.get('is_delegated_auth'):
raise exception.Forbidden(
_('Cannot create a trust'
' with a token issued via delegation.'))
if not trust:
raise exception.ValidationError(attribute='trust',
target='request')
self._require_attributes(trust, ['impersonation', 'trustee_user_id',
'trustor_user_id'])
if trust.get('project_id'):
self._require_role(trust)
self._require_user_is_trustor(context, trust)
self._require_trustee_exists(trust['trustee_user_id'])
all_roles = self.assignment_api.list_roles()
clean_roles = self._clean_role_list(context, trust, all_roles)
self._require_trustor_has_role_in_project(trust, clean_roles)
trust['expires_at'] = self._parse_expiration_date(
trust.get('expires_at'))
trust_id = uuid.uuid4().hex
new_trust = self.trust_api.create_trust(trust_id, trust, clean_roles)
self._fill_in_roles(context, new_trust, all_roles)
return TrustV3.wrap_member(context, new_trust)
def _require_trustee_exists(self, trustee_user_id):
self.identity_api.get_user(trustee_user_id)
def _require_user_is_trustor(self, context, trust):
user_id = self._get_user_id(context)
if user_id != trust.get('trustor_user_id'):
raise exception.Forbidden(
_("The authenticated user should match the trustor."))
def _require_role(self, trust):
if not trust.get('roles'):
raise exception.Forbidden(
_('At least one role should be specified.'))
def _get_user_role(self, trust):
if not self._attribute_is_empty(trust, 'project_id'):
return self.assignment_api.get_roles_for_user_and_project(
trust['trustor_user_id'], trust['project_id'])
else:
return []
def _require_trustor_has_role_in_project(self, trust, clean_roles):
user_roles = self._get_user_role(trust)
for trust_role in clean_roles:
matching_roles = [x for x in user_roles
if x == trust_role['id']]
if not matching_roles:
raise exception.RoleNotFound(role_id=trust_role['id'])
def _parse_expiration_date(self, expiration_date):
if expiration_date is None:
return None
if not expiration_date.endswith('Z'):
expiration_date += 'Z'
try:
return timeutils.parse_isotime(expiration_date)
except ValueError:
raise exception.ValidationTimeStampError()
@controller.protected()
def list_trusts(self, context):
query = context['query_string']
trusts = []
if not query:
self.assert_admin(context)
trusts += self.trust_api.list_trusts()
if 'trustor_user_id' in query:
user_id = query['trustor_user_id']
calling_user_id = self._get_user_id(context)
if user_id != calling_user_id:
raise exception.Forbidden()
trusts += (self.trust_api.
list_trusts_for_trustor(user_id))
if 'trustee_user_id' in query:
user_id = query['trustee_user_id']
calling_user_id = self._get_user_id(context)
if user_id != calling_user_id:
raise exception.Forbidden()
trusts += self.trust_api.list_trusts_for_trustee(user_id)
for trust in trusts:
# get_trust returns roles, list_trusts does not
# It seems in some circumstances, roles does not
# exist in the query response, so check first
if 'roles' in trust:
del trust['roles']
if trust.get('expires_at') is not None:
trust['expires_at'] = (timeutils.isotime
(trust['expires_at'],
subsecond=True))
return TrustV3.wrap_collection(context, trusts)
@controller.protected()
def delete_trust(self, context, trust_id):
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
user_id = self._get_user_id(context)
_admin_trustor_only(context, trust, user_id)
self.trust_api.delete_trust(trust_id)
userid = trust['trustor_user_id']
self.token_api.delete_tokens(userid, trust_id=trust_id)
@controller.protected()
def list_roles_for_trust(self, context, trust_id):
trust = self.get_trust(context, trust_id)['trust']
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
return {'roles': trust['roles'],
'links': trust['roles_links']}
@controller.protected()
def check_role_for_trust(self, context, trust_id, role_id):
"""Checks if a role has been assigned to a trust."""
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
if not any(role['id'] == role_id for role in trust['roles']):
raise exception.RoleNotFound(role_id=role_id)
@controller.protected()
def get_role_for_trust(self, context, trust_id, role_id):
"""Get a role that has been assigned to a trust."""
self.check_role_for_trust(context, trust_id, role_id)
role = self.assignment_api.get_role(role_id)
return assignment.controllers.RoleV3.wrap_member(context, role)
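# Illustrative request body accepted by create_trust() above (all values are
# hypothetical). 'impersonation', 'trustee_user_id' and 'trustor_user_id' are
# required; 'project_id' additionally requires at least one role, given by id or
# by name; a missing trailing 'Z' on 'expires_at' is appended before parsing.
# {
#     "trust": {
#         "trustor_user_id": "<uuid of the caller>",
#         "trustee_user_id": "<uuid of the delegate>",
#         "impersonation": false,
#         "project_id": "<project uuid>",
#         "roles": [{"name": "member"}],
#         "expires_at": "2014-06-30T12:00:00"
#     }
# }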
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a variety of device interactions based on adb.
Eventually, this will be based on adb_wrapper.
"""
# pylint: disable=W0613
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import zipfile
import pylib.android_commands
from pylib import cmd_helper
from pylib import constants
from pylib.device import adb_wrapper
from pylib.device import decorators
from pylib.device import device_errors
from pylib.device import intent
from pylib.device.commands import install_commands
from pylib.utils import apk_helper
from pylib.utils import device_temp_file
from pylib.utils import host_utils
from pylib.utils import md5sum
from pylib.utils import parallelizer
from pylib.utils import timeout_retry
from pylib.utils import zip_utils
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 3
# A sentinel object for default values
# TODO(jbudorick,perezju): revisit how default values are handled by
# the timeout_retry decorators.
DEFAULT = object()
@decorators.WithExplicitTimeoutAndRetries(
_DEFAULT_TIMEOUT, _DEFAULT_RETRIES)
def GetAVDs():
"""Returns a list of Android Virtual Devices.
Returns:
A list containing the configured AVDs.
"""
lines = cmd_helper.GetCmdOutput([
os.path.join(constants.ANDROID_SDK_ROOT, 'tools', 'android'),
'list', 'avd']).splitlines()
avds = []
for line in lines:
if 'Name:' not in line:
continue
key, value = (s.strip() for s in line.split(':', 1))
if key == 'Name':
avds.append(value)
return avds
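# For reference, GetAVDs() above parses output of the form produced by
# "android list avd" (example lines are illustrative):
#     Name: Nexus_5_API_21
#     Path: /home/user/.android/avd/Nexus_5_API_21.avd
# Only the values of "Name:" lines are collected.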
@decorators.WithExplicitTimeoutAndRetries(
_DEFAULT_TIMEOUT, _DEFAULT_RETRIES)
def RestartServer():
"""Restarts the adb server.
Raises:
CommandFailedError if we fail to kill or restart the server.
"""
def adb_killed():
return not adb_wrapper.AdbWrapper.IsServerOnline()
def adb_started():
return adb_wrapper.AdbWrapper.IsServerOnline()
adb_wrapper.AdbWrapper.KillServer()
if not timeout_retry.WaitFor(adb_killed, wait_period=1, max_tries=5):
# TODO(perezju): raise an exception after fixing http://crbug.com/442319
logging.warning('Failed to kill adb server')
adb_wrapper.AdbWrapper.StartServer()
if not timeout_retry.WaitFor(adb_started, wait_period=1, max_tries=5):
raise device_errors.CommandFailedError('Failed to start adb server')
def _GetTimeStamp():
"""Return a basic ISO 8601 time stamp with the current local time."""
return time.strftime('%Y%m%dT%H%M%S', time.localtime())
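# Hedged usage sketch (illustrative; the serial is hypothetical) of the DeviceUtils
# class defined below:
#     device = DeviceUtils('0123456789abcdef')   # serial, AdbWrapper, or AndroidCommands
#     if device.IsOnline() and not device.IsUserBuild():
#         device.EnableRoot()
#     device.WaitUntilFullyBooted(wifi=False)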
class DeviceUtils(object):
_VALID_SHELL_VARIABLE = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
def __init__(self, device, default_timeout=_DEFAULT_TIMEOUT,
default_retries=_DEFAULT_RETRIES):
"""DeviceUtils constructor.
Args:
device: Either a device serial, an existing AdbWrapper instance, or an
existing AndroidCommands instance.
default_timeout: An integer containing the default number of seconds to
wait for an operation to complete if no explicit value
is provided.
default_retries: An integer containing the default number or times an
operation should be retried on failure if no explicit
value is provided.
"""
self.adb = None
self.old_interface = None
if isinstance(device, basestring):
self.adb = adb_wrapper.AdbWrapper(device)
self.old_interface = pylib.android_commands.AndroidCommands(device)
elif isinstance(device, adb_wrapper.AdbWrapper):
self.adb = device
self.old_interface = pylib.android_commands.AndroidCommands(str(device))
elif isinstance(device, pylib.android_commands.AndroidCommands):
self.adb = adb_wrapper.AdbWrapper(device.GetDevice())
self.old_interface = device
else:
raise ValueError('Unsupported device value: %r' % device)
self._commands_installed = None
self._default_timeout = default_timeout
self._default_retries = default_retries
self._cache = {}
assert hasattr(self, decorators.DEFAULT_TIMEOUT_ATTR)
assert hasattr(self, decorators.DEFAULT_RETRIES_ATTR)
@decorators.WithTimeoutAndRetriesFromInstance()
def IsOnline(self, timeout=None, retries=None):
"""Checks whether the device is online.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device is online, False otherwise.
Raises:
CommandTimeoutError on timeout.
"""
try:
return self.adb.GetState() == 'device'
except device_errors.BaseError as exc:
logging.info('Failed to get state: %s', exc)
return False
@decorators.WithTimeoutAndRetriesFromInstance()
def HasRoot(self, timeout=None, retries=None):
"""Checks whether or not adbd has root privileges.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if adbd has root privileges, False otherwise.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
try:
self.RunShellCommand('ls /root', check_return=True)
return True
except device_errors.AdbCommandFailedError:
return False
def NeedsSU(self, timeout=DEFAULT, retries=DEFAULT):
"""Checks whether 'su' is needed to access protected resources.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if 'su' is available on the device and is needed to access
protected resources; False otherwise, i.e. if 'su' is not available
(e.g. because the device has a user build) or is not needed (because adbd
already has root privileges).
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if 'needs_su' not in self._cache:
try:
self.RunShellCommand(
'su -c ls /root && ! ls /root', check_return=True,
timeout=self._default_timeout if timeout is DEFAULT else timeout,
retries=self._default_retries if retries is DEFAULT else retries)
self._cache['needs_su'] = True
except device_errors.AdbCommandFailedError:
self._cache['needs_su'] = False
return self._cache['needs_su']
@decorators.WithTimeoutAndRetriesFromInstance()
def EnableRoot(self, timeout=None, retries=None):
"""Restarts adbd with root privileges.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if root could not be enabled.
CommandTimeoutError on timeout.
"""
if self.IsUserBuild():
raise device_errors.CommandFailedError(
'Cannot enable root in user builds.', str(self))
if 'needs_su' in self._cache:
del self._cache['needs_su']
self.adb.Root()
self.adb.WaitForDevice()
@decorators.WithTimeoutAndRetriesFromInstance()
def IsUserBuild(self, timeout=None, retries=None):
"""Checks whether or not the device is running a user build.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device is running a user build, False otherwise (i.e. if
it's running a userdebug build).
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
return self.build_type == 'user'
@decorators.WithTimeoutAndRetriesFromInstance()
def GetExternalStoragePath(self, timeout=None, retries=None):
"""Get the device's path to its SD card.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
The device's path to its SD card.
Raises:
CommandFailedError if the external storage path could not be determined.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if 'external_storage' in self._cache:
return self._cache['external_storage']
value = self.RunShellCommand('echo $EXTERNAL_STORAGE',
single_line=True,
check_return=True)
if not value:
raise device_errors.CommandFailedError('$EXTERNAL_STORAGE is not set',
str(self))
self._cache['external_storage'] = value
return value
@decorators.WithTimeoutAndRetriesFromInstance()
def GetApplicationPath(self, package, timeout=None, retries=None):
"""Get the path of the installed apk on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
# 'pm path' is liable to incorrectly exit with a nonzero number starting
# in Lollipop.
# TODO(jbudorick): Check if this is fixed as new Android versions are
# released to put an upper bound on this.
should_check_return = (self.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP)
output = self.RunShellCommand(['pm', 'path', package], single_line=True,
check_return=should_check_return)
if not output:
return None
if not output.startswith('package:'):
raise device_errors.CommandFailedError('pm path returned: %r' % output,
str(self))
return output[len('package:'):]
@decorators.WithTimeoutAndRetriesFromInstance()
def WaitUntilFullyBooted(self, wifi=False, timeout=None, retries=None):
"""Wait for the device to fully boot.
This means waiting for the device to boot, the package manager to be
available, and the SD card to be ready. It can optionally mean waiting
for wifi to come up, too.
Args:
wifi: A boolean indicating if we should wait for wifi to come up or not.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError on failure.
CommandTimeoutError if one of the component waits times out.
DeviceUnreachableError if the device becomes unresponsive.
"""
def sd_card_ready():
try:
self.RunShellCommand(['test', '-d', self.GetExternalStoragePath()],
check_return=True)
return True
except device_errors.AdbCommandFailedError:
return False
def pm_ready():
try:
return self.GetApplicationPath('android')
except device_errors.CommandFailedError:
return False
def boot_completed():
return self.GetProp('sys.boot_completed') == '1'
def wifi_enabled():
return 'Wi-Fi is enabled' in self.RunShellCommand(['dumpsys', 'wifi'],
check_return=False)
self.adb.WaitForDevice()
timeout_retry.WaitFor(sd_card_ready)
timeout_retry.WaitFor(pm_ready)
timeout_retry.WaitFor(boot_completed)
if wifi:
timeout_retry.WaitFor(wifi_enabled)
REBOOT_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
REBOOT_DEFAULT_RETRIES = _DEFAULT_RETRIES
@decorators.WithTimeoutAndRetriesDefaults(
REBOOT_DEFAULT_TIMEOUT,
REBOOT_DEFAULT_RETRIES)
def Reboot(self, block=True, timeout=None, retries=None):
"""Reboot the device.
Args:
block: A boolean indicating if we should wait for the reboot to complete.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
def device_offline():
return not self.IsOnline()
self.adb.Reboot()
self._cache = {}
timeout_retry.WaitFor(device_offline, wait_period=1)
if block:
self.WaitUntilFullyBooted()
INSTALL_DEFAULT_TIMEOUT = 4 * _DEFAULT_TIMEOUT
INSTALL_DEFAULT_RETRIES = _DEFAULT_RETRIES
@decorators.WithTimeoutAndRetriesDefaults(
INSTALL_DEFAULT_TIMEOUT,
INSTALL_DEFAULT_RETRIES)
def Install(self, apk_path, reinstall=False, timeout=None, retries=None):
"""Install an APK.
    No-op if an identical APK is already installed.
Args:
apk_path: A string containing the path to the APK to install.
reinstall: A boolean indicating if we should keep any existing app data.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the installation fails.
CommandTimeoutError if the installation times out.
DeviceUnreachableError on missing device.
"""
package_name = apk_helper.GetPackageName(apk_path)
device_path = self.GetApplicationPath(package_name)
if device_path is not None:
should_install = bool(self._GetChangedFilesImpl(apk_path, device_path))
if should_install and not reinstall:
self.adb.Uninstall(package_name)
else:
should_install = True
if should_install:
self.adb.Install(apk_path, reinstall=reinstall)
@decorators.WithTimeoutAndRetriesFromInstance()
def RunShellCommand(self, cmd, check_return=False, cwd=None, env=None,
as_root=False, single_line=False,
timeout=None, retries=None):
"""Run an ADB shell command.
The command to run |cmd| should be a sequence of program arguments or else
a single string.
When |cmd| is a sequence, it is assumed to contain the name of the command
to run followed by its arguments. In this case, arguments are passed to the
command exactly as given, without any further processing by the shell. This
    makes it easy to pass arguments containing spaces or special characters
    without having to worry about getting the quoting right. Whenever
    possible, it is recommended to pass |cmd| as a sequence.
When |cmd| is given as a string, it will be interpreted and run by the
shell on the device.
This behaviour is consistent with that of command runners in cmd_helper as
well as Python's own subprocess.Popen.
TODO(perezju) Change the default of |check_return| to True when callers
have switched to the new behaviour.
Args:
cmd: A string with the full command to run on the device, or a sequence
containing the command and its arguments.
check_return: A boolean indicating whether or not the return code should
be checked.
cwd: The device directory in which the command should be run.
env: The environment variables with which the command should be run.
as_root: A boolean indicating whether the shell command should be run
with root privileges.
single_line: A boolean indicating if only a single line of output is
expected.
timeout: timeout in seconds
retries: number of retries
Returns:
If single_line is False, the output of the command as a list of lines,
      otherwise, a string with the single line of output emitted by the command
(with the optional newline at the end stripped).
Raises:
AdbCommandFailedError if check_return is True and the exit code of
the command run on the device is non-zero.
CommandFailedError if single_line is True but the output contains two or
more lines.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
def env_quote(key, value):
if not DeviceUtils._VALID_SHELL_VARIABLE.match(key):
raise KeyError('Invalid shell variable name %r' % key)
# using double quotes here to allow interpolation of shell variables
return '%s=%s' % (key, cmd_helper.DoubleQuote(value))
if not isinstance(cmd, basestring):
cmd = ' '.join(cmd_helper.SingleQuote(s) for s in cmd)
if env:
env = ' '.join(env_quote(k, v) for k, v in env.iteritems())
cmd = '%s %s' % (env, cmd)
if cwd:
cmd = 'cd %s && %s' % (cmd_helper.SingleQuote(cwd), cmd)
if as_root and self.NeedsSU():
# "su -c sh -c" allows using shell features in |cmd|
cmd = 'su -c sh -c %s' % cmd_helper.SingleQuote(cmd)
if timeout is None:
timeout = self._default_timeout
try:
output = self.adb.Shell(cmd)
except device_errors.AdbCommandFailedError as e:
if check_return:
raise
else:
output = e.output
output = output.splitlines()
if single_line:
if not output:
return ''
elif len(output) == 1:
return output[0]
else:
msg = 'one line of output was expected, but got: %s'
raise device_errors.CommandFailedError(msg % output, str(self))
else:
return output
@decorators.WithTimeoutAndRetriesFromInstance()
def KillAll(self, process_name, signum=9, as_root=False, blocking=False,
timeout=None, retries=None):
"""Kill all processes with the given name on the device.
Args:
process_name: A string containing the name of the process to kill.
signum: An integer containing the signal number to send to kill. Defaults
to 9 (SIGKILL).
as_root: A boolean indicating whether the kill should be executed with
root privileges.
blocking: A boolean indicating whether we should wait until all processes
with the given |process_name| are dead.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if no process was killed.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
pids = self._GetPidsImpl(process_name)
if not pids:
raise device_errors.CommandFailedError(
'No process "%s"' % process_name, str(self))
cmd = ['kill', '-%d' % signum] + pids.values()
self.RunShellCommand(cmd, as_root=as_root, check_return=True)
if blocking:
wait_period = 0.1
while self._GetPidsImpl(process_name):
time.sleep(wait_period)
return len(pids)
@decorators.WithTimeoutAndRetriesFromInstance()
def StartActivity(self, intent_obj, blocking=False, trace_file_name=None,
force_stop=False, timeout=None, retries=None):
"""Start package's activity on the device.
Args:
intent_obj: An Intent object to send.
blocking: A boolean indicating whether we should wait for the activity to
finish launching.
trace_file_name: If present, a string that both indicates that we want to
profile the activity and contains the path to which the
trace should be saved.
force_stop: A boolean indicating whether we should stop the activity
before starting it.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the activity could not be started.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
cmd = ['am', 'start']
if blocking:
cmd.append('-W')
if trace_file_name:
cmd.extend(['--start-profiler', trace_file_name])
if force_stop:
cmd.append('-S')
cmd.extend(intent_obj.am_args)
for line in self.RunShellCommand(cmd, check_return=True):
if line.startswith('Error:'):
raise device_errors.CommandFailedError(line, str(self))
@decorators.WithTimeoutAndRetriesFromInstance()
def StartInstrumentation(self, component, finish=True, raw=False,
extras=None, timeout=None, retries=None):
if extras is None:
extras = {}
cmd = ['am', 'instrument']
if finish:
cmd.append('-w')
if raw:
cmd.append('-r')
for k, v in extras.iteritems():
cmd.extend(['-e', k, v])
cmd.append(component)
return self.RunShellCommand(cmd, check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def BroadcastIntent(self, intent_obj, timeout=None, retries=None):
"""Send a broadcast intent.
Args:
      intent_obj: An Intent instance to broadcast.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
cmd = ['am', 'broadcast'] + intent_obj.am_args
self.RunShellCommand(cmd, check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def GoHome(self, timeout=None, retries=None):
"""Return to the home screen.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
self.StartActivity(
intent.Intent(action='android.intent.action.MAIN',
category='android.intent.category.HOME'),
blocking=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def ForceStop(self, package, timeout=None, retries=None):
"""Close the application.
Args:
package: A string containing the name of the package to stop.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
self.RunShellCommand(['am', 'force-stop', package], check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def ClearApplicationState(self, package, timeout=None, retries=None):
"""Clear all state for the given package.
Args:
      package: A string containing the name of the package to clear.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
if self.GetApplicationPath(package):
self.RunShellCommand(['pm', 'clear', package], check_return=True)
@decorators.WithTimeoutAndRetriesFromInstance()
def SendKeyEvent(self, keycode, timeout=None, retries=None):
"""Sends a keycode to the device.
See: http://developer.android.com/reference/android/view/KeyEvent.html
Args:
      keycode: An integer keycode to send to the device.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
self.RunShellCommand(['input', 'keyevent', format(keycode, 'd')],
check_return=True)
PUSH_CHANGED_FILES_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
PUSH_CHANGED_FILES_DEFAULT_RETRIES = _DEFAULT_RETRIES
@decorators.WithTimeoutAndRetriesDefaults(
PUSH_CHANGED_FILES_DEFAULT_TIMEOUT,
PUSH_CHANGED_FILES_DEFAULT_RETRIES)
def PushChangedFiles(self, host_device_tuples, timeout=None,
retries=None):
"""Push files to the device, skipping files that don't need updating.
Args:
host_device_tuples: A list of (host_path, device_path) tuples, where
|host_path| is an absolute path of a file or directory on the host
        that should be minimally pushed to the device, and |device_path| is
an absolute path of the destination on the device.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
files = []
for h, d in host_device_tuples:
if os.path.isdir(h):
self.RunShellCommand(['mkdir', '-p', d], check_return=True)
files += self._GetChangedFilesImpl(h, d)
if not files:
return
size = sum(host_utils.GetRecursiveDiskUsage(h) for h, _ in files)
file_count = len(files)
dir_size = sum(host_utils.GetRecursiveDiskUsage(h)
for h, _ in host_device_tuples)
dir_file_count = 0
for h, _ in host_device_tuples:
if os.path.isdir(h):
dir_file_count += sum(len(f) for _r, _d, f in os.walk(h))
else:
dir_file_count += 1
push_duration = self._ApproximateDuration(
file_count, file_count, size, False)
dir_push_duration = self._ApproximateDuration(
len(host_device_tuples), dir_file_count, dir_size, False)
zip_duration = self._ApproximateDuration(1, 1, size, True)
self._InstallCommands()
if dir_push_duration < push_duration and (
dir_push_duration < zip_duration or not self._commands_installed):
self._PushChangedFilesIndividually(host_device_tuples)
elif push_duration < zip_duration or not self._commands_installed:
self._PushChangedFilesIndividually(files)
else:
self._PushChangedFilesZipped(files)
self.RunShellCommand(
['chmod', '-R', '777'] + [d for _, d in host_device_tuples],
as_root=True, check_return=True)
def _GetChangedFilesImpl(self, host_path, device_path):
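    # Compares host-side and device-side md5sums and returns the list of
    # (host_path, device_path) tuples whose device copy is missing or stale.
    # If the device path cannot be resolved, everything is considered changed.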
real_host_path = os.path.realpath(host_path)
try:
real_device_path = self.RunShellCommand(
['realpath', device_path], single_line=True, check_return=True)
except device_errors.CommandFailedError:
real_device_path = None
if not real_device_path:
return [(host_path, device_path)]
host_hash_tuples = md5sum.CalculateHostMd5Sums([real_host_path])
device_paths_to_md5 = (
real_device_path if os.path.isfile(real_host_path)
else ('%s/%s' % (real_device_path, os.path.relpath(p, real_host_path))
for _, p in host_hash_tuples))
device_hash_tuples = md5sum.CalculateDeviceMd5Sums(
device_paths_to_md5, self)
if os.path.isfile(host_path):
if (not device_hash_tuples
or device_hash_tuples[0].hash != host_hash_tuples[0].hash):
return [(host_path, device_path)]
else:
return []
else:
device_tuple_dict = dict((d.path, d.hash) for d in device_hash_tuples)
to_push = []
for host_hash, host_abs_path in (
(h.hash, h.path) for h in host_hash_tuples):
device_abs_path = '%s/%s' % (
real_device_path, os.path.relpath(host_abs_path, real_host_path))
if (device_abs_path not in device_tuple_dict
or device_tuple_dict[device_abs_path] != host_hash):
to_push.append((host_abs_path, device_abs_path))
return to_push
def _InstallCommands(self):
if self._commands_installed is None:
try:
if not install_commands.Installed(self):
install_commands.InstallCommands(self)
self._commands_installed = True
except Exception as e:
logging.warning('unzip not available: %s' % str(e))
self._commands_installed = False
@staticmethod
def _ApproximateDuration(adb_calls, file_count, byte_count, is_zipping):
# We approximate the time to push a set of files to a device as:
# t = c1 * a + c2 * f + c3 + b / c4 + b / (c5 * c6), where
# t: total time (sec)
# c1: adb call time delay (sec)
# a: number of times adb is called (unitless)
# c2: push time delay (sec)
# f: number of files pushed via adb (unitless)
# c3: zip time delay (sec)
# c4: zip rate (bytes/sec)
# b: total number of bytes (bytes)
# c5: transfer rate (bytes/sec)
# c6: compression ratio (unitless)
# All of these are approximations.
ADB_CALL_PENALTY = 0.1 # seconds
ADB_PUSH_PENALTY = 0.01 # seconds
ZIP_PENALTY = 2.0 # seconds
ZIP_RATE = 10000000.0 # bytes / second
TRANSFER_RATE = 2000000.0 # bytes / second
COMPRESSION_RATIO = 2.0 # unitless
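    # Illustrative worked example (assumed numbers, not from the source):
    # pushing 100 files totalling 50 MB individually costs roughly
    #   0.1 * 100 + 0.01 * 100 + 50e6 / 2e6 = 36 s,
    # while zipping them first costs roughly
    #   0.1 * 1 + 0.01 * 1 + 2 + 50e6 / 1e7 + 50e6 / (2e6 * 2) = 19.6 s.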
adb_call_time = ADB_CALL_PENALTY * adb_calls
adb_push_setup_time = ADB_PUSH_PENALTY * file_count
if is_zipping:
zip_time = ZIP_PENALTY + byte_count / ZIP_RATE
transfer_time = byte_count / (TRANSFER_RATE * COMPRESSION_RATIO)
else:
zip_time = 0
transfer_time = byte_count / TRANSFER_RATE
return adb_call_time + adb_push_setup_time + zip_time + transfer_time
def _PushChangedFilesIndividually(self, files):
for h, d in files:
self.adb.Push(h, d)
def _PushChangedFilesZipped(self, files):
if not files:
return
with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
zip_proc = multiprocessing.Process(
target=DeviceUtils._CreateDeviceZip,
args=(zip_file.name, files))
zip_proc.start()
zip_proc.join()
zip_on_device = '%s/tmp.zip' % self.GetExternalStoragePath()
try:
self.adb.Push(zip_file.name, zip_on_device)
self.RunShellCommand(
['unzip', zip_on_device],
as_root=True,
env={'PATH': '$PATH:%s' % install_commands.BIN_DIR},
check_return=True)
finally:
if zip_proc.is_alive():
zip_proc.terminate()
if self.IsOnline():
self.RunShellCommand(['rm', zip_on_device], check_return=True)
@staticmethod
def _CreateDeviceZip(zip_path, host_device_tuples):
with zipfile.ZipFile(zip_path, 'w') as zip_file:
for host_path, device_path in host_device_tuples:
zip_utils.WriteToZipFile(zip_file, host_path, device_path)
@decorators.WithTimeoutAndRetriesFromInstance()
def FileExists(self, device_path, timeout=None, retries=None):
"""Checks whether the given file exists on the device.
Args:
device_path: A string containing the absolute path to the file on the
device.
timeout: timeout in seconds
retries: number of retries
Returns:
True if the file exists on the device, False otherwise.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
try:
self.RunShellCommand(['test', '-e', device_path], check_return=True)
return True
except device_errors.AdbCommandFailedError:
return False
@decorators.WithTimeoutAndRetriesFromInstance()
def PullFile(self, device_path, host_path, timeout=None, retries=None):
"""Pull a file from the device.
Args:
device_path: A string containing the absolute path of the file to pull
from the device.
host_path: A string containing the absolute path of the destination on
the host.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
"""
# Create the base dir if it doesn't exist already
dirname = os.path.dirname(host_path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
self.adb.Pull(device_path, host_path)
@decorators.WithTimeoutAndRetriesFromInstance()
def ReadFile(self, device_path, as_root=False, timeout=None, retries=None):
"""Reads the contents of a file from the device.
Args:
device_path: A string containing the absolute path of the file to read
from the device.
as_root: A boolean indicating whether the read should be executed with
root privileges.
timeout: timeout in seconds
retries: number of retries
Returns:
The contents of the file at |device_path| as a list of lines.
Raises:
CommandFailedError if the file can't be read.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
    # TODO(jbudorick): Evaluate whether we want to return a list of lines after
    # the implementation switch, and whether a missing file should raise an
    # exception.
if as_root:
if not self.old_interface.CanAccessProtectedFileContents():
raise device_errors.CommandFailedError(
'Cannot read from %s with root privileges.' % device_path)
return self.old_interface.GetProtectedFileContents(device_path)
else:
return self.old_interface.GetFileContents(device_path)
@decorators.WithTimeoutAndRetriesFromInstance()
def WriteFile(self, device_path, contents, as_root=False, force_push=False,
timeout=None, retries=None):
"""Writes |contents| to a file on the device.
Args:
device_path: A string containing the absolute path to the file to write
on the device.
contents: A string containing the data to write to the device.
as_root: A boolean indicating whether the write should be executed with
root privileges (if available).
force_push: A boolean indicating whether to force the operation to be
        performed by pushing a file to the device. By default, when the
        contents are short, they are written using a shell command instead.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if the file could not be written on the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if len(contents) < 512 and not force_push:
cmd = 'echo -n %s > %s' % (cmd_helper.SingleQuote(contents),
cmd_helper.SingleQuote(device_path))
self.RunShellCommand(cmd, as_root=as_root, check_return=True)
else:
with tempfile.NamedTemporaryFile() as host_temp:
host_temp.write(contents)
host_temp.flush()
if as_root and self.NeedsSU():
with device_temp_file.DeviceTempFile(self.adb) as device_temp:
self.adb.Push(host_temp.name, device_temp.name)
# Here we need 'cp' rather than 'mv' because the temp and
# destination files might be on different file systems (e.g.
# on internal storage and an external sd card)
self.RunShellCommand(['cp', device_temp.name, device_path],
as_root=True, check_return=True)
else:
self.adb.Push(host_temp.name, device_path)
@decorators.WithTimeoutAndRetriesFromInstance()
def Ls(self, device_path, timeout=None, retries=None):
"""Lists the contents of a directory on the device.
Args:
device_path: A string containing the path of the directory on the device
to list.
timeout: timeout in seconds
retries: number of retries
Returns:
A list of pairs (filename, stat) for each file found in the directory,
where the stat object has the properties: st_mode, st_size, and st_time.
Raises:
AdbCommandFailedError if |device_path| does not specify a valid and
        accessible directory on the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
return self.adb.Ls(device_path)
@decorators.WithTimeoutAndRetriesFromInstance()
def Stat(self, device_path, timeout=None, retries=None):
"""Get the stat attributes of a file or directory on the device.
Args:
      device_path: A string containing the path of the file or directory on
        the device from which to get attributes.
timeout: timeout in seconds
retries: number of retries
Returns:
A stat object with the properties: st_mode, st_size, and st_time
Raises:
CommandFailedError if device_path cannot be found on the device.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
dirname, target = device_path.rsplit('/', 1)
for filename, stat in self.adb.Ls(dirname):
if filename == target:
return stat
raise device_errors.CommandFailedError(
'Cannot find file or directory: %r' % device_path, str(self))
@decorators.WithTimeoutAndRetriesFromInstance()
def SetJavaAsserts(self, enabled, timeout=None, retries=None):
"""Enables or disables Java asserts.
Args:
enabled: A boolean indicating whether Java asserts should be enabled
or disabled.
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device-side property changed and a restart is required as a
result, False otherwise.
Raises:
CommandTimeoutError on timeout.
"""
return self.old_interface.SetJavaAssertsEnabled(enabled)
@property
def build_description(self):
"""Returns the build description of the system.
For example:
nakasi-user 4.4.4 KTU84P 1227136 release-keys
"""
return self.GetProp('ro.build.description', cache=True)
@property
def build_fingerprint(self):
"""Returns the build fingerprint of the system.
For example:
google/nakasi/grouper:4.4.4/KTU84P/1227136:user/release-keys
"""
return self.GetProp('ro.build.fingerprint', cache=True)
@property
def build_id(self):
"""Returns the build ID of the system (e.g. 'KTU84P')."""
return self.GetProp('ro.build.id', cache=True)
@property
def build_product(self):
"""Returns the build product of the system (e.g. 'grouper')."""
return self.GetProp('ro.build.product', cache=True)
@property
def build_type(self):
"""Returns the build type of the system (e.g. 'user')."""
return self.GetProp('ro.build.type', cache=True)
@property
def build_version_sdk(self):
"""Returns the build version sdk of the system as a number (e.g. 19).
For version code numbers see:
http://developer.android.com/reference/android/os/Build.VERSION_CODES.html
For named constants see:
pylib.constants.ANDROID_SDK_VERSION_CODES
Raises:
CommandFailedError if the build version sdk is not a number.
"""
value = self.GetProp('ro.build.version.sdk', cache=True)
try:
return int(value)
except ValueError:
raise device_errors.CommandFailedError(
'Invalid build version sdk: %r' % value)
@property
def product_cpu_abi(self):
"""Returns the product cpu abi of the device (e.g. 'armeabi-v7a')."""
return self.GetProp('ro.product.cpu.abi', cache=True)
@property
def product_model(self):
"""Returns the name of the product model (e.g. 'Nexus 7')."""
return self.GetProp('ro.product.model', cache=True)
@property
def product_name(self):
"""Returns the product name of the device (e.g. 'nakasi')."""
return self.GetProp('ro.product.name', cache=True)
def GetProp(self, property_name, cache=False, timeout=DEFAULT,
retries=DEFAULT):
"""Gets a property from the device.
Args:
property_name: A string containing the name of the property to get from
the device.
cache: A boolean indicating whether to cache the value of this property.
timeout: timeout in seconds
retries: number of retries
Returns:
The value of the device's |property_name| property.
Raises:
CommandTimeoutError on timeout.
"""
assert isinstance(property_name, basestring), (
"property_name is not a string: %r" % property_name)
cache_key = '_prop:' + property_name
if cache and cache_key in self._cache:
return self._cache[cache_key]
else:
# timeout and retries are handled down at run shell, because we don't
# want to apply them in the other branch when reading from the cache
value = self.RunShellCommand(
['getprop', property_name], single_line=True, check_return=True,
timeout=self._default_timeout if timeout is DEFAULT else timeout,
retries=self._default_retries if retries is DEFAULT else retries)
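      # Refresh the stored value if this property was cached by an earlier
      # call, even when the current call did not ask for caching.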
if cache or cache_key in self._cache:
self._cache[cache_key] = value
return value
@decorators.WithTimeoutAndRetriesFromInstance()
def SetProp(self, property_name, value, check=False, timeout=None,
retries=None):
"""Sets a property on the device.
Args:
property_name: A string containing the name of the property to set on
the device.
value: A string containing the value to set to the property on the
device.
check: A boolean indicating whether to check that the property was
successfully set on the device.
timeout: timeout in seconds
retries: number of retries
Raises:
CommandFailedError if check is true and the property was not correctly
set on the device (e.g. because it is not rooted).
CommandTimeoutError on timeout.
"""
assert isinstance(property_name, basestring), (
"property_name is not a string: %r" % property_name)
assert isinstance(value, basestring), "value is not a string: %r" % value
self.RunShellCommand(['setprop', property_name, value], check_return=True)
    # GetProp caches values under a '_prop:'-prefixed key, so invalidate that.
    cache_key = '_prop:' + property_name
    if cache_key in self._cache:
      del self._cache[cache_key]
# TODO(perezju) remove the option and make the check mandatory, but using a
# single shell script to both set- and getprop.
if check and value != self.GetProp(property_name):
raise device_errors.CommandFailedError(
'Unable to set property %r on the device to %r'
% (property_name, value), str(self))
@decorators.WithTimeoutAndRetriesFromInstance()
def GetABI(self, timeout=None, retries=None):
"""Gets the device main ABI.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
The device's main ABI name.
Raises:
CommandTimeoutError on timeout.
"""
return self.GetProp('ro.product.cpu.abi')
@decorators.WithTimeoutAndRetriesFromInstance()
def GetPids(self, process_name, timeout=None, retries=None):
"""Returns the PIDs of processes with the given name.
Note that the |process_name| is often the package name.
Args:
process_name: A string containing the process name to get the PIDs for.
timeout: timeout in seconds
retries: number of retries
Returns:
A dict mapping process name to PID for each process that contained the
provided |process_name|.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
return self._GetPidsImpl(process_name)
def _GetPidsImpl(self, process_name):
procs_pids = {}
for line in self.RunShellCommand('ps', check_return=True):
try:
ps_data = line.split()
if process_name in ps_data[-1]:
procs_pids[ps_data[-1]] = ps_data[1]
except IndexError:
pass
return procs_pids
@decorators.WithTimeoutAndRetriesFromInstance()
def TakeScreenshot(self, host_path=None, timeout=None, retries=None):
"""Takes a screenshot of the device.
Args:
host_path: A string containing the path on the host to save the
screenshot to. If None, a file name in the current
directory will be generated.
timeout: timeout in seconds
retries: number of retries
Returns:
The name of the file on the host to which the screenshot was saved.
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
if not host_path:
host_path = os.path.abspath('screenshot-%s.png' % _GetTimeStamp())
with device_temp_file.DeviceTempFile(self.adb, suffix='.png') as device_tmp:
self.RunShellCommand(['/system/bin/screencap', '-p', device_tmp.name],
check_return=True)
self.PullFile(device_tmp.name, host_path)
return host_path
@decorators.WithTimeoutAndRetriesFromInstance()
def GetIOStats(self, timeout=None, retries=None):
"""Gets cumulative disk IO stats since boot for all processes.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
A dict containing |num_reads|, |num_writes|, |read_ms|, and |write_ms|.
Raises:
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
return self.old_interface.GetIoStats()
@decorators.WithTimeoutAndRetriesFromInstance()
def GetMemoryUsageForPid(self, pid, timeout=None, retries=None):
"""Gets the memory usage for the given PID.
Args:
pid: PID of the process.
timeout: timeout in seconds
retries: number of retries
Returns:
A 2-tuple containing:
- A dict containing the overall memory usage statistics for the PID.
- A dict containing memory usage statistics broken down by mapping.
Raises:
CommandTimeoutError on timeout.
"""
return self.old_interface.GetMemoryUsageForPid(pid)
def __str__(self):
"""Returns the device serial."""
return self.adb.GetDeviceSerial()
@classmethod
def parallel(cls, devices=None, async=False):
"""Creates a Parallelizer to operate over the provided list of devices.
If |devices| is either |None| or an empty list, the Parallelizer will
operate over all attached devices.
Args:
      devices: A list of either DeviceUtils instances or objects from
               which DeviceUtils instances can be constructed. If None,
all attached devices will be used.
async: If true, returns a Parallelizer that runs operations
asynchronously.
Returns:
A Parallelizer operating over |devices|.
"""
if not devices:
devices = adb_wrapper.AdbWrapper.GetDevices()
devices = [d if isinstance(d, cls) else cls(d) for d in devices]
if async:
return parallelizer.Parallelizer(devices)
else:
return parallelizer.SyncParallelizer(devices)
|
|
"""
Selectors for the Posix event loop.
"""
from __future__ import unicode_literals, absolute_import
import sys
import abc
import errno
import select
import six
__all__ = [
'AutoSelector',
'PollSelector',
'SelectSelector',
'Selector',
'fd_to_int',
]
def fd_to_int(fd):
assert isinstance(fd, int) or hasattr(fd, 'fileno')
if isinstance(fd, int):
return fd
else:
return fd.fileno()
class Selector(six.with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def register(self, fd):
assert isinstance(fd, int)
@abc.abstractmethod
def unregister(self, fd):
assert isinstance(fd, int)
@abc.abstractmethod
def select(self, timeout):
pass
@abc.abstractmethod
def close(self):
pass
class AutoSelector(Selector):
def __init__(self):
self._fds = []
self._select_selector = SelectSelector()
self._selectors = [self._select_selector]
# When 'select.poll' exists, create a PollSelector.
if hasattr(select, 'poll'):
self._poll_selector = PollSelector()
self._selectors.append(self._poll_selector)
else:
self._poll_selector = None
        # Use the 'selectors' module, introduced in Python 3.4. We don't use
        # it before Python 3.5, however, because that is the version in which
        # it started retrying interrupted system calls.
if sys.version_info >= (3, 5):
self._py3_selector = Python3Selector()
self._selectors.append(self._py3_selector)
else:
self._py3_selector = None
def register(self, fd):
assert isinstance(fd, int)
self._fds.append(fd)
for sel in self._selectors:
sel.register(fd)
def unregister(self, fd):
assert isinstance(fd, int)
self._fds.remove(fd)
for sel in self._selectors:
sel.unregister(fd)
def select(self, timeout):
# Try Python 3 selector first.
if self._py3_selector:
try:
return self._py3_selector.select(timeout)
except PermissionError: # noqa (PermissionError doesn't exist in Py2)
# We had a situation (in pypager) where epoll raised a
# PermissionError when a local file descriptor was registered,
# however poll and select worked fine. So, in that case, just
# try using select below.
pass
try:
            # Prefer 'select.select' if we don't have many file descriptors.
            # This is more universal.
return self._select_selector.select(timeout)
except ValueError:
# When we have more than 1024 open file descriptors, we'll always
# get a "ValueError: filedescriptor out of range in select()" for
            # 'select'. In this case, try using 'poll' instead.
if self._poll_selector is not None:
return self._poll_selector.select(timeout)
else:
raise
def close(self):
for sel in self._selectors:
sel.close()
class Python3Selector(Selector):
"""
Use of the Python3 'selectors' module.
NOTE: Only use on Python 3.5 or newer!
"""
def __init__(self):
assert sys.version_info >= (3, 5)
import selectors # Inline import: Python3 only!
self._sel = selectors.DefaultSelector()
def register(self, fd):
assert isinstance(fd, int)
import selectors # Inline import: Python3 only!
self._sel.register(fd, selectors.EVENT_READ, None)
def unregister(self, fd):
assert isinstance(fd, int)
self._sel.unregister(fd)
def select(self, timeout):
events = self._sel.select(timeout=timeout)
return [key.fileobj for key, mask in events]
def close(self):
self._sel.close()
class PollSelector(Selector):
def __init__(self):
self._poll = select.poll()
def register(self, fd):
assert isinstance(fd, int)
self._poll.register(fd, select.POLLIN)
def unregister(self, fd):
assert isinstance(fd, int)
def select(self, timeout):
tuples = self._poll.poll(timeout) # Returns (fd, event) tuples.
return [t[0] for t in tuples]
def close(self):
pass # XXX
class SelectSelector(Selector):
"""
Wrapper around select.select.
    When the SIGWINCH signal is handled, other system calls (like select)
    are aborted in Python. This wrapper retries the system call.
"""
def __init__(self):
self._fds = []
def register(self, fd):
self._fds.append(fd)
def unregister(self, fd):
self._fds.remove(fd)
def select(self, timeout):
while True:
try:
return select.select(self._fds, [], [], timeout)[0]
except select.error as e:
# Retry select call when EINTR
if e.args and e.args[0] == errno.EINTR:
continue
else:
raise
def close(self):
pass
def select_fds(read_fds, timeout, selector=AutoSelector):
"""
Wait for a list of file descriptors (`read_fds`) to become ready for
reading. This chooses the most appropriate select-tool for use in
prompt-toolkit.
"""
# Map to ensure that we return the objects that were passed in originally.
    # Whether they are an fd integer or an object that has a fileno().
    # (The 'poll' implementation, for instance, always returns integers.)
fd_map = dict((fd_to_int(fd), fd) for fd in read_fds)
# Wait, using selector.
sel = selector()
try:
for fd in read_fds:
sel.register(fd)
result = sel.select(timeout)
if result is not None:
return [fd_map[fd_to_int(fd)] for fd in result]
finally:
sel.close()
|
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.go.util_rules import (
assembly,
build_pkg,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
tests_analysis,
third_party_pkg,
)
from pants.backend.go.util_rules.tests_analysis import GeneratedTestMain, GenerateTestMainRequest
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
from pants.util.ordered_set import FrozenOrderedSet
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*assembly.rules(),
*build_pkg.rules(),
*import_analysis.rules(),
*go_mod.rules(),
*first_party_pkg.rules(),
*third_party_pkg.rules(),
*tests_analysis.rules(),
*link.rules(),
*sdk.rules(),
QueryRule(GeneratedTestMain, [GenerateTestMainRequest]),
],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def test_basic_test_analysis(rule_runner: RuleRunner) -> None:
input_digest = rule_runner.make_snapshot(
{
"foo_test.go": dedent(
"""
package foo
func TestThisIsATest(t *testing.T) {
}
func Test(t *testing.T) {
}
"""
),
"bar_test.go": dedent(
"""
package foo_test
func BenchmarkThisIsABenchmark(b *testing.B) {
}
func Benchmark(b *testing.B) {
}
"""
),
},
).digest
metadata = rule_runner.request(
GeneratedTestMain,
[
GenerateTestMainRequest(
input_digest,
FrozenOrderedSet(["foo_test.go"]),
FrozenOrderedSet(["bar_test.go"]),
"foo",
Address("foo"),
)
],
)
assert metadata.digest != EMPTY_DIGEST
assert metadata.has_tests
assert metadata.has_xtests
def test_collect_examples(rule_runner: RuleRunner) -> None:
input_digest = rule_runner.make_snapshot(
{
"foo_test.go": dedent(
"""
package foo
func ExampleEmptyOutputExpected() {
// Output:
}
// This does not have an `Output` comment and will be skipped.
func ExampleEmptyOutputAndNoOutputDirective() {
}
func ExampleSomeOutput() {
fmt.Println("foo")
// Output: foo
}
func ExampleAnotherOne() {
fmt.Println("foo\\nbar\\n")
// Output:
// foo
// bar
}
"""
),
},
).digest
metadata = rule_runner.request(
GeneratedTestMain,
[
GenerateTestMainRequest(
input_digest,
FrozenOrderedSet(["foo_test.go"]),
FrozenOrderedSet(),
"foo",
Address("foo"),
)
],
)
assert metadata.digest != EMPTY_DIGEST
assert metadata.has_tests
assert not metadata.has_xtests
def test_incorrect_signatures(rule_runner: RuleRunner) -> None:
test_cases = [
("TestFoo(t *testing.T, a int)", "wrong signature for TestFoo"),
("TestFoo()", "wrong signature for TestFoo"),
("TestFoo(t *testing.B)", "wrong signature for TestFoo"),
("TestFoo(t *testing.M)", "wrong signature for TestFoo"),
("TestFoo(a int)", "wrong signature for TestFoo"),
("BenchmarkFoo(t *testing.B, a int)", "wrong signature for BenchmarkFoo"),
("BenchmarkFoo()", "wrong signature for BenchmarkFoo"),
("BenchmarkFoo(t *testing.T)", "wrong signature for BenchmarkFoo"),
("BenchmarkFoo(t *testing.M)", "wrong signature for BenchmarkFoo"),
("BenchmarkFoo(a int)", "wrong signature for BenchmarkFoo"),
]
for test_sig, err_msg in test_cases:
input_digest = rule_runner.make_snapshot(
{
"foo_test.go": dedent(
f"""
package foo
func {test_sig} {{
}}
"""
),
},
).digest
result = rule_runner.request(
GeneratedTestMain,
[
GenerateTestMainRequest(
input_digest,
FrozenOrderedSet(["foo_test.go"]),
FrozenOrderedSet(),
"foo",
Address("foo"),
)
],
)
assert result.failed_exit_code_and_stderr is not None
exit_code, stderr = result.failed_exit_code_and_stderr
assert exit_code == 1
assert err_msg in stderr
def test_duplicate_test_mains_same_file(rule_runner: RuleRunner) -> None:
input_digest = rule_runner.make_snapshot(
{
"foo_test.go": dedent(
"""
package foo
func TestMain(m *testing.M) {
}
func TestMain(m *testing.M) {
}
"""
),
},
).digest
result = rule_runner.request(
GeneratedTestMain,
[
GenerateTestMainRequest(
input_digest,
FrozenOrderedSet(["foo_test.go", "bar_test.go"]),
FrozenOrderedSet(),
"foo",
Address("foo"),
)
],
)
assert result.failed_exit_code_and_stderr is not None
exit_code, stderr = result.failed_exit_code_and_stderr
assert exit_code == 1
assert "multiple definitions of TestMain" in stderr
def test_duplicate_test_mains_different_files(rule_runner: RuleRunner) -> None:
input_digest = rule_runner.make_snapshot(
{
"foo_test.go": dedent(
"""
package foo
func TestMain(m *testing.M) {
}
"""
),
"bar_test.go": dedent(
"""
package foo
func TestMain(m *testing.M) {
}
"""
),
},
).digest
result = rule_runner.request(
GeneratedTestMain,
[
GenerateTestMainRequest(
input_digest,
FrozenOrderedSet(["foo_test.go", "bar_test.go"]),
FrozenOrderedSet(),
"foo",
Address("foo"),
)
],
)
assert result.failed_exit_code_and_stderr is not None
exit_code, stderr = result.failed_exit_code_and_stderr
assert exit_code == 1
assert "multiple definitions of TestMain" in stderr
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
from math import cos, sin
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.modeling import models
from astropy.modeling import rotations
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6]),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(1e-5, 1e-4), (40, -20.56), (21.5, 45.9),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
    The rotation matrix definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)],
[(c3*s2), (c2), (s2*s3)],
[(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
mat = rotations._create_matrix([phi, theta, psi], axes_order)
assert_allclose(mat.T, matrices[axes_order]) # get_rotation_matrix(axes_order))
def test_rotation_3d():
"""
A sanity test - when V2_REF = 0 and V3_REF = 0,
for V2, V3 close to the origin
    ROLL_REF should be approximately PA_V3.
(Test taken from JWST SIAF report.)
"""
def _roll_angle_from_matrix(matrix, v2, v3):
X = -(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) * \
np.sin(v3) + matrix[2, 2] * np.cos(v3)
Y = (matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) + \
(matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]) * np.sin(v2)
new_roll = np.rad2deg(np.arctan2(Y, X))
if new_roll < 0:
new_roll += 360
return new_roll
# reference points on sky and in a coordinate frame associated
# with the telescope
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = 0
v3_ref = 0
pa_v3 = 37 # in deg
    v2 = np.deg2rad(2.7e-6)  # 2.7e-6 deg (~0.01 arcsec), converted to radians
    v3 = np.deg2rad(2.7e-6)  # 2.7e-6 deg (~0.01 arcsec), converted to radians
angles = [v2_ref, -v3_ref, pa_v3, dec_ref, -ra_ref]
axes = "zyxyz"
M = rotations._create_matrix(np.deg2rad(angles) * u.deg, axes)
roll_angle = _roll_angle_from_matrix(M, v2, v3)
assert_allclose(roll_angle, pa_v3, atol=1e-3)
def test_spherical_rotation():
"""
Test taken from JWST INS report - converts
JWST telescope (V2, V3) coordinates to RA, DEC.
"""
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = -503.654472 / 3600 # in deg
v3_ref = -318.742464 / 3600 # in deg
r0 = 37 # in deg
v2 = 210 # in deg
v3 = -75 # in deg
expected_ra_dec = (107.12810484789563, -35.97940247128502) # in deg
angles = np.array([v2_ref, -v3_ref, r0, dec_ref, -ra_ref])
axes = "zyxyz"
v2s = rotations.RotationSequence3D(angles, axes_order=axes)
x, y, z = rotations.spherical2cartesian(v2, v3)
x1, y1, z1 = v2s(x, y, z)
radec = rotations.cartesian2spherical(x1, y1, z1)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
v2s = rotations.SphericalRotationSequence(angles, axes_order=axes)
radec = v2s(v2, v3)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
#assert_allclose(v2s.inverse(*v2s(v2, v3)), (v2, v3))
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova import notifications
from nova.objects import base
from nova.objects import instance_fault
from nova.objects import instance_info_cache
from nova.objects import pci_device
from nova.objects import security_group
from nova.objects import utils as obj_utils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining."""
if not expected_attrs:
return expected_attrs
return [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
class Instance(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added info_cache
# Version 1.2: Added security_groups
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
# Version 1.5: Added cleaned
# Version 1.6: Added pci_devices
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
VERSION = '1.8'
fields = {
'id': int,
'user_id': obj_utils.str_or_none,
'project_id': obj_utils.str_or_none,
'image_ref': obj_utils.str_or_none,
'kernel_id': obj_utils.str_or_none,
'ramdisk_id': obj_utils.str_or_none,
'hostname': obj_utils.str_or_none,
'launch_index': obj_utils.int_or_none,
'key_name': obj_utils.str_or_none,
'key_data': obj_utils.str_or_none,
'power_state': obj_utils.int_or_none,
'vm_state': obj_utils.str_or_none,
'task_state': obj_utils.str_or_none,
'memory_mb': obj_utils.int_or_none,
'vcpus': obj_utils.int_or_none,
'root_gb': obj_utils.int_or_none,
'ephemeral_gb': obj_utils.int_or_none,
'host': obj_utils.str_or_none,
'node': obj_utils.str_or_none,
'instance_type_id': obj_utils.int_or_none,
'user_data': obj_utils.str_or_none,
'reservation_id': obj_utils.str_or_none,
'scheduled_at': obj_utils.datetime_or_str_or_none,
'launched_at': obj_utils.datetime_or_str_or_none,
'terminated_at': obj_utils.datetime_or_str_or_none,
'availability_zone': obj_utils.str_or_none,
'display_name': obj_utils.str_or_none,
'display_description': obj_utils.str_or_none,
'launched_on': obj_utils.str_or_none,
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
'locked': bool,
'locked_by': obj_utils.str_or_none,
'os_type': obj_utils.str_or_none,
'architecture': obj_utils.str_or_none,
'vm_mode': obj_utils.str_or_none,
'uuid': obj_utils.str_or_none,
'root_device_name': obj_utils.str_or_none,
'default_ephemeral_device': obj_utils.str_or_none,
'default_swap_device': obj_utils.str_or_none,
'config_drive': obj_utils.str_or_none,
'access_ip_v4': obj_utils.ip_or_none(4),
'access_ip_v6': obj_utils.ip_or_none(6),
'auto_disk_config': bool,
'progress': obj_utils.int_or_none,
'shutdown_terminate': bool,
'disable_terminate': bool,
'cell_name': obj_utils.str_or_none,
'metadata': dict,
'system_metadata': dict,
'info_cache': obj_utils.nested_object(
instance_info_cache.InstanceInfoCache),
'security_groups': obj_utils.nested_object(
security_group.SecurityGroupList, none_ok=False),
'fault': obj_utils.nested_object(
instance_fault.InstanceFault),
'cleaned': bool,
'pci_devices': obj_utils.nested_object(
pci_device.PciDeviceList, none_ok=False),
}
obj_extra_fields = ['name']
def __init__(self, *args, **kwargs):
super(Instance, self).__init__(*args, **kwargs)
self.obj_reset_changes()
def obj_reset_changes(self, fields=None):
super(Instance, self).obj_reset_changes(fields)
self._orig_system_metadata = (dict(self.system_metadata) if
'system_metadata' in self else {})
self._orig_metadata = (dict(self.metadata) if
'metadata' in self else {})
def obj_what_changed(self):
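        # metadata and system_metadata are plain dicts that can be mutated in
        # place, so changes to them are detected by comparing against the
        # copies saved in obj_reset_changes() rather than via field tracking.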
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for key in self.fields:
if key == 'name':
# NOTE(danms): prevent recursion
continue
elif not self.obj_attr_is_set(key):
# NOTE(danms): Don't trigger lazy-loads
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
def _attr_access_ip_v4_to_primitive(self):
if self.access_ip_v4 is not None:
return str(self.access_ip_v4)
else:
return None
def _attr_access_ip_v6_to_primitive(self):
if self.access_ip_v6 is not None:
return str(self.access_ip_v6)
else:
return None
_attr_scheduled_at_to_primitive = obj_utils.dt_serializer('scheduled_at')
_attr_launched_at_to_primitive = obj_utils.dt_serializer('launched_at')
_attr_terminated_at_to_primitive = obj_utils.dt_serializer('terminated_at')
_attr_info_cache_to_primitive = obj_utils.obj_serializer('info_cache')
_attr_security_groups_to_primitive = obj_utils.obj_serializer(
'security_groups')
_attr_pci_devices_to_primitive = obj_utils.obj_serializer(
'pci_devices')
_attr_scheduled_at_from_primitive = obj_utils.dt_deserializer
_attr_launched_at_from_primitive = obj_utils.dt_deserializer
_attr_terminated_at_from_primitive = obj_utils.dt_deserializer
def _attr_info_cache_from_primitive(self, val):
if val is None:
return val
return base.NovaObject.obj_from_primitive(val)
def _attr_security_groups_from_primitive(self, val):
return base.NovaObject.obj_from_primitive(val)
def _attr_pci_devices_from_primitive(self, val):
if val is None:
# Only possible in version <= 1.7
return pci_device.PciDeviceList()
return base.NovaObject.obj_from_primitive(val)
@staticmethod
def _from_db_object(context, instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
if expected_attrs is None:
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
if field in INSTANCE_OPTIONAL_ATTRS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
if 'metadata' in expected_attrs:
instance['metadata'] = utils.instance_meta(db_inst)
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.instance_sys_meta(db_inst)
if 'fault' in expected_attrs:
instance['fault'] = (
instance_fault.InstanceFault.get_latest_for_instance(
context, instance.uuid))
if 'pci_devices' in expected_attrs:
pci_devices = pci_device._make_pci_list(
context, pci_device.PciDeviceList(),
db_inst['pci_devices'])
instance['pci_devices'] = pci_devices
if 'info_cache' in expected_attrs:
if db_inst['info_cache'] is None:
info_cache = None
else:
info_cache = instance_info_cache.InstanceInfoCache()
instance_info_cache.InstanceInfoCache._from_db_object(
context, info_cache, db_inst['info_cache'])
instance['info_cache'] = info_cache
if 'security_groups' in expected_attrs:
sec_groups = security_group._make_secgroup_list(
context, security_group.SecurityGroupList(),
db_inst['security_groups'])
instance['security_groups'] = sec_groups
instance._context = context
instance.obj_reset_changes()
return instance
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
updates.pop('id', None)
expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
if attr in updates]
if 'security_groups' in updates:
updates['security_groups'] = [x.name for x in
updates['security_groups']]
if 'info_cache' in updates:
updates['info_cache'] = {
'network_info': updates['info_cache'].network_info.json()
}
db_inst = db.instance_create(context, updates)
Instance._from_db_object(context, self, db_inst, expected_attrs)
@base.remotable
def destroy(self, context):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
if not self.obj_attr_is_set('uuid'):
raise exception.ObjectActionError(action='destroy',
reason='no uuid')
if not self.obj_attr_is_set('host') or not self.host:
# NOTE(danms): If our host is not set, avoid a race
constraint = db.constraint(host=db.equal_any(None))
else:
constraint = None
try:
db.instance_destroy(context, self.uuid, constraint=constraint)
except exception.ConstraintNotMet:
raise exception.ObjectActionError(action='destroy',
reason='host changed')
delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
self.info_cache.save(context)
def _save_security_groups(self, context):
for secgroup in self.security_groups:
secgroup.save(context)
def _save_fault(self, context):
# NOTE(danms): I don't think we need to worry about this, do we?
pass
def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only the
        # PCI tracker is permitted to update the DB. All changes to devices
        # made here will be dropped.
pass
@base.remotable
def save(self, context, expected_vm_state=None,
expected_task_state=None, admin_state_reset=False):
"""Save updates to this instance
Column-wise updates will be made based on the result of
self.what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
:param context: Security context
:param expected_task_state: Optional tuple of valid task states
for the instance to be in.
:param expected_vm_state: Optional tuple of valid vm states
for the instance to be in.
:param admin_state_reset: True if admin API is forcing setting
of task_state/vm_state.
"""
cell_type = cells_opts.get_cell_type()
if cell_type == 'api' and self.cell_name:
# NOTE(comstud): We need to stash a copy of ourselves
# before any updates are applied. When we call the save
# methods on nested objects, we will lose any changes to
# them. But we need to make sure child cells can tell
# what is changed.
#
# We also need to nuke any updates to vm_state and task_state
# unless admin_state_reset is True. compute cells are
# authoritative for their view of vm_state and task_state.
stale_instance = self.obj_clone()
def _handle_cell_update_from_api():
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_from_api(context, stale_instance,
expected_vm_state,
expected_task_state,
admin_state_reset)
else:
stale_instance = None
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(self[field], base.NovaObject)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception(_('No save handler for %s') % field,
instance=self)
elif field in changes:
updates[field] = self[field]
if not updates:
if stale_instance:
_handle_cell_update_from_api()
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
updates['expected_vm_state'] = expected_vm_state
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
old_ref, inst_ref = db.instance_update_and_get_original(
context, self.uuid, updates, update_cells=False,
columns_to_join=_expected_cols(expected_attrs))
if stale_instance:
_handle_cell_update_from_api()
elif cell_type == 'compute':
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_at_top(context, inst_ref)
self._from_db_object(context, self, inst_ref, expected_attrs)
notifications.send_update(context, old_ref, inst_ref)
self.obj_reset_changes()
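    # Hedged usage sketch (not part of Nova): a caller typically mutates fields
    # on a fetched Instance and then persists only the changed columns, guarding
    # on the current task state when needed. The names below are placeholders.
    #   inst = Instance.get_by_uuid(ctxt, some_uuid)
    #   inst.task_state = task_states.REBOOTING
    #   inst.save(ctxt, expected_task_state=(None,))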
@base.remotable
def refresh(self, context):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(context, uuid=self.uuid,
expected_attrs=extra)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
LOG.debug(_("Lazy-loading `%(attr)s' on %(name) uuid %(uuid)s"),
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
# FIXME(comstud): This should be optimized to only load the attr.
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
self[attrname] = instance[attrname]
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='loading %s requires recursion' % attrname)
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
if get_fault:
# Build an instance_uuid:latest-fault mapping
expected_attrs.remove('fault')
instance_uuids = [inst['uuid'] for inst in db_inst_list]
faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
context, instance_uuids)
for fault in faults:
if fault.instance_uuid not in inst_faults:
inst_faults[fault.instance_uuid] = fault
inst_list.objects = []
for db_inst in db_inst_list:
inst_obj = Instance._from_db_object(context, Instance(), db_inst,
expected_attrs=expected_attrs)
if get_fault:
inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
inst_list.objects.append(inst_obj)
inst_list.obj_reset_changes()
return inst_list
class InstanceList(base.ObjectListBase, base.NovaObject):
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None):
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_node(
context, host, node)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_not_type(
context, host, type_id=type_id)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_hung_in_rebooting(cls, context, reboot_window,
expected_attrs=None):
db_inst_list = db.instance_get_all_hung_in_rebooting(context,
reboot_window)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
def fill_faults(self):
"""Batch query the database for our instances' faults.
:returns: A list of instance uuids for which faults were found.
"""
uuids = [inst.uuid for inst in self]
faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
self._context, uuids)
faults_by_uuid = {}
for fault in faults:
if fault.instance_uuid not in faults_by_uuid:
faults_by_uuid[fault.instance_uuid] = fault
for instance in self:
if instance.uuid in faults_by_uuid:
instance.fault = faults_by_uuid[instance.uuid]
else:
# NOTE(danms): Otherwise the caller will cause a lazy-load
# when checking it, and we know there are none
instance.fault = None
instance.obj_reset_changes(['fault'])
return faults_by_uuid.keys()
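# Hedged usage sketch (not part of Nova): fetching all instances on a host and
# batch-loading their faults in one query instead of per-instance lazy loads.
# The context and host values below are placeholders.
#   instances = InstanceList.get_by_host(ctxt, 'compute-1')
#   uuids_with_faults = instances.fill_faults()
#   for inst in instances:
#       if inst.fault:
#           handle_fault(inst.fault)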
|
|
# Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools and calculations for interpolating specifically to a grid."""
import numpy as np
from .points import (interpolate_to_points, inverse_distance_to_points,
natural_neighbor_to_points)
from ..package_tools import Exporter
from ..pandas import preprocess_pandas
exporter = Exporter(globals())
def generate_grid(horiz_dim, bbox):
r"""Generate a meshgrid based on bounding box and x & y resolution.
Parameters
----------
horiz_dim: integer
Horizontal resolution
bbox: dictionary
Dictionary containing coordinates for corners of study area.
Returns
-------
grid_x: (X, Y) ndarray
X dimension meshgrid defined by given bounding box
grid_y: (X, Y) ndarray
Y dimension meshgrid defined by given bounding box
"""
x_steps, y_steps = get_xy_steps(bbox, horiz_dim)
grid_x = np.linspace(bbox['west'], bbox['east'], x_steps)
grid_y = np.linspace(bbox['south'], bbox['north'], y_steps)
gx, gy = np.meshgrid(grid_x, grid_y)
return gx, gy
def generate_grid_coords(gx, gy):
r"""Calculate x,y coordinates of each grid cell.
Parameters
----------
gx: numeric
x coordinates in meshgrid
gy: numeric
y coordinates in meshgrid
Returns
-------
(X, Y) ndarray
List of coordinates in meshgrid
"""
return np.stack([gx.ravel(), gy.ravel()], axis=1)
def get_xy_range(bbox):
r"""Return x and y ranges in meters based on bounding box.
bbox: dictionary
dictionary containing coordinates for corners of study area
Returns
-------
x_range: float
Range in meters in x dimension.
y_range: float
Range in meters in y dimension.
"""
x_range = bbox['east'] - bbox['west']
y_range = bbox['north'] - bbox['south']
return x_range, y_range
def get_xy_steps(bbox, h_dim):
r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps, (X, ) ndarray
Number of grids in x dimension.
y_steps: (Y, ) ndarray
Number of grids in y dimension.
"""
x_range, y_range = get_xy_range(bbox)
x_steps = np.ceil(x_range / h_dim)
y_steps = np.ceil(y_range / h_dim)
return int(x_steps), int(y_steps)
def get_boundary_coords(x, y, spatial_pad=0):
r"""Return bounding box based on given x and y coordinates assuming northern hemisphere.
x: numeric
x coordinates.
y: numeric
y coordinates.
spatial_pad: numeric
Number of meters to add to the x and y dimensions to reduce
edge effects.
Returns
-------
bbox: dictionary
dictionary containing coordinates for corners of study area
"""
west = np.min(x) - spatial_pad
east = np.max(x) + spatial_pad
north = np.max(y) + spatial_pad
south = np.min(y) - spatial_pad
return {'west': west, 'south': south, 'east': east, 'north': north}
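# Hedged example (not part of MetPy): a minimal sketch of how the helpers above
# combine to build a padded bounding box and a regular grid from scattered
# observation coordinates. The sample values are made up, and _example_* is not
# a public MetPy name.
def _example_bbox_and_grid():
    """Illustrative only; relies on get_boundary_coords and generate_grid above."""
    obs_x = np.array([0., 10000., 25000.])
    obs_y = np.array([5000., 15000., 20000.])
    bbox = get_boundary_coords(obs_x, obs_y, spatial_pad=1000)
    gx, gy = generate_grid(5000, bbox)  # 5 km grid spacing
    return bbox, gx.shape, gy.shape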
@exporter.export
def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
r"""Generate a natural neighbor interpolation of the given points to a regular grid.
    This assigns values to the given grid using the approach of Liang and Hale [Liang2010]_.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations
yp: (N, ) ndarray
y-coordinates of observations
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
        i.e., variable[i] is a unique observation at (xp[i], yp[i])
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension
Returns
-------
img: (M, N) ndarray
Interpolated values on a 2-dimensional grid
See Also
--------
natural_neighbor_to_points
"""
# Handle grid-to-points conversion, and use function from `interpolation`
points_obs = list(zip(xp, yp))
points_grid = generate_grid_coords(grid_x, grid_y)
img = natural_neighbor_to_points(points_obs, variable, points_grid)
return img.reshape(grid_x.shape)
@exporter.export
def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None,
min_neighbors=3, kind='cressman'):
r"""Generate an inverse distance interpolation of the given points to a regular grid.
Values are assigned to the given grid using inverse distance weighting based on either
    [Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here is based on [Koch1983]_.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations.
yp: (N, ) ndarray
y-coordinates of observations.
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
        i.e., variable[i] is a unique observation at (xp[i], yp[i]).
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension.
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension.
r: float
Radius from grid center, within which observations
are considered and weighted.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
kappa: float
Response parameter for barnes interpolation. Default None.
min_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation
for a point. Default is 3.
kind: str
Specify what inverse distance weighting interpolation to use.
Options: 'cressman' or 'barnes'. Default 'cressman'
Returns
-------
img: (M, N) ndarray
Interpolated values on a 2-dimensional grid
See Also
--------
inverse_distance_to_points
"""
# Handle grid-to-points conversion, and use function from `interpolation`
points_obs = list(zip(xp, yp))
points_grid = generate_grid_coords(grid_x, grid_y)
img = inverse_distance_to_points(points_obs, variable, points_grid, r, gamma=gamma,
kappa=kappa, min_neighbors=min_neighbors, kind=kind)
return img.reshape(grid_x.shape)
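# Hedged usage sketch (not part of MetPy): the two weighting schemes above take
# different parameters. Cressman needs only a search radius, while Barnes also
# expects kappa (and optionally gamma). All values below are placeholders.
#   img_c = inverse_distance_to_grid(xp, yp, obs, gx, gy, r=50000,
#                                    kind='cressman')
#   img_b = inverse_distance_to_grid(xp, yp, obs, gx, gy, r=50000,
#                                    gamma=0.25, kappa=1.0e9, kind='barnes')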
@exporter.export
@preprocess_pandas
def interpolate_to_grid(x, y, z, interp_type='linear', hres=50000,
minimum_neighbors=3, gamma=0.25, kappa_star=5.052,
search_radius=None, rbf_func='linear', rbf_smooth=0,
boundary_coords=None):
r"""Interpolate given (x,y), observation (z) pairs to a grid based on given parameters.
Parameters
----------
x: array_like
x coordinate, can have units of linear distance or degrees
y: array_like
y coordinate, can have units of linear distance or degrees
z: array_like
observation value
interp_type: str
What type of interpolation to use. Available options include:
1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`.
2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`.
Default "linear".
hres: float
The horizontal resolution of the generated grid, given in the same units as the
x and y parameters. Default 50000.
minimum_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation for a
point. Default is 3.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default 0.25.
kappa_star: float
Response parameter for barnes interpolation, specified nondimensionally
in terms of the Nyquist. Default 5.052
search_radius: float
A search radius to use for the barnes and cressman interpolation schemes.
If search_radius is not specified, it will default to the average spacing of
observations.
rbf_func: str
Specifies which function to use for Rbf interpolation.
Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic',
        'quintic', and 'thin_plate'. Default 'linear'. See `scipy.interpolate.Rbf` for more
information.
rbf_smooth: float
Smoothing value applied to rbf interpolation. Higher values result in more smoothing.
boundary_coords: dictionary
Optional dictionary containing coordinates of the study area boundary. Dictionary
should be in format: {'west': west, 'south': south, 'east': east, 'north': north}
Returns
-------
grid_x: (N, 2) ndarray
Meshgrid for the resulting interpolation in the x dimension
grid_y: (N, 2) ndarray
        Meshgrid for the resulting interpolation in the y dimension
img: (M, N) ndarray
2-dimensional array representing the interpolated values for each grid.
Notes
-----
    This function acts as a wrapper for `interpolate_to_points` to allow it to generate a regular
grid.
This function interpolates points to a Cartesian plane, even if lat/lon coordinates
are provided.
See Also
--------
interpolate_to_points
"""
# Generate the grid
if boundary_coords is None:
boundary_coords = get_boundary_coords(x, y)
grid_x, grid_y = generate_grid(hres, boundary_coords)
# Handle grid-to-points conversion, and use function from `interpolation`
points_obs = np.array(list(zip(x, y)))
points_grid = generate_grid_coords(grid_x, grid_y)
img = interpolate_to_points(points_obs, z, points_grid, interp_type=interp_type,
minimum_neighbors=minimum_neighbors, gamma=gamma,
kappa_star=kappa_star, search_radius=search_radius,
rbf_func=rbf_func, rbf_smooth=rbf_smooth)
return grid_x, grid_y, img.reshape(grid_x.shape)
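# Hedged example (not part of MetPy): a small end-to-end sketch of gridding
# scattered observations with the wrapper above. The data are synthetic and
# _example_* is not a public MetPy name.
def _example_interpolate_to_grid():
    """Illustrative only; grids a handful of synthetic observations."""
    obs_x = np.array([0., 30000., 60000., 90000., 45000., 15000., 75000.])
    obs_y = np.array([0., 80000., 20000., 90000., 45000., 60000., 10000.])
    obs_z = np.sin(obs_x / 30000.) + np.cos(obs_y / 30000.)
    gx, gy, img = interpolate_to_grid(obs_x, obs_y, obs_z,
                                      interp_type='linear', hres=10000)
    return gx.shape, gy.shape, img.shape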
@exporter.export
@preprocess_pandas
def interpolate_to_isosurface(level_var, interp_var, level, bottom_up_search=True):
r"""Linear interpolation of a variable to a given vertical level from given values.
    This function assumes that the highest vertical level (lowest pressure) is the zeroth index.
A classic use of this function would be to compute the potential temperature on the
dynamic tropopause (2 PVU surface).
Parameters
----------
level_var: array_like (P, M, N)
Level values in 3D grid on common vertical coordinate (e.g., PV values on
isobaric levels). Assumes height dimension is highest to lowest in atmosphere.
interp_var: array_like (P, M, N)
Variable on 3D grid with same vertical coordinate as level_var to interpolate to
given level (e.g., potential temperature on isobaric levels)
level: int or float
Desired interpolated level (e.g., 2 PVU surface)
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Returns
-------
interp_level: (M, N) ndarray
The interpolated variable (e.g., potential temperature) on the desired level (e.g.,
2 PVU surface)
Notes
-----
This function implements a linear interpolation to estimate values on a given surface.
The prototypical example is interpolation of potential temperature to the dynamic
tropopause (e.g., 2 PVU surface)
"""
from ..calc import find_bounding_indices
# Find index values above and below desired interpolated surface value
above, below, good = find_bounding_indices(level_var, [level], axis=0,
from_below=bottom_up_search)
# Linear interpolation of variable to interpolated surface value
interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))
* (interp_var[below] - interp_var[above])) + interp_var[above]
# Handle missing values and instances where no values for surface exist above and below
interp_level[~good] = np.nan
minvar = (np.min(level_var, axis=0) >= level)
maxvar = (np.max(level_var, axis=0) <= level)
interp_level[0][minvar] = interp_var[-1][minvar]
interp_level[0][maxvar] = interp_var[0][maxvar]
return interp_level.squeeze()
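# Hedged usage sketch (not part of MetPy): the classic application of the
# function above is potential temperature on the 2-PVU dynamic tropopause.
# The array names are placeholders for (P, M, N) gridded fields.
#   theta_dt = interpolate_to_isosurface(pv_isobaric, theta_isobaric, 2.0)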
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Fast neuron IO module'''
import logging
from collections import defaultdict, namedtuple
from neurom.core.dataformat import POINT_TYPE, COLS, ROOT_ID
import numpy as np
L = logging.getLogger(__name__)
TYPE, ID, PID = 0, 1, 2
class DataWrapper(object):
'''Class holding a raw data block and section information'''
def __init__(self, data_block, fmt, sections=None):
'''Section Data Wrapper
data_block is np.array-like with the following columns:
[X, Y, Z, R, TYPE, ID, P]
X(float): x-coordinate
Y(float): y-coordinate
Z(float): z-coordinate
R(float): radius
TYPE(integer): one of the types described by POINT_TYPE
ID(integer): unique integer given to each point, the `ROOT_ID` is -1
P(integer): the ID of the parent
Args:
data_block: as defined above
fmt: File format designation, eg: SWC
sections: Already extracted sections, otherwise data_block will be used
Notes:
- there is no ordering constraint: a child can reference a parent ID that comes
later in the block
- there is no requirement that the IDs are dense
- there is no upper bound on the number of rows with the same 'P'arent: in other
words, multifurcations are allowed
'''
self.data_block = data_block
self.fmt = fmt
# list of DataBlockSection
self.sections = sections if sections is not None else _extract_sections(data_block)
def neurite_root_section_ids(self):
        '''Get the section IDs of the initial neurite sections'''
sec = self.sections
return [i for i, ss in enumerate(sec)
if ss.pid > -1 and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
ss.ntype != POINT_TYPE.SOMA)]
def soma_points(self):
'''Get the soma points'''
db = self.data_block
return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
def _merge_sections(sec_a, sec_b):
'''Merge two sections
Merges sec_a into sec_b and sets sec_a attributes to default
'''
sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
sec_b.ntype = sec_a.ntype
sec_b.pid = sec_a.pid
sec_a.ids = []
sec_a.pid = -1
sec_a.ntype = 0
def _section_end_points(structure_block, id_map):
'''Get the section end-points'''
soma_idx = structure_block[:, TYPE] == POINT_TYPE.SOMA
soma_ids = structure_block[soma_idx, ID]
neurite_idx = structure_block[:, TYPE] != POINT_TYPE.SOMA
neurite_rows = structure_block[neurite_idx, :]
soma_end_pts = set(id_map[id_]
for id_ in soma_ids[np.in1d(soma_ids, neurite_rows[:, PID])])
# end points have either no children or more than one
# ie: leaf or multifurcation nodes
n_children = defaultdict(int)
for row in structure_block:
n_children[row[PID]] += 1
end_pts = set(i for i, row in enumerate(structure_block)
if n_children[row[ID]] != 1)
return end_pts.union(soma_end_pts)
class DataBlockSection(object):
'''sections ((ids), type, parent_id)'''
def __init__(self, ids=None, ntype=0, pid=-1):
self.ids = [] if ids is None else ids
self.ntype = ntype
self.pid = pid
def __eq__(self, other):
return (self.ids == other.ids and
self.ntype == other.ntype and
self.pid == other.pid)
def __str__(self):
return ('%s: ntype=%s, pid=%s: n_ids=%d' %
(self.__class__, self.ntype, self.pid, len(self.ids)))
__repr__ = __str__
def _extract_sections(data_block):
'''Make a list of sections from an SWC-style data wrapper block'''
structure_block = data_block[:, COLS.TYPE:COLS.COL_COUNT].astype(np.int)
# SWC ID -> structure_block position
id_map = {-1: -1}
for i, row in enumerate(structure_block):
id_map[row[ID]] = i
# end points have either no children, more than one, or are the start
# of a new gap
sec_end_pts = _section_end_points(structure_block, id_map)
    # a 'gap' is when a section has part of its segments interleaved
# with those of another section
gap_sections = set()
sections = []
def new_section():
'''new_section'''
sections.append(DataBlockSection())
return sections[-1]
curr_section = new_section()
parent_section = {-1: -1}
for row in structure_block:
row_id = id_map[row[ID]]
parent_id = id_map[row[PID]]
if not curr_section.ids:
            # the first point in a section is its parent point
curr_section.ids.append(parent_id)
curr_section.ntype = row[TYPE]
gap = parent_id != curr_section.ids[-1]
# If parent is not the previous point, create a section end-point.
# Else add the point to this section
if gap:
sec_end_pts.add(row_id)
else:
curr_section.ids.append(row_id)
if row_id in sec_end_pts:
parent_section[curr_section.ids[-1]] = len(sections) - 1
# Parent-child discontinuity section
if gap:
curr_section = new_section()
curr_section.ids.extend((parent_id, row_id))
curr_section.ntype = row[TYPE]
gap_sections.add(len(sections) - 2)
elif row_id != len(data_block) - 1:
# avoid creating an extra DataBlockSection for last row if it's a leaf
curr_section = new_section()
for sec in sections:
# get the section parent ID from the id of the first point.
if sec.ids:
sec.pid = parent_section[sec.ids[0]]
# join gap sections and "disable" first half
if sec.pid in gap_sections:
_merge_sections(sections[sec.pid], sec)
# TODO find a way to remove empty sections. Currently they are
# required to maintain tree integrity.
return sections
class BlockNeuronBuilder(object):
'''Helper to create DataWrapper for 'block' sections
This helps create a new DataWrapper when one already has 'blocks'
    (ie: contiguous points, forming all the segments) of a section, and the
    blocks just need to be connected together based on their parents.
Example:
>>> builder = BlockNeuronBuilder()
>>> builder.add_section(segment_id, parent_id, segment_type, points)
...
>>> morph = builder.get_datawrapper()
Note:
This will re-number the IDs if they are not 'dense' (ie: have gaps)
'''
BlockSection = namedtuple('BlockSection', 'parent_id section_type points')
def __init__(self):
self.sections = {}
def add_section(self, id_, parent_id, section_type, points):
'''add a section
Args:
id_(int): identifying number of the section
parent_id(int): identifying number of the parent of this section
section_type(int): the section type as defined by POINT_TYPE
points is an array of [X, Y, Z, R]
'''
# L.debug('Adding section %d, with parent %d, of type: %d with count: %d',
# id_, parent_id, section_type, len(points))
assert id_ not in self.sections, 'id %s already exists in sections' % id_
self.sections[id_] = BlockNeuronBuilder.BlockSection(parent_id, section_type, points)
def _make_datablock(self):
'''Make a data_block and sections list as required by DataWrapper'''
section_ids = sorted(self.sections)
        # create all insertion ids; this needs to be done ahead of time
# as some of the children may have a lower id than their parents
id_to_insert_id = {}
row_count = 0
for section_id in section_ids:
row_count += len(self.sections[section_id].points)
id_to_insert_id[section_id] = row_count - 1
datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)
datablock[:, COLS.ID] = np.arange(len(datablock))
datablock[:, COLS.P] = datablock[:, COLS.ID] - 1
sections = []
insert_index = 0
for id_ in section_ids:
sec = self.sections[id_]
points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id
idx = slice(insert_index, insert_index + len(points))
datablock[idx, COLS.XYZR] = points
datablock[idx, COLS.TYPE] = section_type
datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)
sections.append(DataBlockSection(idx, section_type, parent_id))
insert_index = idx.stop
return datablock, sections
def _check_consistency(self):
'''see if the sections have obvious errors'''
type_count = defaultdict(int)
for _, section in sorted(self.sections.items()):
type_count[section.section_type] += 1
if type_count[POINT_TYPE.SOMA] != 1:
L.info('Have %d somas, expected 1', type_count[POINT_TYPE.SOMA])
def get_datawrapper(self, file_format='BlockNeuronBuilder', data_wrapper=DataWrapper):
'''returns a DataWrapper'''
self._check_consistency()
datablock, sections = self._make_datablock()
return data_wrapper(datablock, file_format, sections)
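# Hedged example (not part of NeuroM): a minimal sketch of assembling a
# two-section morphology with the builder above. The point values are
# arbitrary, POINT_TYPE.AXON is assumed to be a valid neurite type constant,
# and _example_* is not a public NeuroM name.
def _example_block_builder():
    '''Illustrative only; returns the DataWrapper built from two tiny sections'''
    builder = BlockNeuronBuilder()
    soma_pts = np.array([[0., 0., 0., 1.]])
    axon_pts = np.array([[0., 0., 0., 0.5],
                         [0., 0., 5., 0.5]])
    builder.add_section(0, ROOT_ID, POINT_TYPE.SOMA, soma_pts)
    builder.add_section(1, 0, POINT_TYPE.AXON, axon_pts)
    return builder.get_datawrapper()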
|
|
"""Test APRS device tracker."""
from unittest.mock import Mock, patch
import aprslib
import homeassistant.components.aprs.device_tracker as device_tracker
from homeassistant.const import EVENT_HOMEASSISTANT_START
from tests.common import get_test_home_assistant
DEFAULT_PORT = 14580
TEST_CALLSIGN = "testcall"
TEST_COORDS_NULL_ISLAND = (0, 0)
TEST_FILTER = "testfilter"
TEST_HOST = "testhost"
TEST_PASSWORD = "testpass"
def test_make_filter():
"""Test filter."""
callsigns = ["CALLSIGN1", "callsign2"]
res = device_tracker.make_filter(callsigns)
assert res == "b/CALLSIGN1 b/CALLSIGN2"
def test_gps_accuracy_0():
"""Test GPS accuracy level 0."""
acc = device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, 0)
assert acc == 0
def test_gps_accuracy_1():
"""Test GPS accuracy level 1."""
acc = device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, 1)
assert acc == 186
def test_gps_accuracy_2():
"""Test GPS accuracy level 2."""
acc = device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, 2)
assert acc == 1855
def test_gps_accuracy_3():
"""Test GPS accuracy level 3."""
acc = device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, 3)
assert acc == 18553
def test_gps_accuracy_4():
"""Test GPS accuracy level 4."""
acc = device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, 4)
assert acc == 111319
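# Hedged note (not from the original tests): the expected accuracies above
# follow APRS position ambiguity, where each level hides one more digit of the
# reported coordinate; level 1 is roughly a tenth of an arcminute (~186 m of
# latitude) and level 4 a full degree (~111319 m).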
def test_gps_accuracy_invalid_int():
"""Test GPS accuracy with invalid input."""
level = 5
try:
device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, level)
assert False, "No exception."
except ValueError:
pass
def test_gps_accuracy_invalid_string():
"""Test GPS accuracy with invalid input."""
level = "not an int"
try:
device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, level)
assert False, "No exception."
except ValueError:
pass
def test_gps_accuracy_invalid_float():
"""Test GPS accuracy with invalid input."""
level = 1.2
try:
device_tracker.gps_accuracy(TEST_COORDS_NULL_ISLAND, level)
assert False, "No exception."
except ValueError:
pass
def test_aprs_listener():
"""Test listener thread."""
with patch("aprslib.IS") as mock_ais:
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
port = DEFAULT_PORT
see = Mock()
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.run()
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert listener.start_success
assert listener.start_message == "Connected to testhost with callsign testcall."
mock_ais.assert_called_with(callsign, passwd=password, host=host, port=port)
def test_aprs_listener_start_fail():
"""Test listener thread start failure."""
with patch(
"aprslib.IS.connect", side_effect=aprslib.ConnectionError("Unable to connect.")
):
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
see = Mock()
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.run()
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert not listener.start_success
assert listener.start_message == "Unable to connect."
def test_aprs_listener_stop():
"""Test listener thread stop."""
with patch("aprslib.IS"):
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
see = Mock()
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.ais.close = Mock()
listener.run()
listener.stop()
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert listener.start_message == "Connected to testhost with callsign testcall."
assert listener.start_success
listener.ais.close.assert_called_with()
def test_aprs_listener_rx_msg():
"""Test rx_msg."""
with patch("aprslib.IS"):
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
see = Mock()
sample_msg = {
device_tracker.ATTR_FORMAT: "uncompressed",
device_tracker.ATTR_FROM: "ZZ0FOOBAR-1",
device_tracker.ATTR_LATITUDE: 0.0,
device_tracker.ATTR_LONGITUDE: 0.0,
device_tracker.ATTR_ALTITUDE: 0,
}
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.run()
listener.rx_msg(sample_msg)
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert listener.start_success
assert listener.start_message == "Connected to testhost with callsign testcall."
see.assert_called_with(
dev_id=device_tracker.slugify("ZZ0FOOBAR-1"),
gps=(0.0, 0.0),
attributes={"altitude": 0},
)
def test_aprs_listener_rx_msg_ambiguity():
"""Test rx_msg with posambiguity."""
with patch("aprslib.IS"):
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
see = Mock()
sample_msg = {
device_tracker.ATTR_FORMAT: "uncompressed",
device_tracker.ATTR_FROM: "ZZ0FOOBAR-1",
device_tracker.ATTR_LATITUDE: 0.0,
device_tracker.ATTR_LONGITUDE: 0.0,
device_tracker.ATTR_POS_AMBIGUITY: 1,
}
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.run()
listener.rx_msg(sample_msg)
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert listener.start_success
assert listener.start_message == "Connected to testhost with callsign testcall."
see.assert_called_with(
dev_id=device_tracker.slugify("ZZ0FOOBAR-1"),
gps=(0.0, 0.0),
attributes={device_tracker.ATTR_GPS_ACCURACY: 186},
)
def test_aprs_listener_rx_msg_ambiguity_invalid():
"""Test rx_msg with invalid posambiguity."""
with patch("aprslib.IS"):
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
see = Mock()
sample_msg = {
device_tracker.ATTR_FORMAT: "uncompressed",
device_tracker.ATTR_FROM: "ZZ0FOOBAR-1",
device_tracker.ATTR_LATITUDE: 0.0,
device_tracker.ATTR_LONGITUDE: 0.0,
device_tracker.ATTR_POS_AMBIGUITY: 5,
}
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.run()
listener.rx_msg(sample_msg)
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert listener.start_success
assert listener.start_message == "Connected to testhost with callsign testcall."
see.assert_called_with(
dev_id=device_tracker.slugify("ZZ0FOOBAR-1"), gps=(0.0, 0.0), attributes={}
)
def test_aprs_listener_rx_msg_no_position():
"""Test rx_msg with non-position report."""
with patch("aprslib.IS"):
callsign = TEST_CALLSIGN
password = TEST_PASSWORD
host = TEST_HOST
server_filter = TEST_FILTER
see = Mock()
sample_msg = {device_tracker.ATTR_FORMAT: "invalid"}
listener = device_tracker.AprsListenerThread(
callsign, password, host, server_filter, see
)
listener.run()
listener.rx_msg(sample_msg)
assert listener.callsign == callsign
assert listener.host == host
assert listener.server_filter == server_filter
assert listener.see == see
assert listener.start_event.is_set()
assert listener.start_success
assert listener.start_message == "Connected to testhost with callsign testcall."
see.assert_not_called()
def test_setup_scanner():
"""Test setup_scanner."""
with patch(
"homeassistant.components." "aprs.device_tracker.AprsListenerThread"
) as listener:
hass = get_test_home_assistant()
hass.start()
config = {
"username": TEST_CALLSIGN,
"password": TEST_PASSWORD,
"host": TEST_HOST,
"callsigns": ["XX0FOO*", "YY0BAR-1"],
}
see = Mock()
res = device_tracker.setup_scanner(hass, config, see)
hass.bus.fire(EVENT_HOMEASSISTANT_START)
hass.stop()
assert res
listener.assert_called_with(
TEST_CALLSIGN, TEST_PASSWORD, TEST_HOST, "b/XX0FOO* b/YY0BAR-1", see
)
def test_setup_scanner_timeout():
"""Test setup_scanner failure from timeout."""
hass = get_test_home_assistant()
hass.start()
config = {
"username": TEST_CALLSIGN,
"password": TEST_PASSWORD,
"host": "localhost",
"timeout": 0.01,
"callsigns": ["XX0FOO*", "YY0BAR-1"],
}
see = Mock()
try:
assert not device_tracker.setup_scanner(hass, config, see)
finally:
hass.stop()
|
|
from sympy.core import pi, oo, symbols, Function, Rational, Integer, GoldenRatio, EulerGamma, Catalan, Lambda, Dummy
from sympy.functions import Piecewise, sin, cos, Abs, exp, ceiling, sqrt
from sympy.utilities.pytest import raises
from sympy.printing.jscode import JavascriptCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
# import test
from sympy import jscode
x, y, z = symbols('x,y,z')
def test_printmethod():
assert jscode(Abs(x)) == "Math.abs(x)"
def test_jscode_sqrt():
assert jscode(sqrt(x)) == "Math.sqrt(x)"
assert jscode(x**0.5) == "Math.sqrt(x)"
assert jscode(sqrt(x)) == "Math.sqrt(x)"
def test_jscode_Pow():
g = implemented_function('g', Lambda(x, 2*x))
assert jscode(x**3) == "Math.pow(x, 3)"
assert jscode(x**(y**3)) == "Math.pow(x, Math.pow(y, 3))"
assert jscode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"Math.pow(3.5*2*x, -x + Math.pow(y, x))/(Math.pow(x, 2) + y)"
assert jscode(x**-1.0) == '1/x'
def test_jscode_constants_mathh():
assert jscode(exp(1)) == "Math.E"
assert jscode(pi) == "Math.PI"
assert jscode(oo) == "Number.POSITIVE_INFINITY"
assert jscode(-oo) == "Number.NEGATIVE_INFINITY"
def test_jscode_constants_other():
assert jscode(
2*GoldenRatio) == "var GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert jscode(2*Catalan) == "var Catalan = 0.915965594177219;\n2*Catalan"
assert jscode(
2*EulerGamma) == "var EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_jscode_Rational():
assert jscode(Rational(3, 7)) == "3/7"
assert jscode(Rational(18, 9)) == "2"
assert jscode(Rational(3, -7)) == "-3/7"
assert jscode(Rational(-3, -7)) == "3/7"
def test_jscode_Integer():
assert jscode(Integer(67)) == "67"
assert jscode(Integer(-1)) == "-1"
def test_jscode_functions():
assert jscode(sin(x) ** cos(x)) == "Math.pow(Math.sin(x), Math.cos(x))"
def test_jscode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert jscode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert jscode(g(x)) == "var Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert jscode(g(A[i]), assign_to=A[i]) == (
"for (var i=0; i<n; i++){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
def test_jscode_exceptions():
assert jscode(ceiling(x)) == "Math.ceil(x)"
assert jscode(Abs(x)) == "Math.abs(x)"
def test_jscode_boolean():
assert jscode(x & y) == "x && y"
assert jscode(x | y) == "x || y"
assert jscode(~x) == "!x"
assert jscode(x & y & z) == "x && y && z"
assert jscode(x | y | z) == "x || y || z"
assert jscode((x & y) | z) == "z || x && y"
assert jscode((x | y) & z) == "z && (x || y)"
def test_jscode_Piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
p = jscode(expr)
s = \
"""\
((x < 1) ? (
x
)
: (
Math.pow(x, 2)
))\
"""
assert p == s
assert jscode(expr, assign_to="c") == (
"if (x < 1) {\n"
" c = x;\n"
"}\n"
"else {\n"
" c = Math.pow(x, 2);\n"
"}")
    # Check that a Piecewise without a True (default) condition raises an error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: jscode(expr))
def test_jscode_Piecewise_deep():
p = jscode(2*Piecewise((x, x < 1), (x**2, True)))
s = \
"""\
2*((x < 1) ? (
x
)
: (
Math.pow(x, 2)
))\
"""
assert p == s
def test_jscode_settings():
raises(TypeError, lambda: jscode(sin(x), method="garbage"))
def test_jscode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = JavascriptCodePrinter()
p._not_c = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[%s]' % (m*i+j)
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[%s]' % (i*o*m+j*o+k)
assert p._not_c == set()
def test_jscode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' y[i] = x[j]*A[n*i + j] + y[i];\n'
' }\n'
'}'
)
c = jscode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (var i_%(icount)i=0; i_%(icount)i<m_%(mcount)i; i_%(icount)i++){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = jscode(x[i], assign_to=y[i])
assert code == expected
def test_jscode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' y[i] = x[j]*A[n*i + j] + y[i];\n'
' }\n'
'}'
)
c = jscode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_jscode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' for (var k=0; k<o; k++){\n'
' for (var l=0; l<p; l++){\n'
' y[i] = y[i] + b[%s]*a[%s];\n' % (j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = jscode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_jscode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' for (var k=0; k<o; k++){\n'
' for (var l=0; l<p; l++){\n'
' y[i] = (a[%s] + b[%s])*c[%s] + y[i];\n' % (i*n*o*p + j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l, j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = jscode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_jscode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' for (var k=0; k<o; k++){\n'
' y[i] = b[j]*b[k]*c[%s] + y[i];\n' % (i*n*o + j*o + k) +\
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (var i=0; i<m; i++){\n'
' for (var k=0; k<o; k++){\n'
' y[i] = b[k]*a[%s] + y[i];\n' % (i*o + k) +\
' }\n'
'}\n'
)
s3 = (
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' y[i] = b[j]*a[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}\n'
)
c = jscode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
assert (c == s0 + s1 + s2 + s3[:-1] or
c == s0 + s1 + s3 + s2[:-1] or
c == s0 + s2 + s1 + s3[:-1] or
c == s0 + s2 + s3 + s1[:-1] or
c == s0 + s3 + s1 + s2[:-1] or
c == s0 + s3 + s2 + s1[:-1])
def test_Matrix_printing():
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
assert jscode(mat, A) == (
"A[0] = x*y;\n"
"if (y > 0) {\n"
" A[1] = x + 2;\n"
"}\n"
"else {\n"
" A[1] = y;\n"
"}\n"
"A[2] = Math.sin(z);")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
assert jscode(expr) == (
"((x > 0) ? (\n"
" 2*A[2]\n"
")\n"
": (\n"
" A[2]\n"
")) + Math.sin(A[1]) + A[0]")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert jscode(m, M) == (
"M[0] = Math.sin(q[1]);\n"
"M[1] = 0;\n"
"M[2] = Math.cos(q[2]);\n"
"M[3] = q[1] + q[2];\n"
"M[4] = q[3];\n"
"M[5] = 5;\n"
"M[6] = 2*q[4]*1/q[1];\n"
"M[7] = 4 + Math.sqrt(q[0]);\n"
"M[8] = 0;")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import Alias
from ._models_py3 import AliasPath
from ._models_py3 import AliasPathMetadata
from ._models_py3 import AliasPattern
from ._models_py3 import ApiProfile
from ._models_py3 import BasicDependency
from ._models_py3 import DebugSetting
from ._models_py3 import Dependency
from ._models_py3 import Deployment
from ._models_py3 import DeploymentExportResult
from ._models_py3 import DeploymentExtended
from ._models_py3 import DeploymentExtendedFilter
from ._models_py3 import DeploymentListResult
from ._models_py3 import DeploymentOperation
from ._models_py3 import DeploymentOperationProperties
from ._models_py3 import DeploymentOperationsListResult
from ._models_py3 import DeploymentProperties
from ._models_py3 import DeploymentPropertiesExtended
from ._models_py3 import DeploymentValidateResult
from ._models_py3 import DeploymentWhatIf
from ._models_py3 import DeploymentWhatIfProperties
from ._models_py3 import DeploymentWhatIfSettings
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorResponse
from ._models_py3 import ExportTemplateRequest
from ._models_py3 import ExpressionEvaluationOptions
from ._models_py3 import ExtendedLocation
from ._models_py3 import GenericResource
from ._models_py3 import GenericResourceExpanded
from ._models_py3 import GenericResourceFilter
from ._models_py3 import HttpMessage
from ._models_py3 import Identity
from ._models_py3 import IdentityUserAssignedIdentitiesValue
from ._models_py3 import OnErrorDeployment
from ._models_py3 import OnErrorDeploymentExtended
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import ParametersLink
from ._models_py3 import Permission
from ._models_py3 import Plan
from ._models_py3 import Provider
from ._models_py3 import ProviderConsentDefinition
from ._models_py3 import ProviderExtendedLocation
from ._models_py3 import ProviderListResult
from ._models_py3 import ProviderPermission
from ._models_py3 import ProviderPermissionListResult
from ._models_py3 import ProviderRegistrationRequest
from ._models_py3 import ProviderResourceType
from ._models_py3 import ProviderResourceTypeListResult
from ._models_py3 import Resource
from ._models_py3 import ResourceGroup
from ._models_py3 import ResourceGroupExportResult
from ._models_py3 import ResourceGroupFilter
from ._models_py3 import ResourceGroupListResult
from ._models_py3 import ResourceGroupPatchable
from ._models_py3 import ResourceGroupProperties
from ._models_py3 import ResourceListResult
from ._models_py3 import ResourceProviderOperationDisplayProperties
from ._models_py3 import ResourceReference
from ._models_py3 import ResourcesMoveInfo
from ._models_py3 import RoleDefinition
from ._models_py3 import ScopedDeployment
from ._models_py3 import ScopedDeploymentWhatIf
from ._models_py3 import Sku
from ._models_py3 import StatusMessage
from ._models_py3 import SubResource
from ._models_py3 import TagCount
from ._models_py3 import TagDetails
from ._models_py3 import TagValue
from ._models_py3 import Tags
from ._models_py3 import TagsListResult
from ._models_py3 import TagsPatchResource
from ._models_py3 import TagsResource
from ._models_py3 import TargetResource
from ._models_py3 import TemplateHashResult
from ._models_py3 import TemplateLink
from ._models_py3 import WhatIfChange
from ._models_py3 import WhatIfOperationResult
from ._models_py3 import WhatIfPropertyChange
from ._models_py3 import ZoneMapping
from ._resource_management_client_enums import (
AliasPathAttributes,
AliasPathTokenType,
AliasPatternType,
AliasType,
ChangeType,
DeploymentMode,
ExpressionEvaluationOptionsScopeType,
ExtendedLocationType,
OnErrorDeploymentType,
PropertyChangeType,
ProviderAuthorizationConsentState,
ProvisioningOperation,
ProvisioningState,
ResourceIdentityType,
TagsPatchOperation,
WhatIfResultFormat,
)
__all__ = [
'Alias',
'AliasPath',
'AliasPathMetadata',
'AliasPattern',
'ApiProfile',
'BasicDependency',
'DebugSetting',
'Dependency',
'Deployment',
'DeploymentExportResult',
'DeploymentExtended',
'DeploymentExtendedFilter',
'DeploymentListResult',
'DeploymentOperation',
'DeploymentOperationProperties',
'DeploymentOperationsListResult',
'DeploymentProperties',
'DeploymentPropertiesExtended',
'DeploymentValidateResult',
'DeploymentWhatIf',
'DeploymentWhatIfProperties',
'DeploymentWhatIfSettings',
'ErrorAdditionalInfo',
'ErrorResponse',
'ExportTemplateRequest',
'ExpressionEvaluationOptions',
'ExtendedLocation',
'GenericResource',
'GenericResourceExpanded',
'GenericResourceFilter',
'HttpMessage',
'Identity',
'IdentityUserAssignedIdentitiesValue',
'OnErrorDeployment',
'OnErrorDeploymentExtended',
'Operation',
'OperationDisplay',
'OperationListResult',
'ParametersLink',
'Permission',
'Plan',
'Provider',
'ProviderConsentDefinition',
'ProviderExtendedLocation',
'ProviderListResult',
'ProviderPermission',
'ProviderPermissionListResult',
'ProviderRegistrationRequest',
'ProviderResourceType',
'ProviderResourceTypeListResult',
'Resource',
'ResourceGroup',
'ResourceGroupExportResult',
'ResourceGroupFilter',
'ResourceGroupListResult',
'ResourceGroupPatchable',
'ResourceGroupProperties',
'ResourceListResult',
'ResourceProviderOperationDisplayProperties',
'ResourceReference',
'ResourcesMoveInfo',
'RoleDefinition',
'ScopedDeployment',
'ScopedDeploymentWhatIf',
'Sku',
'StatusMessage',
'SubResource',
'TagCount',
'TagDetails',
'TagValue',
'Tags',
'TagsListResult',
'TagsPatchResource',
'TagsResource',
'TargetResource',
'TemplateHashResult',
'TemplateLink',
'WhatIfChange',
'WhatIfOperationResult',
'WhatIfPropertyChange',
'ZoneMapping',
'AliasPathAttributes',
'AliasPathTokenType',
'AliasPatternType',
'AliasType',
'ChangeType',
'DeploymentMode',
'ExpressionEvaluationOptionsScopeType',
'ExtendedLocationType',
'OnErrorDeploymentType',
'PropertyChangeType',
'ProviderAuthorizationConsentState',
'ProvisioningOperation',
'ProvisioningState',
'ResourceIdentityType',
'TagsPatchOperation',
'WhatIfResultFormat',
]
|
|
from __future__ import division, print_function, absolute_import
import os
import sys
import functools
from textwrap import TextWrapper
from collections import defaultdict
from plumbum.lib import six, getdoc
from plumbum.cli.terminal import get_terminal_size
from plumbum.cli.switches import (SwitchError, UnknownSwitch, MissingArgument, WrongArgumentType,
MissingMandatorySwitch, SwitchCombinationError, PositionalArgumentsError, switch,
SubcommandError, Flag, CountOf)
from plumbum import colors, local
class ShowHelp(SwitchError):
pass
class ShowHelpAll(SwitchError):
pass
class ShowVersion(SwitchError):
pass
class SwitchParseInfo(object):
__slots__ = ["swname", "val", "index"]
def __init__(self, swname, val, index):
self.swname = swname
self.val = val
self.index = index
class Subcommand(object):
def __init__(self, name, subapplication):
self.name = name
self.subapplication = subapplication
def get(self):
if isinstance(self.subapplication, str):
modname, clsname = self.subapplication.rsplit(".", 1)
mod = __import__(modname, None, None, "*")
try:
cls = getattr(mod, clsname)
except AttributeError:
raise ImportError("cannot import name %s" % (clsname,))
self.subapplication = cls
return self.subapplication
def __repr__(self):
return "Subcommand(%r, %r)" % (self.name, self.subapplication)
#===================================================================================================
# CLI Application base class
#===================================================================================================
class Application(object):
"""
The base class for CLI applications; your "entry point" class should derive from it,
define the relevant switch functions and attributes, and the ``main()`` function.
The class defines two overridable "meta switches" for version (``-v``, ``--version``)
and help (``-h``, ``--help``).
    The signature of the main function matters: any positional arguments (i.e., non-switch
    arguments) given on the command line are passed to the ``main()`` function; if you wish
    to allow an unlimited number of positional arguments, use varargs (``*args``). The names
of the arguments will be shown in the help message.
The classmethod ``run`` serves as the entry point of the class. It parses the command-line
    arguments, invokes the switch functions, and enters ``main``. You should **not override** this
method.
Usage::
class FileCopier(Application):
stat = Flag("p", "copy stat info as well")
def main(self, src, dst):
if self.stat:
shutil.copy2(src, dst)
else:
shutil.copy(src, dst)
if __name__ == "__main__":
FileCopier.run()
There are several class-level attributes you may set:
* ``PROGNAME`` - the name of the program; if ``None`` (the default), it is set to the
      name of the executable (``argv[0]``); it can be in color. If only a color is given, it will be applied to the name.
* ``VERSION`` - the program's version (defaults to ``1.0``, can be in color)
* ``DESCRIPTION`` - a short description of your program (shown in help). If not set,
the class' ``__doc__`` will be used. Can be in color.
* ``USAGE`` - the usage line (shown in help)
* ``COLOR_USAGE`` - The color of the usage line
* ``COLOR_GROUPS`` - A dictionary that sets colors for the groups, like Meta-switches, Switches,
and Subcommands
* ``SUBCOMMAND_HELPMSG`` - Controls the printing of extra "see subcommand -h" help message.
      Defaults to a standard message; set it to ``False`` to disable.
A note on sub-commands: when an application is the root, its ``parent`` attribute is set to
    ``None``. When it is used as a nested command, ``parent`` will point to its direct ancestor.
Likewise, when an application is invoked with a sub-command, its ``nested_command`` attribute
will hold the chosen sub-application and its command-line arguments (a tuple); otherwise, it
will be set to ``None``
"""
PROGNAME = None
DESCRIPTION = None
VERSION = None
USAGE = None
COLOR_USAGE = None
COLOR_GROUPS = None
CALL_MAIN_IF_NESTED_COMMAND = True
SUBCOMMAND_HELPMSG = "see '{parent} {sub} --help' for more info"
parent = None
nested_command = None
_unbound_switches = ()
def __new__(cls, executable=None):
"""Allows running the class directly as a shortcut for main.
        This is necessary for some setup scripts that want a single function,
instead of an expression with a dot in it."""
if executable is None:
return cls.run()
# This return value was not a class instance, so __init__ is never called
else:
return super(Application, cls).__new__(cls)
def __init__(self, executable):
# Filter colors
if self.PROGNAME is None:
self.PROGNAME = os.path.basename(executable)
elif isinstance(self.PROGNAME, colors._style):
self.PROGNAME = self.PROGNAME | os.path.basename(executable)
elif colors.filter(self.PROGNAME) == '':
self.PROGNAME = colors.extract(self.PROGNAME) | os.path.basename(executable)
if self.DESCRIPTION is None:
self.DESCRIPTION = getdoc(self)
# Allow None for the colors
        self.COLOR_GROUPS = defaultdict(
            lambda: colors.do_nothing,
            dict() if type(self).COLOR_GROUPS is None else type(self).COLOR_GROUPS)
        if type(self).COLOR_USAGE is None:
            self.COLOR_USAGE = colors.do_nothing
self.executable = executable
self._switches_by_name = {}
self._switches_by_func = {}
self._switches_by_envar = {}
self._subcommands = {}
for cls in reversed(type(self).mro()):
for obj in cls.__dict__.values():
if isinstance(obj, Subcommand):
name = colors.filter(obj.name)
if name.startswith("-"):
raise SubcommandError("Subcommand names cannot start with '-'")
# it's okay for child classes to override subcommands set by their parents
self._subcommands[name] = obj
continue
swinfo = getattr(obj, "_switch_info", None)
if not swinfo:
continue
for name in swinfo.names:
if name in self._unbound_switches:
continue
if name in self._switches_by_name and not self._switches_by_name[name].overridable:
raise SwitchError("Switch %r already defined and is not overridable" % (name,))
self._switches_by_name[name] = swinfo
self._switches_by_func[swinfo.func] = swinfo
if swinfo.envname:
self._switches_by_envar[swinfo.envname] = swinfo
@property
def root_app(self):
return self.parent.root_app if self.parent else self
@classmethod
def unbind_switches(cls, *switch_names):
"""Unbinds the given switch names from this application. For example
::
class MyApp(cli.Application):
pass
MyApp.unbind_switches("--version")
"""
cls._unbound_switches += tuple(name.lstrip("-") for name in switch_names if name)
@classmethod
def subcommand(cls, name, subapp = None):
"""Registers the given sub-application as a sub-command of this one. This method can be
used both as a decorator and as a normal ``classmethod``::
@MyApp.subcommand("foo")
class FooApp(cli.Application):
pass
Or ::
MyApp.subcommand("foo", FooApp)
.. versionadded:: 1.1
.. versionadded:: 1.3
The subcommand can also be a string, in which case it is treated as a
fully-qualified class name and is imported on demand. For examples,
MyApp.subcommand("foo", "fully.qualified.package.FooApp")
"""
def wrapper(subapp):
attrname = "_subcommand_%s" % (subapp if isinstance(subapp, str) else subapp.__name__,)
setattr(cls, attrname, Subcommand(name, subapp))
return subapp
return wrapper(subapp) if subapp else wrapper
def _parse_args(self, argv):
tailargs = []
swfuncs = {}
index = 0
while argv:
index += 1
a = argv.pop(0)
val = None
if a == "--":
# end of options, treat the rest as tailargs
tailargs.extend(argv)
break
if a in self._subcommands:
subcmd = self._subcommands[a].get()
self.nested_command = (subcmd, [self.PROGNAME + " " + self._subcommands[a].name] + argv)
break
elif a.startswith("--") and len(a) >= 3:
# [--name], [--name=XXX], [--name, XXX], [--name, ==, XXX],
# [--name=, XXX], [--name, =XXX]
eqsign = a.find("=")
if eqsign >= 0:
name = a[2:eqsign]
argv.insert(0, a[eqsign:])
else:
name = a[2:]
swname = "--" + name
if name not in self._switches_by_name:
raise UnknownSwitch("Unknown switch %s" % (swname,))
swinfo = self._switches_by_name[name]
if swinfo.argtype:
if not argv:
raise MissingArgument("Switch %s requires an argument" % (swname,))
a = argv.pop(0)
if a and a[0] == "=":
if len(a) >= 2:
val = a[1:]
else:
if not argv:
raise MissingArgument("Switch %s requires an argument" % (swname))
val = argv.pop(0)
else:
val = a
elif a.startswith("-") and len(a) >= 2:
# [-a], [-a, XXX], [-aXXX], [-abc]
name = a[1]
swname = "-" + name
if name not in self._switches_by_name:
raise UnknownSwitch("Unknown switch %s" % (swname,))
swinfo = self._switches_by_name[name]
if swinfo.argtype:
if len(a) >= 3:
val = a[2:]
else:
if not argv:
raise MissingArgument("Switch %s requires an argument" % (swname,))
val = argv.pop(0)
elif len(a) >= 3:
argv.insert(0, "-" + a[2:])
else:
if a.startswith("-"):
raise UnknownSwitch("Unknown switch %s" % (a,))
tailargs.append(a)
continue
# handle argument
val = self._handle_argument(val, swinfo.argtype, name)
if swinfo.func in swfuncs:
if swinfo.list:
swfuncs[swinfo.func].val[0].append(val)
else:
if swfuncs[swinfo.func].swname == swname:
raise SwitchError("Switch %r already given" % (swname,))
else:
raise SwitchError("Switch %r already given (%r is equivalent)" % (
swfuncs[swinfo.func].swname, swname))
else:
if swinfo.list:
swfuncs[swinfo.func] = SwitchParseInfo(swname, ([val],), index)
elif val is NotImplemented:
swfuncs[swinfo.func] = SwitchParseInfo(swname, (), index)
else:
swfuncs[swinfo.func] = SwitchParseInfo(swname, (val,), index)
# Extracting arguments from environment variables
envindex = 0
for env, swinfo in self._switches_by_envar.items():
envindex -= 1
envval = local.env.get(env)
if envval is None:
continue
if swinfo.func in swfuncs:
continue # skip if overridden by command line arguments
val = self._handle_argument(envval, swinfo.argtype, env)
envname = "$%s" % (env,)
if swinfo.list:
# multiple values over environment variables are not supported,
# this will require some sort of escaping and separator convention
swfuncs[swinfo.func] = SwitchParseInfo(envname, ([val],), envindex)
elif val is NotImplemented:
swfuncs[swinfo.func] = SwitchParseInfo(envname, (), envindex)
else:
swfuncs[swinfo.func] = SwitchParseInfo(envname, (val,), envindex)
return swfuncs, tailargs
@classmethod
def autocomplete(cls, argv):
"""This is supplied to make subclassing and testing argument completion methods easier"""
pass
@staticmethod
def _handle_argument(val, argtype, name):
if argtype:
try:
return argtype(val)
except (TypeError, ValueError):
ex = sys.exc_info()[1] # compat
raise WrongArgumentType("Argument of %s expected to be %r, not %r:\n %r" % (
name, argtype, val, ex))
else:
return NotImplemented
def _validate_args(self, swfuncs, tailargs):
if six.get_method_function(self.help) in swfuncs:
raise ShowHelp()
if six.get_method_function(self.helpall) in swfuncs:
raise ShowHelpAll()
if six.get_method_function(self.version) in swfuncs:
raise ShowVersion()
requirements = {}
exclusions = {}
for swinfo in self._switches_by_func.values():
if swinfo.mandatory and not swinfo.func in swfuncs:
raise MissingMandatorySwitch("Switch %s is mandatory" %
("/".join(("-" if len(n) == 1 else "--") + n for n in swinfo.names),))
requirements[swinfo.func] = set(self._switches_by_name[req] for req in swinfo.requires)
exclusions[swinfo.func] = set(self._switches_by_name[exc] for exc in swinfo.excludes)
# TODO: compute topological order
gotten = set(swfuncs.keys())
for func in gotten:
missing = set(f.func for f in requirements[func]) - gotten
if missing:
raise SwitchCombinationError("Given %s, the following are missing %r" %
(swfuncs[func].swname, [self._switches_by_func[f].names[0] for f in missing]))
invalid = set(f.func for f in exclusions[func]) & gotten
if invalid:
raise SwitchCombinationError("Given %s, the following are invalid %r" %
(swfuncs[func].swname, [swfuncs[f].swname for f in invalid]))
m = six.getfullargspec(self.main)
max_args = six.MAXSIZE if m.varargs else len(m.args) - 1
min_args = len(m.args) - 1 - (len(m.defaults) if m.defaults else 0)
if len(tailargs) < min_args:
raise PositionalArgumentsError("Expected at least %d positional arguments, got %r" %
(min_args, tailargs))
elif len(tailargs) > max_args:
raise PositionalArgumentsError("Expected at most %d positional arguments, got %r" %
(max_args, tailargs))
        # Positional argument validation
if hasattr(self.main, 'positional'):
tailargs = self._positional_validate(tailargs, self.main.positional, self.main.positional_varargs, m.args[1:], m.varargs)
elif hasattr(m, 'annotations'):
args_names = list(m.args[1:])
positional = [None]*len(args_names)
varargs = None
# All args are positional, so convert kargs to positional
for item in m.annotations:
if item == m.varargs:
varargs = m.annotations[item]
else:
positional[args_names.index(item)] = m.annotations[item]
tailargs = self._positional_validate(tailargs, positional, varargs,
m.args[1:], m.varargs)
ordered = [(f, a) for _, f, a in
sorted([(sf.index, f, sf.val) for f, sf in swfuncs.items()])]
return ordered, tailargs
def _positional_validate(self, args, validator_list, varargs, argnames, varargname):
"""Makes sure args follows the validation given input"""
out_args = list(args)
for i in range(min(len(args),len(validator_list))):
if validator_list[i] is not None:
out_args[i] = self._handle_argument(args[i], validator_list[i], argnames[i])
if len(args) > len(validator_list):
if varargs is not None:
out_args[len(validator_list):] = [
self._handle_argument(a, varargs, varargname) for a in args[len(validator_list):]]
else:
out_args[len(validator_list):] = args[len(validator_list):]
return out_args
@classmethod
def run(cls, argv = None, exit = True): # @ReservedAssignment
"""
Runs the application, taking the arguments from ``sys.argv`` by default if
nothing is passed. If ``exit`` is
``True`` (the default), the function will exit with the appropriate return code;
otherwise it will return a tuple of ``(inst, retcode)``, where ``inst`` is the
application instance created internally by this function and ``retcode`` is the
exit code of the application.
.. note::
            Setting ``exit`` to ``False`` is intended for testing/debugging purposes only -- do
            not override it in other situations.
"""
if argv is None:
argv = sys.argv
cls.autocomplete(argv)
argv = list(argv)
inst = cls(argv.pop(0))
retcode = 0
try:
swfuncs, tailargs = inst._parse_args(argv)
ordered, tailargs = inst._validate_args(swfuncs, tailargs)
except ShowHelp:
inst.help()
except ShowHelpAll:
inst.helpall()
except ShowVersion:
inst.version()
except SwitchError:
ex = sys.exc_info()[1] # compatibility with python 2.5
print("Error: %s" % (ex,))
print("------")
inst.help()
retcode = 2
else:
for f, a in ordered:
f(inst, *a)
cleanup = None
if not inst.nested_command or inst.CALL_MAIN_IF_NESTED_COMMAND:
retcode = inst.main(*tailargs)
cleanup = functools.partial(inst.cleanup, retcode)
if not retcode and inst.nested_command:
subapp, argv = inst.nested_command
subapp.parent = inst
inst, retcode = subapp.run(argv, exit = False)
if cleanup:
cleanup()
if retcode is None:
retcode = 0
if exit:
sys.exit(retcode)
else:
return inst, retcode
@classmethod
def invoke(cls, *args, **switches):
"""Invoke this application programmatically (as a function), in the same way ``run()``
would. There are two key differences: the return value of ``main()`` is not converted to
an integer (returned as-is), and exceptions are not swallowed either.
:param args: any positional arguments for ``main()``
:param switches: command-line switches are passed as keyword arguments,
e.g., ``foo=5`` for ``--foo=5``
"""
inst = cls("")
swfuncs = inst._parse_kwd_args(switches)
ordered, tailargs = inst._validate_args(swfuncs, args)
for f, a in ordered:
f(inst, *a)
cleanup = None
if not inst.nested_command or inst.CALL_MAIN_IF_NESTED_COMMAND:
retcode = inst.main(*tailargs)
cleanup = functools.partial(inst.cleanup, retcode)
if not retcode and inst.nested_command:
subapp, argv = inst.nested_command
subapp.parent = inst
inst, retcode = subapp.run(argv, exit = False)
if cleanup:
cleanup()
return inst, retcode
def _parse_kwd_args(self, switches):
"""Parses keywords (positional arguments), used by invoke."""
swfuncs = {}
for index, (swname, val) in enumerate(switches.items(), 1):
switch = getattr(type(self), swname)
swinfo = self._switches_by_func[switch._switch_info.func]
if isinstance(switch, CountOf):
p = (range(val),)
elif swinfo.list and not hasattr(val, "__iter__"):
raise SwitchError("Switch %r must be a sequence (iterable)" % (swname,))
elif not swinfo.argtype:
# a flag
if val not in (True, False, None, Flag):
raise SwitchError("Switch %r is a boolean flag" % (swname,))
p = ()
else:
p = (val,)
swfuncs[swinfo.func] = SwitchParseInfo(swname, p, index)
return swfuncs
def main(self, *args):
"""Implement me (no need to call super)"""
if self._subcommands:
if args:
print("Unknown sub-command %r" % (args[0],))
print("------")
self.help()
return 1
if not self.nested_command:
print("No sub-command given")
print("------")
self.help()
return 1
else:
print("main() not implemented")
return 1
def cleanup(self, retcode):
"""Called after ``main()`` and all subapplications have executed, to perform any necessary cleanup.
:param retcode: the return code of ``main()``
"""
@switch(["--help-all"], overridable = True, group = "Meta-switches")
def helpall(self):
"""Print help messages of all subcommands and quit"""
self.help()
print("")
if self._subcommands:
for name, subcls in sorted(self._subcommands.items()):
subapp = (subcls.get())("%s %s" % (self.PROGNAME, name))
subapp.parent = self
for si in subapp._switches_by_func.values():
if si.group == "Meta-switches":
si.group = "Hidden-switches"
subapp.helpall()
@switch(["-h", "--help"], overridable = True, group = "Meta-switches")
def help(self): # @ReservedAssignment
"""Prints this help message and quits"""
if self._get_prog_version():
self.version()
print("")
if self.DESCRIPTION:
print(self.DESCRIPTION.strip() + '\n')
m = six.getfullargspec(self.main)
tailargs = m.args[1:] # skip self
if m.defaults:
for i, d in enumerate(reversed(m.defaults)):
tailargs[-i - 1] = "[%s=%r]" % (tailargs[-i - 1], d)
if m.varargs:
tailargs.append("%s..." % (m.varargs,))
tailargs = " ".join(tailargs)
with self.COLOR_USAGE:
print("Usage:")
if not self.USAGE:
if self._subcommands:
self.USAGE = " %(progname)s [SWITCHES] [SUBCOMMAND [SWITCHES]] %(tailargs)s\n"
else:
self.USAGE = " %(progname)s [SWITCHES] %(tailargs)s\n"
print(self.USAGE % {"progname": colors.filter(self.PROGNAME), "tailargs": tailargs})
by_groups = {}
for si in self._switches_by_func.values():
if si.group not in by_groups:
by_groups[si.group] = []
by_groups[si.group].append(si)
def switchs(by_groups, show_groups):
for grp, swinfos in sorted(by_groups.items(), key = lambda item: item[0]):
if show_groups:
print(self.COLOR_GROUPS[grp] | grp)
for si in sorted(swinfos, key = lambda si: si.names):
swnames = ", ".join(("-" if len(n) == 1 else "--") + n for n in si.names
if n in self._switches_by_name and self._switches_by_name[n] == si)
if si.argtype:
if isinstance(si.argtype, type):
typename = si.argtype.__name__
else:
typename = str(si.argtype)
argtype = " %s:%s" % (si.argname.upper(), typename)
else:
argtype = ""
prefix = swnames + argtype
yield si, prefix, self.COLOR_GROUPS[grp]
if show_groups:
print("")
sw_width = max(len(prefix) for si, prefix, color in switchs(by_groups, False)) + 4
cols, _ = get_terminal_size()
description_indent = " %s%s%s"
wrapper = TextWrapper(width = max(cols - min(sw_width, 60), 50) - 6)
indentation = "\n" + " " * (cols - wrapper.width)
for si, prefix, color in switchs(by_groups, True):
help = si.help # @ReservedAssignment
if si.list:
help += "; may be given multiple times"
if si.mandatory:
help += "; required"
if si.requires:
help += "; requires %s" % (", ".join((("-" if len(s) == 1 else "--") + s) for s in si.requires))
if si.excludes:
help += "; excludes %s" % (", ".join((("-" if len(s) == 1 else "--") + s) for s in si.excludes))
msg = indentation.join(wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))
if len(prefix) + wrapper.width >= cols:
padding = indentation
else:
padding = " " * max(cols - wrapper.width - len(prefix) - 4, 1)
print(description_indent % (color | prefix, padding, color | msg))
if self._subcommands:
gc = self.COLOR_GROUPS["Subcommands"]
print(gc | "Subcommands:")
for name, subcls in sorted(self._subcommands.items()):
with gc:
subapp = subcls.get()
doc = subapp.DESCRIPTION if subapp.DESCRIPTION else getdoc(subapp)
if self.SUBCOMMAND_HELPMSG:
help = doc + "; " if doc else "" # @ReservedAssignment
help += self.SUBCOMMAND_HELPMSG.format(parent=self.PROGNAME, sub=name)
else:
help = doc if doc else "" # @ReservedAssignment
msg = indentation.join(wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))
if len(name) + wrapper.width >= cols:
padding = indentation
else:
padding = " " * max(cols - wrapper.width - len(name) - 4, 1)
if colors.contains_colors(subcls.name):
bodycolor = colors.extract(subcls.name)
else:
bodycolor = gc
print(description_indent
% (subcls.name, padding,
bodycolor | colors.filter(msg)))
def _get_prog_version(self):
ver = None
curr = self
while curr is not None:
ver = getattr(curr, "VERSION", None)
if ver is not None:
return ver
curr = curr.parent
return ver
@switch(["-v", "--version"], overridable = True, group = "Meta-switches")
def version(self):
"""Prints the program's version and quits"""
ver = self._get_prog_version()
ver_name = ver if ver is not None else "(version not set)"
print('{0} {1}'.format(self.PROGNAME, ver_name))
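# Illustrative sketch (not part of plumbum itself): a minimal application combining a
# decorated switch with positional arguments, shown with both ways of invoking it.
# The names below ("Geet", "--loglevel", file names) are made up for the example.
#
#     import logging
#
#     class Geet(Application):
#         VERSION = "1.0"
#
#         @switch(["--loglevel"], int)
#         def set_log_level(self, level):
#             """Sets the log level of the root logger"""
#             logging.root.setLevel(level)
#
#         def main(self, *srcfiles):
#             print("files:", srcfiles)
#
#     Geet.run(["geet", "--loglevel=10", "a.txt"], exit=False)   # argv-style parsing
#     Geet.invoke("a.txt", set_log_level=10)                     # keyword args map to switches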
|
|
#!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.log import DEFAULT_LOGGING
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango30Warning)
# Make runtime warning errors to ensure no usage of error prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Remove the temporary TMPDIR at interpreter exit.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
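# Illustrative return value of get_test_modules(): a list of (modpath, name) pairs such as
# [(None, 'basic'), (None, 'admin_views'), ('gis_tests', 'geoapp'), ...]; modpath is only
# set for apps discovered under the nested gis_tests directory.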
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel):
# Reduce the given test labels to just the app module path.
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
# Load all the test model apps.
test_modules = get_test_modules()
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run on Python 3.6+ (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
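# Illustrative: passing "--selenium=firefox,chrome" validates each browser via
# SeleniumTestCaseBase.import_webdriver() and stores options.selenium == ['firefox', 'chrome'].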
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags):
state = setup(verbosity, test_labels, parallel)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
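# Illustrative: with --failfast and --verbosity=2 and default settings, the bisect/pair
# subprocesses below are launched with roughly
#   [sys.executable, __file__, '--settings=test_sqlite', '--failfast', '--verbosity=2']
# plus the test labels appended by the caller.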
def bisect_tests(bisection_label, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true', dest='failfast',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', dest='selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', dest='parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
options = parser.parse_args()
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(options.bisect, options, options.modules, options.parallel)
elif options.pair:
paired_tests(options.pair, options, options.modules, options.parallel)
else:
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
)
if failures:
sys.exit(1)
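# Illustrative invocations of this script (module names are examples):
#   ./runtests.py basic --verbosity=2       # run a single test app verbosely
#   ./runtests.py --bisect=basic            # bisect the suite against one test label
#   ./runtests.py --selenium=firefox        # run the selenium-tagged tests in Firefox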
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import inspect
import json
import os
import sys
import yaml
from oslo_config import cfg
from st2common.exceptions.plugins import IncompatiblePluginException
from st2common import log as logging
__all__ = [
'register_plugin',
'register_plugin_class'
]
LOG = logging.getLogger(__name__)
PYTHON_EXTENSIONS = ('.py',)
# Cache for dynamically loaded runner modules
RUNNER_MODULES_CACHE = {}
QUERIER_MODULES_CACHE = {}
def _register_plugin_path(plugin_dir_abs_path):
if not os.path.isdir(plugin_dir_abs_path):
raise Exception('Directory "%s" with plugins doesn\'t exist' % (plugin_dir_abs_path))
for x in sys.path:
if plugin_dir_abs_path in (x, x + os.sep):
return
sys.path.append(plugin_dir_abs_path)
def _get_plugin_module(plugin_file_path):
plugin_module = os.path.basename(plugin_file_path)
if plugin_module.endswith(PYTHON_EXTENSIONS):
plugin_module = plugin_module[:plugin_module.rfind('.py')]
else:
plugin_module = None
return plugin_module
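# Illustrative: '/opt/stackstorm/runners/noop/noop.py' (a made-up path) maps to module name
# 'noop'; files without a .py extension map to None and are skipped by the callers below.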
def _get_classes_in_module(module):
return [kls for name, kls in inspect.getmembers(module,
lambda member: inspect.isclass(member) and member.__module__ == module.__name__)]
def _get_plugin_classes(module_name):
return _get_classes_in_module(module_name)
def _get_plugin_methods(plugin_klass):
"""
Return a list of names of all the methods in the provided class.
Note: Abstract methods which are not implemented are excluded from the
list.
:rtype: ``list`` of ``str``
"""
methods = inspect.getmembers(plugin_klass, inspect.ismethod)
# Exclude inherited abstract methods from the parent class
method_names = []
for name, method in methods:
method_properties = method.__dict__
is_abstract = method_properties.get('__isabstractmethod__', False)
if is_abstract:
continue
method_names.append(name)
return method_names
def _validate_methods(plugin_base_class, plugin_klass):
'''
    XXX: This is hacky, but we'd like to validate that plugin_klass
    implements at least all the *abstract* methods defined on
    plugin_base_class.
'''
expected_methods = plugin_base_class.__abstractmethods__
plugin_methods = _get_plugin_methods(plugin_klass)
for method in expected_methods:
if method not in plugin_methods:
message = 'Class "%s" doesn\'t implement required "%s" method from the base class'
raise IncompatiblePluginException(message % (plugin_klass.__name__, method))
def _register_plugin(plugin_base_class, plugin_impl):
_validate_methods(plugin_base_class, plugin_impl)
plugin_base_class.register(plugin_impl)
def register_plugin_class(base_class, file_path, class_name):
"""
    Retrieve and register a plugin class from the provided file.
    This method also validates that the class implements all the abstract methods
    from the base plugin class.
    :param base_class: Base plugin class.
    :type base_class: ``class``
:param file_path: File absolute path to the plugin module file.
:type file_path: ``str``
:param class_name: Class name of a plugin.
:type class_name: ``str``
"""
plugin_dir = os.path.dirname(os.path.realpath(file_path))
_register_plugin_path(plugin_dir)
module_name = _get_plugin_module(file_path)
if module_name is None:
return None
module = imp.load_source(module_name, file_path)
klass = getattr(module, class_name, None)
if not klass:
raise Exception('Plugin file "%s" doesn\'t expose class named "%s"' %
(file_path, class_name))
_register_plugin(base_class, klass)
return klass
def register_plugin(plugin_base_class, plugin_abs_file_path):
registered_plugins = []
plugin_dir = os.path.dirname(os.path.realpath(plugin_abs_file_path))
_register_plugin_path(plugin_dir)
module_name = _get_plugin_module(plugin_abs_file_path)
if module_name is None:
return None
module = imp.load_source(module_name, plugin_abs_file_path)
klasses = _get_plugin_classes(module)
# Try registering classes in plugin file. Some may fail.
for klass in klasses:
try:
_register_plugin(plugin_base_class, klass)
registered_plugins.append(klass)
except Exception as e:
LOG.exception(e)
LOG.debug('Skipping class %s as it doesn\'t match specs.', klass)
continue
if len(registered_plugins) == 0:
raise Exception('Found no classes in plugin file "%s" matching requirements.' %
(plugin_abs_file_path))
return registered_plugins
def register_runner(module_name):
base_path = cfg.CONF.system.base_path
module_path = os.path.join(
"%s/runners/%s/%s.py" % (base_path, module_name, module_name)
)
if module_name not in RUNNER_MODULES_CACHE:
LOG.info('Loading runner module from "%s".', module_path)
RUNNER_MODULES_CACHE[module_name] = imp.load_source(module_name, module_path)
else:
LOG.info('Reusing runner module "%s" from cache.', module_path)
return RUNNER_MODULES_CACHE[module_name]
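# Illustrative: with cfg.CONF.system.base_path set to '/opt/stackstorm' (a made-up value),
# register_runner('noop') would load '/opt/stackstorm/runners/noop/noop.py' and cache the
# loaded module under the 'noop' key.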
def register_query_module(module_name):
base_path = cfg.CONF.system.base_path
module_path = os.path.join(
"%s/runners/%s/query/%s.py" % (base_path, module_name, module_name)
)
if module_name not in QUERIER_MODULES_CACHE:
LOG.info('Loading query module from "%s".', module_path)
QUERIER_MODULES_CACHE[module_name] = imp.load_source(module_name, module_path)
else:
LOG.info('Reusing query module "%s" from cache.', module_path)
return QUERIER_MODULES_CACHE[module_name]
ALLOWED_EXTS = ['.json', '.yaml', '.yml']
PARSER_FUNCS = {'.json': json.load, '.yml': yaml.safe_load, '.yaml': yaml.safe_load}
def load_meta_file(file_path):
if not os.path.isfile(file_path):
raise Exception('File "%s" does not exist.' % file_path)
file_name, file_ext = os.path.splitext(file_path)
if file_ext not in ALLOWED_EXTS:
raise Exception('Unsupported meta type %s, file %s. Allowed: %s' %
(file_ext, file_path, ALLOWED_EXTS))
with open(file_path, 'r') as f:
return PARSER_FUNCS[file_ext](f)
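# Illustrative: load_meta_file('/opt/stackstorm/packs/examples/pack.yaml') (a made-up path)
# parses the file with yaml.safe_load; a '.json' path would be parsed with json.load instead.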
|
|
"""The Hyperion component."""
from __future__ import annotations
import asyncio
from contextlib import suppress
import logging
from typing import Any, Callable, cast
from awesomeversion import AwesomeVersion
from hyperion import client, const as hyperion_const
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_TOKEN
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_get_registry,
)
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
CONF_INSTANCE_CLIENTS,
CONF_ON_UNLOAD,
CONF_ROOT_CLIENT,
DEFAULT_NAME,
DOMAIN,
HYPERION_RELEASES_URL,
HYPERION_VERSION_WARN_CUTOFF,
SIGNAL_INSTANCE_ADD,
SIGNAL_INSTANCE_REMOVE,
)
PLATFORMS = [LIGHT_DOMAIN, SWITCH_DOMAIN]
_LOGGER = logging.getLogger(__name__)
# Unique ID
# =========
# A config entry represents a connection to a single Hyperion server. The config entry
# unique_id is the server id returned from the Hyperion instance (a unique ID per
# server).
#
# Each server connection may create multiple entities. The unique_id for each entity is
# <server id>_<instance #>_<name>, where <server_id> will be the unique_id on the
# relevant config entry (as above), <instance #> will be the server instance # and
# <name> will be a unique identifying type name for each entity associated with this
# server/instance (e.g. "hyperion_light").
#
# The get_hyperion_unique_id method will create a per-entity unique id when given the
# server id, an instance number and a name.
# hass.data format
# ================
#
# hass.data[DOMAIN] = {
# <config_entry.entry_id>: {
# "ROOT_CLIENT": <Hyperion Client>,
# "ON_UNLOAD": [<callable>, ...],
# }
# }
def get_hyperion_unique_id(server_id: str, instance: int, name: str) -> str:
"""Get a unique_id for a Hyperion instance."""
return f"{server_id}_{instance}_{name}"
def split_hyperion_unique_id(unique_id: str) -> tuple[str, int, str] | None:
"""Split a unique_id into a (server_id, instance, type) tuple."""
data = tuple(unique_id.split("_", 2))
if len(data) != 3:
return None
try:
return (data[0], int(data[1]), data[2])
except ValueError:
return None
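# Illustrative round trip: get_hyperion_unique_id("abcd1234", 0, "hyperion_light") returns
# "abcd1234_0_hyperion_light", and split_hyperion_unique_id() recovers ("abcd1234", 0,
# "hyperion_light"); the maxsplit of 2 allows the trailing name itself to contain "_".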
def create_hyperion_client(
*args: Any,
**kwargs: Any,
) -> client.HyperionClient:
"""Create a Hyperion Client."""
return client.HyperionClient(*args, **kwargs)
async def async_create_connect_hyperion_client(
*args: Any,
**kwargs: Any,
) -> client.HyperionClient | None:
"""Create and connect a Hyperion Client."""
hyperion_client = create_hyperion_client(*args, **kwargs)
if not await hyperion_client.async_client_connect():
return None
return hyperion_client
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Hyperion component."""
hass.data[DOMAIN] = {}
return True
@callback
def listen_for_instance_updates(
hass: HomeAssistant,
config_entry: ConfigEntry,
add_func: Callable,
remove_func: Callable,
) -> None:
"""Listen for instance additions/removals."""
hass.data[DOMAIN][config_entry.entry_id][CONF_ON_UNLOAD].extend(
[
async_dispatcher_connect(
hass,
SIGNAL_INSTANCE_ADD.format(config_entry.entry_id),
add_func,
),
async_dispatcher_connect(
hass,
SIGNAL_INSTANCE_REMOVE.format(config_entry.entry_id),
remove_func,
),
]
)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up Hyperion from a config entry."""
host = config_entry.data[CONF_HOST]
port = config_entry.data[CONF_PORT]
token = config_entry.data.get(CONF_TOKEN)
hyperion_client = await async_create_connect_hyperion_client(
host, port, token=token, raw_connection=True
)
# Client won't connect? => Not ready.
if not hyperion_client:
raise ConfigEntryNotReady
version = await hyperion_client.async_sysinfo_version()
if version is not None:
with suppress(ValueError):
if AwesomeVersion(version) < AwesomeVersion(HYPERION_VERSION_WARN_CUTOFF):
_LOGGER.warning(
"Using a Hyperion server version < %s is not recommended -- "
"some features may be unavailable or may not function correctly. "
"Please consider upgrading: %s",
HYPERION_VERSION_WARN_CUTOFF,
HYPERION_RELEASES_URL,
)
# Client needs authentication, but no token provided? => Reauth.
auth_resp = await hyperion_client.async_is_auth_required()
if (
auth_resp is not None
and client.ResponseOK(auth_resp)
and auth_resp.get(hyperion_const.KEY_INFO, {}).get(
hyperion_const.KEY_REQUIRED, False
)
and token is None
):
await hyperion_client.async_client_disconnect()
raise ConfigEntryAuthFailed
# Client login doesn't work? => Reauth.
if not await hyperion_client.async_client_login():
await hyperion_client.async_client_disconnect()
raise ConfigEntryAuthFailed
# Cannot switch instance or cannot load state? => Not ready.
if (
not await hyperion_client.async_client_switch_instance()
or not client.ServerInfoResponseOK(await hyperion_client.async_get_serverinfo())
):
await hyperion_client.async_client_disconnect()
raise ConfigEntryNotReady
# We need 1 root client (to manage instances being removed/added) and then 1 client
# per Hyperion server instance which is shared for all entities associated with
# that instance.
hass.data[DOMAIN][config_entry.entry_id] = {
CONF_ROOT_CLIENT: hyperion_client,
CONF_INSTANCE_CLIENTS: {},
CONF_ON_UNLOAD: [],
}
async def async_instances_to_clients(response: dict[str, Any]) -> None:
"""Convert instances to Hyperion clients."""
if not response or hyperion_const.KEY_DATA not in response:
return
await async_instances_to_clients_raw(response[hyperion_const.KEY_DATA])
async def async_instances_to_clients_raw(instances: list[dict[str, Any]]) -> None:
"""Convert instances to Hyperion clients."""
registry = await async_get_registry(hass)
running_instances: set[int] = set()
stopped_instances: set[int] = set()
existing_instances = hass.data[DOMAIN][config_entry.entry_id][
CONF_INSTANCE_CLIENTS
]
server_id = cast(str, config_entry.unique_id)
# In practice, an instance can be in 3 states as seen by this function:
#
# * Exists, and is running: Should be present in HASS/registry.
        # * Exists, but is not running: Cannot add it yet, but entity may have been
# registered from a previous time it was running.
# * No longer exists at all: Should not be present in HASS/registry.
# Add instances that are missing.
for instance in instances:
instance_num = instance.get(hyperion_const.KEY_INSTANCE)
if instance_num is None:
continue
if not instance.get(hyperion_const.KEY_RUNNING, False):
stopped_instances.add(instance_num)
continue
running_instances.add(instance_num)
if instance_num in existing_instances:
continue
hyperion_client = await async_create_connect_hyperion_client(
host, port, instance=instance_num, token=token
)
if not hyperion_client:
continue
existing_instances[instance_num] = hyperion_client
instance_name = instance.get(hyperion_const.KEY_FRIENDLY_NAME, DEFAULT_NAME)
async_dispatcher_send(
hass,
SIGNAL_INSTANCE_ADD.format(config_entry.entry_id),
instance_num,
instance_name,
)
        # Remove entities that are not running instances on Hyperion.
for instance_num in set(existing_instances) - running_instances:
del existing_instances[instance_num]
async_dispatcher_send(
hass, SIGNAL_INSTANCE_REMOVE.format(config_entry.entry_id), instance_num
)
# Deregister entities that belong to removed instances.
for entry in async_entries_for_config_entry(registry, config_entry.entry_id):
data = split_hyperion_unique_id(entry.unique_id)
if not data:
continue
if data[0] == server_id and (
data[1] not in running_instances and data[1] not in stopped_instances
):
registry.async_remove(entry.entity_id)
hyperion_client.set_callbacks(
{
f"{hyperion_const.KEY_INSTANCE}-{hyperion_const.KEY_UPDATE}": async_instances_to_clients,
}
)
async def setup_then_listen() -> None:
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_setup(config_entry, platform)
for platform in PLATFORMS
]
)
assert hyperion_client
if hyperion_client.instances is not None:
await async_instances_to_clients_raw(hyperion_client.instances)
hass.data[DOMAIN][config_entry.entry_id][CONF_ON_UNLOAD].append(
config_entry.add_update_listener(_async_entry_updated)
)
hass.async_create_task(setup_then_listen())
return True
async def _async_entry_updated(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> None:
"""Handle entry updates."""
await hass.config_entries.async_reload(config_entry.entry_id)
async def async_unload_entry(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok and config_entry.entry_id in hass.data[DOMAIN]:
config_data = hass.data[DOMAIN].pop(config_entry.entry_id)
for func in config_data[CONF_ON_UNLOAD]:
func()
# Disconnect the shared instance clients.
await asyncio.gather(
*[
config_data[CONF_INSTANCE_CLIENTS][
instance_num
].async_client_disconnect()
for instance_num in config_data[CONF_INSTANCE_CLIENTS]
]
)
# Disconnect the root client.
root_client = config_data[CONF_ROOT_CLIENT]
await root_client.async_client_disconnect()
return unload_ok
|
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from vulpo.exception import VulpoClientError
from vulpo.scs.key import Key as SCSKey
from vulpo.scs.keyfile import KeyFile
from vulpo.utils import compute_hash
from vulpo.utils import get_utf8_value
class Key(SCSKey):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`vulpo.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
        self.metageneration = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
def handle_restore_headers(self, response):
return
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
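                # The header value is a comma-separated list of "<alg>=<base64 digest>"
                # pairs, e.g. (illustrative): "crc32c=AAAAAA==,md5=1B2M2Y8AsgTpgAmY7PhCfg==".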
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
# For GCS we need to include the object generation in the query args.
# The rest of the processing is handled in the parent class.
if self.generation:
if query_args:
query_args += '&'
query_args += 'generation=%s' % self.generation
super(Key, self).open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries,
response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file-like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transferred from GCS and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
:type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
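    # Minimal usage sketch for compute_hash() (illustrative only; "key" is a
    # hypothetical Key instance and hashlib is assumed to be importable, per
    # the hashlib.md5-style constructors mentioned in the docstring above):
    #
    #   with open('payload.bin', 'rb') as fp:
    #       hex_digest, b64_digest = key.compute_hash(fp, hashlib.md5)
    #
    # The file pointer is left at its original position, and key.size is
    # updated as a side effect.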
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
        :param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
            i.e. if uploading the full file, it should point at the
            start of the file. Normally when a file is opened for
            reading, the fp will point at the first byte. See the
            size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
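    # Hedged usage sketch for the grant convenience methods above; the key
    # instance, addresses and ids below are placeholders, not real accounts:
    #
    #   key.add_email_grant('READ', 'someone@example.com')
    #   key.add_user_grant('FULL_CONTROL', 'canonical-user-id')
    #   key.add_group_email_grant('READ', 'group@example.com')
    #
    # Each call fetches the current ACL, appends a single grant and PUTs the
    # ACL back, so every grant costs one extra GET/PUT round trip.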
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None, rewind=False,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`vulpo.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
        :param size: (optional) The maximum number of bytes to read from
            the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the
            file up into different ranges to be uploaded. If not
            specified, the default behaviour is to read all bytes
            from the file pointer. Fewer bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from
it. The default behaviour is False which reads from
the current position of the file pointer (fp).
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
:rtype: int
:return: The number of bytes written to the key.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
raise VulpoClientError(
'"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and SCS use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if size:
self.size = size
else:
                # If md5 is provided, we still need the size, so
                # calculate it based on bytes to the end of the content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS
:type headers: dict
:param headers: Additional headers to pass along with the request to GS.
:type replace: bool
:param replace: If True, replaces the contents of the file if it
already exists.
:type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing
            the total number of bytes that need to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`vulpo.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
        :param headers: Additional headers to pass along with the
            request to GCS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`vulpo.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO.StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
        The stream object is not seekable and the total size is not known.
        This has the implication that we can't specify the Content-Length
        and Content-MD5 headers up front. So for huge uploads, the delay in
        calculating MD5 is avoided, at the cost of being unable to verify
        the integrity of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`vulpo.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`vulpo.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
def compose(self, components, content_type=None, headers=None):
"""Create a new object from a sequence of existing objects.
The content of the object representing this Key will be the
concatenation of the given object sequence. For more detail, visit
https://developers.google.com/storage/docs/composite-objects
        :type components: list of Keys
        :param components: List of gs.Keys representing the component objects.
        :type content_type: string
        :param content_type: (optional) Content type for the new composite object.
"""
compose_req = []
for key in components:
if key.bucket.name != self.bucket.name:
raise VulpoClientError(
'GCS does not support inter-bucket composing')
generation_tag = ''
if key.generation:
generation_tag = ('<Generation>%s</Generation>'
% str(key.generation))
compose_req.append('<Component><Name>%s</Name>%s</Component>' %
(key.name, generation_tag))
compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
''.join(compose_req))
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
resp = self.bucket.connection.make_request(
'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
headers=headers, query_args='compose',
data=get_utf8_value(compose_req_xml))
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
# Return the generation so that the result URI can be built with this
# for automatic parallel uploads.
return resp.getheader('x-goog-generation')
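# Hedged usage sketch for compose(): build a composite object from parts that
# already live in the same bucket. Bucket and object names are placeholders,
# and the bucket is assumed to expose boto-style new_key()/get_key() helpers:
#
#   dst = bucket.new_key('combined.log')
#   parts = [bucket.get_key('part-1.log'), bucket.get_key('part-2.log')]
#   generation = dst.compose(parts, content_type='text/plain')
#
# compose() raises VulpoClientError if any component is in a different bucket
# and returns the x-goog-generation header of the newly written object.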
|
|
import os
import unicodedata
from defcon import Font, UnicodeData, Component
from defcon.tools import unicodeTools
# Tool that walks a directory of UFOs and makes them componented again. This is very strict, things have to match 100%,
# but it does get you some of the way toward recomponenting a font.
# It relies on unicode decomposition to know what pieces to try as components for a glyph.
# a mapping of unicodes to accent names. Edit as need be.
accent_mapping = {u'\u0300': ['grave'], u'\u0301': ['acute'], u'\u0302': ['circumflex'], u'\u0303': ['tilde'], u'\u0304': ['macron'], u'\u0305': ['macron'], u'\u0306': ['breve'], u'\u0307': ['dotaccent'], u'\u0308': ['dieresis'], u'\u030A': ['ring'], u'\u030B': ['hungarumlaut'], u'\u030C': ['caron', 'space_uni0326'], u'\u0327': ['cedilla'], u'\u0328': ['ogonek'], u'\u0326': ['space_uni0326']}
# glyphs that you want to have a second look at.
double_check = {'space.lining': ['space'], 'dollar.lining': ['dollar'], 'cent.lining': ['cent'], 'Euro.lining': ['Euro'], 'sterling.lining': ['sterling'], 'yen.lining': ['yen'], 'florin.lining': ['florin'], 'zero.lining': ['zero'], 'one.lining': ['one'], 'two.lining': ['two'], 'three.lining': ['three'], 'four.lining': ['four'], 'five.lining': ['five'], 'six.lining': ['six'], 'seven.lining': ['seven'], 'eight.lining': ['eight'], 'nine.lining': ['nine'],}
# Glyphs that are composites that a unicode decomposition isn't going to pick up on
composites = {'oneeighth': ['one', 'fraction', 'eight'], 'onesixth': ['one', 'fraction', 'six'], 'onefifth': ['one', 'fraction', 'five'], 'onethird': ['one', 'fraction', 'three'], 'threeeighths': ['three', 'fraction', 'eight'], 'twofifths': ['two', 'fraction', 'five'], 'threefifths': ['three', 'fraction', 'five'], 'fiveeighths': ['five', 'fraction', 'eight'], 'twothirds': ['two', 'fraction', 'three'], 'fourfifths': ['four', 'fraction', 'five'], 'fivesixths': ['five', 'fraction', 'six'], 'seveneighths': ['seven', 'fraction', 'eight'], 'guillemotleft': ['guilsinglleft'], 'guillemotright': ['guilsinglright'], 'onehalf': ['one', 'fraction', 'two'], 'onequarter': ['one', 'fraction', 'four'], 'threequarters': ['three', 'fraction', 'four'], 'germandbls.scRound': ['S.sc'], 'germandbls.sc': ['S.sc'], }
def _make_test_glyph():
import robofab.world
f = robofab.world.RFont()
newGlyph = f.newGlyph("test_glyph", clear=True)
newGlyph.width = 150
pen = newGlyph.getPen()
pen.moveTo((0,0))
pen.lineTo((10,0))
pen.lineTo((20,20))
pen.lineTo((0,30))
pen.lineTo((0,0))
pen.closePath()
newGlyph.update()
return newGlyph
def _make_test_glyphs():
import robofab.world
f = robofab.world.RFont()
newGlyph = f.newGlyph("test_glyph", clear=True)
newGlyph.width = 150
pen = newGlyph.getPen()
pen.moveTo((0,0))
pen.lineTo((10,0))
pen.lineTo((20,20))
pen.lineTo((0,30))
pen.lineTo((0,0))
pen.closePath()
newGlyph.update()
accentedGlyph = f.newGlyph("test_glyph_accented", clear=True)
accentedGlyph.width = 150
pen = accentedGlyph.getPen()
pen.moveTo((0,0))
pen.lineTo((10,0))
pen.lineTo((20,20))
pen.lineTo((0,30))
pen.lineTo((0,0))
pen.closePath()
pen.moveTo((5,35))
pen.lineTo((10,35))
pen.lineTo((10,40))
pen.lineTo((5,40))
pen.lineTo((5,35))
pen.closePath()
pen.moveTo((15,35))
pen.lineTo((20,35))
pen.lineTo((20,40))
pen.lineTo((15,40))
pen.lineTo((15,35))
pen.closePath()
accentedGlyph.update()
componetGlyph = f.newGlyph("test_glyph_component", clear=True)
componetGlyph.width = 200
pen = componetGlyph.getPen()
pen.moveTo((25,55))
pen.lineTo((30,55))
pen.lineTo((30,60))
pen.lineTo((25,60))
pen.lineTo((25,55))
pen.closePath()
pen.moveTo((35,55))
pen.lineTo((40,55))
pen.lineTo((40,60))
pen.lineTo((35,60))
pen.lineTo((35,55))
pen.closePath()
componetGlyph.update()
componetGlyphWrong = f.newGlyph("test_glyph_component_wrong", clear=True)
componetGlyphWrong.width = 200
pen = componetGlyphWrong.getPen()
pen.moveTo((25,55))
pen.lineTo((30,55))
pen.lineTo((30,60))
pen.lineTo((25,60))
pen.lineTo((25,55))
pen.closePath()
pen.moveTo((40,60))
pen.lineTo((45,60))
pen.lineTo((45,65))
pen.lineTo((40,65))
pen.lineTo((40,60))
pen.closePath()
componetGlyphWrong.update()
return newGlyph, accentedGlyph, componetGlyph, componetGlyphWrong
def _findAvailablePathName(path):
import time
folder = os.path.dirname(path)
fileName = os.path.basename(path)
fileName, extension = os.path.splitext(fileName)
stamp = time.strftime("%Y-%m-%d %H-%M-%S %Z")
newFileName = "%s (%s)%s" % (fileName, stamp, extension)
newPath = os.path.join(folder, newFileName)
    # intentionally break to prevent a file overwrite
    # this could happen if the user has a directory full
    # of files with future time-stamped file names.
    # not likely, but avoid it all the same.
assert not os.path.exists(newPath)
return newPath
def _get_pt_digest(glyph):
"""
Returns a list of tuples that represent the (x,y) difference between the last point and the current point.
This starts with the first point in the contour compared with the last point, and then the second point compared to the first, etc.
>>> glyph = _make_test_glyph()
>>> _get_pt_digest(glyph)
[(0, [(0, 30), (-10, 0), (-10, -20), (20, -10)])]
"""
digest = []
for contour in glyph:
contour_digest = []
for i, point in enumerate(contour):
            if i == 0:
contour_digest.append((contour[len(contour)-1].x-point.x, contour[len(contour)-1].y-point.y))
else:
contour_digest.append((contour[i-1].x-point.x, contour[i-1].y-point.y))
digest.append((glyph.contourIndex(contour), contour_digest))
return digest
def _shift_in_place(l, n):
"""
Shifts a list in place with n as the number of slots to shift the head
>>> _shift_in_place([1,2,3,4], 1)
[2, 3, 4, 1]
>>> _shift_in_place([1,2,3,4], 2)
[3, 4, 1, 2]
"""
n = n % len(l)
head = l[:n]
l[:n] = []
l.extend(head)
return l
def _get_bounding_bounds(glyph, contour_list):
"""
    Takes a glyph and a list of contour indexes and returns the overall bounding box, so that widths/heights can be
    compared with a component to be sure you don't replace something that has the same contours arranged differently.
>>> glyph, accentedGlyph, accent, accent_wrong = _make_test_glyphs()
>>> _get_bounding_bounds(glyph, [0])
(0, 0, 20, 30)
>>> _get_bounding_bounds(accentedGlyph, [0,1])
(0, 0, 20, 40)
>>> _get_bounding_bounds(accentedGlyph, [0,1,2])
(0, 0, 20, 40)
>>> _get_bounding_bounds(accentedGlyph, [2])
(15, 35, 20, 40)
"""
start = glyph[contour_list[0]].bounds
for c in contour_list:
xMin, yMin, xMax, yMax = glyph[c].bounds
if start[0] < xMin: xMin = start[0]
if start[1] < yMin: yMin = start[1]
if start[2] > xMax: xMax = start[2]
if start[3] > yMax: yMax = start[3]
start = xMin, yMin, xMax, yMax
return start
def _digest_equal(d1, d2):
"""
    Looks to see if two digests are the same. First step is easy, then it shifts
    the second digest around in place to see if the start points were just different.
>>> _digest_equal([(0,10), (10,0), (0,10), (-10,0)],[(0,10), (10,0), (0,10), (-10,0)])
True
>>> _digest_equal([(0,10), (10,0), (0,10), (-10,0)],[(10,0), (0,10), (-10,0), (0,10)])
True
>>> _digest_equal([(0,10), (10,0), (0,10), (-10,0)],[(0,10), (10,0), (0,10), (0,0)])
False
"""
if d1 == d2:
return True
else:
count = len(d2)
while count > 0:
count = count-1
d2 = _shift_in_place(d2,1)
if d1 == d2:
return True
return False
def _decompose_helper(font, uniValue, parts):
letterCategories = ("Ll", "Lu", "Lt", "Lo")
try:
c = unichr(uniValue)
# see not in category function
except ValueError:
return -1
decomposition = unicodedata.decomposition(c)
if decomposition.startswith("<"):
return -1
if " " not in decomposition:
return -1
parts_internal = decomposition.split(" ")
unichrs = [unichr(int(i, 16)) for i in parts_internal if i]
letters = [ord(i) for i in unichrs if unicodedata.category(i) in letterCategories]
parts = parts + [i for i in unichrs if i not in letters]
if len(letters) != 1:
return -1
decomposedUniValue = letters[0]
if _decompose_helper(font, decomposedUniValue, parts) != -1:
furtherDecomposedUniValue, furtherParts = _decompose_helper(font, decomposedUniValue, parts)
if _decompose_helper(font, furtherDecomposedUniValue, furtherParts) != -1:
furtherFurtherDecomposedUniValue, furtherFurtherParts = _decompose_helper(font, furtherDecomposedUniValue, furtherParts)
decomposedUniValue = furtherFurtherDecomposedUniValue
parts = furtherFurtherParts
else:
decomposedUniValue = furtherDecomposedUniValue
parts = furtherParts
return decomposedUniValue, parts
def decompose_glyph(font, glyphname, glyphs, allowPseudoUnicode=True):
if allowPseudoUnicode:
uniValue = UnicodeData.pseudoUnicodeForGlyphName(font, glyphname)
else:
uniValue = UnicodeData.unicodeForGlyphName(font, glyphname)
if uniValue is None:
return [glyphname, ]
else:
decomposed = _decompose_helper(font, uniValue, [])
if decomposed == -1:
parts = -1
else:
parts = []
for g in decomposed[1]:
if g in accent_mapping.keys():
for a in accent_mapping[g]:
parts.append(a)
elif font.glyphNameForUnicode(ord(g)) is not None:
parts.append(font.glyphNameForUnicode(ord(g)))
possible_components = []
if parts is not None and parts != -1:
for part in parts:
if part in glyphs:
for x in glyphs[part]:
possible_components.append(x)
else:
if part not in possible_components:
possible_components.append(part)
return possible_components
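# Illustrative sketch of what decompose_glyph() returns, assuming a
# hypothetical font that contains 'A', 'acute' and 'Aacute': U+00C1 decomposes
# to U+0041 + U+0301, and U+0301 maps to 'acute' via accent_mapping, so the
# possible components come back as something like ['A', 'acute'] (plus any
# suffixed variants collected in the glyphs dict passed in).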
def compare_glyph(font, glyph, component_glyph):
"""
    Looks at a glyph and a possible component to see if there is a match.
    Returns the lowest, left-most corner of the outlines the component would replace and the contour indexes, if it can.
>>> glyph, accentedGlyph, accent, accent_wrong = _make_test_glyphs()
>>> compare_glyph(font, glyph, accent)
    -1
>>> compare_glyph(font, accentedGlyph, glyph)
((0, 0), [0])
>>> compare_glyph(font, accentedGlyph, accent)
((5, 35), [1, 2])
>>> compare_glyph(font, accentedGlyph, accentedGlyph)
((0, 0), [0, 1, 2])
>>> compare_glyph(font, accentedGlyph, accent_wrong)
    -1
"""
search = []
contours_replaced = []
    # make sure that a component glyph isn't a component itself
    if len(component_glyph) == 0 and len(component_glyph.components) != 0:
        component_glyph = font[component_glyph.components[0].baseGlyph]
glyph_digest = _get_pt_digest(glyph)
component_digest = _get_pt_digest(component_glyph)
for d in component_digest:
for d1 in glyph_digest:
if _digest_equal(d1[1], d[1]):
search.append((d1[0], d[0]))
if d1[0] not in contours_replaced:
contours_replaced.append(d1[0])
else:
pass
test = {}
for x in search:
if x[1] not in test:
test[x[1]] = x[0]
    if len(search) != 0 and len(test) == len(component_glyph):
# Need to figure out if we've hit the case of contours matching
# but they aren't actually equal in terms of placement (think
# of a dieresis that has had the distance between dots changed)
        component_bounds = component_glyph.bounds
        replace_bounds = _get_bounding_bounds(glyph, [x[0] for x in search])
        if component_bounds[2] - component_bounds[0] == replace_bounds[2] - replace_bounds[0] and component_bounds[3] - component_bounds[1] == replace_bounds[3] - replace_bounds[1]:
start = (glyph[search[0][0]].bounds[0], glyph[search[0][0]].bounds[1])
for i in search:
if glyph[i[0]].bounds[0] < start[0]:
start = (glyph[i[0]].bounds[0], start[1])
if glyph[i[0]].bounds[1] < start[1]:
start = (start[0], glyph[i[0]].bounds[1])
return start, contours_replaced
else:
return -1
else:
return -1
def recomponet(path_to_orignal, path_to_new=None):
assert os.path.exists(path_to_orignal)
font = Font(path_to_orignal)
if path_to_new is not None:
assert os.path.exists(path_to_new)
font.save(path_to_new)
font = Font(path_to_new)
else:
new_path = _findAvailablePathName(path_to_orignal)
font.save(new_path)
font = Font(new_path)
ordered_glyphs = {}
clean_up = []
for key in font.keys():
parts = key.split('.')
if len(parts) == 1:
part = key
if key.endswith('comb'):
part = key[:-4]
clean_up.append(key)
if part not in ordered_glyphs:
ordered_glyphs[part] = [key, ]
else:
glyphs = ordered_glyphs[part]
if key not in glyphs:
glyphs.append(key)
ordered_glyphs[part] = glyphs
else:
part = parts[0]
if part.endswith('comb'):
part = parts[0][:-4]
clean_up.append(key)
if part not in ordered_glyphs:
ordered_glyphs[part] = [key, ]
else:
glyphs = ordered_glyphs[part]
if key not in glyphs:
glyphs.append(key)
ordered_glyphs[part] = glyphs
for i in clean_up:
if i not in ordered_glyphs:
part = i[:-4]
if part in ordered_glyphs:
glyphs = ordered_glyphs[part]
ordered_glyphs[i] = glyphs
# Cleanup for the i
i = ordered_glyphs['i']
i.append('dotlessi')
ordered_glyphs['i'] = i
# Additional cleanup for the pesky commaaccent
if 'uni0327' not in ordered_glyphs:
ordered_glyphs['uni0327'] = ['uni0326', ]
else:
if 'uni0326' not in ordered_glyphs['uni0327']:
glyphs = ordered_glyphs['uni0327']
glyphs.append('uni0326')
ordered_glyphs['uni0327'] = glyphs
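    # At this point ordered_glyphs groups suffixed and 'comb' variants under
    # their base names. For a hypothetical font it might look like:
    #   {'a': ['a', 'a.sc'], 'grave': ['grave', 'gravecomb'],
    #    'gravecomb': ['grave', 'gravecomb'], 'uni0327': ['uni0327', 'uni0326']}
    # so decompose_glyph() can offer every variant as a candidate component.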
found = []
for glyph in font:
        if len(glyph) != 0:
parts = decompose_glyph(font.unicodeData, glyph.name, ordered_glyphs)
if len(parts) > 1:
print 'normal'
print glyph.name
for part in parts:
if part in font.keys() and compare_glyph(font, glyph, font[part]) is not -1:
orgin, delete = compare_glyph(font, glyph, font[part])
if len(font[part]) is 0 and len(font[part].components) is not 0:
part = font[part].components[0].baseGlyph
found.append(glyph.name)
for x in [glyph[x] for x in delete]:
glyph.removeContour(x)
component = Component()
component.baseGlyph = part
glyph.appendComponent(component)
xMin, yMin, xMax, yMax = component.bounds
moveX = orgin[0] - xMin
moveY = orgin[1] - yMin
component.move((moveX, moveY))
elif glyph.name in double_check.keys():
parts = double_check[glyph.name]
print glyph.name
print 'double check'
print parts
for part in parts:
print part
if part in font.keys() and compare_glyph(font, glyph, font[part]) is not -1:
orgin, delete = compare_glyph(font, glyph, font[part])
if len(font[part]) is 0 and len(font[part].components) is not 0:
part = font[part].components[0].baseGlyph
found.append(glyph.name)
for x in [glyph[x] for x in delete]:
glyph.removeContour(x)
component = Component()
component.baseGlyph = part
glyph.appendComponent(component)
xMin, yMin, xMax, yMax = component.bounds
moveX = orgin[0] - xMin
moveY = orgin[1] - yMin
component.move((moveX, moveY))
print 'done'
break
else:
print part
print 'did not check out'
elif glyph.name in composites.keys():
preparts = composites[glyph.name]
parts = []
for p in preparts:
parts.append(p)
if p in ordered_glyphs:
for x in ordered_glyphs[p]:
parts.append(x)
print glyph.name
print 'composite'
print parts
for part in parts:
if compare_glyph(font, glyph, font[part]) is not -1:
orgin, delete = compare_glyph(font, glyph, font[part])
if len(font[part]) is 0 and len(font[part].components) is not 0:
part = font[part].components[0].baseGlyph
found.append(glyph.name)
for x in [glyph[x] for x in delete]:
glyph.removeContour(x)
component = Component()
component.baseGlyph = part
glyph.appendComponent(component)
xMin, yMin, xMax, yMax = component.bounds
moveX = orgin[0] - xMin
moveY = orgin[1] - yMin
component.move((moveX, moveY))
font.save()
print 'Found:'
print ' '
for x in found:
print x
print '----------------'
print str(len(found)) + ' Glyphs'
def walk(someFolder, extension):
extension = extension.lower()
files = []
names = os.listdir(someFolder)
for n in names:
p = os.path.join(someFolder, n)
        if n.lower().find(extension) != -1:
files.append(p)
return files
def main():
fonts = walk(os.getcwd(), '.ufo')
for font in fonts:
print font
recomponet(font)
if __name__ == "__main__":
main()
|
|
import sys, time, os, array, optparse
usage = "usage: %prog [options] BSMAP_MAPPING_FILES"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-o", "--out", dest="outfile", metavar="FILE", help="output file name. (required)", default="")
parser.add_option("-d", "--ref", dest="reffile", metavar="FILE", help="reference genome fasta file. (required)", default="")
parser.add_option("-c", "--chr", dest="chroms", metavar="CHR", help="process only specified chromosomes, separated by ','. [default: all]\nexample: --chroms=chr1,chr2", default=[])
parser.add_option("-s", "--sam-path", dest="sam_path", metavar="PATH", help="path to samtools. [default: none]", default='')
parser.add_option("-u", "--unique", action="store_true", dest="unique", help="process only unique mappings/pairs.", default=False)
parser.add_option("-p", "--pair", action="store_true", dest="pair", help="process only properly paired mappings.", default=False)
parser.add_option("-z", "--zero-meth", action="store_true", dest="meth0", help="report loci with zero methylation ratios.", default=False)
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", help="don't print progress on stderr.", default=False)
parser.add_option("-r", "--remove-duplicate", action="store_true", dest="rm_dup", help="remove duplicated reads.", default=False)
parser.add_option("-t", "--trim-fillin", dest="trim_fillin", type="int", metavar='N', help="trim N end-repairing fill-in nucleotides. [default: 2]", default=2)
parser.add_option("-g", "--combine-CpG", action="store_true", dest="combine_CpG", help="combine CpG methylaion ratios on both strands.", default=False)
parser.add_option("-m", "--min-depth", dest="min_depth", type="int", metavar='FOLD', help="report loci with sequencing depth>=FOLD. [default: 0]", default=0)
parser.add_option("-n", "--no-header", action="store_true", dest="no_header", help="don't print a header line", default=False)
parser.add_option("-i", "--ct-snp", dest="CT_SNP", help='how to handle CT SNP ("no-action", "correct", "skip"), default: "correct".', default="correct")
options, infiles = parser.parse_args()
## Add any() by Shujia Huang 2012-10-10
try:
any
except NameError:
def any(s):
for v in s:
if v:
return True
return False
# Add end
if len(options.reffile) == 0: parser.error("Missing reference file, use -d or --ref option.")
if len(options.outfile) == 0: parser.error("Missing output file name, use -o or --out option.")
if len(infiles) == 0: parser.error("Require at least one BSMAP_MAPPING_FILE.")
if any(options.chroms): options.chroms = options.chroms.split(',')
CT_SNP_val = {"no-action": 0, "correct": 1, "skip": 2}
try: options.CT_SNP = CT_SNP_val[options.CT_SNP.lower()]
except: parser.error('Invalid -i value, select "no-action", "correct" or "skip"')
if options.min_depth < 0: parser.error('Invalid -m value, must >= 0')
if options.trim_fillin < 0: parser.error('Invalid -t value, must >= 0')
if any(options.sam_path):
if options.sam_path[-1] != '/': options.sam_path += '/'
################### Add context_type by Shujia Huang 2012-10-10 ############
def context_type ( context, strand, is_head ) :
type_map = { 'A' : 'H', 'T' : 'H', 'C' : 'H', 'G' : 'G', 'N' : 'H', 'W' : 'W', 'K' : 'K', 'Y' : 'Y', 'S' : 'S', 'M' : 'M', 'R' : 'R' }
ale_map = { 'C' : 'G', 'G' : 'C', 'A' : 'T', 'T' : 'A', 'N' : 'N', 'W' : 'S', 'S' : 'W', 'K' : 'M', 'M' : 'K', 'Y' : 'R', 'R' : 'Y' }
#if strand == "-" : context = ''.join(["ATCGNWKY"["TAGCNSMR".index(i)] for i in context[::-1]]) # Reversal complementary
if strand == "-" : context = ''.join([ ale_map[ale] for ale in context[::-1] ]) # Reversal complementary
if len(context) < 5 :
if strand == "+" and is_head == 1 : context = context[2:]
if strand == "-" and is_head == 0 : context = context[2:]
else : # len(context) == 5
context = context[2:]
if len( context ) > 1 and context[0] == "C" and context[1] == "G":
context = context[0:2]
else :
context = context[0] + ''.join( [ type_map[i] for i in context[1:] ] ) # Just get 3 bases
return context
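# Worked examples for context_type(), using made-up 5-base reference windows
# with the cytosine (or guanine on the minus strand) at index 2; they map to
# the usual CG / CHG / CHH methylation contexts:
#   context_type('TACGA', '+', 0)  ->  'CG'
#   context_type('TTCAG', '+', 0)  ->  'CHG'
#   context_type('TACTA', '+', 0)  ->  'CHH'
#   context_type('TCGAA', '-', 0)  ->  'CG'   (reverse-complemented first)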
###################### Add End ###################################
def disp(txt, nt=0):
if not options.quiet: print >> sys.stderr, ''.join(['\t' for i in xrange(nt)]+['@ ',time.asctime(),': ',txt])
def get_alignment(line):
col = line.split('\t')
if sam_format:
flag = col[1]
if 'u' in flag: return []
if options.unique and 's' in flag: return []
if options.pair and 'P' not in flag: return []
cr, pos, cigar, seq, strand, insert = col[2], int(col[3])-1, col[5], col[9], '', int(col[8])
if cr not in options.chroms: return []
for aux in col[11:]:
if aux[:5] == 'ZS:Z:':
strand = aux[5:7]
break
assert strand, 'missing strand information "ZS:Z:xx"'
gap_pos, gap_size = 0, 0
while 'I' in cigar or 'D' in cigar:
for sep in 'MID':
try: gap_size = int(cigar.split(sep, 1)[0])
except ValueError: continue
break
if sep == 'M': gap_pos += gap_size
elif sep == 'I': seq = seq[:gap_pos] + seq[gap_pos+gap_size:]
elif sep == 'D':
seq = seq[:gap_pos] + '-' * gap_size + seq[gap_pos:]
gap_pos += gap_size
cigar = cigar[cigar.index(sep)+1:]
else:
flag = col[3][:2]
if flag == 'NM' or flag == 'QC': return []
if options.unique and flag != 'UM': return []
if options.pair and col[7] == '0': return []
seq, strand, cr, pos, insert, mm = col[1], col[6], col[4], int(col[5])-1, int(col[7]), col[9]
if cr not in options.chroms: return []
if ':' in mm:
tmp = mm.split(':')
gap_pos, gap_size = int(tmp[1]), int(tmp[2])
if gap_size < 0: seq = seq[:gap_pos] + seq[gap_pos-gap_size:] # insertion on reference
else: seq = seq[:gap_pos] + '-' * gap_size + seq[gap_pos:]
if pos + len(seq) > len(ref[cr]): return []
if options.rm_dup: # remove duplicate hits
if strand == '+-' or strand == '-+': frag_end, direction = pos+len(seq), 2
else: frag_end, direction = pos, 1
if coverage[cr][frag_end] & direction: return []
coverage[cr][frag_end] |= direction
if options.trim_fillin > 0: # trim fill in nucleotides
if strand == '+-': seq = seq[:-options.trim_fillin]
elif strand == '--': seq, pos = seq[options.trim_fillin:], pos+options.trim_fillin
elif insert != 0 and len(seq) > abs(insert) - options.trim_fillin:
trim_nt = len(seq) - (abs(insert) - options.trim_fillin)
if strand == '++': seq = seq[:-trim_nt]
elif strand == '-+': seq, pos =seq[trim_nt:], pos+trim_nt
if sam_format and insert > 0: seq = seq[:int(col[7])-1-pos] # remove overlapped regions in paired hits, SAM format only
return (seq, strand[0], cr, pos)
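# Worked example of the CIGAR normalisation above (SAM input, values made up):
# for cigar '5M2I10M' the loop first consumes '5M' (gap_pos becomes 5), then
# '2I' drops the two inserted read bases (seq[:5] + seq[7:]), leaving a
# 15-base sequence that lines up one-to-one with the reference; a 'D' entry
# would instead pad seq with '-' characters so coordinates stay aligned.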
ref, cr, seq = {}, '', ''
disp('reading reference %s ...' % options.reffile)
for line in open(options.reffile):
if line[0] == '>':
if any(cr):
if len(options.chroms) == 0 or cr in options.chroms: ref[cr] = seq.upper()
cr, seq = line[1:-1].split()[0], ''
else: seq += line.strip()
if len(options.chroms) == 0 or cr in options.chroms: ref[cr] = seq.upper()
del seq
meth, depth, coverage, meth1, depth1 = {}, {}, {}, {}, {}
for cr in ref:
meth[cr] = array.array('H', [0]) * len(ref[cr])
depth[cr] = array.array('H', [0]) * len(ref[cr])
if options.rm_dup: coverage[cr] = array.array('B', [0]) * len(ref[cr])
if options.CT_SNP > 0:
meth1[cr] = array.array('H', [0]) * len(ref[cr])
depth1[cr] = array.array('H', [0]) * len(ref[cr])
options.chroms = set(ref.keys())
BS_conversion = {'+': ('C','T','G','A'), '-': ('G','A','C','T')}
nmap = 0
for infile in infiles:
nline = 0
disp('reading %s ...' % infile)
if infile[-4:].upper() == '.SAM': sam_format, fin = True, os.popen('%ssamtools view -XS %s' % (options.sam_path, infile))
elif infile[-4:].upper() == '.BAM': sam_format, fin = True, os.popen('%ssamtools view -X %s' % (options.sam_path, infile))
else: sam_format, fin = False, open(infile)
for line in fin:
nline += 1
if nline % 10000000 == 0: disp('read %d lines' % nline, nt=1)
map_info = get_alignment(line)
if len(map_info) == 0: continue
seq, strand, cr, pos = map_info
depthcr = depth[cr]
if pos + len(seq) > len(depthcr): continue
nmap += 1
methcr = meth[cr]
refseq = ref[cr][pos:pos+len(seq)]
match, convert, rc_match, rc_convert = BS_conversion[strand]
index = refseq.find(match)
while index >= 0:
if depthcr[pos+index] < 65535:
if seq[index] == convert: depthcr[pos+index] += 1
elif seq[index] == match:
methcr[pos+index] += 1
depthcr[pos+index] += 1
index = refseq.find(match, index+1)
if options.CT_SNP == 0: continue
methcr1 = meth1[cr]
depthcr1 = depth1[cr]
index = refseq.find(rc_match)
while index >= 0:
if depthcr1[pos+index] < 65535:
if seq[index] == rc_convert: depthcr1[pos+index] += 1
if seq[index] == rc_match:
methcr1[pos+index] += 1
depthcr1[pos+index] += 1
index = refseq.find(rc_match, index+1)
fin.close()
if options.combine_CpG:
disp('combining CpG methylation from both strands ...')
for cr in depth:
        depthcr, methcr, refcr = depth[cr], meth[cr], ref[cr]
if options.CT_SNP > 0: depthcr1, methcr1 = depth1[cr], meth1[cr]
pos = refcr.find('CG')
while pos >= 0:
if depthcr[pos] + depthcr[pos+1] <= 65535:
depthcr[pos] += depthcr[pos+1]
methcr[pos] += methcr[pos+1]
else:
depthcr[pos] = (depthcr[pos] + depthcr[pos+1]) / 2
methcr[pos] = (methcr[pos] + methcr[pos+1]) / 2
depthcr[pos+1] = 0
methcr[pos+1] = 0
if options.CT_SNP > 0:
if depthcr1[pos] + depthcr1[pos+1] <= 65535:
depthcr1[pos] += depthcr1[pos+1]
methcr1[pos] += methcr1[pos+1]
else:
depthcr1[pos] = (depthcr1[pos] + depthcr1[pos+1]) / 2
methcr1[pos] = (methcr1[pos] + methcr1[pos+1]) / 2
pos = refcr.find('CG', pos+2)
disp('writing %s ...' % options.outfile)
ss = {'C': '+', 'G': '-'}
fout = open(options.outfile, 'w')
if not options.no_header:
# fout.write('chr\tpos\tstrand\tcontext\tratio\teff_CT_count\tC_count\tCT_count\trev_G_count\trev_GA_count\tCI_lower\tCI_upper\n')
fout.write('#chr\tpos\tstrand\tcontext_type\tcontext\tratio\tmethy_C\tN_methy_C\tCT_count\trev_G_count\trev_GA_count\tCI_lower\tCI_upper\n')
########################## Some of these codes have been modified by Shujia Huang from here to the end # Just mark #############################
z95, z95sq = 1.96, 1.96 * 1.96
nc, nd, flag, dep0 = 0, 0, 0, options.min_depth
for cr in sorted(depth.keys()):
depthcr, methcr, refcr = depth[cr], meth[cr], ref[cr]
if options.CT_SNP > 0: depthcr1, methcr1 = depth1[cr], meth1[cr]
for i, dd in enumerate(depthcr):
if refcr[i] is not "C" and refcr[i] is not "G" : continue
if dd < dep0: continue
if options.CT_SNP > 0:
m1, d1 = methcr1[i], depthcr1[i]
if m1 != d1:
#if options.CT_SNP == 2: continue
d = float(dd) * m1 / d1
else: d = float(dd)
else: d = float(dd)
if dd > 0 :
nc += 1
nd += d
m = methcr[i]
#if m == 0 and not options.meth0: continue
#seq = refcr[i-2:i+3]
flag = 0
if i + 3 > len( refcr ):
seq = refcr[i-2:i+1]
flag= 1
elif i-2 >= 0 :
seq = refcr[i-2:i+3]
else : # i-2 < 0
seq = refcr[i:i+3]
flag= 0
strand = ss[refcr[i]]
c_type = context_type( seq, strand, flag )
try: ratio = float(min(m,d)) / d
except ZeroDivisionError:
if options.CT_SNP:
fout.write('%s\t%d\t%c\t%s\t%s\tNA\t%.0f\t%d\t%d\t%d\t%d\tNA\tNA\n'
% (cr, i+1, strand, c_type, seq, min(m,d), d - min(m,d), dd, d1, m1))
else:
fout.write('%s\t%d\t%c\t%s\t%s\tNA\t%.0f\t%d\t%d\tNA\tNA\tNA\tNA\n'
% (cr, i+1, strand, c_type, seq, min(m,d), d - min(m,d), dd))
continue
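        # The lines below compute a Wilson score interval for the methylation
        # ratio at z = 1.96 (95% confidence):
        #   centre     = (p + z^2 / (2n)) / (1 + z^2 / n)
        #   half-width = z * sqrt(p(1-p)/n + z^2/(4n^2)) / (1 + z^2 / n)
        # with p = ratio and n = d (the effective CT count).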
pmid = ratio + z95sq / (2 * d)
sd = z95 * ((ratio*(1-ratio)/d + z95sq/(4*d*d)) ** 0.5)
        denominator = 1 + z95sq / d
        CIl, CIu = (pmid - sd) / denominator, (pmid + sd) / denominator
if options.CT_SNP:
fout.write('%s\t%d\t%c\t%s\t%s\t%.3f\t%.0f\t%d\t%d\t%d\t%d\t%.3f\t%.3f\n'
% (cr, i+1, strand, c_type, seq, ratio, min(m,d), d - min(m,d), dd, d1, m1, CIl, CIu))
else:
fout.write('%s\t%d\t%c\t%s\t%s\t%.3f\t%.0f\t%d\t%d\tNA\tNA\t%.3f\t%.3f\n'
% (cr, i+1, strand, c_type, seq, ratio, min(m,d), d - min(m,d), dd, CIl, CIu))
fout.close()
disp('done.')
if nc > 0: print 'total %d valid mappings, %d covered cytosines, average coverage: %.2f fold.' % (nmap, nc, float(nd)/nc)
|
|
class Scope(object):
def __init__(self, name=None, parent=None):
self.name = name
self.parent = parent
self.types = {}
def find(self, name):
if name in self.types:
return self.types[name]
if self.parent is not None:
return self.parent.find(name)
raise KeyError(name)
def introduce(self, name, type):
self.types[name] = type
if self.parent is not None and isinstance(type, ConstType):
self.parent.introduce((self.name,) + name, type)
def spawn(self, name):
s = Scope(name, self)
for k, t in self.types.items():
head, *tail = k
if head == s.name and tail:
s.types[tuple(tail)] = t
return s
def get_path(self):
if self.parent is None:
return ()
return self.parent.get_path() + (self.name,)
def get_root(self):
if self.parent is None:
return self
return self.parent.get_root()
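# Hedged usage sketch for Scope; the names and the some_const_type value are
# placeholders. A child scope sees its parent's types, and ConstType entries
# bubble up under a dotted path so a later spawn() of the same name sees them:
#
#   root = Scope()
#   mod = root.spawn('mymodule')
#   mod.introduce(('Foo',), some_const_type)  # also lands in root as ('mymodule', 'Foo')
#   mod.find(('Foo',))                        # -> some_const_type
#   root.spawn('mymodule').types              # contains ('Foo',) again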
class TypingJudgment(object):
def __init__(self, scope):
self.scope = scope
class ConstType(TypingJudgment):
def __init__(self, scope, value):
super().__init__(scope)
self.value = value
@property
def arity(self):
return len(self.value.params)
class ModuleType(TypingJudgment):
arity = 0
dotted = False
def __init__(self, scope, module_scope, value):
super().__init__(scope)
self.module_scope = module_scope
self.value = value
class InstantiatedType(TypingJudgment):
arity = 0
def __init__(self, scope, value, params):
super().__init__(scope)
self.value = value
self.params = params
class TypeVar(TypingJudgment):
arity = 0
class BadType(TypingJudgment):
arity = None
class PrimitiveType(TypingJudgment):
arity = 0
TYPES = [
'Nil',
'String',
'Bytes',
'Int8',
'Int16',
'Int32',
'Int64',
'UInt8',
'UInt16',
'UInt32',
'UInt64',
'Float32',
'Float64',
'Bool',
]
for name in PrimitiveType.TYPES:
setattr(PrimitiveType, name, type(name, (PrimitiveType,), {}))
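# The loop above creates one PrimitiveType subclass per entry in TYPES, so
# e.g. PrimitiveType.Int32 and PrimitiveType.Bool exist as distinct classes;
# visit_symbol below instantiates them via getattr(PrimitiveType, node.name).
# For example (scope being any Scope instance):
#   isinstance(PrimitiveType.Int32(scope), PrimitiveType)  # -> True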
class SymbolFindingVisitor(object):
def __init__(self, compilation, scope, name):
self.compilation = compilation
self.scope = scope
self.name = name
def _visit_aggregate(self, type):
head, *rest = self.name
t = self.scope.find((head,))
if not rest:
return t
return t.accept(SymbolFindingVisitor(self.compilation, self.scope,
tuple(rest)))
def visit_module(self, type):
return self._visit_aggregate(type)
def visit_record(self, type):
return self._visit_aggregate(type)
def visit_enum(self, type):
return self._visit_aggregate(type)
def visit_alias(self, type):
return type.inst.accept(self)
def visit_imported(self, type):
return self._visit_aggregate(type)
class TypeCheckingVisitor(object):
def __init__(self, compilation, module, scope, assumptions,
apply_judgments):
self.compilation = compilation
self.module = module
self.scope = scope
self.assumptions = assumptions
self.apply_judgments = apply_judgments
def _apply_judgment(self, node, type):
if self.apply_judgments:
node.with_judgment(type)
return type
def _make_typing_judgment(self, type, *args, **kwargs):
return type(self.scope, *args, **kwargs)
def _check_arity(self, type):
if type.arity != 0 and type.arity is not None:
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
"expected {} argument(s) for type '{}'".format(
type.arity, type.value.name),
type.value.ast_node.id.lineno, type.value.ast_node.id.colno,
len(type.value.ast_node.id.raw))
def _visit_type(self, type):
self.scope = self.scope.spawn(type.name)
for p in type.params:
self.scope.introduce((p.name,),
self._make_typing_judgment(TypeVar))
type.accept(self)
self.scope = self.scope.parent
self.scope.introduce((type.name,),
self._make_typing_judgment(ConstType, type))
def _visit_types(self, types):
for type in types:
if (type.name,) not in self.scope.types:
self.scope.introduce(
(type.name,), self._make_typing_judgment(ConstType, type))
for type in types:
self._visit_type(type)
def _visit_aggregate(self, type):
self._visit_types(type.types.values())
for field in type.fields.values():
field.accept(self)
return self._make_typing_judgment(ConstType, type)
def visit_module(self, node):
self._visit_types(node.types.values())
return self._apply_judgment(
node,
self._make_typing_judgment(ModuleType, self.scope, self.module))
def visit_enum(self, node):
return self._apply_judgment(node, self._visit_aggregate(node))
def visit_record(self, node):
return self._apply_judgment(node, self._visit_aggregate(node))
def visit_field(self, node):
if node.type is not None:
t = node.type.accept(self)
self._check_arity(t)
else:
t = self._make_typing_judgment(PrimitiveType.Nil)
return self._apply_judgment(node, t)
def visit_instantiation(self, node):
t = node.type.accept(self)
if t.arity == 0:
# Instantiate a type with zero arity -> not ok, but return
# uninstantiated
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
'type has no parameters',
node.ast_node.lineno, node.ast_node.colno, 1)
out_t = t
elif t.arity is None:
# Instantiate a type with undefined arity -> ok
out_t = t
else:
params = [param.accept(self) for param in node.params]
if len(params) != t.arity:
# Instantiate a type with differing arity -> not ok
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
"type '{}' takes {} parameters".format(t.value.name,
t.arity),
node.ast_node.lineno, node.ast_node.colno,
1)
out_t = self._make_typing_judgment(BadType)
else:
if t.value not in self.assumptions and \
t.value.module is self.module:
# Instantiate a type with matching arity -> ok
scope = self.scope.parent.spawn(t.value.name)
for param, v in zip(t.value.params, params):
scope.introduce((param.name,), v)
c = self.compilation.without_diagnostics()
# If the type has any recursions, we can assume that the
# type is correct during instantiations (as long as the
# arity is correct). We don't support any kind of
# conditional recursion, so a type definition with recursion
# is always non-terminating.
#
# We also don't apply any judgments here -- if we do, we'll
# apply judgments for instantiated type parameters
# incorrectly.
_check(c, t.value, self.module, scope, self.assumptions |
{t.value}, False)
if c.diagnostics:
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.NOTE,
'instantiated from here',
node.ast_node.lineno, node.ast_node.colno, 1)
self.compilation.diagnostics.extend(c.diagnostics)
# Even if we failed to instantiate, pretend we didn't to not
# stop compilation.
out_t = self._make_typing_judgment(InstantiatedType, t.value,
params)
return self._apply_judgment(node, out_t)
def visit_alias(self, node):
t = node.inst.accept(self)
self._check_arity(t)
if isinstance(t, ModuleType):
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
"cannot alias module",
node.ast_node.id.lineno, node.ast_node.id.colno,
len(node.ast_node.id.raw))
t = self._make_typing_judgment(BadType)
return self._apply_judgment(node, t)
def visit_member(self, node):
t = node.type.accept(self)
self._check_arity(t)
if t.arity != 0:
out_t = self._make_typing_judgment(BadType)
elif isinstance(t, PrimitiveType):
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
'primitive type does not have members',
node.ast_node.lineno, node.ast_node.colno, 1)
out_t = self._make_typing_judgment(BadType)
elif isinstance(t, TypeVar):
out_t = self._make_typing_judgment(TypeVar)
elif isinstance(t, BadType):
out_t = self._make_typing_judgment(BadType)
else:
if isinstance(t, ModuleType):
scope = t.module_scope
else:
scope = self.scope.parent.spawn(t.value.name)
try:
out_t = t.value.accept(SymbolFindingVisitor(
self.compilation, scope, (node.member.name,)))
except KeyError:
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
"no member type named '{}' in type '{}'".format(
node.member.name, t.value.name),
node.member.ast_node.lineno, node.member.ast_node.colno,
len(node.member.name))
out_t = self._make_typing_judgment(BadType)
# NOTE: This isn't strictly correct, but for the purposes of typing
# judgment it's fine.
self._apply_judgment(node.member, out_t)
return self._apply_judgment(node, out_t)
def visit_imported(self, node):
mod = self.compilation.imports[node.name]
if mod is None:
t = self._make_typing_judgment(BadType)
else:
t = self._make_typing_judgment(
ModuleType, _check(self.compilation, mod, mod).scope, node)
return self._apply_judgment(node, t)
def visit_symbol(self, node):
if node.name in PrimitiveType.TYPES:
t = self._make_typing_judgment(getattr(PrimitiveType, node.name))
else:
try:
t = self.scope.find((node.name,))
except KeyError:
self.compilation.add_diagnostic(
self.compilation.Diagnostic.Severity.ERROR,
"could not resolve type '{}'".format(node.name),
node.ast_node.lineno, node.ast_node.colno,
len(node.ast_node.raw))
t = self._make_typing_judgment(BadType)
self.scope.introduce((node.name,), t)
return self._apply_judgment(node, t)
def _check(compilation, type, module, s=None, assumptions=None,
apply_judgments=True):
if s is None:
s = Scope()
if assumptions is None:
assumptions = set()
v = TypeCheckingVisitor(compilation, module, s, assumptions,
apply_judgments)
type.accept(v)
return v
def check(compilation, type, module=None):
if module is None:
module = type.module
_check(compilation, type, module)
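# Rough usage sketch (names as defined above): given a compilation object and a
# parsed type whose `module` attribute is set, check(compilation, type) runs
# TypeCheckingVisitor over it with a fresh Scope and empty assumptions, attaching
# typing judgments to nodes and recording diagnostics on the compilation.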
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that verify MMDS related functionality."""
import json
import random
import string
import time
import pytest
from framework.artifacts import DEFAULT_DEV_NAME, NetIfaceConfig
from framework.builder import MicrovmBuilder, SnapshotBuilder, SnapshotType
from framework.utils import generate_mmds_session_token, \
generate_mmds_v2_get_request
import host_tools.network as net_tools
# Minimum lifetime of token.
MIN_TOKEN_TTL_SECONDS = 1
# Maximum lifetime of token.
MAX_TOKEN_TTL_SECONDS = 21600
# Default IPv4 value for MMDS.
DEFAULT_IPV4 = '169.254.169.254'
# MMDS versions supported.
MMDS_VERSIONS = ['V2', 'V1']
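# Sketch of the MMDS V2 session flow exercised below (header names taken from
# the commands used in this module):
#   1. PUT http://169.254.169.254/latest/api/token
#      -H "X-metadata-token-ttl-seconds: <ttl>"      -> returns a session token
#   2. GET http://169.254.169.254/latest/meta-data/
#      -H "X-metadata-token: <token>"                -> returns metadata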
def _assert_out(stdout, stderr, expected):
assert stderr.read() == ''
assert stdout.read() == expected
def _populate_data_store(test_microvm, data_store):
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == {}
response = test_microvm.mmds.put(json=data_store)
assert test_microvm.api_session.is_status_no_content(response.status_code)
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == data_store
def _configure_mmds(test_microvm, iface_id, version, ipv4_address=None):
mmds_config = {
'version': version,
'network_interfaces': [iface_id]
}
if ipv4_address:
mmds_config['ipv4_address'] = ipv4_address
response = test_microvm.mmds.put_config(json=mmds_config)
assert test_microvm.api_session.is_status_no_content(response.status_code)
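# For example, _configure_mmds(vm, iface_id='1', version='V2',
# ipv4_address='169.254.169.250') PUTs a config equivalent to:
#   {"version": "V2", "network_interfaces": ["1"],
#    "ipv4_address": "169.254.169.250"}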
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_custom_ipv4(test_microvm_with_api, network_config, version):
"""
Test the API for MMDS custom ipv4 support.
@type: functional
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
data_store = {
'latest': {
'meta-data': {
'ami-id': 'ami-12345678',
'reservation-id': 'r-fea54097',
'local-hostname': 'ip-10-251-50-12.ec2.internal',
'public-hostname': 'ec2-203-0-113-25.compute-1.amazonaws.com',
'network': {
'interfaces': {
'macs': {
'02:29:96:8f:6a:2d': {
'device-number': '13345342',
'local-hostname': 'localhost',
'subnet-id': 'subnet-be9b61d'
}
}
}
}
}
}
}
_populate_data_store(test_microvm, data_store)
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Invalid values IPv4 address.
response = test_microvm.mmds.put_config(json={
'ipv4_address': '',
'network_interfaces': ['1']
})
assert test_microvm.api_session.is_status_bad_request(response.status_code)
response = test_microvm.mmds.put_config(json={
'ipv4_address': '1.1.1.1',
'network_interfaces': ['1']
})
assert test_microvm.api_session.is_status_bad_request(response.status_code)
ipv4_address = '169.254.169.250'
# Configure MMDS with custom IPv4 address.
_configure_mmds(
test_microvm,
iface_id='1',
version=version,
ipv4_address=ipv4_address
)
test_microvm.basic_config(vcpu_count=1)
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip route add {} dev eth0'.format(ipv4_address)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
if version == 'V2':
# Generate token.
token = generate_mmds_session_token(
ssh_connection,
ipv4_address=ipv4_address,
token_ttl=60
)
pre = generate_mmds_v2_get_request(
ipv4_address=ipv4_address,
token=token
)
else:
pre = 'curl -s -H "Accept: application/json" ' \
'http://{}/'.format(ipv4_address)
cmd = pre + 'latest/meta-data/ami-id'
_, stdout, _ = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 'ami-12345678'
# The request is still valid if we append a
# trailing slash to a leaf node.
cmd = pre + 'latest/meta-data/ami-id/'
_, stdout, _ = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 'ami-12345678'
cmd = pre + 'latest/meta-data/network/interfaces/macs/' \
'02:29:96:8f:6a:2d/subnet-id'
_, stdout, _ = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 'subnet-be9b61d'
# Test reading a non-leaf node WITHOUT a trailing slash.
cmd = pre + 'latest/meta-data'
_, stdout, _ = ssh_connection.execute_command(cmd)
assert json.load(stdout) == data_store['latest']['meta-data']
# Test reading a non-leaf node with a trailing slash.
cmd = pre + 'latest/meta-data/'
_, stdout, _ = ssh_connection.execute_command(cmd)
assert json.load(stdout) == data_store['latest']['meta-data']
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_json_response(test_microvm_with_api, network_config, version):
"""
Test the MMDS json response.
@type: functional
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
data_store = {
'latest': {
'meta-data': {
'ami-id': 'ami-12345678',
'reservation-id': 'r-fea54097',
'local-hostname': 'ip-10-251-50-12.ec2.internal',
'public-hostname': 'ec2-203-0-113-25.compute-1.amazonaws.com',
'dummy_res': ['res1', 'res2']
},
"Limits": {
"CPU": 512,
"Memory": 512
},
"Usage": {
"CPU": 12.12
}
}
}
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, iface_id='1', version=version)
# Populate data store with contents.
_populate_data_store(test_microvm, data_store)
test_microvm.basic_config(vcpu_count=1)
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip route add {} dev eth0'.format(DEFAULT_IPV4)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
if version == 'V2':
# Generate token.
token = generate_mmds_session_token(
ssh_connection,
ipv4_address=DEFAULT_IPV4,
token_ttl=60
)
pre = generate_mmds_v2_get_request(DEFAULT_IPV4, token)
else:
pre = 'curl -s -H "Accept: application/json"' \
' http://{}/'.format(DEFAULT_IPV4)
cmd = pre + 'latest/meta-data/'
_, stdout, _ = ssh_connection.execute_command(cmd)
assert json.load(stdout) == data_store['latest']['meta-data']
cmd = pre + 'latest/meta-data/ami-id/'
_, stdout, stderr = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 'ami-12345678'
cmd = pre + 'latest/meta-data/dummy_res/0'
_, stdout, stderr = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 'res1'
cmd = pre + 'latest/Usage/CPU'
_, stdout, stderr = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 12.12
cmd = pre + 'latest/Limits/CPU'
_, stdout, stderr = ssh_connection.execute_command(cmd)
assert json.load(stdout) == 512
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_mmds_response(test_microvm_with_api, network_config, version):
"""
Test MMDS responses to various datastore requests.
@type: functional
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
data_store = {
'latest': {
'meta-data': {
'ami-id': 'ami-12345678',
'reservation-id': 'r-fea54097',
'local-hostname': 'ip-10-251-50-12.ec2.internal',
'public-hostname': 'ec2-203-0-113-25.compute-1.amazonaws.com',
'dummy_obj': {
'res_key': 'res_value',
},
'dummy_array': [
'arr_val1',
'arr_val2'
]
},
"Limits": {
"CPU": 512,
"Memory": 512
},
"Usage": {
"CPU": 12.12
}
}
}
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, iface_id='1', version=version)
# Populate data store with contents.
_populate_data_store(test_microvm, data_store)
test_microvm.basic_config(vcpu_count=1)
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip route add {} dev eth0'.format(DEFAULT_IPV4)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
if version == 'V2':
# Generate token.
token = generate_mmds_session_token(
ssh_connection,
ipv4_address=DEFAULT_IPV4,
token_ttl=60
)
pre = generate_mmds_v2_get_request(
ipv4_address=DEFAULT_IPV4,
token=token,
app_json=False
)
else:
pre = 'curl -s http://{}/'.format(DEFAULT_IPV4)
cmd = pre + 'latest/meta-data/'
_, stdout, stderr = ssh_connection.execute_command(cmd)
expected = "ami-id\n" \
"dummy_array\n" \
"dummy_obj/\n" \
"local-hostname\n" \
"public-hostname\n" \
"reservation-id"
_assert_out(stdout, stderr, expected)
cmd = pre + 'latest/meta-data/ami-id/'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, 'ami-12345678')
cmd = pre + 'latest/meta-data/dummy_array/0'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, 'arr_val1')
cmd = pre + 'latest/Usage/CPU'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, 'Cannot retrieve value. The value has an'
' unsupported type.')
cmd = pre + 'latest/Limits/CPU'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, 'Cannot retrieve value. The value has an'
' unsupported type.')
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_larger_than_mss_payloads(
test_microvm_with_api,
network_config,
version):
"""
Test MMDS content for payloads larger than MSS.
@type: functional
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, iface_id='1', version=version)
# The MMDS is empty at this point.
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == {}
test_microvm.basic_config(vcpu_count=1)
test_microvm.start()
# Make sure MTU is 1500 bytes.
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip link set dev eth0 mtu 1500'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, "")
cmd = 'ip a s eth0 | grep -i mtu | tr -s " " | cut -d " " -f 4,5'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, "mtu 1500\n")
# These values are usually used by booted up guest network interfaces.
mtu = 1500
ipv4_packet_headers_len = 20
tcp_segment_headers_len = 20
mss = mtu - ipv4_packet_headers_len - tcp_segment_headers_len
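    # With the values above: mss = 1500 - 20 - 20 = 1460 bytes.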
# Generate a random MMDS content, double of MSS.
letters = string.ascii_lowercase
larger_than_mss = ''.join(random.choice(letters) for i in range(2 * mss))
mss_equal = ''.join(random.choice(letters) for i in range(mss))
lower_than_mss = ''.join(random.choice(letters) for i in range(mss - 2))
data_store = {
'larger_than_mss': larger_than_mss,
'mss_equal': mss_equal,
'lower_than_mss': lower_than_mss
}
response = test_microvm.mmds.put(json=data_store)
assert test_microvm.api_session.is_status_no_content(response.status_code)
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == data_store
cmd = 'ip route add {} dev eth0'.format(DEFAULT_IPV4)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
if version == 'V2':
# Generate token.
token = generate_mmds_session_token(
ssh_connection,
ipv4_address=DEFAULT_IPV4,
token_ttl=60
)
pre = generate_mmds_v2_get_request(
ipv4_address=DEFAULT_IPV4,
token=token,
app_json=False
)
else:
pre = 'curl -s http://{}/'.format(DEFAULT_IPV4)
cmd = pre + 'larger_than_mss'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, larger_than_mss)
cmd = pre + 'mss_equal'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, mss_equal)
cmd = pre + 'lower_than_mss'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, lower_than_mss)
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_mmds_dummy(test_microvm_with_api, network_config, version):
"""
Test the API and guest facing features of the microVM MetaData Service.
@type: functional
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, iface_id='1', version=version)
# The MMDS is empty at this point.
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == {}
    # Test that PATCH returns NotInitialized when the MMDS is not initialized.
dummy_json = {
'latest': {
'meta-data': {
'ami-id': 'dummy'
}
}
}
response = test_microvm.mmds.patch(json=dummy_json)
assert test_microvm.api_session.is_status_bad_request(response.status_code)
fault_json = {
"fault_message": "The MMDS data store is not initialized."
}
assert response.json() == fault_json
    # Test that a PUT request with the same json creates the MMDS data-store.
response = test_microvm.mmds.put(json=dummy_json)
assert test_microvm.api_session.is_status_no_content(response.status_code)
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == dummy_json
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == dummy_json
dummy_json = {
'latest': {
'meta-data': {
'ami-id': 'another_dummy',
'secret_key': 'eaasda48141411aeaeae'
}
}
}
response = test_microvm.mmds.patch(json=dummy_json)
assert test_microvm.api_session.is_status_no_content(response.status_code)
response = test_microvm.mmds.get()
assert test_microvm.api_session.is_status_ok(response.status_code)
assert response.json() == dummy_json
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_guest_mmds_hang(test_microvm_with_api, network_config, version):
"""
Test the MMDS json endpoint when Content-Length larger than actual length.
@type: functional
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, iface_id='1', version=version)
data_store = {
'latest': {
'meta-data': {
'ami-id': 'ami-12345678'
}
}
}
_populate_data_store(test_microvm, data_store)
test_microvm.basic_config(vcpu_count=1)
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip route add {} dev eth0'.format(DEFAULT_IPV4)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
get_cmd = 'curl -m 2 -s'
get_cmd += ' -X GET'
get_cmd += ' -H "Content-Length: 100"'
get_cmd += ' -H "Accept: application/json"'
get_cmd += ' -d "some body"'
get_cmd += ' http://{}/'.format(DEFAULT_IPV4)
if version == 'V1':
_, stdout, _ = ssh_connection.execute_command(get_cmd)
assert 'Invalid request' in stdout.read()
else:
# Generate token.
token = generate_mmds_session_token(
ssh_connection,
ipv4_address=DEFAULT_IPV4,
token_ttl=60
)
get_cmd += ' -H "X-metadata-token: {}"'.format(token)
_, stdout, _ = ssh_connection.execute_command(get_cmd)
assert 'Invalid request' in stdout.read()
# Do the same for a PUT request.
cmd = 'curl -m 2 -s'
cmd += ' -X PUT'
cmd += ' -H "Content-Length: 100"'
cmd += ' -H "X-metadata-token: {}"'.format(token)
cmd += ' -H "Accept: application/json"'
cmd += ' -d "some body"'
cmd += ' http://{}/'.format(DEFAULT_IPV4)
_, stdout, _ = ssh_connection.execute_command(cmd)
assert 'Invalid request' in stdout.read()
@pytest.mark.parametrize(
"version",
MMDS_VERSIONS
)
def test_patch_dos_scenario(test_microvm_with_api, network_config, version):
"""
Test the MMDS json endpoint when data store size reaches the limit.
@type: negative
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, iface_id='1', version=version)
dummy_json = {
'latest': {
'meta-data': {
'ami-id': 'dummy'
}
}
}
# Populate data-store.
response = test_microvm.mmds.put(json=dummy_json)
assert test_microvm.api_session.is_status_no_content(response.status_code)
# Send a request that will fill the data store.
aux = "a" * 51137
dummy_json = {
'latest': {
'meta-data': {
'ami-id': "smth",
'secret_key': aux
}
}
}
response = test_microvm.mmds.patch(json=dummy_json)
assert test_microvm.api_session.is_status_no_content(response.status_code)
    # Try to send a new patch that will increase the data store size. Since the
    # actual size equals the limit, this request should fail with
    # PayloadTooLarge.
aux = "b" * 10
dummy_json = {
'latest': {
'meta-data': {
'ami-id': "smth",
'secret_key2': aux
}
}
}
response = test_microvm.mmds.patch(json=dummy_json)
assert test_microvm.api_session.\
is_status_payload_too_large(response.status_code)
    # Check that the patch actually failed and that the contents of the data
    # store have not changed.
response = test_microvm.mmds.get()
assert str(response.json()).find(aux) == -1
# Delete something from the mmds so we will be able to send new data.
dummy_json = {
'latest': {
'meta-data': {
'ami-id': "smth",
'secret_key': "a"
}
}
}
response = test_microvm.mmds.patch(json=dummy_json)
assert test_microvm.api_session.is_status_no_content(response.status_code)
# Check that the size has shrunk.
response = test_microvm.mmds.get()
assert len(str(response.json()).replace(" ", "")) == 59
# Try to send a new patch, this time the request should succeed.
aux = "a" * 100
dummy_json = {
'latest': {
'meta-data': {
'ami-id': "smth",
'secret_key': aux
}
}
}
response = test_microvm.mmds.patch(json=dummy_json)
assert test_microvm.api_session.is_status_no_content(response.status_code)
# Check that the size grew as expected.
response = test_microvm.mmds.get()
assert len(str(response.json()).replace(" ", "")) == 158
def test_mmds_snapshot(bin_cloner_path):
"""
Exercise MMDS behavior with snapshots.
Ensures that MMDS V2 behavior is not affected by taking a snapshot
and that MMDS V2 is not available after snapshot load.
@type: functional
"""
vm_builder = MicrovmBuilder(bin_cloner_path)
net_iface = NetIfaceConfig()
vm_instance = vm_builder.build_vm_nano(
net_ifaces=[net_iface],
diff_snapshots=True
)
test_microvm = vm_instance.vm
root_disk = vm_instance.disks[0]
ssh_key = vm_instance.ssh_key
ipv4_address = '169.254.169.250'
# Configure MMDS version with custom IPv4 address.
_configure_mmds(
test_microvm,
version='V2',
iface_id=DEFAULT_DEV_NAME,
ipv4_address=ipv4_address
)
data_store = {
'latest': {
'meta-data': {
'ami-id': 'ami-12345678'
}
}
}
_populate_data_store(test_microvm, data_store)
test_microvm.start()
snapshot_builder = SnapshotBuilder(test_microvm)
disks = [root_disk.local_path()]
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip route add {} dev eth0'.format(ipv4_address)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
# Generate token.
token = generate_mmds_session_token(
ssh_connection,
ipv4_address=ipv4_address,
token_ttl=60
)
pre = 'curl -m 2 -s'
pre += ' -X GET'
pre += ' -H "X-metadata-token: {}"'.format(token)
pre += ' http://{}/'.format(ipv4_address)
# Fetch metadata.
cmd = pre + 'latest/meta-data/'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, "ami-id")
# Create diff snapshot.
snapshot = snapshot_builder.create(disks,
ssh_key,
SnapshotType.DIFF)
# Resume microVM and ensure session token is still valid on the base.
response = test_microvm.vm.patch(state='Resumed')
assert test_microvm.api_session.is_status_no_content(response.status_code)
_, stdout, stderr = ssh_connection.execute_command(
pre + 'latest/meta-data/'
)
_assert_out(stdout, stderr, "ami-id")
# Kill base microVM.
test_microvm.kill()
# Load microVM clone from snapshot.
test_microvm, _ = vm_builder.build_from_snapshot(snapshot,
resume=True,
diff_snapshots=True)
_populate_data_store(test_microvm, data_store)
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
    # MMDS V2 is not available after snapshot load.
# Test that `PUT` requests are not allowed.
cmd = 'curl -m 2 -s'
cmd += ' -X PUT'
cmd += ' -H "X-metadata-token-ttl-seconds: 1"'
cmd += ' http://{}/latest/api/token'.format(ipv4_address)
_, stdout, stderr = ssh_connection.execute_command(cmd)
expected = "Not allowed HTTP method."
_assert_out(stdout, stderr, expected)
# Fetch metadata using V1 requests and ensure IPv4 configuration
# is persistent between snapshots.
cmd = 'curl -s http://{}/latest/meta-data/ami-id/'.format(ipv4_address)
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, 'ami-12345678')
def test_mmds_v2_negative(test_microvm_with_api, network_config):
"""
Test invalid MMDS GET/PUT requests when using V2.
@type: negative
"""
test_microvm = test_microvm_with_api
test_microvm.spawn()
# Attach network device.
_tap = test_microvm.ssh_network_config(network_config, '1')
# Configure MMDS version.
_configure_mmds(test_microvm, version='V2', iface_id='1')
data_store = {
'latest': {
'meta-data': {
'ami-id': 'ami-12345678',
'reservation-id': 'r-fea54097',
'local-hostname': 'ip-10-251-50-12.ec2.internal',
'public-hostname': 'ec2-203-0-113-25.compute-1.amazonaws.com'
}
}
}
_populate_data_store(test_microvm, data_store)
test_microvm.basic_config(vcpu_count=1)
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
cmd = 'ip route add 169.254.169.254 dev eth0'
_, stdout, stderr = ssh_connection.execute_command(cmd)
_assert_out(stdout, stderr, '')
# Check `GET` request fails when token is not provided.
cmd = 'curl -s http://169.254.169.254/latest/meta-data/'
_, stdout, stderr = ssh_connection.execute_command(cmd)
expected = "No MMDS token provided. Use `X-metadata-token` header " \
"to specify the session token."
_assert_out(stdout, stderr, expected)
# Generic `GET` request.
get_cmd = 'curl -m 2 -s'
get_cmd += ' -X GET'
get_cmd += ' -H "X-metadata-token: {}"'
get_cmd += ' http://169.254.169.254/latest/meta-data'
# Check `GET` request fails when token is not valid.
_, stdout, stderr = ssh_connection.execute_command(get_cmd.format("foo"))
_assert_out(stdout, stderr, "MMDS token not valid.")
# Check `PUT` request fails when token TTL is not provided.
_, stdout, stderr = ssh_connection.execute_command(
'curl -m 2 -s -X PUT http://169.254.169.254/latest/api/token'
)
expected = "Token time to live value not found. Use " \
"`X-metadata-token-ttl_seconds` header to specify " \
"the token's lifetime."
_assert_out(stdout, stderr, expected)
# Check `PUT` request fails when `X-Forwarded-For` header is provided.
cmd = 'curl -m 2 -s'
cmd += ' -X PUT'
cmd += ' -H "X-Forwarded-For: foo"'
cmd += ' http://169.254.169.254'
_, stdout, stderr = ssh_connection.execute_command(cmd)
expected = "Invalid header. Reason: Unsupported header name. " \
"Key: X-Forwarded-For"
_assert_out(stdout, stderr, expected)
# Generic `PUT` request.
put_cmd = 'curl -m 2 -s'
put_cmd += ' -X PUT'
put_cmd += ' -H "X-metadata-token-ttl-seconds: {}"'
put_cmd += ' http://169.254.169.254/latest/api/token'
# Check `PUT` request fails when path is invalid.
# Path is invalid because we remove the last character
# at the end of the valid uri.
_, stdout, stderr = ssh_connection.execute_command(put_cmd[:-1].format(60))
_assert_out(stdout, stderr, "Resource not found: /latest/api/toke.")
# Check `PUT` request fails when token TTL is not valid.
ttl_values = [MIN_TOKEN_TTL_SECONDS - 1, MAX_TOKEN_TTL_SECONDS + 1]
for ttl in ttl_values:
_, stdout, stderr = ssh_connection.execute_command(put_cmd.format(ttl))
expected = "Invalid time to live value provided for token: {}. " \
"Please provide a value between {} and {}." \
.format(ttl, MIN_TOKEN_TTL_SECONDS, MAX_TOKEN_TTL_SECONDS)
_assert_out(stdout, stderr, expected)
# Valid `PUT` request to generate token.
_, stdout, stderr = ssh_connection.execute_command(put_cmd.format(1))
token = stdout.read()
assert len(token) > 0
# Wait for token to expire.
time.sleep(1)
# Check `GET` request fails when expired token is provided.
_, stdout, stderr = ssh_connection.execute_command(get_cmd.format(token))
_assert_out(stdout, stderr, "MMDS token not valid.")
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import shutil
from os.path import join
from cms.utils.conf import get_cms_setting
from djangocms_text_ckeditor.models import Text
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
import reversion
from reversion.models import Revision, Version
from cms.models import Page, Title, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.project.fileapp.models import FileModel
from cms.test_utils.testcases import CMSTestCase, TransactionCMSTestCase, URL_CMS_PAGE, URL_CMS_PAGE_CHANGE, URL_CMS_PAGE_ADD, \
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT
from cms.test_utils.util.context_managers import SettingsOverride
if hasattr(reversion.models, 'VERSION_CHANGE'):
from reversion.models import VERSION_CHANGE
class BasicReversionTestCase(CMSTestCase):
def setUp(self):
self.user = self._create_user("test", True, True)
def test_number_revisions(self):
with self.login_user_context(self.user):
self.assertEqual(Revision.objects.all().count(), 0)
self.page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 1)
class ReversionTestCase(TransactionCMSTestCase):
def setUp(self):
u = self._create_user("test", True, True)
with self.login_user_context(u):
# add a new text plugin
self.page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE)
page = Page.objects.all()[0]
placeholderpk = page.placeholders.get(slot="body").pk
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholderpk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content.decode('utf8').split("edit-plugin/")[1].split("/")[
0] + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(edit_url, {"body": "Hello World"})
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Hello World", txt.body)
self.txt = txt
# change the content
response = self.client.post(edit_url, {"body": "Bye Bye World"})
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Bye Bye World", txt.body)
p_data = self.page_data.copy()
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk, p_data)
self.assertRedirects(response, URL_CMS_PAGE)
page.publish('en')
self.user = u
def test_revert(self):
"""
Test that you can revert a plugin
"""
with self.login_user_context(self.user):
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[2]
version = Version.objects.get(content_type=ctype, revision=revision)
page = Page.objects.all()[0]
history_url = URL_CMS_PAGE_CHANGE % (page.pk) + "history/"
response = self.client.get(history_url)
self.assertEqual(response.status_code, 200)
revert_url = history_url + "%s/" % version.pk
response = self.client.get(revert_url)
self.assertEqual(response.status_code, 200)
response = self.client.post("%s?language=en&" % revert_url, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page.pk)
# test for publisher_is_draft, published is set for both draft and
# published page
self.assertEqual(Page.objects.all()[0].publisher_is_draft, True)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# test that CMSPlugin subclasses are reverted
self.assertEqual(Text.objects.all().count(), 2)
self.assertEqual(Text.objects.get(pk=self.txt.pk).body, "Hello World")
self.assertEqual(Revision.objects.all().count(), 6)
def test_undo_redo(self):
"""
        Test that you can undo and redo plugin changes
"""
with self.login_user_context(self.user):
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 5)
self.assertEqual(Placeholder.objects.count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[2]
Version.objects.get(content_type=ctype, revision=revision)
page = Page.objects.all()[0]
undo_url = reverse("admin:cms_page_undo", args=[page.pk])
response = self.client.post(undo_url)
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertTrue(page.revision_id != 0)
rev = page.revision_id
redo_url = reverse("admin:cms_page_redo", args=[page.pk])
response = self.client.post(redo_url)
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertTrue(page.revision_id != rev)
txt = Text.objects.all()[0]
edit_url = URL_CMS_PLUGIN_EDIT + str(txt.pk) + "/"
response = self.client.post(edit_url, {"body": "Hello World2"})
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertEqual(page.revision_id, 0)
self.assertEqual(2, CMSPlugin.objects.all().count())
placeholderpk = page.placeholders.filter(slot="body")[0].pk
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholderpk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content.decode('utf8').split("edit-plugin/")[1].split("/")[
0] + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(edit_url, {"body": "Hello World"})
self.assertEqual(response.status_code, 200)
self.assertEqual(3, CMSPlugin.objects.all().count())
self.client.post(undo_url)
self.client.post(undo_url)
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(Placeholder.objects.count(), 5)
def test_undo_slug_collision(self):
data1 = self.get_new_page_data()
data2 = self.get_new_page_data()
data1['slug'] = 'page1'
data2['slug'] = 'page2'
with self.login_user_context(self.get_superuser()):
response = self.client.post(URL_CMS_PAGE_ADD, data1)
self.assertEqual(response.status_code, 302)
response = self.client.post(URL_CMS_PAGE_ADD, data2)
self.assertEqual(response.status_code, 302)
page1 = Page.objects.get(title_set__slug='page1')
page2 = Page.objects.get(title_set__slug='page2')
data1['slug'] = 'page3'
response = self.client.post(URL_CMS_PAGE_CHANGE % page1.pk, data1)
self.assertEqual(response.status_code, 302)
data2['slug'] = 'page1'
response = self.client.post(URL_CMS_PAGE_CHANGE % page2.pk, data2)
self.assertEqual(response.status_code, 302)
undo_url = reverse("admin:cms_page_undo", args=[page1.pk])
response = self.client.post(undo_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Title.objects.get(page=page1).slug, 'page3')
response = self.client.get(reverse("admin:cms_page_changelist"))
self.assertEqual(response.status_code, 200)
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
response = self.client.get('/en/page1/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
def test_recover(self):
"""
Test that you can recover a page
"""
with self.login_user_context(self.user):
self.assertEqual(Revision.objects.all().count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[4]
version = Version.objects.filter(content_type=ctype, revision=revision)[0]
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Text.objects.all().count(), 2)
page = Page.objects.all()[0]
page_pk = page.pk
page.delete()
self.assertEqual(Page.objects.all().count(), 0)
self.assertEqual(CMSPlugin.objects.all().count(), 0)
self.assertEqual(Text.objects.all().count(), 0)
recover_url = URL_CMS_PAGE + "recover/"
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
recover_url += "%s/" % version.pk
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(recover_url, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page_pk)
self.assertEqual(Page.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# test that CMSPlugin subclasses are recovered
self.assertEqual(Text.objects.all().count(), 1)
def test_publish_limits(self):
with self.login_user_context(self.user):
with SettingsOverride(CMS_MAX_PAGE_PUBLISH_REVERSIONS=2, CMS_MAX_PAGE_HISTORY_REVERSIONS=2):
page = Page.objects.all()[0]
page_pk = page.pk
self.assertEqual(Revision.objects.all().count(), 5)
for x in range(10):
publish_url = URL_CMS_PAGE + "%s/en/publish/" % page_pk
response = self.client.get(publish_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Revision.objects.all().count(), 4)
class ReversionFileFieldTests(CMSTestCase):
def tearDown(self):
shutil.rmtree(join(settings.MEDIA_ROOT, 'fileapp'))
def test_file_persistence(self):
content = b'content1'
with reversion.create_revision():
# add a file instance
file1 = FileModel()
file1.test_file.save('file1.txt', SimpleUploadedFile('file1.txt', content), False)
file1.save()
            # manually add a revision because we use the explicit way
            # that django-cms uses too.
adapter = reversion.get_adapter(FileModel)
if hasattr(reversion.models, 'VERSION_CHANGE'):
reversion.revision_context_manager.add_to_context(
reversion.default_revision_manager, file1,
adapter.get_version_data(file1, VERSION_CHANGE))
else:
reversion.revision_context_manager.add_to_context(
reversion.default_revision_manager, file1,
adapter.get_version_data(file1))
# reload the instance from db
file2 = FileModel.objects.all()[0]
# delete the instance.
file2.delete()
# revert the old version
file_version = reversion.get_for_object(file1)[0]
file_version.revert()
# reload the reverted instance and check for its content
file1 = FileModel.objects.all()[0]
self.assertEqual(file1.test_file.file.read(), content)
|
|
"""
Created on Thu Oct 5 16:44:23 2017
@author: Christian Bender
This Python library contains some useful functions to deal with
prime numbers and whole numbers.
Overview:
isPrime(number)
sieveEr(N)
getPrimeNumbers(N)
primeFactorization(number)
greatestPrimeFactor(number)
smallestPrimeFactor(number)
getPrime(n)
getPrimesBetween(pNumber1, pNumber2)
----
isEven(number)
isOdd(number)
gcd(number1, number2) // greatest common divisor
kgV(number1, number2) // least common multiple
getDivisors(number) // all divisors of 'number' inclusive 1, number
isPerfectNumber(number)
NEW-FUNCTIONS
simplifyFraction(numerator, denominator)
factorial (n) // n!
fib (n) // calculate the n-th Fibonacci term.
-----
goldbach(number) // Goldbach's conjecture
"""
from math import sqrt
def isPrime(number):
"""
input: positive integer 'number'
returns true if 'number' is prime otherwise false.
"""
# precondition
assert isinstance(number, int) and (
number >= 0
), "'number' must been an int and positive"
status = True
    # 0 and 1 are not prime.
if number <= 1:
status = False
for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
status = False
break
    # postcondition
assert isinstance(status, bool), "'status' must been from type bool"
return status
# ------------------------------------------
def sieveEr(N):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N.
    This function implements the algorithm called
    sieve of Eratosthenes.
"""
# precondition
assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
beginList = [x for x in range(2, N + 1)]
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(beginList)):
for j in range(i + 1, len(beginList)):
if (beginList[i] != 0) and (beginList[j] % beginList[i] == 0):
beginList[j] = 0
# filters actual prime numbers.
ans = [x for x in beginList if x != 0]
    # postcondition
assert isinstance(ans, list), "'ans' must been from type list"
return ans
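# Usage sketch, following the definition above:
#     sieveEr(10)  ->  [2, 3, 5, 7]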
# --------------------------------
def getPrimeNumbers(N):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N (inclusive)
    This function is more efficient than 'sieveEr(...)'.
"""
# precondition
assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
ans = []
    # iterates over all numbers from 2 up to N (inclusive);
    # if a number is prime it is appended to the list 'ans'
for number in range(2, N + 1):
if isPrime(number):
ans.append(number)
    # postcondition
assert isinstance(ans, list), "'ans' must been from type list"
return ans
# -----------------------------------------
def primeFactorization(number):
"""
input: positive integer 'number'
returns a list of the prime number factors of 'number'
"""
# precondition
assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returned by the function.
# potential prime number factors.
factor = 2
quotient = number
if number == 0 or number == 1:
ans.append(number)
    # if 'number' is not prime, build the prime factorization of 'number'
elif not isPrime(number):
while quotient != 1:
if isPrime(factor) and (quotient % factor == 0):
ans.append(factor)
                quotient //= factor  # integer division keeps 'quotient' an int
else:
factor += 1
else:
ans.append(number)
    # postcondition
assert isinstance(ans, list), "'ans' must been from type list"
return ans
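# Usage sketch, following the definition above:
#     primeFactorization(60)  ->  [2, 2, 3, 5]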
# -----------------------------------------
def greatestPrimeFactor(number):
"""
input: positive integer 'number' >= 0
returns the greatest prime number factor of 'number'
"""
# precondition
assert isinstance(number, int) and (
number >= 0
), "'number' bust been an int and >= 0"
ans = 0
# prime factorization of 'number'
primeFactors = primeFactorization(number)
ans = max(primeFactors)
    # postcondition
assert isinstance(ans, int), "'ans' must been from type int"
return ans
# ----------------------------------------------
def smallestPrimeFactor(number):
"""
input: integer 'number' >= 0
returns the smallest prime number factor of 'number'
"""
# precondition
assert isinstance(number, int) and (
number >= 0
), "'number' bust been an int and >= 0"
ans = 0
# prime factorization of 'number'
primeFactors = primeFactorization(number)
ans = min(primeFactors)
    # postcondition
assert isinstance(ans, int), "'ans' must been from type int"
return ans
# ----------------------
def isEven(number):
"""
input: integer 'number'
returns true if 'number' is even, otherwise false.
"""
# precondition
assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
return number % 2 == 0
# ------------------------
def isOdd(number):
"""
input: integer 'number'
returns true if 'number' is odd, otherwise false.
"""
# precondition
assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
return number % 2 != 0
# ------------------------
def goldbach(number):
"""
    Goldbach's conjecture
    input: an even positive integer 'number' > 2
returns a list of two prime numbers whose sum is equal to 'number'
"""
# precondition
assert (
isinstance(number, int) and (number > 2) and isEven(number)
), "'number' must been an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers from 2 up to 'number'
primeNumbers = getPrimeNumbers(number)
lenPN = len(primeNumbers)
# run variable for while-loops.
i = 0
j = None
    # exit variable, used to break out of the loops
loop = True
while i < lenPN and loop:
j = i + 1
while j < lenPN and loop:
if primeNumbers[i] + primeNumbers[j] == number:
loop = False
ans.append(primeNumbers[i])
ans.append(primeNumbers[j])
j += 1
i += 1
    # postcondition
assert (
isinstance(ans, list)
and (len(ans) == 2)
and (ans[0] + ans[1] == number)
and isPrime(ans[0])
and isPrime(ans[1])
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
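# Usage sketch, following the definition above (the first matching pair is kept):
#     goldbach(28)  ->  [5, 23]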
# ----------------------------------------------
def gcd(number1, number2):
"""
Greatest common divisor
input: two positive integer 'number1' and 'number2'
returns the greatest common divisor of 'number1' and 'number2'
"""
# precondition
assert (
isinstance(number1, int)
and isinstance(number2, int)
and (number1 >= 0)
and (number2 >= 0)
), "'number1' and 'number2' must been positive integer."
rest = 0
while number2 != 0:
rest = number1 % number2
number1 = number2
number2 = rest
    # postcondition
assert isinstance(number1, int) and (
number1 >= 0
), "'number' must been from type int and positive"
return number1
# ----------------------------------------------------
def kgV(number1, number2):
"""
Least common multiple
input: two positive integer 'number1' and 'number2'
returns the least common multiple of 'number1' and 'number2'
"""
# precondition
assert (
isinstance(number1, int)
and isinstance(number2, int)
and (number1 >= 1)
and (number2 >= 1)
), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be returned.
# for kgV (x,1)
if number1 > 1 and number2 > 1:
# builds the prime factorization of 'number1' and 'number2'
primeFac1 = primeFactorization(number1)
primeFac2 = primeFactorization(number2)
elif number1 == 1 or number2 == 1:
primeFac1 = []
primeFac2 = []
ans = max(number1, number2)
count1 = 0
count2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in primeFac1:
if n not in done:
if n in primeFac2:
count1 = primeFac1.count(n)
count2 = primeFac2.count(n)
for i in range(max(count1, count2)):
ans *= n
else:
count1 = primeFac1.count(n)
for i in range(count1):
ans *= n
done.append(n)
# iterates through primeFac2
for n in primeFac2:
if n not in done:
count2 = primeFac2.count(n)
for i in range(count2):
ans *= n
done.append(n)
    # postcondition
assert isinstance(ans, int) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
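# Worked example, following the definition above:
#     kgV(12, 18)  ->  36      # 12 = 2*2*3, 18 = 2*3*3, lcm = 2*2*3*3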
# ----------------------------------
def getPrime(n):
"""
Gets the n-th prime number.
input: positive integer 'n' >= 0
returns the n-th prime number, beginning at index 0
"""
# precondition
assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
index = 0
ans = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not isPrime(ans):
ans += 1
    # postcondition
assert isinstance(ans, int) and isPrime(
ans
), "'ans' must been a prime number and from type int"
return ans
# ---------------------------------------------------
def getPrimesBetween(pNumber1, pNumber2):
"""
input: prime numbers 'pNumber1' and 'pNumber2'
pNumber1 < pNumber2
returns a list of all prime numbers between 'pNumber1' (exclusive)
and 'pNumber2' (exclusive)
"""
# precondition
assert (
isPrime(pNumber1) and isPrime(pNumber2) and (pNumber1 < pNumber2)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
number = pNumber1 + 1 # jump to the next number
    ans = []  # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not isPrime(number):
number += 1
while number < pNumber2:
ans.append(number)
number += 1
# fetch the next prime number.
while not isPrime(number):
number += 1
    # postcondition
assert (
isinstance(ans, list) and ans[0] != pNumber1 and ans[len(ans) - 1] != pNumber2
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
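# Usage sketch, following the definition above (both bounds are excluded):
#     getPrimesBetween(3, 23)  ->  [5, 7, 11, 13, 17, 19]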
# ----------------------------------------------------
def getDivisors(n):
"""
input: positive integer 'n' >= 1
returns all divisors of n (inclusive 1 and 'n')
"""
# precondition
assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
ans = [] # will be returned.
for divisor in range(1, n + 1):
if n % divisor == 0:
ans.append(divisor)
    # postcondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisors(...)"
return ans
# ----------------------------------------------------
def isPerfectNumber(number):
"""
input: positive integer 'number' > 1
returns true if 'number' is a perfect number otherwise false.
"""
# precondition
assert isinstance(number, int) and (
number > 1
), "'number' must been an int and >= 1"
divisors = getDivisors(number)
    # sanity check on the result of getDivisors(...)
assert (
isinstance(divisors, list)
and (divisors[0] == 1)
and (divisors[len(divisors) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1]) == number
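# Usage sketch, following the definition above:
#     isPerfectNumber(28)  ->  True    # 1 + 2 + 4 + 7 + 14 == 28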
# ------------------------------------------------------------
def simplifyFraction(numerator, denominator):
"""
input: two integer 'numerator' and 'denominator'
assumes: 'denominator' != 0
returns: a tuple with simplify numerator and denominator.
"""
# precondition
assert (
isinstance(numerator, int)
and isinstance(denominator, int)
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
gcdOfFraction = gcd(abs(numerator), abs(denominator))
    # sanity check on the result of gcd(...)
assert (
isinstance(gcdOfFraction, int)
and (numerator % gcdOfFraction == 0)
and (denominator % gcdOfFraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcdOfFraction, denominator // gcdOfFraction)
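# Usage sketch, following the definition above:
#     simplifyFraction(20, 8)  ->  (5, 2)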
# -----------------------------------------------------------------
def factorial(n):
"""
input: positive integer 'n'
returns the factorial of 'n' (n!)
"""
# precondition
assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
ans = 1 # this will be return.
for factor in range(1, n + 1):
ans *= factor
return ans
# -------------------------------------------------------------------
def fib(n):
"""
input: positive integer 'n'
    returns the n-th Fibonacci term, indexing from 0
"""
# precondition
assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
tmp = 0
fib1 = 1
ans = 1 # this will be return
for i in range(n - 1):
tmp = ans
ans += fib1
fib1 = tmp
return ans
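# Minimal smoke test based on the definitions above; runs only when this
# module is executed directly.
if __name__ == "__main__":
    assert isPrime(29) and not isPrime(28)
    assert factorial(5) == 120
    assert fib(5) == 8  # sequence indexed from 0: 1, 1, 2, 3, 5, 8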
|
|
#
# Copyright 2012, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
try:
import json
except:
import simplejson as json
import time
from copy import deepcopy
from threading import Thread, Lock
import urllib
import warnings
import logging
from rest_client import RestConnection
from couchbaseclient import CouchbaseClient
class Couchbase(object):
def __init__(self, host, username, password):
if (':' in host):
[ip, port] = host.split(':')
else:
[ip, port] = host, 8091
server = {'ip': ip,
'port': port,
'username': username,
'password': password
}
self.servers = [server]
self.servers_lock = Lock()
self.rest_username = username
self.rest_password = password
server_config_uri = "http://%s:%s/pools/default" % (server['ip'],
server['port'])
config = ServerHelper.parse_server_config(server_config_uri, username,
password)
#couchApiBase will not be in node config before Couchbase Server 2.0
try:
self.couch_api_base = config["nodes"][0].get("couchApiBase")
except TypeError:
self.couch_api_base = "http://%s:8092/" % server['ip']
self.streaming_thread = Thread(name="streaming",
target=self._start_streaming, args=())
self.streaming_thread.daemon = True
self.streaming_thread.start()
def _start_streaming(self):
# this will dynamically update servers
urlopener = urllib.FancyURLopener()
urlopener.prompt_user_passwd = lambda host, realm: (self.rest_username,
self.rest_password)
current_servers = True
while current_servers:
self.servers_lock.acquire()
current_servers = deepcopy(self.servers)
self.servers_lock.release()
for server in current_servers:
url = "http://%s:%s/poolsStreaming/default" % (server["ip"],
server["port"])
f = urlopener.open(url)
while f:
try:
d = f.readline()
if not d:
# try next server if we get an EOF
f.close()
break
except:
# try next server if we fail to read
f.close()
break
try:
data = json.loads(d)
except:
continue
new_servers = []
nodes = data["nodes"]
for node in nodes:
if (node["clusterMembership"] == "active" and
node["status"] == "healthy"):
ip, port = node["hostname"].split(":")
couch_api_base = node.get("couchApiBase")
new_servers.append({"ip": ip,
"port": port,
"username": self.rest_username,
"password": self.rest_password,
"couchApiBase": couch_api_base
})
if new_servers:
new_servers.sort()
self.servers_lock.acquire()
self.servers = deepcopy(new_servers)
self.servers_lock.release()
def bucket(self, bucket_name):
return Bucket(bucket_name, self)
def buckets(self):
"""Get a list of all buckets as Buckets"""
rest = self._rest()
buckets = []
for rest_bucket in rest.get_buckets():
buckets.append(Bucket(rest_bucket.name, self))
return buckets
def create(self, bucket_name, bucket_password='', ram_quota_mb=100,
replica=0):
rest = self._rest()
rest.create_bucket(bucket=bucket_name,
ramQuotaMB=ram_quota_mb,
authType='sasl',
saslPassword=bucket_password,
replicaNumber=replica,
bucketType='membase')
ip, port, _, _ = self._rest_info()
while True:
try:
content = '{"basicStats":{"quotaPercentUsed":0.0}}'
formatter_uri = "http://%s:%s/pools/default/buckets/%s"
status, content = rest._http_request(formatter_uri %
(ip, port, bucket_name),
method='GET', params='',
headers=None, timeout=120)
except ValueError:
pass
if json.loads(content)['basicStats']['quotaPercentUsed'] > 0.0:
time.sleep(2)
break
time.sleep(1)
return Bucket(bucket_name, self)
def delete(self, bucket_name):
rest = self._rest()
rest.delete_bucket(bucket_name)
def __getitem__(self, key):
return self.bucket(key)
def __iter__(self):
return BucketIterator(self.buckets())
def _rest(self):
self.servers_lock.acquire()
server_info = deepcopy(self.servers[0])
self.servers_lock.release()
server_info['username'] = self.rest_username
server_info['password'] = self.rest_password
server_info['couchApiBase'] = self.couch_api_base
rest = RestConnection(server_info)
return rest
def _rest_info(self):
self.servers_lock.acquire()
server_info = deepcopy(self.servers[0])
self.servers_lock.release()
return (server_info['ip'], server_info['port'],
server_info['username'], server_info['password'])
class Server(Couchbase):
def __init__(self, host, username, password):
warnings.warn("Server is deprecated; use Couchbase instead",
DeprecationWarning)
Couchbase.__init__(self, host, username, password)
class BucketIterator(object):
def __init__(self, buckets):
self.buckets = buckets
def __iter__(self):
return self
def next(self):
try:
return self.buckets.pop(0)
except IndexError:
raise StopIteration
class Bucket(object):
def __init__(self, bucket_name, server):
self.server = server
self.bucket_name = bucket_name
rest = server._rest()
self.bucket_password = rest.get_bucket(bucket_name).saslPassword
ip, port, rest_username, rest_password = server._rest_info()
formatter_uri = "http://%s:%s/pools/default"
self.mc_client = CouchbaseClient(formatter_uri % (ip, port),
self.bucket_name,
self.bucket_password)
def append(self, key, value, cas=0):
return self.mc_client.append(key, value, cas)
def prepend(self, key, value, cas=0):
return self.mc_client.prepend(key, value, cas)
def incr(self, key, amt=1, init=0, exp=0):
return self.mc_client.incr(key, amt, init, exp)
def decr(self, key, amt=1, init=0, exp=0):
return self.mc_client.decr(key, amt, init, exp)
def set(self, key, expiration, flags, value):
self.mc_client.set(key, expiration, flags, value)
def add(self, key, exp, flags, val):
return self.mc_client.add(key, exp, flags, val)
def replace(self, key, exp, flags, val):
return self.mc_client.replace(key, exp, flags, val)
def get(self, key):
return self.mc_client.get(key)
def send_get(self, key):
return self.mc_client.send_get(key)
def getl(self, key, exp=15):
return self.mc_client.getl(key, exp)
def cas(self, key, exp, flags, oldVal, val):
return self.mc_client.cas(key, exp, flags, oldVal, val)
def touch(self, key, exp):
return self.mc_client.touch(key, exp)
def gat(self, key, exp):
return self.mc_client.gat(key, exp)
def getMulti(self, keys):
return self.mc_client.getMulti(keys)
def stats(self, sub=''):
return self.mc_client.stats(sub)
def delete(self, key, cas=0):
if key.startswith('_design/'):
# this is a design doc, we need to handle it differently
view = key.split('/')[1]
rest = self.server._rest()
rest.delete_view(self.bucket_name, view)
else:
return self.mc_client.delete(key, cas)
def save(self, document):
value = deepcopy(document)
if '_id' in value:
key = value['_id']
del value['_id']
else:
key = str(uuid.uuid4())
if '$flags' in value:
flags = value['$flags']
del value['$flags']
else:
flags = 0
if '$expiration' in value:
expiration = value['$expiration']
del value['$expiration']
else:
expiration = 0
if key.startswith('_design/'):
# this is a design doc, we need to handle it differently
view = key.split('/')[1]
rest = self.server._rest()
rest.create_design_doc(self.bucket_name, view, json.dumps(value))
else:
if '_rev' in value:
# couchbase works in clobber mode so for "set" _rev is useless
del value['_rev']
self.set(key, expiration, flags, json.dumps(value))
return key
def __setitem__(self, key, value):
if isinstance(value, dict):
self.set(key, value['expiration'], value['flags'], value['value'])
else:
self.set(key, 0, 0, value)
def __getitem__(self, key):
return self.get(key)
def view(self, view, **options):
params = deepcopy(options)
limit = None
if 'limit' in params:
limit = params['limit']
del params['limit']
if view.startswith("_design/"):
view_s = view.split('/')
view_doc = view_s[1]
view_map = view_s[3]
else:
view_doc = view
view_map = None
rest = self.server._rest()
results = rest.view_results(self.bucket_name, view_doc, view_map,
params, limit)
if 'rows' in results:
return results['rows']
else:
return None
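# A minimal usage sketch for the classes above; the host, credentials, bucket
# name and document contents are illustrative assumptions, not values taken
# from this module.
def _example_bucket_usage():
    cb = Couchbase("localhost:8091", "Administrator", "password")
    bucket = cb["default"]  # bucket lookup by name via __getitem__
    # save() pops '_id', '$flags' and '$expiration' from the document and uses
    # them as the key, flags and expiration of the underlying set() call.
    key = bucket.save({"_id": "beer_1", "name": "IPA", "$expiration": 0})
    return bucket.get(key)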
class ServerHelper(object):
@staticmethod
def parse_server_config(uri, username="", password=""):
urlopener = urllib.FancyURLopener()
if username and len(username) > 0 and password and len(password) > 0:
urlopener.prompt_user_passwd = lambda host, realm: (username, password)
response = urlopener.open(uri)
try:
line = response.readline()
data = json.loads(line)
return data
except:
raise Exception("unexpected error - unable to parse server config"
" at %s" % (uri))
|
|
"""
Drop-in replacement for ``django.conf.settings`` that provides a
consistent access method for settings defined in applications, the project
or Django itself. Settings can also be made editable via the admin.
"""
from __future__ import unicode_literals
from future.builtins import bytes, str
from functools import partial
from warnings import warn
from django.conf import settings as django_settings
from django.utils.functional import Promise
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from mezzanine import __version__
registry = {}
def register_setting(name=None, label=None, editable=False, description=None,
default=None, choices=None, append=False):
"""
Registers a setting that can be edited via the admin. This mostly
equates to storing the given args as a dict in the ``registry``
dict by name.
"""
if name is None:
raise TypeError("mezzanine.conf.register_setting requires the "
"'name' keyword argument.")
if editable and default is None:
raise TypeError("mezzanine.conf.register_setting requires the "
"'default' keyword argument when 'editable' is True.")
# append is True when called from an app (typically external)
# after the setting has already been registered, with the
# intention of appending to its default value.
if append and name in registry:
registry[name]["default"] += default
return
# If an editable setting has a value defined in the
# project's settings.py module, it can't be editable, since
    # this leads to a lot of confusion once its value gets
# defined in the db.
if hasattr(django_settings, name):
editable = False
if label is None:
label = name.replace("_", " ").title()
# Python 2/3 compatibility. isinstance() is overridden by future
# on Python 2 to behave as Python 3 in conjunction with either
# Python 2's native types or the future.builtins types.
if isinstance(default, bool):
# Prevent bools treated as ints
setting_type = bool
elif isinstance(default, int):
# An int or long or subclass on Py2
setting_type = int
elif isinstance(default, (str, Promise)):
# A unicode or subclass on Py2
setting_type = str
elif isinstance(default, bytes):
# A byte-string or subclass on Py2
setting_type = bytes
else:
setting_type = type(default)
registry[name] = {"name": name, "label": label, "editable": editable,
"description": description, "default": default,
"choices": choices, "type": setting_type}
class Settings(object):
"""
An object that provides settings via dynamic attribute access.
Settings that are registered as editable will be stored in the
database once the site settings form in the admin is first saved.
When these values are accessed via this settings object, *all*
database stored settings get retrieved from the database.
When accessing uneditable settings their default values are used,
unless they've been given a value in the project's settings.py
module.
The settings object also provides access to Django settings via
``django.conf.settings``, in order to provide a consistent method
of access for all settings.
"""
# These functions map setting types to the functions that should be
# used to convert them from the Unicode string stored in the database.
# If a type doesn't appear in this map, the type itself will be used.
TYPE_FUNCTIONS = {
bool: lambda val: val != "False",
bytes: partial(bytes, encoding='utf8')
}
def __init__(self):
"""
The ``_loaded`` attribute is a flag for defining whether
editable settings have been loaded from the database. It
defaults to ``True`` here to avoid errors when the DB table
is first created. It's then set to ``False`` whenever the
``use_editable`` method is called, which should be called
before using editable settings in the database.
``_editable_cache`` is the dict that stores the editable
settings once they're loaded from the database, the first
time an editable setting is accessed.
"""
self._loaded = True
self._editable_cache = {}
def use_editable(self):
"""
Empty the editable settings cache and set the loaded flag to
``False`` so that settings will be loaded from the DB on next
access. If the conf app is not installed then set the loaded
flag to ``True`` in order to bypass DB lookup entirely.
"""
self._loaded = __name__ not in getattr(self, "INSTALLED_APPS")
self._editable_cache = {}
def _load(self):
"""
Load settings from the database into cache. Delete any settings from
the database that are no longer registered, and emit a warning if
there are settings that are defined in settings.py and the database.
"""
from mezzanine.conf.models import Setting
removed_settings = []
conflicting_settings = []
for setting_obj in Setting.objects.all():
try:
registry[setting_obj.name]
except KeyError:
# Setting in DB isn't registered (removed from code),
# so add to removal list and skip remaining handling.
removed_settings.append(setting_obj.name)
continue
# Convert DB value to correct type.
setting_type = registry[setting_obj.name]["type"]
type_fn = self.TYPE_FUNCTIONS.get(setting_type, setting_type)
try:
setting_value = type_fn(setting_obj.value)
except ValueError:
                # Shouldn't occur, but just a safeguard in case the db
                # value somehow ended up as an invalid type.
setting_value = registry[setting_obj.name]["default"]
# Only use DB setting if it's not defined in settings.py
# module, in which case add it to conflicting list for
# warning.
try:
getattr(django_settings, setting_obj.name)
except AttributeError:
self._editable_cache[setting_obj.name] = setting_value
else:
if setting_value != registry[setting_obj.name]["default"]:
conflicting_settings.append(setting_obj.name)
if removed_settings:
Setting.objects.filter(name__in=removed_settings).delete()
if conflicting_settings:
warn("These settings are defined in both settings.py and "
"the database: %s. The settings.py values will be used."
% ", ".join(conflicting_settings))
self._loaded = True
def __getattr__(self, name):
# Lookup name as a registered setting or a Django setting.
try:
setting = registry[name]
except KeyError:
return getattr(django_settings, name)
# First access for an editable setting - load from DB into cache.
if setting["editable"] and not self._loaded:
self._load()
# Use cached editable setting if found, otherwise use the
# value defined in the project's settings.py module if it
# exists, finally falling back to the default defined when
# registered.
try:
return self._editable_cache[name]
except KeyError:
return getattr(django_settings, name, setting["default"])
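# Import each installed app's ``defaults`` module so that its
# ``register_setting`` calls populate ``registry``. Mezzanine's own apps are
# imported first so that other apps can override or append to the settings
# they register; errors are re-raised only when the app really has a
# ``defaults`` submodule (i.e. the failure came from inside it).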
mezz_first = lambda app: not app.startswith("mezzanine.")
for app in sorted(django_settings.INSTALLED_APPS, key=mezz_first):
module = import_module(app)
try:
import_module("%s.defaults" % app)
except:
if module_has_submodule(module, "defaults"):
raise
settings = Settings()
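# Lookup order for the instance above: a registered editable setting is served
# from the database cache once loaded, otherwise from the project's settings.py,
# otherwise from its registered default; names that were never registered fall
# straight through to ``django.conf.settings``. For example (setting names are
# Django's own or hypothetical registered ones):
#
#     settings.use_editable()   # reload editable values on next access
#     settings.DEBUG            # plain Django setting, proxied unchanged
#     settings.SITE_TAGLINE     # registered setting: DB value or default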
|
|
from basic import Basic
from sympify import _sympify
from cache import cacheit
from symbol import Symbol, Wild
from sympy import mpmath
from math import log as _log
def integer_nthroot(y, n):
"""
Return a tuple containing x = floor(y**(1/n))
and a boolean indicating whether the result is exact (that is,
whether x**n == y).
>>> from sympy import integer_nthroot
>>> integer_nthroot(16,2)
(4, True)
>>> integer_nthroot(26,2)
(5, False)
"""
if y < 0: raise ValueError("y must be nonnegative")
if n < 1: raise ValueError("n must be positive")
if y in (0, 1): return y, True
if n == 1: return y, True
if n == 2:
x, rem = mpmath.libmpf.sqrtrem(y)
return int(x), not rem
if n > y: return 1, False
# Get initial estimate for Newton's method. Care must be taken to
# avoid overflow
try:
guess = int(y ** (1./n)+0.5)
except OverflowError:
expt = _log(y,2)/n
if expt > 53:
shift = int(expt-53)
guess = int(2.0**(expt-shift)+1) << shift
else:
guess = int(2.0**expt)
#print n
if guess > 2**50:
# Newton iteration
xprev, x = -1, guess
while 1:
t = x**(n-1)
#xprev, x = x, x - (t*x-y)//(n*t)
xprev, x = x, ((n-1)*x + y//t)//n
#print n, x-xprev, abs(x-xprev) < 2
if abs(x - xprev) < 2:
break
else:
x = guess
# Compensate
t = x**n
while t < y:
x += 1
t = x**n
while t > y:
x -= 1
t = x**n
return x, t == y
class Pow(Basic):
is_Pow = True
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, b, e, **assumptions):
b = _sympify(b)
e = _sympify(e)
if assumptions.get('evaluate') is False:
return Basic.__new__(cls, b, e, **assumptions)
if e is S.Zero:
return S.One
if e is S.One:
return b
obj = b._eval_power(e)
if obj is None:
obj = Basic.__new__(cls, b, e, **assumptions)
obj.is_commutative = (b.is_commutative and e.is_commutative)
return obj
@property
def base(self):
return self._args[0]
@property
def exp(self):
return self._args[1]
def _eval_power(self, other):
if other == S.NegativeOne:
return Pow(self.base, self.exp * other)
if self.exp.is_integer and other.is_integer:
return Pow(self.base, self.exp * other)
if self.base.is_nonnegative and self.exp.is_real and other.is_real:
return Pow(self.base, self.exp * other)
if self.exp.is_even and self.base.is_real:
return Pow(abs(self.base), self.exp * other)
if self.exp.is_real and other.is_real and abs(self.exp) < S.One:
return Pow(self.base, self.exp * other)
return
def _eval_is_comparable(self):
c1 = self.base.is_comparable
if c1 is None: return
c2 = self.exp.is_comparable
if c2 is None: return
return c1 and c2
def _eval_is_even(self):
if self.exp.is_integer and self.exp.is_positive:
if self.base.is_even:
return True
if self.base.is_integer:
return False
def _eval_is_positive(self):
if self.base.is_positive:
if self.exp.is_real:
return True
elif self.base.is_negative:
if self.exp.is_even:
return True
if self.exp.is_odd:
return False
elif self.base.is_nonpositive:
if self.exp.is_odd:
return False
def _eval_is_negative(self):
if self.base.is_negative:
if self.exp.is_odd:
return True
if self.exp.is_even:
return False
elif self.base.is_positive:
if self.exp.is_real:
return False
elif self.base.is_nonnegative:
if self.exp.is_real:
return False
elif self.base.is_nonpositive:
if self.exp.is_even:
return False
elif self.base.is_real:
if self.exp.is_even:
return False
def _eval_is_integer(self):
c1 = self.base.is_integer
c2 = self.exp.is_integer
if c1 is None or c2 is None:
return None
if not c1:
if self.exp.is_nonnegative:
return False
if c1 and c2:
if self.exp.is_nonnegative or self.exp.is_positive:
return True
if self.exp.is_negative:
return False
def _eval_is_real(self):
c1 = self.base.is_real
if c1 is None: return
c2 = self.exp.is_real
if c2 is None: return
if c1 and c2:
if self.base.is_positive:
return True
else: # negative or zero (or positive)
if self.exp.is_integer:
return True
elif self.base.is_negative:
if self.exp.is_Rational:
return False
def _eval_is_odd(self):
if not (self.base.is_integer and self.exp.is_nonnegative): return
return self.base.is_odd
def _eval_is_bounded(self):
if self.exp.is_negative:
if self.base.is_infinitesimal:
return False
if self.base.is_unbounded:
return True
c1 = self.base.is_bounded
if c1 is None: return
c2 = self.exp.is_bounded
if c2 is None: return
if c1 and c2:
if self.exp.is_nonnegative:
return True
def _eval_subs(self, old, new):
if self == old:
return new
if old.func is self.func and self.base == old.base:
coeff1, terms1 = self.exp.as_coeff_terms()
coeff2, terms2 = old.exp.as_coeff_terms()
if terms1==terms2: return new ** (coeff1/coeff2) # (x**(2*y)).subs(x**(3*y),z) -> z**(2/3*y)
if old.func is C.exp:
coeff1,terms1 = old.args[0].as_coeff_terms()
coeff2,terms2 = (self.exp * C.log(self.base)).as_coeff_terms()
if terms1==terms2: return new ** (coeff1/coeff2) # (x**(2*y)).subs(exp(3*y*log(x)),z) -> z**(2/3*y)
return self.base._eval_subs(old, new) ** self.exp._eval_subs(old, new)
def as_powers_dict(self):
return { self.base : self.exp }
def as_base_exp(self):
if self.base.is_Rational and self.base.p==1:
return 1/self.base, -self.exp
return self.base, self.exp
def _eval_conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
return c(self.base)**self.exp
def _eval_expand_basic(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, *args, **hints):
"""a**(n+m) -> a**n*a**m"""
if deep:
b = self.base.expand(deep=deep, **hints)
e = self.exp.expand(deep=deep, **hints)
else:
b = self.base
e = self.exp
if e.is_Add:
expr = 1
for x in e.args:
if deep:
x = x.expand(deep=deep, **hints)
expr *= (self.base**x)
return expr
return b**e
def _eval_expand_power_base(self, deep=True, **hints):
"""(a*b)**n -> a**n * b**n"""
b = self.base
if deep:
e = self.exp.expand(deep=deep, **hints)
else:
e = self.exp
if b.is_Mul:
if deep:
return Mul(*(Pow(t.expand(deep=deep, **hints), e)\
for t in b.args))
else:
return Mul(*[Pow(t, e) for t in b.args])
else:
return b**e
def _eval_expand_mul(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_mul'):
newterm = term._eval_expand_mul(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_multinomial(self, deep=True, **hints):
"""(a+b+..) ** n -> a**n + n*a**(n-1)*b + .., n is nonzero integer"""
if deep:
b = self.base.expand(deep=deep, **hints)
e = self.exp.expand(deep=deep, **hints)
else:
b = self.base
e = self.exp
if b is None:
base = self.base
else:
base = b
if e is None:
exp = self.exp
else:
exp = e
if e is not None or b is not None:
result = base**exp
if result.is_Pow:
base, exp = result.base, result.exp
else:
return result
else:
result = None
if exp.is_Integer and exp.p > 0 and base.is_Add:
n = int(exp)
if base.is_commutative:
order_terms, other_terms = [], []
for order in base.args:
if order.is_Order:
order_terms.append(order)
else:
other_terms.append(order)
if order_terms:
# (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
f = Add(*other_terms)
g = (f**(n-1)).expand()
return (f*g).expand() + n*g*Add(*order_terms)
if base.is_number:
# Efficiently expand expressions of the form (a + b*I)**n
# where 'a' and 'b' are real numbers and 'n' is integer.
a, b = base.as_real_imag()
if a.is_Rational and b.is_Rational:
if not a.is_Integer:
if not b.is_Integer:
k = (a.q * b.q) ** n
a, b = a.p*b.q, a.q*b.p
else:
k = a.q ** n
a, b = a.p, a.q*b
elif not b.is_Integer:
k = b.q ** n
a, b = a*b.q, b.p
else:
k = 1
a, b, c, d = int(a), int(b), 1, 0
while n:
if n & 1:
c, d = a*c-b*d, b*c+a*d
n -= 1
a, b = a*a-b*b, 2*a*b
n //= 2
I = S.ImaginaryUnit
if k == 1:
return c + I*d
else:
return Integer(c)/k + I*d/k
p = other_terms
# (x+y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
# in this particular example:
# p = [x,y]; n = 3
# so now it's easy to get the correct result -- we get the
# coefficients first:
from sympy import multinomial_coefficients
expansion_dict = multinomial_coefficients(len(p), n)
# in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
# and now construct the expression.
# An elegant way would be to use Poly, but unfortunately it is
# slower than the direct method below, so it is commented out:
#b = {}
#for k in expansion_dict:
# b[k] = Integer(expansion_dict[k])
#return Poly(b, *p).as_basic()
from sympy.polys.polynomial import multinomial_as_basic
result = multinomial_as_basic(expansion_dict, *p)
return result
else:
if n == 2:
return Add(*[f*g for f in base.args for g in base.args])
else:
multi = (base**(n-1))._eval_expand_multinomial(deep=False)
if multi.is_Add:
return Add(*[f*g for f in base.args for g in base.args])
else:
return Add(*[f*multi for f in base.args])
elif exp.is_Integer and exp.p < 0 and base.is_Add:
return 1 / Pow(base, -exp.p)._eval_expand_multinomial(deep=False)
elif exp.is_Add and base.is_Number:
            # n**(a + b) --> n**a * n**b, where n, a, b are Numbers
coeff, tail = S.One, S.Zero
for term in exp.args:
if term.is_Number:
coeff *= base**term
else:
tail += term
return coeff * base**tail
else:
return result
def _eval_expand_log(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_complex(self, deep=True, **hints):
if self.exp.is_Integer:
exp = self.exp
re, im = self.base.as_real_imag()
if exp >= 0:
base = re + S.ImaginaryUnit*im
else:
mag = re**2 + im**2
base = re/mag - S.ImaginaryUnit*(im/mag)
exp = -exp
return (base**exp).expand()
elif self.exp.is_Rational:
# NOTE: This is not totally correct since for x**(p/q) with
# x being imaginary there are actually q roots, but
# only a single one is returned from here.
re, im = self.base.as_real_imag()
r = (re**2 + im**2)**S.Half
t = C.atan2(im, re)
rp, tp = r**self.exp, t*self.exp
return rp*C.cos(tp) + rp*C.sin(tp)*S.ImaginaryUnit
else:
if deep:
hints['complex'] = False
return C.re(self.expand(deep, **hints)) + \
                        S.ImaginaryUnit*C.im(self.expand(deep, **hints))
else:
return C.re(self) + S.ImaginaryUnit*C.im(self)
return C.re(self) + S.ImaginaryUnit*C.im(self)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_derivative(self, s):
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * C.log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
if exp < 0 and not base.is_real:
base = base.conjugate() / (base * base.conjugate())._evalf(prec)
exp = -exp
return (base ** exp).expand()
@cacheit
def count_ops(self, symbolic=True):
if symbolic:
return Add(*[t.count_ops(symbolic) for t in self.args]) + Symbol('POW')
return Add(*[t.count_ops(symbolic) for t in self.args]) + 1
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
# it would be nice to have is_nni working
return self.base._eval_is_polynomial(syms) and \
self.exp.is_nonnegative and \
self.exp.is_integer
else:
return True
def as_numer_denom(self):
base, exp = self.as_base_exp()
n, d = base.as_numer_denom()
if exp.is_Integer:
if exp.is_negative:
n, d = d, n
exp = -exp
return n ** exp, d ** exp
elif exp.is_Rational:
if d.is_negative is None:
# we won't split up the base
if exp.is_negative:
#return d * base ** -exp, n
return S.One, base ** -exp
else:
return self, S.One
if d.is_negative:
n = -n
d = -d
if exp.is_negative:
n, d = d, n
exp = -exp
return n ** exp, d ** exp
# unprocessed Real and NumberSymbol
return self, S.One
def matches(self, expr, repl_dict={}, evaluate=False):
if evaluate:
return self.subs(repl_dict).matches(expr, repl_dict)
expr = _sympify(expr)
b, e = expr.as_base_exp()
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = self.exp.matches(S.Zero, d)
if d is not None:
return d
d = repl_dict.copy()
d = self.base.matches(b, d)
if d is None:
return None
d = self.exp.subs(d).matches(e, d)
if d is None:
return Basic.matches(self, expr, repl_dict, evaluate)
return d
def _eval_nseries(self, x, x0, n):
from sympy import powsimp, collect
def geto(e):
"Returns the O(..) symbol, or None if there is none."
if e.is_Order:
return e
if e.is_Add:
for x in e.args:
if x.is_Order:
return x
def getn(e):
"""
Returns the order of the expression "e".
            The order is determined from the O(...) term. If there
            is no O(...) term, it returns None.
Example:
>>> getn(1+x+O(x**2))
2
>>> getn(1+x)
>>>
"""
o = geto(e)
if o is None:
return None
else:
o = o.expr
if o.is_Symbol:
return Integer(1)
if o.is_Pow:
return o.args[1]
n, d = o.as_numer_denom()
if d.func is log:
# i.e. o = x**2/log(x)
if n.is_Symbol:
return Integer(1)
if n.is_Pow:
return n.args[1]
raise NotImplementedError()
base, exp = self.args
if exp.is_Integer:
if exp > 0:
# positive integer powers are easy to expand, e.g.:
# sin(x)**4 = (x-x**3/3+...)**4 = ...
return (base.nseries(x, x0, n) ** exp)._eval_expand_multinomial(deep = False)
elif exp == -1:
# this is also easy to expand using the formula:
# 1/(1 + x) = 1 + x + x**2 + x**3 ...
# so we need to rewrite base to the form "1+x"
from sympy import log
if base.has(log(x)):
# we need to handle the log(x) singularity:
assert x0 == 0
y = Symbol("y", dummy=True)
p = self.subs(log(x), -1/y)
if not p.has(x):
p = p.nseries(y, x0, n)
p = p.subs(y, -1/log(x))
return p
base = base.nseries(x, x0, n)
if base.has(log(x)):
# we need to handle the log(x) singularity:
assert x0 == 0
y = Symbol("y", dummy=True)
self0 = 1/base
p = self0.subs(log(x), -1/y)
if not p.has(x):
p = p.nseries(y, x0, n)
p = p.subs(y, -1/log(x))
return p
prefactor = base.as_leading_term(x)
# express "rest" as: rest = 1 + k*x**l + ... + O(x**n)
rest = ((base-prefactor)/prefactor)._eval_expand_mul()
if rest == 0:
# if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to
# factor the w**4 out using collect:
return 1/collect(prefactor, x)
if rest.is_Order:
return (1+rest)/prefactor
n2 = getn(rest)
if n2 is not None:
n = n2
term2 = collect(rest.as_leading_term(x), x)
k, l = Wild("k"), Wild("l")
r = term2.match(k*x**l)
k, l = r[k], r[l]
if l.is_Rational and l>0:
pass
elif l.is_number and l>0:
l = l.evalf()
else:
raise NotImplementedError()
from sympy.functions import ceiling
terms = [1/prefactor]
for m in xrange(1,ceiling(n/l)):
new_term = terms[-1]*(-rest)
if new_term.is_Pow:
new_term = new_term._eval_expand_multinomial(deep = False)
else:
new_term = new_term._eval_expand_mul(deep = False)
terms.append(new_term)
r = Add(*terms)
if n2 is None:
# Append O(...) because it is not included in "r"
from sympy import O
r += O(x**n)
return powsimp(r, deep=True, combine='exp')
else:
# negative powers are rewritten to the cases above, for example:
# sin(x)**(-4) = 1/( sin(x)**4) = ...
# and expand the denominator:
denominator = (base**(-exp)).nseries(x, x0, n)
if 1/denominator == self:
return self
# now we have a type 1/f(x), that we know how to expand
return (1/denominator).nseries(x, x0, n)
if exp.has(x):
import sympy
return sympy.exp(exp*sympy.log(base)).nseries(x, x0, n)
if base == x:
return powsimp(self, deep=True, combine='exp')
order = C.Order(x**n, x)
x = order.symbols[0]
e = self.exp
b = self.base
ln = C.log
exp = C.exp
if e.has(x):
return exp(e * ln(b)).nseries(x, x0, n)
if b==x:
return self
b0 = b.limit(x,0)
if b0 is S.Zero or b0.is_unbounded:
lt = b.as_leading_term(x)
o = order * lt**(1-e)
bs = b.nseries(x, x0, n-e)
if bs.is_Add:
bs = bs.removeO()
if bs.is_Add:
# bs -> lt + rest -> lt * (1 + (bs/lt - 1))
return (lt**e * ((bs/lt).expand()**e).nseries(x,
x0, n-e)).expand() + order
return bs**e+order
o2 = order * (b0**-e)
# b -> b0 + (b-b0) -> b0 * (1 + (b/b0-1))
z = (b/b0-1)
#r = self._compute_oseries3(z, o2, self.taylor_term)
x = o2.symbols[0]
ln = C.log
o = C.Order(z, x)
if o is S.Zero:
r = (1+z)
else:
if o.expr.is_number:
e2 = ln(o2.expr*x)/ln(x)
else:
e2 = ln(o2.expr)/ln(o.expr)
n = e2.limit(x,0) + 1
if n.is_unbounded:
# requested accuracy gives infinite series,
# order is probably nonpolynomial e.g. O(exp(-1/x), x).
r = (1+z)
else:
try:
n = int(n)
except TypeError:
#well, the n is something more complicated (like 1+log(2))
n = int(n.evalf()) + 1
assert n>=0,`n`
l = []
g = None
for i in xrange(n+2):
g = self.taylor_term(i, z, g)
g = g.nseries(x, x0, n)
l.append(g)
r = Add(*l)
return r * b0**e + order
def _eval_as_leading_term(self, x):
if not self.exp.has(x):
return self.base.as_leading_term(x) ** self.exp
return C.exp(self.exp * C.log(self.base)).as_leading_term(x)
@cacheit
def taylor_term(self, n, x, *previous_terms): # of (1+x)**e
if n<0: return S.Zero
x = _sympify(x)
return C.Binomial(self.exp, n) * x**n
def _sage_(self):
return self.args[0]._sage_() ** self.args[1]._sage_()
from basic import Basic, S, C
from add import Add
from numbers import Integer
from mul import Mul
|
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot.writer import DataFileWriter
from robot.utils import abspath, is_string, normalize, py2to3, NormalizedDict
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return,
Template, MetadataList, ImportList)
def TestData(parent=None, source=None, include_suites=None,
warn_on_skipped=False):
"""Parses a file or directory to a corresponding model object.
:param parent: (optional) parent to be used in creation of the model object.
:param source: path where test data is read from.
:returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
:class:`~.model.TestCaseFile` otherwise.
"""
if os.path.isdir(source):
return TestDataDirectory(parent, source).populate(include_suites,
warn_on_skipped)
return TestCaseFile(parent, source).populate()
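# A minimal parsing sketch, assuming a hypothetical suite path; TestData()
# returns a TestCaseFile for a file and a TestDataDirectory for a directory.
def _example_parse(path='tests/login.txt'):
    suite = TestData(source=path)
    table_names = [table.name for table in suite]  # table headers in order
    return suite.name, table_names, list(suite.keywords)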
class _TestData(object):
_setting_table_names = 'Setting', 'Settings', 'Metadata'
_variable_table_names = 'Variable', 'Variables'
_testcase_table_names = 'Test Case', 'Test Cases'
_keyword_table_names = 'Keyword', 'Keywords', 'User Keyword', 'User Keywords'
_deprecated = NormalizedDict({'Metadata': 'Settings',
'User Keyword': 'Keywords',
'User Keywords': 'Keywords'})
def __init__(self, parent=None, source=None):
self.parent = parent
self.source = abspath(source) if source else None
self.children = []
self._tables = NormalizedDict(self._get_tables())
def _get_tables(self):
for names, table in [(self._setting_table_names, self.setting_table),
(self._variable_table_names, self.variable_table),
(self._testcase_table_names, self.testcase_table),
(self._keyword_table_names, self.keyword_table)]:
for name in names:
yield name, table
def start_table(self, header_row):
try:
name = header_row[0]
table = self._tables[name]
if name in self._deprecated:
self._report_deprecated(name)
except (KeyError, IndexError):
return None
if not self._table_is_allowed(table):
return None
table.set_header(header_row)
return table
def _report_deprecated(self, name):
self.report_invalid_syntax("Table name '%s' is deprecated. Please use '%s' instead." %
(name, self._deprecated[name]), level='WARN')
@property
def name(self):
return self._format_name(self._get_basename()) if self.source else None
def _get_basename(self):
return os.path.splitext(os.path.basename(self.source))[0]
def _format_name(self, name):
name = self._strip_possible_prefix_from_name(name)
name = name.replace('_', ' ').strip()
return name.title() if name.islower() else name
def _strip_possible_prefix_from_name(self, name):
return name.split('__', 1)[-1]
@property
def keywords(self):
return self.keyword_table.keywords
@property
def imports(self):
return self.setting_table.imports
def report_invalid_syntax(self, message, level='ERROR'):
initfile = getattr(self, 'initfile', None)
path = os.path.join(self.source, initfile) if initfile else self.source
LOGGER.write("Error in file '%s': %s" % (path, message), level)
def save(self, **options):
"""Writes this datafile to disk.
:param options: Configuration for writing. These are passed to
:py:class:`~robot.writer.datafilewriter.WritingContext` as
keyword arguments.
See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
"""
return DataFileWriter(**options).write(self)
class TestCaseFile(_TestData):
"""The parsed test case file object.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
def __init__(self, parent=None, source=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = TestCaseFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, parent, source)
def populate(self):
FromFilePopulator(self).populate(self.source)
self._validate()
return self
def _validate(self):
if not self.testcase_table.is_started():
raise DataError('File has no test case table.')
def _table_is_allowed(self, table):
return True
def has_tests(self):
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.testcase_table, self.keyword_table]:
yield table
class ResourceFile(_TestData):
"""The parsed resource file object.
:param source: path where resource file is read from.
"""
def __init__(self, source=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = ResourceFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, source=source)
def populate(self):
FromFilePopulator(self).populate(self.source)
self._report_status()
return self
def _report_status(self):
if self.setting_table or self.variable_table or self.keyword_table:
LOGGER.info("Imported resource file '%s' (%d keywords)."
% (self.source, len(self.keyword_table.keywords)))
else:
LOGGER.warn("Imported resource file '%s' is empty." % self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
raise DataError("Resource file '%s' contains a test case table "
"which is not allowed." % self.source)
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
class TestDataDirectory(_TestData):
"""The parsed test data directory object. Contains hiearchical structure
of other :py:class:`.TestDataDirectory` and :py:class:`.TestCaseFile`
objects.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
def __init__(self, parent=None, source=None):
self.directory = source
self.initfile = None
self.setting_table = InitFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, parent, source)
def populate(self, include_suites=None, warn_on_skipped=False, recurse=True):
FromDirectoryPopulator().populate(self.source, self, include_suites,
warn_on_skipped, recurse)
self.children = [ch for ch in self.children if ch.has_tests()]
return self
def _get_basename(self):
return os.path.basename(self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
LOGGER.error("Test suite init file in '%s' contains a test case "
"table which is not allowed." % self.source)
return False
return True
def add_child(self, path, include_suites):
self.children.append(TestData(parent=self,source=path,
include_suites=include_suites))
def has_tests(self):
return any(ch.has_tests() for ch in self.children)
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
@py2to3
class _Table(object):
def __init__(self, parent):
self.parent = parent
self._header = None
def set_header(self, header):
self._header = self._prune_old_style_headers(header)
def _prune_old_style_headers(self, header):
if len(header) < 3:
return header
if self._old_header_matcher.match(header):
return [header[0]]
return header
@property
def header(self):
return self._header or [self.type.title() + 's']
@property
def name(self):
return self.header[0]
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def __nonzero__(self):
return bool(self._header or len(self))
def __len__(self):
return sum(1 for item in self)
class _WithSettings(object):
_deprecated = {'document': 'Documentation',
'suiteprecondition': 'Suite Setup',
'suitepostcondition': 'Suite Teardown',
'testprecondition': 'Test Setup',
'testpostcondition': 'Test Teardown',
'precondition': 'Setup',
'postcondition': 'Teardown'}
def get_setter(self, setting_name):
normalized = self.normalize(setting_name)
if normalized in self._deprecated:
self._report_deprecated(setting_name, self._deprecated[normalized])
normalized = self.normalize(self._deprecated[normalized])
if normalized in self._setters:
return self._setters[normalized](self)
self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
def _report_deprecated(self, deprecated, use_instead):
self.report_invalid_syntax(
"Setting '%s' is deprecated. Use '%s' instead."
% (deprecated.rstrip(':'), use_instead), level='WARN')
def is_setting(self, setting_name):
return self.normalize(setting_name) in self._setters
def normalize(self, setting):
result = normalize(setting)
return result[:-1] if result[-1:] == ':' else result
class _SettingTable(_Table, _WithSettings):
type = 'setting'
def __init__(self, parent):
_Table.__init__(self, parent)
self.doc = Documentation('Documentation', self)
self.suite_setup = Fixture('Suite Setup', self)
self.suite_teardown = Fixture('Suite Teardown', self)
self.test_setup = Fixture('Test Setup', self)
self.test_teardown = Fixture('Test Teardown', self)
self.force_tags = Tags('Force Tags', self)
self.default_tags = Tags('Default Tags', self)
self.test_template = Template('Test Template', self)
self.test_timeout = Timeout('Test Timeout', self)
self.metadata = MetadataList(self)
self.imports = ImportList(self)
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add_metadata(self, name, value='', comment=None):
self.metadata.add(Metadata(self, name, value, comment))
return self.metadata[-1]
def add_library(self, name, args=None, comment=None):
self.imports.add(Library(self, name, args, comment=comment))
return self.imports[-1]
def add_resource(self, name, invalid_args=None, comment=None):
self.imports.add(Resource(self, name, invalid_args, comment=comment))
return self.imports[-1]
def add_variables(self, name, args=None, comment=None):
self.imports.add(Variables(self, name, args, comment=comment))
return self.imports[-1]
def __len__(self):
return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'suitesetup': lambda s: s.suite_setup.populate,
'suiteteardown': lambda s: s.suite_teardown.populate,
'testsetup': lambda s: s.test_setup.populate,
'testteardown': lambda s: s.test_teardown.populate,
'forcetags': lambda s: s.force_tags.populate,
'defaulttags': lambda s: s.default_tags.populate,
'testtemplate': lambda s: s.test_template.populate,
'testtimeout': lambda s: s.test_timeout.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables,
'metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.default_tags, self.test_template, self.test_timeout] \
+ self.metadata.data + self.imports.data:
yield setting
class ResourceFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables}
def __iter__(self):
for setting in [self.doc] + self.imports.data:
yield setting
class InitFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'suitesetup': lambda s: s.suite_setup.populate,
'suiteteardown': lambda s: s.suite_teardown.populate,
'testsetup': lambda s: s.test_setup.populate,
'testteardown': lambda s: s.test_teardown.populate,
'testtimeout': lambda s: s.test_timeout.populate,
'forcetags': lambda s: s.force_tags.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables,
'metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.test_timeout] + self.metadata.data + self.imports.data:
yield setting
class VariableTable(_Table):
type = 'variable'
def __init__(self, parent):
_Table.__init__(self, parent)
self.variables = []
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add(self, name, value, comment=None):
self.variables.append(Variable(self, name, value, comment))
def __iter__(self):
return iter(self.variables)
@py2to3
class TestCaseTable(_Table):
type = 'test case'
def __init__(self, parent):
_Table.__init__(self, parent)
self.tests = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.tests.append(TestCase(self, name))
return self.tests[-1]
def __iter__(self):
return iter(self.tests)
def is_started(self):
return bool(self._header)
def __nonzero__(self):
return True
class KeywordTable(_Table):
type = 'keyword'
def __init__(self, parent):
_Table.__init__(self, parent)
self.keywords = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.keywords.append(UserKeyword(self, name))
return self.keywords[-1]
def __iter__(self):
return iter(self.keywords)
@py2to3
class Variable(object):
def __init__(self, parent, name, value, comment=None):
self.parent = parent
self.name = name.rstrip('= ')
if name.startswith('$') and value == []:
value = ''
if is_string(value):
value = [value]
self.value = value
self.comment = Comment(comment)
def as_list(self):
if self.has_data():
return [self.name] + self.value + self.comment.as_list()
return self.comment.as_list()
def is_set(self):
return True
def is_for_loop(self):
return False
def has_data(self):
return bool(self.name or ''.join(self.value))
def __nonzero__(self):
return self.has_data()
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax("Setting variable '%s' failed: %s"
% (self.name, message), level)
class _WithSteps(object):
def add_step(self, content, comment=None):
self.steps.append(Step(content, comment))
return self.steps[-1]
def copy(self, name):
new = copy.deepcopy(self)
new.name = name
self._add_to_parent(new)
return new
class TestCase(_WithSteps, _WithSettings):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.template = Template('[Template]', self)
self.tags = Tags('[Tags]', self)
self.setup = Fixture('[Setup]', self)
self.teardown = Fixture('[Teardown]', self)
self.timeout = Timeout('[Timeout]', self)
self.steps = []
_setters = {'documentation': lambda s: s.doc.populate,
'template': lambda s: s.template.populate,
'setup': lambda s: s.setup.populate,
'teardown': lambda s: s.teardown.populate,
'tags': lambda s: s.tags.populate,
'timeout': lambda s: s.timeout.populate}
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def add_for_loop(self, declaration, comment=None):
self.steps.append(ForLoop(declaration, comment))
return self.steps[-1]
def report_invalid_syntax(self, message, level='ERROR'):
type_ = 'test case' if type(self) is TestCase else 'keyword'
message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
self.parent.report_invalid_syntax(message, level)
def _add_to_parent(self, test):
self.parent.tests.append(test)
@property
def settings(self):
return [self.doc, self.tags, self.setup, self.template, self.timeout,
self.teardown]
def __iter__(self):
for element in [self.doc, self.tags, self.setup,
self.template, self.timeout] \
+ self.steps + [self.teardown]:
yield element
class UserKeyword(TestCase):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.args = Arguments('[Arguments]', self)
self.return_ = Return('[Return]', self)
self.timeout = Timeout('[Timeout]', self)
self.teardown = Fixture('[Teardown]', self)
self.tags = Tags('[Tags]', self)
self.steps = []
_setters = {'documentation': lambda s: s.doc.populate,
'arguments': lambda s: s.args.populate,
'return': lambda s: s.return_.populate,
'timeout': lambda s: s.timeout.populate,
'teardown': lambda s: s.teardown.populate,
'tags': lambda s: s.tags.populate}
def _add_to_parent(self, test):
self.parent.keywords.append(test)
@property
def settings(self):
return [self.args, self.doc, self.tags, self.timeout, self.teardown, self.return_]
def __iter__(self):
for element in [self.args, self.doc, self.tags, self.timeout] \
+ self.steps + [self.teardown, self.return_]:
yield element
class ForLoop(_WithSteps):
"""The parsed representation of a for-loop.
:param list declaration: The literal cell values that declare the loop
(excluding ":FOR").
:param str comment: A comment, default None.
:ivar str flavor: The value of the 'IN' item, uppercased.
Typically 'IN', 'IN RANGE', 'IN ZIP', or 'IN ENUMERATE'.
:ivar list vars: Variables set per-iteration by this loop.
:ivar list items: Loop values that come after the 'IN' item.
:ivar str comment: A comment, or None.
:ivar list steps: A list of steps in the loop.
"""
def __init__(self, declaration, comment=None):
self.flavor, index = self._get_flavors_and_index(declaration)
self.vars = declaration[:index]
self.items = declaration[index+1:]
self.comment = Comment(comment)
self.steps = []
def _get_flavors_and_index(self, declaration):
for index, item in enumerate(declaration):
item = item.upper()
if item.replace(' ', '').startswith('IN'):
return item, index
return 'IN', len(declaration)
def is_comment(self):
return False
def is_for_loop(self):
return True
def as_list(self, indent=False, include_comment=True):
comments = self.comment.as_list() if include_comment else []
return [': FOR'] + self.vars + [self.flavor] + self.items + comments
def __iter__(self):
return iter(self.steps)
def is_set(self):
return True
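# Parsing sketch for ForLoop above (cell values are illustrative):
#
#     loop = ForLoop(['${i}', 'IN RANGE', '10'])
#     loop.vars      -> ['${i}']
#     loop.flavor    -> 'IN RANGE'
#     loop.items     -> ['10']
#     loop.as_list() -> [': FOR', '${i}', 'IN RANGE', '10']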
class Step(object):
def __init__(self, content, comment=None):
self.assign = self._get_assign(content)
self.name = content.pop(0) if content else None
self.args = content
self.comment = Comment(comment)
def _get_assign(self, content):
assign = []
while content and is_var(content[0].rstrip('= ')):
assign.append(content.pop(0))
return assign
def is_comment(self):
return not (self.assign or self.name or self.args)
def is_for_loop(self):
return False
def is_set(self):
return True
def as_list(self, indent=False, include_comment=True):
kw = [self.name] if self.name is not None else []
comments = self.comment.as_list() if include_comment else []
data = self.assign + kw + self.args + comments
if indent:
data.insert(0, '')
return data
class OldStyleSettingAndVariableTableHeaderMatcher(object):
def match(self, header):
        return all(e.lower() == 'value' for e in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
def match(self, header):
if header[1].lower() != 'action':
return False
for h in header[2:]:
if not h.lower().startswith('arg'):
return False
return True
|
|
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
- address family:
- AF_INET: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
save some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are four classes in an inheritance diagram that represent
synchronous servers of four types:
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingServer and ThreadingServer mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer!
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the mix-in request handler
classes StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneously from applying
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class's
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses can't be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
"""
from __future__ import division
__version__ = "0.2"
import socket
import sys
import os
class TCPServer:
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- process_request(request, client_address)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- request_queue_size (only for stream sockets)
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.socket = socket.socket(self.address_family,
self.socket_type)
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
#print self.server_address
self.socket.bind(self.server_address)
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def serve_forever(self):
"""Handle one request at a time until doomsday."""
while 1:
self.handle_request()
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking."""
request, client_address = self.get_request()
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return true if we should proceed with this request.
"""
return 1
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc()
print '-'*40
class UDPServer(TCPServer):
"""UDP server class."""
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
active_children = None
def collect_children(self):
"""Internal routine to wait for died children."""
while self.active_children:
pid, status = os.waitpid(0, os.WNOHANG)
if not pid: break
self.active_children.remove(pid)
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
self.collect_children()
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
os._exit(0)
except Exception:
try:
self.handle_error(request,
client_address)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
import thread, threading
t=threading.Thread(target=self.finish_request,
args=(request, client_address))
t.setDaemon(1)
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
    can define arbitrary other instance variables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
def setup(self):
pass
def __del__(self):
pass
def handle(self):
pass
def finish(self):
pass
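# A minimal service sketch based on the classes above; the port is an
# arbitrary example value. handle() echoes one chunk of data back to the
# TCP client that sent it.
def _example_echo_server(port=8007):
    class EchoHandler(BaseRequestHandler):
        def handle(self):
            data = self.request.recv(1024)  # self.request is the connected socket
            self.request.send(data)
    server = TCPServer(('', port), EchoHandler)
    server.serve_forever()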
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', 0)
self.wfile = self.connection.makefile('wb', 0)
def finish(self):
self.wfile.flush()
self.wfile.close()
self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO.StringIO(self.packet)
        self.wfile = StringIO.StringIO()  # reply buffer starts empty; sent back to the client in finish()
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
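# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal line-echo service wired together from the classes above. The
# handler class name and the address/port below are hypothetical examples.
class _EchoRequestHandler(StreamRequestHandler):
    """Read one line from the client and write it straight back."""
    def handle(self):
        line = self.rfile.readline()
        self.wfile.write(line)
if __name__ == '__main__':
    # One thread per connection; serve_forever() blocks until interrupted.
    _server = ThreadingTCPServer(('127.0.0.1', 9999), _EchoRequestHandler)
    _server.serve_forever()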
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AzureMonitorMetricsDestination(msrest.serialization.Model):
"""Azure Monitor Metrics destination.
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureMonitorMetricsDestination, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class DataCollectionRule(msrest.serialization.Model):
"""Definition of what monitoring data to collect and where that data should be sent.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDataSources
:param destinations: The specification of destinations.
:type destinations:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDestinations
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2019_11_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'immutable_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'data_sources': {'key': 'dataSources', 'type': 'DataCollectionRuleDataSources'},
'destinations': {'key': 'destinations', 'type': 'DataCollectionRuleDestinations'},
'data_flows': {'key': 'dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRule, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.immutable_id = None
self.data_sources = kwargs.get('data_sources', None)
self.destinations = kwargs.get('destinations', None)
self.data_flows = kwargs.get('data_flows', None)
self.provisioning_state = None
class DataCollectionRuleAssociation(msrest.serialization.Model):
"""Definition of association of a data collection rule with a monitored Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'data_collection_rule_id': {'key': 'dataCollectionRuleId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleAssociation, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.data_collection_rule_id = kwargs.get('data_collection_rule_id', None)
self.provisioning_state = None
class DataCollectionRuleAssociationProxyOnlyResource(msrest.serialization.Model):
"""Definition of generic ARM proxy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'data_collection_rule_id': {'key': 'properties.dataCollectionRuleId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.etag = None
self.description = kwargs.get('description', None)
self.data_collection_rule_id = kwargs.get('data_collection_rule_id', None)
self.provisioning_state = None
class DataCollectionRuleAssociationProxyOnlyResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value:
list[~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionRuleAssociationProxyOnlyResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceListResult, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class DataCollectionRuleAssociationProxyOnlyResourceProperties(DataCollectionRuleAssociation):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'data_collection_rule_id': {'key': 'dataCollectionRuleId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceProperties, self).__init__(**kwargs)
class DataSourcesSpec(msrest.serialization.Model):
"""Specification of data sources that will be collected.
:param performance_counters: The list of performance counter data source configurations.
:type performance_counters:
list[~$(python-base-namespace).v2019_11_01_preview.models.PerfCounterDataSource]
:param windows_event_logs: The list of Windows Event Log data source configurations.
:type windows_event_logs:
list[~$(python-base-namespace).v2019_11_01_preview.models.WindowsEventLogDataSource]
:param syslog: The list of Syslog data source configurations.
:type syslog: list[~$(python-base-namespace).v2019_11_01_preview.models.SyslogDataSource]
:param extensions: The list of Azure VM extension data source configurations.
:type extensions:
list[~$(python-base-namespace).v2019_11_01_preview.models.ExtensionDataSource]
"""
_attribute_map = {
'performance_counters': {'key': 'performanceCounters', 'type': '[PerfCounterDataSource]'},
'windows_event_logs': {'key': 'windowsEventLogs', 'type': '[WindowsEventLogDataSource]'},
'syslog': {'key': 'syslog', 'type': '[SyslogDataSource]'},
'extensions': {'key': 'extensions', 'type': '[ExtensionDataSource]'},
}
def __init__(
self,
**kwargs
):
super(DataSourcesSpec, self).__init__(**kwargs)
self.performance_counters = kwargs.get('performance_counters', None)
self.windows_event_logs = kwargs.get('windows_event_logs', None)
self.syslog = kwargs.get('syslog', None)
self.extensions = kwargs.get('extensions', None)
class DataCollectionRuleDataSources(DataSourcesSpec):
"""The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param performance_counters: The list of performance counter data source configurations.
:type performance_counters:
list[~$(python-base-namespace).v2019_11_01_preview.models.PerfCounterDataSource]
:param windows_event_logs: The list of Windows Event Log data source configurations.
:type windows_event_logs:
list[~$(python-base-namespace).v2019_11_01_preview.models.WindowsEventLogDataSource]
:param syslog: The list of Syslog data source configurations.
:type syslog: list[~$(python-base-namespace).v2019_11_01_preview.models.SyslogDataSource]
:param extensions: The list of Azure VM extension data source configurations.
:type extensions:
list[~$(python-base-namespace).v2019_11_01_preview.models.ExtensionDataSource]
"""
_attribute_map = {
'performance_counters': {'key': 'performanceCounters', 'type': '[PerfCounterDataSource]'},
'windows_event_logs': {'key': 'windowsEventLogs', 'type': '[WindowsEventLogDataSource]'},
'syslog': {'key': 'syslog', 'type': '[SyslogDataSource]'},
'extensions': {'key': 'extensions', 'type': '[ExtensionDataSource]'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleDataSources, self).__init__(**kwargs)
class DestinationsSpec(msrest.serialization.Model):
"""Specification of destinations that can be used in data flows.
:param log_analytics: List of Log Analytics destinations.
:type log_analytics:
list[~$(python-base-namespace).v2019_11_01_preview.models.LogAnalyticsDestination]
:param azure_monitor_metrics: Azure Monitor Metrics destination.
:type azure_monitor_metrics:
~$(python-base-namespace).v2019_11_01_preview.models.DestinationsSpecAzureMonitorMetrics
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': '[LogAnalyticsDestination]'},
'azure_monitor_metrics': {'key': 'azureMonitorMetrics', 'type': 'DestinationsSpecAzureMonitorMetrics'},
}
def __init__(
self,
**kwargs
):
super(DestinationsSpec, self).__init__(**kwargs)
self.log_analytics = kwargs.get('log_analytics', None)
self.azure_monitor_metrics = kwargs.get('azure_monitor_metrics', None)
class DataCollectionRuleDestinations(DestinationsSpec):
"""The specification of destinations.
:param log_analytics: List of Log Analytics destinations.
:type log_analytics:
list[~$(python-base-namespace).v2019_11_01_preview.models.LogAnalyticsDestination]
:param azure_monitor_metrics: Azure Monitor Metrics destination.
:type azure_monitor_metrics:
~$(python-base-namespace).v2019_11_01_preview.models.DestinationsSpecAzureMonitorMetrics
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': '[LogAnalyticsDestination]'},
'azure_monitor_metrics': {'key': 'azureMonitorMetrics', 'type': 'DestinationsSpecAzureMonitorMetrics'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleDestinations, self).__init__(**kwargs)
class DataCollectionRuleResource(msrest.serialization.Model):
"""Definition of ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The geo-location where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: The kind of the resource. Possible values include: "Linux", "Windows".
:type kind: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleResourceKind
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDataSources
:param destinations: The specification of destinations.
:type destinations:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDestinations
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2019_11_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'immutable_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'immutable_id': {'key': 'properties.immutableId', 'type': 'str'},
'data_sources': {'key': 'properties.dataSources', 'type': 'DataCollectionRuleDataSources'},
'destinations': {'key': 'properties.destinations', 'type': 'DataCollectionRuleDestinations'},
'data_flows': {'key': 'properties.dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleResource, self).__init__(**kwargs)
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
self.kind = kwargs.get('kind', None)
self.id = None
self.name = None
self.type = None
self.etag = None
self.description = kwargs.get('description', None)
self.immutable_id = None
self.data_sources = kwargs.get('data_sources', None)
self.destinations = kwargs.get('destinations', None)
self.data_flows = kwargs.get('data_flows', None)
self.provisioning_state = None
class DataCollectionRuleResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value:
list[~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionRuleResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleResourceListResult, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class DataCollectionRuleResourceProperties(DataCollectionRule):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDataSources
:param destinations: The specification of destinations.
:type destinations:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDestinations
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2019_11_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'immutable_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'data_sources': {'key': 'dataSources', 'type': 'DataCollectionRuleDataSources'},
'destinations': {'key': 'destinations', 'type': 'DataCollectionRuleDestinations'},
'data_flows': {'key': 'dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataCollectionRuleResourceProperties, self).__init__(**kwargs)
class DataFlow(msrest.serialization.Model):
"""Definition of which streams are sent to which destinations.
:param streams: List of streams for this data flow.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataFlowStreams]
:param destinations: List of destinations for this data flow.
:type destinations: list[str]
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'destinations': {'key': 'destinations', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(DataFlow, self).__init__(**kwargs)
self.streams = kwargs.get('streams', None)
self.destinations = kwargs.get('destinations', None)
class DestinationsSpecAzureMonitorMetrics(AzureMonitorMetricsDestination):
"""Azure Monitor Metrics destination.
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DestinationsSpecAzureMonitorMetrics, self).__init__(**kwargs)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~$(python-base-namespace).v2019_11_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~$(python-base-namespace).v2019_11_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~$(python-base-namespace).v2019_11_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class ExtensionDataSource(msrest.serialization.Model):
"""Definition of which data will be collected from a separate VM extension that integrates with the Azure Monitor Agent.
    Collected from either Windows or Linux machines, depending on which extension is defined.
All required parameters must be populated in order to send to Azure.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownExtensionDataSourceStreams]
:param extension_name: Required. The name of the VM extension.
:type extension_name: str
    :param extension_settings: The extension settings. The format is specific to the
     particular extension.
:type extension_settings: any
:param input_data_sources: The list of data sources this extension needs data from.
:type input_data_sources: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'extension_name': {'required': True},
}
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'extension_name': {'key': 'extensionName', 'type': 'str'},
'extension_settings': {'key': 'extensionSettings', 'type': 'object'},
'input_data_sources': {'key': 'inputDataSources', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExtensionDataSource, self).__init__(**kwargs)
self.streams = kwargs.get('streams', None)
self.extension_name = kwargs['extension_name']
self.extension_settings = kwargs.get('extension_settings', None)
self.input_data_sources = kwargs.get('input_data_sources', None)
self.name = kwargs.get('name', None)
class LogAnalyticsDestination(msrest.serialization.Model):
"""Log Analytics destination.
Variables are only populated by the server, and will be ignored when sending a request.
:param workspace_resource_id: The resource ID of the Log Analytics workspace.
:type workspace_resource_id: str
:ivar workspace_id: The Customer ID of the Log Analytics workspace.
:vartype workspace_id: str
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'workspace_id': {'readonly': True},
}
_attribute_map = {
'workspace_resource_id': {'key': 'workspaceResourceId', 'type': 'str'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogAnalyticsDestination, self).__init__(**kwargs)
self.workspace_resource_id = kwargs.get('workspace_resource_id', None)
self.workspace_id = None
self.name = kwargs.get('name', None)
class PerfCounterDataSource(msrest.serialization.Model):
"""Definition of which performance counters will be collected and how they will be collected by this data collection rule.
Collected from both Windows and Linux machines where the counter is present.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownPerfCounterDataSourceStreams]
:param sampling_frequency_in_seconds: The number of seconds between consecutive counter
measurements (samples).
:type sampling_frequency_in_seconds: int
:param counter_specifiers: A list of specifier names of the performance counters you want to
collect.
Use a wildcard (*) to collect a counter for all instances.
To get a list of performance counters on Windows, run the command 'typeperf'.
:type counter_specifiers: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'sampling_frequency_in_seconds': {'key': 'samplingFrequencyInSeconds', 'type': 'int'},
'counter_specifiers': {'key': 'counterSpecifiers', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PerfCounterDataSource, self).__init__(**kwargs)
self.streams = kwargs.get('streams', None)
self.sampling_frequency_in_seconds = kwargs.get('sampling_frequency_in_seconds', None)
self.counter_specifiers = kwargs.get('counter_specifiers', None)
self.name = kwargs.get('name', None)
class ResourceForUpdate(msrest.serialization.Model):
"""Definition of ARM tracked top level resource properties for update operation.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ResourceForUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class SyslogDataSource(msrest.serialization.Model):
"""Definition of which syslog data will be collected and how it will be collected.
Only collected from Linux machines.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownSyslogDataSourceStreams]
:param facility_names: The list of facility names.
:type facility_names: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownSyslogDataSourceFacilityNames]
:param log_levels: The log levels to collect.
:type log_levels: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownSyslogDataSourceLogLevels]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'facility_names': {'key': 'facilityNames', 'type': '[str]'},
'log_levels': {'key': 'logLevels', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SyslogDataSource, self).__init__(**kwargs)
self.streams = kwargs.get('streams', None)
self.facility_names = kwargs.get('facility_names', None)
self.log_levels = kwargs.get('log_levels', None)
self.name = kwargs.get('name', None)
class WindowsEventLogDataSource(msrest.serialization.Model):
"""Definition of which Windows Event Log events will be collected and how they will be collected.
Only collected from Windows machines.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownWindowsEventLogDataSourceStreams]
:param x_path_queries: A list of Windows Event Log queries in XPATH format.
:type x_path_queries: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'x_path_queries': {'key': 'xPathQueries', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WindowsEventLogDataSource, self).__init__(**kwargs)
self.streams = kwargs.get('streams', None)
self.x_path_queries = kwargs.get('x_path_queries', None)
self.name = kwargs.get('name', None)
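# --- Illustrative usage sketch (added; not part of the generated models) ---
# Shows how the kwargs-based constructors above compose into a full data
# collection rule payload. Names, IDs and stream values are hypothetical.
if __name__ == '__main__':
    rule = DataCollectionRuleResource(
        location='eastus',
        description='Example: collect CPU counters and route them to Log Analytics.',
        data_sources=DataCollectionRuleDataSources(
            performance_counters=[
                PerfCounterDataSource(
                    streams=['Microsoft-Perf'],
                    sampling_frequency_in_seconds=60,
                    counter_specifiers=['\\Processor(_Total)\\% Processor Time'],
                    name='cpuCounters',
                ),
            ],
        ),
        destinations=DataCollectionRuleDestinations(
            log_analytics=[
                LogAnalyticsDestination(
                    workspace_resource_id='<log-analytics-workspace-resource-id>',
                    name='laDestination',
                ),
            ],
        ),
        data_flows=[
            DataFlow(streams=['Microsoft-Perf'], destinations=['laDestination']),
        ],
    )
    # Read-only attributes (id, etag, provisioning_state, ...) stay None until
    # the service populates them in a response.
    assert rule.provisioning_state is None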
|
|
import os
import re
import shutil
import sys
from vee import log
from vee.cli import style, style_note, style_warning
from vee.envvars import join_env_path
from vee.package import Package
from vee.pipeline.generic import GenericBuilder
from vee.subproc import call
from vee.utils import find_in_tree
python_version = '%d.%d' % (sys.version_info[:2])
site_packages = os.path.join('lib', 'python' + python_version, 'site-packages')
def call_setup_py(setup_py, args, **kwargs):
kwargs['cwd'] = os.path.dirname(setup_py)
cmd = ['python', '-c', 'import sys, setuptools; sys.argv[0]=__file__=%r; execfile(__file__)' % os.path.basename(setup_py)]
cmd.extend(args)
return call(cmd, **kwargs)
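# Illustrative example (added): the wrapper above is typically given a
# discovered setup.py plus distutils-style arguments, e.g.
#   call_setup_py('/path/to/pkg/setup.py', ['egg_info'], indent=True, verbosity=1)
# The path shown here is a hypothetical placeholder.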
class PythonBuilder(GenericBuilder):
factory_priority = 5000
@classmethod
def factory(cls, step, pkg):
if step != 'inspect':
return
setup_path = find_in_tree(pkg.build_path, 'setup.py')
egg_path = find_in_tree(pkg.build_path, 'EGG-INFO', 'dir') or find_in_tree(pkg.build_path, '*.egg-info', 'dir')
dist_path = find_in_tree(pkg.build_path, '*.dist-info', 'dir')
if setup_path or egg_path or dist_path:
return cls(pkg, (setup_path, egg_path, dist_path))
def get_next(self, name):
if name in ('build', 'install', 'develop'):
return self
def __init__(self, pkg, paths):
super(PythonBuilder, self).__init__(pkg)
self.setup_path, self.egg_path, self.dist_path = paths
def inspect(self):
pkg = self.package
if self.setup_path and not self.egg_path:
log.info(style_note('Building Python egg-info'))
res = call_setup_py(self.setup_path, ['egg_info'], env=pkg.fresh_environ(), indent=True, verbosity=1)
if res:
raise RuntimeError('Could not build Python package')
self.egg_path = find_in_tree(pkg.build_path, '*.egg-info', 'dir')
if not self.egg_path:
log.warning('Could not find newly created *.egg-info')
if self.egg_path:
requires_path = os.path.join(self.egg_path, 'requires.txt')
if os.path.exists(requires_path):
for line in open(requires_path, 'rb'):
line = line.strip()
if not line:
continue
if line.startswith('['):
break
                    name = re.split(r'\W', line)[0].lower()
log.debug('%s depends on %s' % (pkg.name, name))
pkg.dependencies.append(Package(name=name, url='pypi:%s' % name))
def build(self):
pkg = self.package
if self.setup_path:
# Some packages need to be built at the same time as installing.
# Anything which uses the distutils install_clib command, for instance...
if pkg.defer_setup_build:
log.info(style_note('Deferring build to install stage'))
return
log.info(style_note('Building Python package'))
cmd = ['build']
cmd.extend(pkg.config)
res = call_setup_py(self.setup_path, cmd, env=pkg.fresh_environ(), indent=True, verbosity=1)
if res:
raise RuntimeError('Could not build Python package')
return
# python setup.py bdist_egg
if self.egg_path:
log.info(style_note('Found Python Egg', os.path.basename(self.egg_path)))
log.warning('Scripts and other data will not be installed.')
if not pkg.package_path.endswith('.egg'):
log.warning('package does not appear to be an Egg')
# We must rename the egg!
pkg_info_path = os.path.join(self.egg_path, 'PKG-INFO')
if not os.path.exists(pkg_info_path):
log.warning('EGG-INFO/PKG-INFO does not exist')
else:
pkg_info = {}
for line in open(pkg_info_path, 'rU'):
line = line.strip()
if not line:
continue
                    name, value = line.split(':', 1)  # values (e.g. URLs) may themselves contain ':'
pkg_info[name.strip().lower()] = value.strip()
try:
pkg_name = pkg_info['name']
pkg_version = pkg_info['version']
except KeyError:
log.warning('EGG-INFO/PKG-INFO is malformed')
else:
new_egg_path = os.path.join(os.path.dirname(self.egg_path), '%s-%s.egg-info' % (pkg_name, pkg_version))
shutil.move(self.egg_path, new_egg_path)
self.egg_path = new_egg_path
pkg.build_subdir = os.path.dirname(self.egg_path)
pkg.install_prefix = site_packages
return
# python setup.py bdist_wheel
if self.dist_path:
log.info(style_note('Found Python Wheel', os.path.basename(self.dist_path)))
log.warning('Scripts and other data will not be installed.')
if not pkg.package_path.endswith('.whl'):
log.warning('package does not appear to be a Wheel')
pkg.build_subdir = os.path.dirname(self.dist_path)
pkg.install_prefix = site_packages
return
def install(self):
if not self.setup_path:
return super(PythonBuilder, self).install()
pkg = self.package
pkg._assert_paths(install=True)
install_site_packages = os.path.join(pkg.install_path, site_packages)
# Setup the PYTHONPATH to point to the "install" directory.
env = pkg.fresh_environ()
env['PYTHONPATH'] = '%s:%s' % (install_site_packages, env.get('PYTHONPATH', ''))
if os.path.exists(pkg.install_path):
log.warning('Removing existing install', pkg.install_path)
shutil.rmtree(pkg.install_path)
os.makedirs(install_site_packages)
log.info(style_note('Installing Python package', 'to ' + install_site_packages))
cmd = [
'install',
'--root', pkg.install_path, # Better than prefix
'--prefix', '.',
'--install-lib', site_packages, # So that we don't get lib64; virtualenv symlinks them together anyways.
'--single-version-externally-managed',
]
if not pkg.defer_setup_build:
cmd.append('--skip-build')
res = call_setup_py(self.setup_path, cmd, env=env, indent=True, verbosity=1)
if res:
raise RuntimeError('Could not install Python package')
def develop(self):
pkg = self.package
log.info(style_note('Building scripts'))
cmd = [
'build_scripts', '-e', '/usr/bin/env VEE=%s VEE_PYTHON=%s dev python' % (os.environ.get("VEE", ''), os.environ.get('VEE_PYTHON', '')),
'install_scripts', '-d', 'build/scripts',
]
if call_setup_py(self.setup_path, cmd):
raise RuntimeError('Could not build scripts')
egg_info = find_in_tree(os.path.dirname(self.setup_path), '*.egg-info', 'dir')
if not egg_info:
raise RuntimeError('Could not find built egg-info')
dirs_to_link = set()
for line in open(os.path.join(egg_info, 'top_level.txt')):
dirs_to_link.add(os.path.dirname(line.strip()))
for name in sorted(dirs_to_link):
log.info(style_note("Adding ./%s to $PYTHONPATH" % name))
pkg.environ['PYTHONPATH'] = join_env_path('./' + name, pkg.environ.get('PYTHONPATH', '@'))
scripts = os.path.join(os.path.dirname(self.setup_path), 'build', 'scripts')
if os.path.exists(scripts):
log.info(style_note("Adding ./build/scripts to $PATH"))
pkg.environ['PATH'] = join_env_path('./build/scripts', pkg.environ.get('PATH', '@'))
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for compute.network.rpcapi
"""
import collections
import mock
from mox3 import mox
from oslo_config import cfg
from jacket import context
from jacket.compute.network import rpcapi as network_rpcapi
from jacket.objects.compute import base as objects_base
from jacket.compute import test
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit import fake_network
CONF = cfg.CONF
class NetworkRpcAPITestCase(test.NoDBTestCase):
def setUp(self):
super(NetworkRpcAPITestCase, self).setUp()
self.flags(multi_host=True)
# Used to specify the default value expected if no real value is passed
DefaultArg = collections.namedtuple('DefaultArg', ['value'])
def _test_network_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = network_rpcapi.NetworkAPI()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(CONF.network_topic, rpcapi.client.target.topic)
expected_retval = 'foo' if rpc_method == 'call' else None
expected_version = kwargs.pop('version', None)
expected_fanout = kwargs.pop('fanout', None)
expected_kwargs = kwargs.copy()
for k, v in expected_kwargs.items():
if isinstance(v, self.DefaultArg):
expected_kwargs[k] = v.value
kwargs.pop(k)
prepare_kwargs = {}
if expected_version:
prepare_kwargs['version'] = expected_version
if expected_fanout:
prepare_kwargs['fanout'] = True
if 'source_compute' in expected_kwargs:
# Fix up for migrate_instance_* calls.
expected_kwargs['source'] = expected_kwargs.pop('source_compute')
expected_kwargs['dest'] = expected_kwargs.pop('dest_compute')
targeted_methods = [
'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
'_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
'_associate_floating_ip', '_disassociate_floating_ip',
'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
'migrate_instance_finish',
'allocate_for_instance', 'deallocate_for_instance',
]
targeted_by_instance = ['deallocate_for_instance']
if method in targeted_methods and ('host' in expected_kwargs or
'instance' in expected_kwargs):
if method in targeted_by_instance:
host = expected_kwargs['instance']['host']
else:
host = expected_kwargs['host']
if method not in ['allocate_for_instance',
'deallocate_fixed_ip']:
expected_kwargs.pop('host')
if CONF.multi_host:
prepare_kwargs['server'] = host
self.mox.StubOutWithMock(rpcapi, 'client')
version_check = [
'deallocate_for_instance', 'deallocate_fixed_ip',
'allocate_for_instance', 'release_fixed_ip', 'set_network_host',
'setup_networks_on_host'
]
if method in version_check:
rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True)
if prepare_kwargs:
rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
rpc_method = getattr(rpcapi.client, rpc_method)
rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
self.mox.ReplayAll()
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(expected_retval, retval)
def test_create_networks(self):
self._test_network_api('create_networks', rpc_method='call',
arg1='arg', arg2='arg')
def test_delete_network(self):
self._test_network_api('delete_network', rpc_method='call',
uuid='fake_uuid', fixed_range='range')
def test_allocate_for_instance(self):
self._test_network_api('allocate_for_instance', rpc_method='call',
instance_id='fake_id', project_id='fake_id', host='fake_host',
rxtx_factor='fake_factor', vpn=False, requested_networks={},
macs=[], version='1.13')
def test_deallocate_for_instance(self):
instance = fake_instance.fake_instance_obj(context.get_admin_context())
self._test_network_api('deallocate_for_instance', rpc_method='call',
requested_networks=self.DefaultArg(None), instance=instance,
version='1.11')
def test_deallocate_for_instance_with_expected_networks(self):
instance = fake_instance.fake_instance_obj(context.get_admin_context())
self._test_network_api('deallocate_for_instance', rpc_method='call',
instance=instance, requested_networks={}, version='1.11')
def test_add_fixed_ip_to_instance(self):
self._test_network_api('add_fixed_ip_to_instance', rpc_method='call',
instance_id='fake_id', rxtx_factor='fake_factor',
host='fake_host', network_id='fake_id', version='1.9')
def test_remove_fixed_ip_from_instance(self):
self._test_network_api('remove_fixed_ip_from_instance',
rpc_method='call', instance_id='fake_id',
rxtx_factor='fake_factor', host='fake_host',
address='fake_address', version='1.9')
def test_add_network_to_project(self):
self._test_network_api('add_network_to_project', rpc_method='call',
project_id='fake_id', network_uuid='fake_uuid')
def test_get_instance_nw_info(self):
self._test_network_api('get_instance_nw_info', rpc_method='call',
instance_id='fake_id', rxtx_factor='fake_factor',
host='fake_host', project_id='fake_id', version='1.9')
def test_validate_networks(self):
self._test_network_api('validate_networks', rpc_method='call',
networks={})
def test_get_dns_domains(self):
self._test_network_api('get_dns_domains', rpc_method='call')
def test_add_dns_entry(self):
self._test_network_api('add_dns_entry', rpc_method='call',
address='addr', name='name', dns_type='foo', domain='domain')
def test_modify_dns_entry(self):
self._test_network_api('modify_dns_entry', rpc_method='call',
address='addr', name='name', domain='domain')
def test_delete_dns_entry(self):
self._test_network_api('delete_dns_entry', rpc_method='call',
name='name', domain='domain')
def test_delete_dns_domain(self):
self._test_network_api('delete_dns_domain', rpc_method='call',
domain='fake_domain')
def test_get_dns_entries_by_address(self):
self._test_network_api('get_dns_entries_by_address', rpc_method='call',
address='fake_address', domain='fake_domain')
def test_get_dns_entries_by_name(self):
self._test_network_api('get_dns_entries_by_name', rpc_method='call',
name='fake_name', domain='fake_domain')
def test_create_private_dns_domain(self):
self._test_network_api('create_private_dns_domain', rpc_method='call',
domain='fake_domain', av_zone='fake_zone')
def test_create_public_dns_domain(self):
self._test_network_api('create_public_dns_domain', rpc_method='call',
domain='fake_domain', project='fake_project')
def test_setup_networks_on_host(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
instance = fake_instance.fake_instance_obj(ctxt)
self._test_network_api('setup_networks_on_host', rpc_method='call',
instance_id=instance.id, host='fake_host', teardown=False,
instance=instance, version='1.16')
def test_setup_networks_on_host_v1_0(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
instance = fake_instance.fake_instance_obj(ctxt)
host = 'fake_host'
teardown = True
rpcapi = network_rpcapi.NetworkAPI()
call_mock = mock.Mock()
cctxt_mock = mock.Mock(call=call_mock)
with test.nested(
mock.patch.object(rpcapi.client, 'can_send_version',
return_value=False),
mock.patch.object(rpcapi.client, 'prepare',
return_value=cctxt_mock)
) as (
can_send_mock, prepare_mock
):
rpcapi.setup_networks_on_host(ctxt, instance.id, host, teardown,
instance)
# assert our mocks were called as expected
can_send_mock.assert_called_once_with('1.16')
prepare_mock.assert_called_once_with(version='1.0')
call_mock.assert_called_once_with(ctxt, 'setup_networks_on_host',
host=host, teardown=teardown,
instance_id=instance.id)
def test_lease_fixed_ip(self):
self._test_network_api('lease_fixed_ip', rpc_method='cast',
host='fake_host', address='fake_addr')
def test_release_fixed_ip(self):
self._test_network_api('release_fixed_ip', rpc_method='cast',
host='fake_host', address='fake_addr', mac='fake_mac',
version='1.14')
def test_release_fixed_ip_no_mac_support(self):
# Tests that the mac kwarg is not passed when we can't send version
# 1.14 to the network manager.
ctxt = context.RequestContext('fake_user', 'fake_project')
address = '192.168.65.158'
host = 'fake-host'
mac = '00:0c:29:2c:b2:64'
rpcapi = network_rpcapi.NetworkAPI()
cast_mock = mock.Mock()
cctxt_mock = mock.Mock(cast=cast_mock)
with test.nested(
mock.patch.object(rpcapi.client, 'can_send_version',
return_value=False),
mock.patch.object(rpcapi.client, 'prepare',
return_value=cctxt_mock)
) as (
can_send_mock, prepare_mock
):
rpcapi.release_fixed_ip(ctxt, address, host, mac)
        # assert our mocks were called as expected
can_send_mock.assert_called_once_with('1.14')
prepare_mock.assert_called_once_with(server=host, version='1.0')
cast_mock.assert_called_once_with(ctxt, 'release_fixed_ip',
address=address)
def test_set_network_host(self):
network = fake_network.fake_network_obj(context.get_admin_context())
self._test_network_api('set_network_host', rpc_method='call',
network_ref=network, version='1.15')
def test_set_network_host_network_object_to_primitive(self):
# Tests that the network object is converted to a primitive if it
# can't send version 1.15.
ctxt = context.RequestContext('fake_user', 'fake_project')
network = fake_network.fake_network_obj(ctxt)
network_dict = objects_base.obj_to_primitive(network)
rpcapi = network_rpcapi.NetworkAPI()
call_mock = mock.Mock()
cctxt_mock = mock.Mock(call=call_mock)
with test.nested(
mock.patch.object(rpcapi.client, 'can_send_version',
return_value=False),
mock.patch.object(rpcapi.client, 'prepare',
return_value=cctxt_mock)
) as (
can_send_mock, prepare_mock
):
rpcapi.set_network_host(ctxt, network)
# assert our mocks were called as expected
can_send_mock.assert_called_once_with('1.15')
prepare_mock.assert_called_once_with(version='1.0')
call_mock.assert_called_once_with(ctxt, 'set_network_host',
network_ref=network_dict)
def test_rpc_setup_network_on_host(self):
self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
network_id='fake_id', teardown=False, host='fake_host')
def test_rpc_allocate_fixed_ip(self):
self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
instance_id='fake_id', network_id='fake_id', address='addr',
vpn=True, host='fake_host')
def test_deallocate_fixed_ip(self):
instance = fake_instance.fake_db_instance()
self._test_network_api('deallocate_fixed_ip', rpc_method='call',
address='fake_addr', host='fake_host', instance=instance,
version='1.12')
def test_update_dns(self):
self._test_network_api('update_dns', rpc_method='cast', fanout=True,
network_ids='fake_id', version='1.3')
def test__associate_floating_ip(self):
self._test_network_api('_associate_floating_ip', rpc_method='call',
floating_address='fake_addr', fixed_address='fixed_address',
interface='fake_interface', host='fake_host',
instance_uuid='fake_uuid', version='1.6')
def test__disassociate_floating_ip(self):
self._test_network_api('_disassociate_floating_ip', rpc_method='call',
address='fake_addr', interface='fake_interface',
host='fake_host', instance_uuid='fake_uuid', version='1.6')
def test_migrate_instance_start(self):
self._test_network_api('migrate_instance_start', rpc_method='call',
instance_uuid='fake_instance_uuid',
rxtx_factor='fake_factor',
project_id='fake_project',
source_compute='fake_src_compute',
dest_compute='fake_dest_compute',
floating_addresses='fake_floating_addresses',
host=self.DefaultArg(None),
version='1.2')
def test_migrate_instance_start_multi_host(self):
self._test_network_api('migrate_instance_start', rpc_method='call',
instance_uuid='fake_instance_uuid',
rxtx_factor='fake_factor',
project_id='fake_project',
source_compute='fake_src_compute',
dest_compute='fake_dest_compute',
floating_addresses='fake_floating_addresses',
host='fake_host',
version='1.2')
def test_migrate_instance_finish(self):
self._test_network_api('migrate_instance_finish', rpc_method='call',
instance_uuid='fake_instance_uuid',
rxtx_factor='fake_factor',
project_id='fake_project',
source_compute='fake_src_compute',
dest_compute='fake_dest_compute',
floating_addresses='fake_floating_addresses',
host=self.DefaultArg(None),
version='1.2')
def test_migrate_instance_finish_multi_host(self):
self._test_network_api('migrate_instance_finish', rpc_method='call',
instance_uuid='fake_instance_uuid',
rxtx_factor='fake_factor',
project_id='fake_project',
source_compute='fake_src_compute',
dest_compute='fake_dest_compute',
floating_addresses='fake_floating_addresses',
host='fake_host',
version='1.2')
|
|
"""
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms and Count Sketches.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Daniel Lopez-Sanchez (TensorSketch) <lope@usal.es>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
try:
from scipy.fft import fft, ifft
except ImportError: # scipy < 1.4
from scipy.fftpack import fft, ifft
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS
from .utils.validation import check_non_negative
class PolynomialCountSketch(BaseEstimator, TransformerMixin):
"""Polynomial kernel approximation via Tensor Sketch.
Implements Tensor Sketch, which approximates the feature map
of the polynomial kernel::
K(X, Y) = (gamma * <X, Y> + coef0)^degree
by efficiently computing a Count Sketch of the outer product of a
vector with itself using Fast Fourier Transforms (FFT). Read more in the
:ref:`User Guide <polynomial_kernel_approx>`.
.. versionadded:: 0.24
Parameters
----------
gamma : float, default=1.0
Parameter of the polynomial kernel whose feature map
will be approximated.
degree : int, default=2
Degree of the polynomial kernel whose feature map
will be approximated.
coef0 : int, default=0
Constant term of the polynomial kernel whose feature map
will be approximated.
n_components : int, default=100
Dimensionality of the output feature space. Usually, `n_components`
should be greater than the number of features in input samples in
order to achieve good performance. The optimal score / run time
balance is typically achieved around `n_components` = 10 * `n_features`,
but this depends on the specific dataset being used.
random_state : int, RandomState instance, default=None
Determines random number generation for indexHash and bitHash
initialization. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
Attributes
----------
indexHash_ : ndarray of shape (degree, n_features), dtype=int64
Array of indexes in range [0, n_components) used to represent
the 2-wise independent hash functions for Count Sketch computation.
bitHash_ : ndarray of shape (degree, n_features), dtype=float32
Array with random entries in {+1, -1}, used to represent
the 2-wise independent hash functions for Count Sketch computation.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
Examples
--------
>>> from sklearn.kernel_approximation import PolynomialCountSketch
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> ps = PolynomialCountSketch(degree=3, random_state=1)
>>> X_features = ps.fit_transform(X)
>>> clf = SGDClassifier(max_iter=10, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=10)
>>> clf.score(X_features, y)
1.0
"""
def __init__(
self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None
):
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Initializes the internal variables. The method needs no information
about the distribution of data, so we only care about n_features in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
if not self.degree >= 1:
raise ValueError(f"degree={self.degree} should be >=1.")
X = self._validate_data(X, accept_sparse="csc")
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
if self.coef0 != 0:
n_features += 1
self.indexHash_ = random_state.randint(
0, high=self.n_components, size=(self.degree, n_features)
)
self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
return self
def transform(self, X):
"""Generate the feature map approximation for X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
            The approximate polynomial kernel feature map of X.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csc", reset=False)
X_gamma = np.sqrt(self.gamma) * X
if sp.issparse(X_gamma) and self.coef0 != 0:
X_gamma = sp.hstack(
[X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))],
format="csc",
)
elif not sp.issparse(X_gamma) and self.coef0 != 0:
X_gamma = np.hstack(
[X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))]
)
if X_gamma.shape[1] != self.indexHash_.shape[1]:
raise ValueError(
"Number of features of test samples does not"
" match that of training samples."
)
count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components))
if sp.issparse(X_gamma):
for j in range(X_gamma.shape[1]):
for d in range(self.degree):
iHashIndex = self.indexHash_[d, j]
iHashBit = self.bitHash_[d, j]
count_sketches[:, d, iHashIndex] += (
(iHashBit * X_gamma[:, j]).toarray().ravel()
)
else:
for j in range(X_gamma.shape[1]):
for d in range(self.degree):
iHashIndex = self.indexHash_[d, j]
iHashBit = self.bitHash_[d, j]
count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j]
        # For each sample, compute a count sketch of phi(x) using the polynomial
# multiplication (via FFT) of p count sketches of x.
count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True)
count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1)
data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True))
return data_sketch
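# Illustrative check (added; not part of the original module): inner products of
# the sketched features approximate the exact polynomial kernel. Data and
# parameter values below are arbitrary examples.
#   from sklearn.metrics.pairwise import polynomial_kernel
#   rng = np.random.RandomState(0)
#   X = rng.randn(50, 10)
#   ps = PolynomialCountSketch(degree=2, n_components=500, random_state=0)
#   Z = ps.fit_transform(X)
#   approx = Z @ Z.T                    # approximate kernel matrix
#   exact = polynomial_kernel(X, X, degree=2, gamma=1.0, coef0=0)
#   # np.abs(approx - exact).mean() should be small relative to exact.mean()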
class RBFSampler(TransformerMixin, BaseEstimator):
"""Approximate a RBF kernel feature map using random Fourier features.
    It implements a variant of Random Kitchen Sinks [1].
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float, default=1.0
Parameter of RBF kernel: exp(-gamma * x^2).
n_components : int, default=100
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
weights and random offset when fitting the training data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
random_offset_ : ndarray of shape (n_components,), dtype=float64
Random offset used to compute the projection in the `n_components`
dimensions of the feature space.
random_weights_ : ndarray of shape (n_features, n_components),\
dtype=float64
Random projection directions drawn from the Fourier transform
of the RBF kernel.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
SkewedChi2Sampler : Approximate feature map for
"skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
Examples
--------
>>> from sklearn.kernel_approximation import RBFSampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> rbf_feature = RBFSampler(gamma=1, random_state=1)
>>> X_features = rbf_feature.fit_transform(X)
>>> clf = SGDClassifier(max_iter=5, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=5)
>>> clf.score(X_features, y)
1.0
"""
def __init__(self, *, gamma=1.0, n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csr")
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)
)
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Random Fourier feature map of X.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
return projection
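# --- Illustrative sketch (added; not part of the original module) -----------
# A minimal check, assuming scikit-learn's ``rbf_kernel`` helper, that inner
# products of the random Fourier features approximate the exact RBF kernel
# exp(-gamma * ||x - y||**2). ``X_demo`` is illustrative data; the block is
# guarded so importing this module is unaffected.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel

    rng = np.random.RandomState(42)
    X_demo = rng.rand(25, 4)
    sampler = RBFSampler(gamma=0.5, n_components=5000, random_state=42)
    Z = sampler.fit_transform(X_demo)
    approx = Z @ Z.T  # Monte Carlo estimate of the kernel matrix
    exact = rbf_kernel(X_demo, gamma=0.5)
    print("RBFSampler max abs error:", np.abs(approx - exact).max())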
class SkewedChi2Sampler(TransformerMixin, BaseEstimator):
"""Approximate feature map for "skewed chi-squared" kernel.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float, default=1.0
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int, default=100
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
weights and random offset when fitting the training data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
random_weights_ : ndarray of shape (n_features, n_components)
Weight array, sampled from a secant hyperbolic distribution, which will
be used to linearly transform the log of the data.
random_offset_ : ndarray of shape (n_features, n_components)
Bias term, which will be added to the data. It is uniformly distributed
between 0 and 2*pi.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
Examples
--------
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
... n_components=10,
... random_state=0)
>>> X_features = chi2_feature.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=10, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=10)
>>> clf.score(X_features, y)
1.0
"""
def __init__(self, *, skewedness=1.0, n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Approximate feature map of X for the skewed chi-squared kernel.
"""
check_is_fitted(self)
X = as_float_array(X, copy=True)
X = self._validate_data(X, copy=False, reset=False)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
return projection
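# --- Illustrative sketch (added; not part of the original module) -----------
# A minimal usage sketch for the skewed chi-squared feature map: every entry
# of the input must be strictly greater than -skewedness, and the output has
# ``n_components`` columns regardless of the input dimensionality. ``X_demo``
# is illustrative data; the block is guarded so importing is unaffected.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.RandomState(1).rand(10, 3)  # non-negative data
    sampler = SkewedChi2Sampler(skewedness=0.5, n_components=50, random_state=1)
    Z = sampler.fit_transform(X_demo)
    print(Z.shape)  # (10, 50)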
class AdditiveChi2Sampler(TransformerMixin, BaseEstimator):
"""Approximate feature map for additive chi2 kernel.
Samples the Fourier transform of the kernel characteristic function
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, default=2
Gives the number of (complex) sampling points.
sample_interval : float, default=None
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Attributes
----------
sample_interval_ : float
Stored sampling interval. Specified as a parameter if `sample_steps`
not in {1,2,3}.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.kernel_approximation import AdditiveChi2Sampler
>>> X, y = load_digits(return_X_y=True)
>>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
>>> X_transformed = chi2sampler.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3)
>>> clf.fit(X_transformed, y)
SGDClassifier(max_iter=5, random_state=0)
>>> clf.score(X_transformed, y)
0.9499...
"""
def __init__(self, *, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set the parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the transformer.
"""
X = self._validate_data(X, accept_sparse="csr")
check_non_negative(X, "X in AdditiveChi2Sampler.fit")
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError(
"If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval"
)
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : {ndarray, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = (
"%(name)s is not fitted. Call fit to set the parameters before"
" calling transform"
)
check_is_fitted(self, msg=msg)
X = self._validate_data(X, accept_sparse="csr", reset=False)
check_non_negative(X, "X in AdditiveChi2Sampler.transform")
sparse = sp.issparse(X)
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = X != 0.0
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix(
(data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix(
(data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix(
(data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
)
X_new.append(X_step)
return sp.hstack(X_new)
def _more_tags(self):
return {"stateless": True, "requires_positive_X": True}
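# --- Illustrative sketch (added; not part of the original module) -----------
# A minimal check that the sampled feature map approximates the additive
# chi-squared kernel k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i), computed
# here directly with NumPy (see the reference above). The match is only
# approximate by design. ``X_demo`` is illustrative data; the block is
# guarded so importing this module is unaffected.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(15, 6) + 0.01  # strictly positive, so no 0/0 terms
    chi2_map = AdditiveChi2Sampler(sample_steps=3)
    Z = chi2_map.fit_transform(X_demo)
    approx = Z @ Z.T
    num = 2.0 * X_demo[:, None, :] * X_demo[None, :, :]
    den = X_demo[:, None, :] + X_demo[None, :, :]
    exact = (num / den).sum(axis=-1)
    print("AdditiveChi2Sampler max abs error:", np.abs(approx - exact).max())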
class Nystroem(TransformerMixin, BaseEstimator):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
.. versionadded:: 0.13
Parameters
----------
kernel : str or callable, default='rbf'
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as `kernel_params`, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
coef0 : float, default=None
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
degree : float, default=None
Degree of the polynomial kernel. Ignored by other kernels.
kernel_params : dict, default=None
Additional parameters (keyword arguments) for kernel function passed
as callable object.
n_components : int, default=100
Number of features to construct.
How many data points will be used to construct the mapping.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the uniform sampling without
replacement of `n_components` of the training data to construct the
basis kernel.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the kernel matrix into `n_jobs` even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.24
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : ndarray of shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : ndarray of shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
Examples
--------
>>> from sklearn import datasets, svm
>>> from sklearn.kernel_approximation import Nystroem
>>> X, y = datasets.load_digits(n_class=9, return_X_y=True)
>>> data = X / 16.
>>> clf = svm.LinearSVC()
>>> feature_map_nystroem = Nystroem(gamma=.2,
... random_state=1,
... n_components=300)
>>> data_transformed = feature_map_nystroem.fit_transform(data)
>>> clf.fit(data_transformed, y)
LinearSVC()
>>> clf.score(data_transformed, y)
0.9987...
"""
def __init__(
self,
kernel="rbf",
*,
gamma=None,
coef0=None,
degree=None,
kernel_params=None,
n_components=100,
random_state=None,
n_jobs=None,
):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csr")
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn(
"n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel."
)
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(
basis,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**self._get_kernel_params(),
)
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(
X,
self.components_,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**kernel_params,
)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel) and self.kernel != "precomputed":
for param in KERNEL_PARAMS[self.kernel]:
if getattr(self, param) is not None:
params[param] = getattr(self, param)
else:
if (
self.gamma is not None
or self.coef0 is not None
or self.degree is not None
):
raise ValueError(
"Don't pass gamma, coef0 or degree to "
"Nystroem if using a callable "
"or precomputed kernel"
)
return params
def _more_tags(self):
return {
"_xfail_checks": {
"check_transformer_preserve_dtypes": (
"dtypes are preserved but not at a close enough precision"
)
},
"preserves_dtype": [np.float64, np.float32],
}
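# --- Illustrative sketch (added; not part of the original module) -----------
# A minimal check, assuming scikit-learn's ``rbf_kernel`` helper: when
# n_components equals n_samples, every training point is used as a basis, so
# the Nystroem embedding reproduces the exact kernel matrix up to numerical
# precision. ``X_demo`` is illustrative data; the block is guarded so
# importing this module is unaffected.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel

    rng = np.random.RandomState(0)
    X_demo = rng.rand(40, 3)
    nys = Nystroem(kernel="rbf", gamma=0.3, n_components=40, random_state=0)
    Z = nys.fit_transform(X_demo)
    exact = rbf_kernel(X_demo, gamma=0.3)
    print("Nystroem max abs error:", np.abs(Z @ Z.T - exact).max())  # ~0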
|
|
"""Support to interact with a Music Player Daemon."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MPD"
DEFAULT_PORT = 6600
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)
SUPPORT_MPD = (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_SHUFFLE_SET
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
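# Illustrative configuration.yaml entry matching the schema above (added
# comment; the host value is a placeholder):
#
#   media_player:
#     - platform: mpd
#       host: 192.168.1.2
#       port: 6600
#       name: "Living room MPD"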
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MPD platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
device = MpdDevice(host, port, password, name)
add_entities([device], True)
class MpdDevice(MediaPlayerDevice):
"""Representation of a MPD server."""
# pylint: disable=no-member
def __init__(self, server, port, password, name):
"""Initialize the MPD device."""
import mpd
self.server = server
self.port = port
self._name = name
self.password = password
self._status = None
self._currentsong = None
self._playlists = None
self._currentplaylist = None
self._is_connected = False
self._muted = False
self._muted_volume = 0
# set up MPD client
self._client = mpd.MPDClient()
self._client.timeout = 30
self._client.idletimeout = None
def _connect(self):
"""Connect to MPD."""
import mpd
try:
self._client.connect(self.server, self.port)
if self.password is not None:
self._client.password(self.password)
except mpd.ConnectionError:
return
self._is_connected = True
def _disconnect(self):
"""Disconnect from MPD."""
import mpd
try:
self._client.disconnect()
except mpd.ConnectionError:
pass
self._is_connected = False
self._status = None
def _fetch_status(self):
"""Fetch status from MPD."""
self._status = self._client.status()
self._currentsong = self._client.currentsong()
self._update_playlists()
@property
def available(self):
"""Return true if MPD is available and connected."""
return self._is_connected
def update(self):
"""Get the latest data and update the state."""
import mpd
try:
if not self._is_connected:
self._connect()
self._fetch_status()
except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError):
# Cleanly disconnect in case connection is not in valid state
self._disconnect()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self._status is None:
return STATE_OFF
if self._status["state"] == "play":
return STATE_PLAYING
if self._status["state"] == "pause":
return STATE_PAUSED
if self._status["state"] == "stop":
return STATE_OFF
return STATE_OFF
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._currentsong.get("file")
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
# Time does not exist for streams
return self._currentsong.get("time")
@property
def media_title(self):
"""Return the title of current playing media."""
name = self._currentsong.get("name", None)
title = self._currentsong.get("title", None)
file_name = self._currentsong.get("file", None)
if name is None and title is None:
if file_name is None:
return "None"
return os.path.basename(file_name)
if name is None:
return title
if title is None:
return name
return f"{name}: {title}"
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self._currentsong.get("artist")
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return self._currentsong.get("album")
@property
def volume_level(self):
"""Return the volume level."""
if "volume" in self._status:
return int(self._status["volume"]) / 100
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._status is None:
return None
supported = SUPPORT_MPD
if "volume" in self._status:
supported |= SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE
if self._playlists is not None:
supported |= SUPPORT_SELECT_SOURCE
return supported
@property
def source(self):
"""Name of the current input source."""
return self._currentplaylist
@property
def source_list(self):
"""Return the list of available input sources."""
return self._playlists
def select_source(self, source):
"""Choose a different available playlist and play it."""
self.play_media(MEDIA_TYPE_PLAYLIST, source)
@Throttle(PLAYLIST_UPDATE_INTERVAL)
def _update_playlists(self, **kwargs):
"""Update available MPD playlists."""
import mpd
try:
self._playlists = []
for playlist_data in self._client.listplaylists():
self._playlists.append(playlist_data["playlist"])
except mpd.CommandError as error:
self._playlists = None
_LOGGER.warning("Playlists could not be updated: %s:", error)
def set_volume_level(self, volume):
"""Set volume of media player."""
if "volume" in self._status:
self._client.setvol(int(volume * 100))
def volume_up(self):
"""Service to send the MPD the command for volume up."""
if "volume" in self._status:
current_volume = int(self._status["volume"])
if current_volume < 100:
self._client.setvol(min(current_volume + 5, 100))
def volume_down(self):
"""Service to send the MPD the command for volume down."""
if "volume" in self._status:
current_volume = int(self._status["volume"])
if current_volume > 0:
self._client.setvol(max(current_volume - 5, 0))
def media_play(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(0)
def media_pause(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(1)
def media_stop(self):
"""Service to send the MPD the command for stop."""
self._client.stop()
def media_next_track(self):
"""Service to send the MPD the command for next track."""
self._client.next()
def media_previous_track(self):
"""Service to send the MPD the command for previous track."""
self._client.previous()
def mute_volume(self, mute):
"""Mute. Emulated with set_volume_level."""
if "volume" in self._status:
if mute:
self._muted_volume = self.volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._muted_volume)
self._muted = mute
def play_media(self, media_type, media_id, **kwargs):
"""Send the media player the command for playing a playlist."""
_LOGGER.debug("Playing playlist: %s", media_id)
if media_type == MEDIA_TYPE_PLAYLIST:
if media_id in self._playlists:
self._currentplaylist = media_id
else:
self._currentplaylist = None
_LOGGER.warning("Unknown playlist name %s", media_id)
self._client.clear()
self._client.load(media_id)
self._client.play()
else:
self._client.clear()
self._client.add(media_id)
self._client.play()
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return bool(int(self._status["random"]))
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._client.random(int(shuffle))
def turn_off(self):
"""Service to send the MPD the command to stop playing."""
self._client.stop()
def turn_on(self):
"""Service to send the MPD the command to start playing."""
self._client.play()
self._update_playlists(no_throttle=True)
def clear_playlist(self):
"""Clear players playlist."""
self._client.clear()
def media_seek(self, position):
"""Send seek command."""
self._client.seekcur(position)
|
|
# pylint: disable=protected-access, unused-argument
# pylint: disable=no-value-for-parameter
from unittest import TestCase
import radical.entk.exceptions as ree
import radical.entk
from radical.entk.execman.rp.task_processor import resolve_placeholders
from radical.entk.execman.rp.task_processor import resolve_arguments
from radical.entk.execman.rp.task_processor import resolve_tags
from radical.entk.execman.rp.task_processor import get_input_list_from_task
from radical.entk.execman.rp.task_processor import get_output_list_from_task
from radical.entk.execman.rp.task_processor import create_td_from_task
from radical.entk.execman.rp.task_processor import create_task_from_rp
try:
import mock
except ImportError:
from unittest import mock
class TestBase(TestCase):
# ------------------------------------------------------------------------------
#
@mock.patch('radical.utils.Logger')
def test_resolve_placeholders(self, mocked_Logger):
pipeline_name = 'p1'
stage_name = 's1'
t1_name = 't1'
placeholders = {
pipeline_name: {
stage_name: {
t1_name: {
'path' : '/home/vivek/t1',
'rts_uid': 'unit.0002'
}
}
}
}
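# Added note: the placeholders dict maps pipeline -> stage -> task to that
# task's sandbox path, so '$Pipeline_p1_Stage_s1_Task_t1/<file>' resolves to
# '/home/vivek/t1/<file>', while '$SHARED' resolves to the pilot staging
# area ('pilot:///'), as the assertions below demonstrate.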
paths = ['test_file > $SHARED/test_file',
'test_file > $Pipeline_%s_Stage_%s_Task_%s/test_file' % (pipeline_name, stage_name, t1_name),
'test_file > $NODE_LFS_PATH/test.txt']
self.assertEqual(resolve_placeholders(paths[0], placeholders),
'test_file > pilot:///test_file')
self.assertEqual(resolve_placeholders(paths[1], placeholders),
'test_file > /home/vivek/t1/test_file')
with self.assertRaises(ree.ValueError):
resolve_placeholders(paths[2], placeholders)
# ------------------------------------------------------------------------------
#
@mock.patch('radical.utils.Logger')
def test_resolve_arguments(self, mocked_Logger):
pipeline_name = 'p1'
stage_name = 's1'
t1_name = 't1'
t2_name = 't2'
placeholders = {
pipeline_name: {
stage_name: {
t1_name: {
'path' : '/home/vivek/t1',
'rts_uid': 'unit.0002'
},
t2_name: {
'path' : '/home/vivek/t2',
'rts_uid': 'unit.0003'
}
}
}
}
arguments = ['$SHARED',
'$Pipeline_%s_Stage_%s_Task_%s' % (pipeline_name, stage_name, t1_name),
'$Pipeline_%s_Stage_%s_Task_%s' % (pipeline_name, stage_name, t2_name),
'$NODE_LFS_PATH/test.txt']
self.assertEqual(resolve_arguments(arguments, placeholders),
['$RP_PILOT_STAGING', '/home/vivek/t1',
'/home/vivek/t2', '$NODE_LFS_PATH/test.txt'])
# ------------------------------------------------------------------------------
#
@mock.patch('radical.utils.Logger')
def test_resolve_tags(self, mocked_Logger):
pipeline_name = 'p1'
stage_name = 's1'
task = mock.Mock()
task.uid = 'task.0000'
task.tags = {'colocate': task.uid}
task2 = mock.Mock()
task2.uid = 'task.0001'
task2.tags = None
t2_name = 't2'
placeholders = {
pipeline_name: {
stage_name: {
task.uid: {
'path' : '/home/vivek/t1',
'uid': 'unit.0002'
},
t2_name: {
'path' : '/home/vivek/t2',
'uid': 'unit.0003'
}
}
}
}
self.assertEqual(resolve_tags(task=task,
parent_pipeline_name=pipeline_name,
placeholders=placeholders),
{'colocate': 'unit.0002'})
self.assertEqual(resolve_tags(task=task2, parent_pipeline_name=pipeline_name,
placeholders=placeholders), {'colocate':'task.0001'})
# ------------------------------------------------------------------------------
#
@mock.patch('radical.pilot.TaskDescription')
@mock.patch('radical.utils.Logger')
@mock.patch.object(radical.entk.execman.rp.task_processor, 'get_output_list_from_task', return_value='outputs')
@mock.patch.object(radical.entk.execman.rp.task_processor, 'resolve_arguments', return_value='test_args')
@mock.patch.object(radical.entk.execman.rp.task_processor, 'resolve_tags', return_value='test_tag')
@mock.patch.object(radical.entk.execman.rp.task_processor, 'get_input_list_from_task', return_value='inputs')
@mock.patch('radical.utils.generate_id', return_value='task.0000.0000')
def test_create_td_from_task(self, mocked_TaskDescription,
mocked_Logger, mocked_get_input_list_from_task,
mocked_get_output_list_from_task,
mocked_resolve_arguments, mocked_resolve_tags,
mocked_generate_id):
mocked_TaskDescription.name = None
mocked_TaskDescription.pre_exec = None
mocked_TaskDescription.executable = None
mocked_TaskDescription.arguments = None
mocked_TaskDescription.sandbox = None
mocked_TaskDescription.post_exec = None
mocked_TaskDescription.tag = None
mocked_TaskDescription.cpu_processes = None
mocked_TaskDescription.cpu_threads = None
mocked_TaskDescription.cpu_process_type = None
mocked_TaskDescription.cpu_thread_type = None
mocked_TaskDescription.gpu_processes = None
mocked_TaskDescription.gpu_threads = None
mocked_TaskDescription.gpu_process_type = None
mocked_TaskDescription.gpu_thread_type = None
mocked_TaskDescription.lfs_per_process = None
mocked_TaskDescription.stdout = None
mocked_TaskDescription.stderr = None
mocked_TaskDescription.input_staging = None
mocked_TaskDescription.output_staging = None
task = mock.Mock()
task.uid = 'task.0000'
task.name = 'task.name'
task.parent_stage = {'uid' : 'stage.0000',
'name' : 'stage.0000'}
task.parent_pipeline = {'uid' : 'pipe.0000',
'name' : 'pipe.0000'}
task.pre_exec = 'post_exec'
task.executable = '/bin/date'
task.arguments = 'test_arg'
task.sandbox = 'unit.0000'
task.post_exec = ''
task.cpu_reqs = {'cpu_processes': 5,
'cpu_threads': 6,
'cpu_process_type': 'MPI',
'cpu_thread_type': 'MPI'}
task.gpu_reqs = {'gpu_processes': 5,
'gpu_threads': 6,
'gpu_process_type': 'MPI',
'gpu_thread_type': 'MPI'}
task.tags = None
task.lfs_per_process = 235
task.stderr = 'stderr'
task.stdout = 'stdout'
hash_table = {}
test_td = create_td_from_task(task=task, placeholders=None,
task_hash_table=hash_table,
pkl_path='.test.pkl',
sid='test.sid')
self.assertEqual(test_td.name, 'task.0000,task.name,stage.0000,stage.0000,pipe.0000,pipe.0000')
self.assertEqual(test_td.pre_exec, 'post_exec')
self.assertEqual(test_td.executable, '/bin/date')
self.assertEqual(test_td.arguments, 'test_args')
self.assertEqual(test_td.sandbox, 'unit.0000')
self.assertEqual(test_td.post_exec, '')
self.assertEqual(test_td.cpu_processes, 5)
self.assertEqual(test_td.cpu_threads, 6)
self.assertEqual(test_td.cpu_process_type, 'MPI')
self.assertEqual(test_td.cpu_thread_type, 'MPI')
self.assertEqual(test_td.gpu_processes, 5)
self.assertEqual(test_td.gpu_threads, 6)
self.assertEqual(test_td.gpu_process_type, 'MPI')
self.assertEqual(test_td.gpu_thread_type, 'MPI')
self.assertEqual(test_td.lfs_per_process, 235)
self.assertEqual(test_td.stdout, 'stdout')
self.assertEqual(test_td.stderr, 'stderr')
self.assertEqual(test_td.input_staging, 'inputs')
self.assertEqual(test_td.output_staging, 'outputs')
self.assertEqual(test_td.uid, 'task.0000')
self.assertEqual(hash_table, {'task.0000':'task.0000'})
task.cpu_reqs = {'cpu_processes': 5,
'cpu_threads': 6,
'cpu_process_type': None,
'cpu_thread_type': None}
task.gpu_reqs = {'gpu_processes': 5,
'gpu_threads': 6,
'gpu_process_type': None,
'gpu_thread_type': None}
test_td = create_td_from_task(task=task, placeholders=None,
task_hash_table=hash_table,
pkl_path='.test.pkl',
sid='test.sid')
self.assertEqual(test_td.name, 'task.0000,task.name,stage.0000,stage.0000,pipe.0000,pipe.0000')
self.assertEqual(test_td.pre_exec, 'post_exec')
self.assertEqual(test_td.executable, '/bin/date')
self.assertEqual(test_td.arguments, 'test_args')
self.assertEqual(test_td.sandbox, 'unit.0000')
self.assertEqual(test_td.post_exec, '')
self.assertEqual(test_td.cpu_processes, 5)
self.assertEqual(test_td.cpu_threads, 6)
self.assertEqual(test_td.cpu_process_type, 'POSIX')
self.assertEqual(test_td.cpu_thread_type, 'OpenMP')
self.assertEqual(test_td.gpu_processes, 5)
self.assertEqual(test_td.gpu_threads, 6)
self.assertEqual(test_td.gpu_process_type, 'POSIX')
self.assertEqual(test_td.gpu_thread_type, 'GPU_OpenMP')
self.assertEqual(test_td.lfs_per_process, 235)
self.assertEqual(test_td.stdout, 'stdout')
self.assertEqual(test_td.stderr, 'stderr')
self.assertEqual(test_td.input_staging, 'inputs')
self.assertEqual(test_td.output_staging, 'outputs')
self.assertEqual(test_td.uid, 'task.0000.0000')
self.assertEqual(hash_table, {'task.0000':'task.0000.0000'})
# ------------------------------------------------------------------------------
#
@mock.patch('radical.entk.Task')
@mock.patch('radical.utils.Logger')
def test_create_task_from_rp(self, mocked_Task, mocked_Logger):
test_cud = mock.Mock()
test_cud.name = 'task.0000,task.0000,stage.0000,stage.0000,pipe.0000,pipe.0000'
test_cud.pre_exec = 'post_exec'
test_cud.executable = '/bin/date'
test_cud.arguments = 'test_args'
test_cud.sandbox = 'unit.0000'
test_cud.post_exec = ''
test_cud.cpu_processes = 5
test_cud.cpu_threads = 6
test_cud.cpu_process_type = 'POSIX'
test_cud.cpu_thread_type = None
test_cud.gpu_processes = 5
test_cud.gpu_threads = 6
test_cud.gpu_process_type = 'POSIX'
test_cud.gpu_thread_type = None
test_cud.lfs_per_process = 235
test_cud.stdout = 'stdout'
test_cud.stderr = 'stderr'
test_cud.input_staging = 'inputs'
test_cud.output_staging = 'outputs'
test_cud.uid = 'unit.0000'
test_cud.state = 'EXECUTING'
test_cud.sandbox = 'test_folder'
mocked_Task.uid = None
mocked_Task.name = None
mocked_Task.parent_stage = {}
mocked_Task.parent_pipeline = {}
mocked_Task.path = None
mocked_Task.rts_uid = None
task = create_task_from_rp(test_cud, None)
self.assertEqual(task.uid, 'task.0000')
self.assertEqual(task.name, 'task.0000')
self.assertEqual(task.parent_stage, {'uid': 'stage.0000', 'name': 'stage.0000'})
self.assertEqual(task.parent_pipeline, {'uid': 'pipe.0000', 'name': 'pipe.0000'})
self.assertEqual(task.path, 'test_folder')
self.assertEqual(task.rts_uid, 'unit.0000')
# ------------------------------------------------------------------------------
#
@mock.patch('radical.entk.Task')
@mock.patch('radical.utils.Logger')
def test_issue_271(self, mocked_Task, mocked_Logger):
test_cud = mock.Mock()
test_cud.name = 'task.0000,task.0000,stage.0000,stage.0000,pipe.0000,pipe.0000'
test_cud.pre_exec = 'post_exec'
test_cud.executable = '/bin/date'
test_cud.arguments = 'test_args'
test_cud.sandbox = 'unit.0000'
test_cud.post_exec = ''
test_cud.cpu_processes = 5
test_cud.cpu_threads = 6
test_cud.cpu_process_type = 'POSIX'
test_cud.cpu_thread_type = None
test_cud.gpu_processes = 5
test_cud.gpu_threads = 6
test_cud.gpu_process_type = 'POSIX'
test_cud.gpu_thread_type = None
test_cud.lfs_per_process = 235
test_cud.stdout = 'stdout'
test_cud.stderr = 'stderr'
test_cud.input_staging = 'inputs'
test_cud.output_staging = 'outputs'
test_cud.uid = 'unit.0000'
test_cud.state = 'DONE'
test_cud.sandbox = 'test_folder'
mocked_Task.uid = None
mocked_Task.name = None
mocked_Task.parent_stage = {}
mocked_Task.parent_pipeline = {}
mocked_Task.path = None
mocked_Task.rts_uid = None
task = create_task_from_rp(test_cud, None)
self.assertEqual(task.exit_code, 0)
test_cud.state = 'FAILED'
task = create_task_from_rp(test_cud, None)
self.assertEqual(task.exit_code, 1)
test_cud.state = 'EXECUTING'
task = create_task_from_rp(test_cud, None)
self.assertIsNone(task.exit_code)
# ------------------------------------------------------------------------------
#
@mock.patch('radical.utils.Logger')
def test_get_input_list_from_task(self, mocked_Logger):
task = mock.Mock()
with self.assertRaises(ree.TypeError):
get_input_list_from_task(task, '')
pipeline_name = 'p1'
stage_name = 's1'
t1_name = 't1'
placeholders = {
pipeline_name: {
stage_name: {
t1_name: {
'path' : '/home/vivek/t1',
'rts_uid': 'unit.0002'
}
}
}
}
task = mock.MagicMock(spec=radical.entk.Task)
task.link_input_data = ['$SHARED/test_folder/test_file > test_folder/test_file']
task.upload_input_data = ['$SHARED/test_folder/test_file > test_file']
task.copy_input_data = ['$Pipeline_p1_Stage_s1_Task_t1/test_file > $SHARED/test_file']
task.move_input_data = ['test_file > test_file']
test = get_input_list_from_task(task, placeholders)
input_list = [{'source': 'pilot:///test_folder/test_file',
'target': 'test_folder/test_file',
'action': 'Link'},
{'source': 'pilot:///test_folder/test_file',
'target': 'test_file'},
{'source': '/home/vivek/t1/test_file',
'target': 'pilot:///test_file',
'action': 'Copy'},
{'source': 'test_file',
'target': 'test_file',
'action': 'Move'}]
self.assertEqual(test[0], input_list[0])
self.assertEqual(test[1], input_list[1])
self.assertEqual(test[2], input_list[2])
self.assertEqual(test[3], input_list[3])
# ------------------------------------------------------------------------------
#
@mock.patch('radical.utils.Logger')
def test_get_output_list_from_task(self, mocked_Logger):
task = mock.Mock()
with self.assertRaises(ree.TypeError):
get_output_list_from_task(task, '')
task = mock.MagicMock(spec=radical.entk.Task)
task.link_output_data = ['test_file > $SHARED/test_file']
task.download_output_data = ['test_file > $SHARED/test_file']
task.copy_output_data = ['test_file > $SHARED/test_file']
task.move_output_data = ['test_file > $SHARED/test_file']
test = get_output_list_from_task(task, {})
output_list = [{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Link'},
{'source': 'test_file',
'target': 'pilot:///test_file'},
{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Copy'},
{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Move'}]
self.assertEqual(test[0], output_list[0])
self.assertEqual(test[1], output_list[1])
self.assertEqual(test[2], output_list[2])
self.assertEqual(test[3], output_list[3])
# ------------------------------------------------------------------------------
#
@mock.patch('radical.pilot.TaskDescription')
@mock.patch('radical.utils.Logger')
def test_issue_259(self, mocked_TaskDescription, mocked_Logger):
mocked_TaskDescription.name = None
mocked_TaskDescription.pre_exec = None
mocked_TaskDescription.executable = None
mocked_TaskDescription.arguments = None
mocked_TaskDescription.sandbox = None
mocked_TaskDescription.post_exec = None
mocked_TaskDescription.tag = None
mocked_TaskDescription.cpu_processes = None
mocked_TaskDescription.cpu_threads = None
mocked_TaskDescription.cpu_process_type = None
mocked_TaskDescription.cpu_thread_type = None
mocked_TaskDescription.gpu_processes = None
mocked_TaskDescription.gpu_threads = None
mocked_TaskDescription.gpu_process_type = None
mocked_TaskDescription.gpu_thread_type = None
mocked_TaskDescription.lfs_per_process = None
mocked_TaskDescription.stdout = None
mocked_TaskDescription.stderr = None
mocked_TaskDescription.input_staging = None
mocked_TaskDescription.output_staging = None
pipeline_name = 'pipe.0000'
stage_name = 'stage.0000'
t1_name = 'task.0000'
t2_name = 'task.0001'
placeholders = {
pipeline_name: {
stage_name: {
t1_name: {
'path' : '/home/vivek/t1',
'uid': 'unit.0002'
},
t2_name: {
'path' : '/home/vivek/t2',
'uid': 'unit.0003'
}
}
}
}
task = mock.MagicMock(spec=radical.entk.Task)
task.uid = 'task.0000'
task.name = 'task.0000'
task.parent_stage = {'uid' : 'stage.0000',
'name' : 'stage.0000'}
task.parent_pipeline = {'uid' : 'pipe.0000',
'name' : 'pipe.0000'}
task.pre_exec = 'post_exec'
task.executable = '/bin/date'
task.arguments = ['$SHARED',
'$Pipeline_%s_Stage_%s_Task_%s' % (pipeline_name,
stage_name,
t1_name),
'$Pipeline_%s_Stage_%s_Task_%s' % (pipeline_name,
stage_name,
t2_name),
'$NODE_LFS_PATH/test.txt']
task.sandbox = 'unit.0000'
task.post_exec = ''
task.cpu_reqs = {'cpu_processes': 5,
'cpu_threads': 6,
'cpu_process_type': 'POSIX',
'cpu_thread_type': None}
task.gpu_reqs = {'gpu_processes': 5,
'gpu_threads': 6,
'gpu_process_type': 'POSIX',
'gpu_thread_type': None}
task.tags = None
task.lfs_per_process = 235
task.stderr = 'stderr'
task.stdout = 'stdout'
input_list = [{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Link'},
{'source': 'test_file',
'target': 'pilot:///test_file'},
{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Copy'},
{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Move'}]
task.link_input_data = ['test_file > $SHARED/test_file']
task.upload_input_data = ['test_file > $SHARED/test_file']
task.copy_input_data = ['test_file > $SHARED/test_file']
task.move_input_data = ['test_file > $SHARED/test_file']
output_list = [{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Link'},
{'source': 'test_file',
'target': 'pilot:///test_file'},
{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Copy'},
{'source': 'test_file',
'target': 'pilot:///test_file',
'action': 'Move'}]
task.link_output_data = ['test_file > $SHARED/test_file']
task.download_output_data = ['test_file > $SHARED/test_file']
task.copy_output_data = ['test_file > $SHARED/test_file']
task.move_output_data = ['test_file > $SHARED/test_file']
hash_table = {}
test_cud = create_td_from_task(task, placeholders, hash_table,
pkl_path='.test.pkl',
sid='test.sid')
self.assertEqual(test_cud.name, 'task.0000,task.0000,stage.0000,stage.0000,pipe.0000,pipe.0000')
self.assertEqual(test_cud.pre_exec, 'post_exec')
self.assertEqual(test_cud.executable, '/bin/date')
self.assertEqual(test_cud.arguments, ['$RP_PILOT_STAGING',
'/home/vivek/t1',
'/home/vivek/t2',
'$NODE_LFS_PATH/test.txt'])
self.assertEqual(test_cud.sandbox, 'unit.0000')
self.assertEqual(test_cud.post_exec, '')
self.assertEqual(test_cud.cpu_processes, 5)
self.assertEqual(test_cud.cpu_threads, 6)
self.assertEqual(test_cud.cpu_process_type, 'POSIX')
self.assertEqual(test_cud.cpu_thread_type, 'OpenMP')
self.assertEqual(test_cud.gpu_processes, 5)
self.assertEqual(test_cud.gpu_threads, 6)
self.assertEqual(test_cud.gpu_process_type, 'POSIX')
self.assertEqual(test_cud.gpu_thread_type, 'GPU_OpenMP')
self.assertEqual(test_cud.lfs_per_process, 235)
self.assertEqual(test_cud.stdout, 'stdout')
self.assertEqual(test_cud.stderr, 'stderr')
self.assertEqual(test_cud.input_staging, input_list)
self.assertEqual(test_cud.output_staging, output_list)
|
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
_have_ssl = False
try:
import _ssl
from _ssl import *
_have_ssl = True
except ImportError:
pass
import os, sys
try:
from errno import EBADF
except ImportError:
EBADF = 9
__all__ = ["getfqdn"]
__all__.extend(os._get_exports_list(_socket))
if _have_ssl:
__all__.extend(os._get_exports_list(_ssl))
_realsocket = socket
if _have_ssl:
_realssl = ssl
def ssl(sock, keyfile=None, certfile=None):
if hasattr(sock, "_sock"):
sock = sock._sock
return _realssl(sock, keyfile, certfile)
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
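# Illustrative use (added comment; the hostname shown is hypothetical):
#
#     >>> getfqdn('')                # doctest: +SKIP
#     'myhost.example.com'
#
# An empty name resolves the local host; if gethostbyaddr() yields no
# dotted alias, the plain gethostname() result is returned instead.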
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'setblocking',
'settimeout', 'gettimeout', 'shutdown')
if sys.platform == "riscos":
_socketmethods = _socketmethods + ('sleeptaskw',)
# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
"send", "sendto")
class _closedsocket(object):
__slots__ = []
def _dummy(*args):
raise error(EBADF, 'Bad file descriptor')
def close(self):
pass
# All _delegate_methods must also be initialized here.
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
__getattr__ = _dummy
class _socketobject(object):
__doc__ = _realsocket.__doc__
__slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
if _sock is None:
_sock = _realsocket(family, type, proto)
self._sock = _sock
for method in _delegate_methods:
setattr(self, method, getattr(_sock, method))
def close(self):
self._sock.close()
self._sock = _closedsocket()
dummy = self._sock._dummy
for method in _delegate_methods:
setattr(self, method, dummy)
close.__doc__ = _realsocket.close.__doc__
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(_sock=sock), addr
accept.__doc__ = _realsocket.accept.__doc__
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource."""
return _socketobject(_sock=self._sock)
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return _fileobject(self._sock, mode, bufsize)
family = property(lambda self: self._sock.family, doc="the socket family")
type = property(lambda self: self._sock.type, doc="the socket type")
proto = property(lambda self: self._sock.proto, doc="the socket protocol")
_s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
"%s.__doc__ = _realsocket.%s.__doc__\n")
for _m in _socketmethods:
exec _s % (_m, _m, _m, _m)
del _m, _s
socket = SocketType = _socketobject
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ["mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf"]
def __init__(self, sock, mode='rb', bufsize=-1):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
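# Added explanatory comment: map the requested bufsize onto separate read
# and write buffer sizes. bufsize == 0 gives unbuffered writes (flushed on
# every write) with byte-at-a-time readline behaviour, bufsize == 1 gives
# line-buffered writes while reads use the default chunk size, and any
# larger value is used directly for both directions.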
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
self.flush()
finally:
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
def fileno(self):
return self._sock.fileno()
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
|
|
# coding: utf-8
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the editor view."""
__author__ = 'sll@google.com (Sean Lip)'
import imghdr
import logging
from core.controllers import base
from core.domain import config_domain
from core.domain import dependency_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import gadget_registry
from core.domain import interaction_registry
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import rule_domain
from core.domain import skins_services
from core.domain import stats_services
from core.domain import user_services
from core.domain import value_generators_domain
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import utils
import jinja2
# The frontend template for a new state. It is sent to the frontend when the
# exploration editor page is first loaded, so that new states can be
# added in a way that is completely client-side.
# IMPORTANT: Before adding this state to an existing exploration, the
# state name and the destination of the default rule should first be
# changed to the desired new state name.
NEW_STATE_TEMPLATE = {
'content': [{
'type': 'text',
'value': ''
}],
'interaction': exp_domain.State.NULL_INTERACTION_DICT,
'param_changes': [],
'unresolved_answers': {},
}
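# Hedged sketch (not part of the original module): one way a caller might
# derive a fresh state dict from the template above. The helper name below is
# hypothetical, and the exact location of the default rule's destination
# inside NULL_INTERACTION_DICT is not shown here, so that step is left as a
# comment rather than code.
import copy


def _make_new_state_dict():
    new_state = copy.deepcopy(NEW_STATE_TEMPLATE)
    # Per the note above, callers should also rename the state and point the
    # default rule's destination at the desired new state name before adding
    # this dict to an existing exploration.
    return new_state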
def get_value_generators_js():
"""Return a string that concatenates the JS for all value generators."""
all_value_generators = (
value_generators_domain.Registry.get_all_generator_classes())
value_generators_js = ''
for _, generator_cls in all_value_generators.iteritems():
value_generators_js += generator_cls.get_js_template()
return value_generators_js
VALUE_GENERATORS_JS = config_domain.ComputedProperty(
'value_generators_js', {'type': 'unicode'},
'JavaScript code for the value generators', get_value_generators_js)
MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE = (
'https://moderator/request/forum/url')
MODERATOR_REQUEST_FORUM_URL = config_domain.ConfigProperty(
'moderator_request_forum_url', {'type': 'unicode'},
'A link to the forum for nominating explorations to be featured '
'in the gallery',
default_value=MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE)
def _require_valid_version(version_from_payload, exploration_version):
"""Check that the payload version matches the given exploration version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != exploration_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_version, version_from_payload))
def require_editor(handler):
"""Decorator that checks if the user can edit the given entity."""
def test_editor(self, exploration_id, escaped_state_name=None, **kwargs):
"""Gets the user and exploration id if the user can edit it.
Args:
self: the handler instance
exploration_id: the exploration id
escaped_state_name: the URL-escaped state name, if it exists
**kwargs: any other arguments passed to the handler
Returns:
The relevant handler, if the user is authorized to edit this
exploration.
Raises:
self.PageNotFoundException: if no such exploration or state exists.
self.UnauthorizedUserException: if the user exists but does not
have the right credentials.
"""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if self.username in config_domain.BANNED_USERNAMES.value:
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_edit(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.UnauthorizedUserException(
'You do not have the credentials to edit this exploration.',
self.user_id)
if not escaped_state_name:
return handler(self, exploration_id, **kwargs)
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
return handler(self, exploration_id, state_name, **kwargs)
return test_editor
class EditorHandler(base.BaseHandler):
"""Base class for all handlers for the editor page."""
# The page name to use as a key for generating CSRF tokens.
PAGE_NAME_FOR_CSRF = 'editor'
class ExplorationPage(EditorHandler):
"""The editor page for a single exploration."""
EDITOR_PAGE_DEPENDENCY_IDS = ['codemirror']
def get(self, exploration_id):
"""Handles GET requests."""
if exploration_id in base.DISABLED_EXPLORATIONS.value:
self.render_template(
'error/disabled_exploration.html', iframe_restriction=None)
return
exploration = exp_services.get_exploration_by_id(
exploration_id, strict=False)
if (exploration is None or
not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id)):
self.redirect('/')
return
can_edit = (
bool(self.user_id) and
self.username not in config_domain.BANNED_USERNAMES.value and
rights_manager.Actor(self.user_id).can_edit(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id))
value_generators_js = VALUE_GENERATORS_JS.value
interaction_ids = (
interaction_registry.Registry.get_all_interaction_ids())
interaction_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
dependencies_html, additional_angular_modules = (
dependency_registry.Registry.get_deps_html_and_angular_modules(
interaction_dependency_ids + self.EDITOR_PAGE_DEPENDENCY_IDS))
interaction_templates = (
rte_component_registry.Registry.get_html_for_all_components() +
interaction_registry.Registry.get_interaction_html(
interaction_ids))
interaction_validators_html = (
interaction_registry.Registry.get_validators_html(
interaction_ids))
gadget_types = gadget_registry.Registry.get_all_gadget_types()
gadget_templates = (
gadget_registry.Registry.get_gadget_html(gadget_types))
skin_templates = skins_services.Registry.get_skin_templates(
skins_services.Registry.get_all_skin_ids())
self.values.update({
'GADGET_SPECS': gadget_registry.Registry.get_all_specs(),
'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
'PANEL_SPECS': skins_services.Registry.get_all_specs()[
feconf.DEFAULT_SKIN_ID],
'additional_angular_modules': additional_angular_modules,
'can_delete': rights_manager.Actor(
self.user_id).can_delete(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_edit': can_edit,
'can_modify_roles': rights_manager.Actor(
self.user_id).can_modify_roles(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_publicize': rights_manager.Actor(
self.user_id).can_publicize(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_publish': rights_manager.Actor(
self.user_id).can_publish(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_release_ownership': rights_manager.Actor(
self.user_id).can_release_ownership(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_unpublicize': rights_manager.Actor(
self.user_id).can_unpublicize(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_unpublish': rights_manager.Actor(
self.user_id).can_unpublish(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'dependencies_html': jinja2.utils.Markup(dependencies_html),
'gadget_templates': jinja2.utils.Markup(gadget_templates),
'interaction_templates': jinja2.utils.Markup(
interaction_templates),
'interaction_validators_html': jinja2.utils.Markup(
interaction_validators_html),
'moderator_request_forum_url': MODERATOR_REQUEST_FORUM_URL.value,
'nav_mode': feconf.NAV_MODE_CREATE,
'value_generators_js': jinja2.utils.Markup(value_generators_js),
'skin_js_urls': [
skins_services.Registry.get_skin_js_url(skin_id)
for skin_id in skins_services.Registry.get_all_skin_ids()],
'skin_templates': jinja2.utils.Markup(skin_templates),
'title': exploration.title,
'ALL_LANGUAGE_CODES': feconf.ALL_LANGUAGE_CODES,
'ALLOWED_GADGETS': feconf.ALLOWED_GADGETS,
'ALLOWED_INTERACTION_CATEGORIES': (
feconf.ALLOWED_INTERACTION_CATEGORIES),
# This is needed for the exploration preview.
'CATEGORIES_TO_COLORS': feconf.CATEGORIES_TO_COLORS,
'INVALID_PARAMETER_NAMES': feconf.INVALID_PARAMETER_NAMES,
'NEW_STATE_TEMPLATE': NEW_STATE_TEMPLATE,
'SHOW_TRAINABLE_UNRESOLVED_ANSWERS': (
feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS),
'TAG_REGEX': feconf.TAG_REGEX,
})
self.render_template('editor/exploration_editor.html')
class ExplorationHandler(EditorHandler):
"""Page with editor data for a single exploration."""
PAGE_NAME_FOR_CSRF = 'editor'
def _get_exploration_data(self, exploration_id, version=None):
"""Returns a description of the given exploration."""
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except:
raise self.PageNotFoundException
states = {}
for state_name in exploration.states:
state_dict = exploration.states[state_name].to_dict()
state_dict['unresolved_answers'] = (
stats_services.get_top_unresolved_answers_for_default_rule(
exploration_id, state_name))
states[state_name] = state_dict
editor_dict = {
'category': exploration.category,
'exploration_id': exploration_id,
'init_state_name': exploration.init_state_name,
'language_code': exploration.language_code,
'objective': exploration.objective,
'param_changes': exploration.param_change_dicts,
'param_specs': exploration.param_specs_dict,
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict(),
'show_state_editor_tutorial_on_load': (
self.user_id and not
self.user_has_started_state_editor_tutorial),
'skin_customizations': exploration.skin_instance.to_dict()[
'skin_customizations'],
'states': states,
'tags': exploration.tags,
'title': exploration.title,
'version': exploration.version,
}
return editor_dict
def get(self, exploration_id):
"""Gets the data for the exploration overview page."""
if not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=None)
self.values.update(
self._get_exploration_data(exploration_id, version=version))
self.render_json(self.values)
@require_editor
def put(self, exploration_id):
"""Updates properties of the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
commit_message = self.payload.get('commit_message')
change_list = self.payload.get('change_list')
try:
exp_services.update_exploration(
self.user_id, exploration_id, change_list, commit_message)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
self.values.update(self._get_exploration_data(exploration_id))
self.render_json(self.values)
@require_editor
def delete(self, exploration_id):
"""Deletes the given exploration."""
role = self.request.get('role')
if not role:
role = None
if role == rights_manager.ROLE_ADMIN:
if not self.is_admin:
logging.error(
'%s tried to delete an exploration, but is not an admin.'
% self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role == rights_manager.ROLE_MODERATOR:
if not self.is_moderator:
logging.error(
'%s tried to delete an exploration, but is not a '
'moderator.' % self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role is not None:
raise self.InvalidInputException('Invalid role: %s' % role)
logging.info(
'%s %s tried to delete exploration %s' %
(role, self.user_id, exploration_id))
exploration = exp_services.get_exploration_by_id(exploration_id)
can_delete = rights_manager.Actor(self.user_id).can_delete(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration.id)
if not can_delete:
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration %s' %
(self.user_id, exploration_id))
is_exploration_cloned = rights_manager.is_exploration_cloned(
exploration_id)
exp_services.delete_exploration(
self.user_id, exploration_id, force_deletion=is_exploration_cloned)
logging.info(
'%s %s deleted exploration %s' %
(role, self.user_id, exploration_id))
class ExplorationRightsHandler(EditorHandler):
"""Handles management of exploration editing rights."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def put(self, exploration_id):
"""Updates the editing rights for the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
is_public = self.payload.get('is_public')
is_publicized = self.payload.get('is_publicized')
is_community_owned = self.payload.get('is_community_owned')
new_member_username = self.payload.get('new_member_username')
new_member_role = self.payload.get('new_member_role')
viewable_if_private = self.payload.get('viewable_if_private')
if new_member_username:
if not rights_manager.Actor(
self.user_id).can_modify_roles(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.UnauthorizedUserException(
'Only an owner of this exploration can add or change '
'roles.')
new_member_id = user_services.get_user_id_from_username(
new_member_username)
if new_member_id is None:
raise Exception(
'Sorry, we could not find the specified user.')
rights_manager.assign_role_for_exploration(
self.user_id, exploration_id, new_member_id, new_member_role)
elif is_public is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_public:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publish_exploration(
self.user_id, exploration_id)
exp_services.index_explorations_given_ids([exploration_id])
else:
rights_manager.unpublish_exploration(
self.user_id, exploration_id)
exp_services.delete_documents_from_search_index([
exploration_id])
elif is_publicized is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_publicized:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publicize_exploration(
self.user_id, exploration_id)
else:
rights_manager.unpublicize_exploration(
self.user_id, exploration_id)
elif is_community_owned:
exploration = exp_services.get_exploration_by_id(exploration_id)
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.release_ownership_of_exploration(
self.user_id, exploration_id)
elif viewable_if_private is not None:
rights_manager.set_private_viewability_of_exploration(
self.user_id, exploration_id, viewable_if_private)
else:
raise self.InvalidInputException(
'No change was made to this exploration.')
self.render_json({
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict()
})
class ResolvedAnswersHandler(EditorHandler):
"""Allows learners' answers for a state to be marked as resolved."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def put(self, exploration_id, state_name):
"""Marks learners' answers as resolved."""
resolved_answers = self.payload.get('resolved_answers')
if not isinstance(resolved_answers, list):
raise self.InvalidInputException(
'Expected a list of resolved answers; received %s.' %
resolved_answers)
if 'resolved_answers' in self.payload:
event_services.DefaultRuleAnswerResolutionEventHandler.record(
exploration_id, state_name, resolved_answers)
self.render_json({})
class UntrainedAnswersHandler(EditorHandler):
"""Returns answers that learners have submitted, but that Oppia hasn't been
    explicitly trained to respond to by an exploration author.
"""
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
# If trying to access a non-existing state, there is no training
# data associated with it.
self.render_json({'unhandled_answers': []})
return
state = exploration.states[state_name]
# TODO(bhenning): Answers should be bound to a particular exploration
# version or interaction ID.
# TODO(bhenning): If the top 100 answers have already been classified,
# then this handler will always return an empty list.
# TODO(bhenning): This entire function will not work as expected until
# the answers storage backend stores answers in a non-lossy way.
# Currently, answers are stored as HTML strings and they are not able
# to be converted back to the original objects they started as, so the
# normalization calls in this function will not work correctly on those
# strings. Once this happens, this handler should also be tested.
NUMBER_OF_TOP_ANSWERS_PER_RULE = 50
# The total number of possible answers is 100 because it requests the
# top 50 answers matched to the default rule and the top 50 answers
# matched to a fuzzy rule individually.
answers = stats_services.get_top_state_rule_answers(
exploration_id, state_name, [
exp_domain.DEFAULT_RULESPEC_STR, rule_domain.FUZZY_RULE_TYPE],
NUMBER_OF_TOP_ANSWERS_PER_RULE)
interaction = state.interaction
unhandled_answers = []
if feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS and interaction.id:
interaction_instance = (
interaction_registry.Registry.get_interaction_by_id(
interaction.id))
try:
# Normalize the answers.
for answer in answers:
answer['value'] = interaction_instance.normalize_answer(
answer['value'])
trained_answers = set()
for answer_group in interaction.answer_groups:
for rule_spec in answer_group.rule_specs:
if rule_spec.rule_type == rule_domain.FUZZY_RULE_TYPE:
trained_answers.update(
interaction_instance.normalize_answer(trained)
for trained
in rule_spec.inputs['training_data'])
# Include all the answers which have been confirmed to be
# associated with the default outcome.
trained_answers.update(set(
interaction_instance.normalize_answer(confirmed)
for confirmed
in interaction.confirmed_unclassified_answers))
unhandled_answers = [
answer for answer in answers
if answer['value'] not in trained_answers
]
except Exception as e:
logging.warning(
'Error loading untrained answers for interaction %s: %s.' %
(interaction.id, e))
self.render_json({
'unhandled_answers': unhandled_answers
})
class ExplorationDownloadHandler(EditorHandler):
"""Downloads an exploration as a zip file, or dict of YAML strings
representing states.
"""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
output_format = self.request.get('output_format', default_value='zip')
width = int(self.request.get('width', default_value=80))
# If the title of the exploration has changed, we use the new title
filename = 'oppia-%s-v%s' % (
utils.to_ascii(exploration.title.replace(' ', '')), version)
if output_format == feconf.OUTPUT_FORMAT_ZIP:
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.zip' % str(filename))
self.response.write(
exp_services.export_to_zip_file(exploration_id, version))
elif output_format == feconf.OUTPUT_FORMAT_JSON:
self.render_json(exp_services.export_states_to_yaml(
exploration_id, version=version, width=width))
else:
raise self.InvalidInputException(
'Unrecognized output format %s' % output_format)
class StateDownloadHandler(EditorHandler):
"""Downloads a state as a YAML string."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
width = int(self.request.get('width', default_value=80))
try:
state = self.request.get('state')
except:
raise self.InvalidInputException('State not found')
exploration_dict = exp_services.export_states_to_yaml(
exploration_id, version=version, width=width)
if state not in exploration_dict:
raise self.PageNotFoundException
self.response.write(exploration_dict[state])
class ExplorationResourcesHandler(EditorHandler):
"""Manages assets associated with an exploration."""
@require_editor
def get(self, exploration_id):
"""Handles GET requests."""
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
self.render_json({'filepaths': dir_list})
class ExplorationSnapshotsHandler(EditorHandler):
"""Returns the exploration snapshot history."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
snapshots = exp_services.get_exploration_snapshots_metadata(
exploration_id)
except:
raise self.PageNotFoundException
# Patch `snapshots` to use the editor's display name.
for snapshot in snapshots:
if snapshot['committer_id'] != feconf.SYSTEM_COMMITTER_ID:
snapshot['committer_id'] = user_services.get_username(
snapshot['committer_id'])
self.render_json({
'snapshots': snapshots,
})
class ExplorationRevertHandler(EditorHandler):
"""Reverts an exploration to an older version."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
current_version = self.payload.get('current_version')
revert_to_version = self.payload.get('revert_to_version')
if not isinstance(revert_to_version, int):
raise self.InvalidInputException(
'Expected an integer version to revert to; received %s.' %
revert_to_version)
if not isinstance(current_version, int):
raise self.InvalidInputException(
'Expected an integer current version; received %s.' %
current_version)
if revert_to_version < 1 or revert_to_version >= current_version:
raise self.InvalidInputException(
'Cannot revert to version %s from version %s.' %
(revert_to_version, current_version))
exp_services.revert_exploration(
self.user_id, exploration_id, current_version, revert_to_version)
self.render_json({})
class ExplorationStatisticsHandler(EditorHandler):
"""Returns statistics for an exploration."""
def get(self, exploration_id, exploration_version):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
self.render_json(stats_services.get_exploration_stats(
exploration_id, exploration_version))
class ExplorationStatsVersionsHandler(EditorHandler):
"""Returns statistics versions for an exploration."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
self.render_json({
'versions': stats_services.get_versions_for_exploration_stats(
exploration_id)})
class StateRulesStatsHandler(EditorHandler):
"""Returns detailed learner answer statistics for a state."""
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
self.render_json({
'rules_stats': stats_services.get_state_rules_stats(
exploration_id, state_name)
})
class ImageUploadHandler(EditorHandler):
"""Handles image uploads."""
@require_editor
def post(self, exploration_id):
"""Saves an image uploaded by a content creator."""
raw = self.request.get('image')
filename = self.payload.get('filename')
if not raw:
raise self.InvalidInputException('No image supplied')
file_format = imghdr.what(None, h=raw)
if file_format not in feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS:
allowed_formats = ', '.join(
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS.keys())
raise Exception('Image file not recognized: it should be in '
'one of the following formats: %s.' %
allowed_formats)
if not filename:
raise self.InvalidInputException('No filename supplied')
if '/' in filename or '..' in filename:
raise self.InvalidInputException(
'Filenames should not include slashes (/) or consecutive dot '
'characters.')
if '.' in filename:
dot_index = filename.rfind('.')
primary_name = filename[:dot_index]
extension = filename[dot_index + 1:].lower()
if (extension not in
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS[file_format]):
raise self.InvalidInputException(
'Expected a filename ending in .%s; received %s' %
(file_format, filename))
else:
primary_name = filename
filepath = '%s.%s' % (primary_name, file_format)
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
if fs.isfile(filepath):
raise self.InvalidInputException(
'A file with the name %s already exists. Please choose a '
'different name.' % filepath)
fs.commit(self.user_id, filepath, raw)
self.render_json({'filepath': filepath})
class ChangeListSummaryHandler(EditorHandler):
"""Returns a summary of a changelist applied to a given exploration."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
change_list = self.payload.get('change_list')
version = self.payload.get('version')
current_exploration = exp_services.get_exploration_by_id(
exploration_id)
if version != current_exploration.version:
# TODO(sll): Improve this.
self.render_json({
'error': (
'Sorry! Someone else has edited and committed changes to '
'this exploration while you were editing it. We suggest '
'opening another browser tab -- which will load the new '
'version of the exploration -- then transferring your '
'changes there. We will try to make this easier in the '
'future -- we have not done it yet because figuring out '
'how to merge different people\'s changes is hard. '
'(Trying to edit version %s, but the current version is '
'%s.).' % (version, current_exploration.version)
)
})
else:
utils.recursively_remove_key(change_list, '$$hashKey')
summary = exp_services.get_summary_of_change_list(
current_exploration, change_list)
updated_exploration = exp_services.apply_change_list(
exploration_id, change_list)
warning_message = ''
try:
updated_exploration.validate(strict=True)
except utils.ValidationError as e:
warning_message = unicode(e)
self.render_json({
'summary': summary,
'warning_message': warning_message
})
class StartedTutorialEventHandler(EditorHandler):
"""Records that this user has started the state editor tutorial."""
def post(self, exploration_id):
"""Handles GET requests."""
user_services.record_user_started_state_editor_tutorial(self.user_id)
|
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
import binascii
import dns.exception
import dns.ipv4
import dns.ipv6
# for convenience
aton4 = dns.ipv4.inet_aton
ntoa4 = dns.ipv4.inet_ntoa
aton6 = dns.ipv6.inet_aton
ntoa6 = dns.ipv6.inet_ntoa
v4_bad_addrs = ['256.1.1.1', '1.1.1', '1.1.1.1.1',
'+1.1.1.1', '1.1.1.1+', '1..2.3.4', '.1.2.3.4',
'1.2.3.4.']
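# Hedged illustration (not part of the original test module): a quick sketch
# of how the convenience aliases above round-trip between presentation text
# and packed binary forms; the helper name is hypothetical and the assertions
# mirror expectations stated by the tests below.
def _roundtrip_examples():
    assert ntoa4(aton4(b'1.2.3.4')) == b'1.2.3.4'      # IPv4 text <-> 4 bytes
    assert aton6('::1') == b'\x00' * 15 + b'\x01'      # IPv6 text -> 16 bytes
    assert ntoa6(aton6('::ffff:10.0.0.1')) == '::ffff:10.0.0.1'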
class NtoAAtoNTestCase(unittest.TestCase):
def test_aton1(self):
a = aton6('::')
self.failUnless(a == '\x00' * 16)
def test_aton2(self):
a = aton6('::1')
self.failUnless(a == '\x00' * 15 + '\x01')
def test_aton3(self):
a = aton6('::10.0.0.1')
self.failUnless(a == '\x00' * 12 + '\x0a\x00\x00\x01')
def test_aton4(self):
a = aton6('abcd::dcba')
self.failUnless(a == '\xab\xcd' + '\x00' * 12 + '\xdc\xba')
def test_aton5(self):
a = aton6('1:2:3:4:5:6:7:8')
self.assertEqual(a,
binascii.unhexlify(b'00010002000300040005000600070008'))
def test_bad_aton1(self):
def bad():
a = aton6('abcd:dcba')
self.failUnlessRaises(dns.exception.SyntaxError, bad)
def test_bad_aton2(self):
def bad():
a = aton6('abcd::dcba::1')
self.failUnlessRaises(dns.exception.SyntaxError, bad)
def test_bad_aton3(self):
def bad():
a = aton6('1:2:3:4:5:6:7:8:9')
self.failUnlessRaises(dns.exception.SyntaxError, bad)
def test_aton1(self):
a = aton6('::')
self.assertEqual(a, b'\x00' * 16)
def test_aton2(self):
a = aton6('::1')
self.assertEqual(a, b'\x00' * 15 + b'\x01')
def test_aton3(self):
a = aton6('::10.0.0.1')
self.assertEqual(a, b'\x00' * 12 + b'\x0a\x00\x00\x01')
def test_aton4(self):
a = aton6('abcd::dcba')
self.assertEqual(a, b'\xab\xcd' + b'\x00' * 12 + b'\xdc\xba')
def test_ntoa1(self):
b = binascii.unhexlify(b'00010002000300040005000600070008')
t = ntoa6(b)
self.assertEqual(t, '1:2:3:4:5:6:7:8')
def test_ntoa2(self):
b = b'\x00' * 16
t = ntoa6(b)
self.assertEqual(t, '::')
def test_ntoa3(self):
b = b'\x00' * 15 + b'\x01'
t = ntoa6(b)
self.assertEqual(t, '::1')
def test_ntoa4(self):
b = b'\x80' + b'\x00' * 15
t = ntoa6(b)
self.assertEqual(t, '8000::')
def test_ntoa5(self):
b = b'\x01\xcd' + b'\x00' * 12 + b'\x03\xef'
t = ntoa6(b)
self.assertEqual(t, '1cd::3ef')
def test_ntoa6(self):
b = binascii.unhexlify(b'ffff00000000ffff000000000000ffff')
t = ntoa6(b)
self.assertEqual(t, 'ffff:0:0:ffff::ffff')
def test_ntoa7(self):
b = binascii.unhexlify(b'00000000ffff000000000000ffffffff')
t = ntoa6(b)
self.assertEqual(t, '0:0:ffff::ffff:ffff')
def test_ntoa8(self):
b = binascii.unhexlify(b'ffff0000ffff00000000ffff00000000')
t = ntoa6(b)
self.assertEqual(t, 'ffff:0:ffff::ffff:0:0')
def test_ntoa9(self):
b = binascii.unhexlify(b'0000000000000000000000000a000001')
t = ntoa6(b)
self.assertEqual(t, '::10.0.0.1')
def test_ntoa10(self):
b = binascii.unhexlify(b'0000000000000000000000010a000001')
t = ntoa6(b)
self.assertEqual(t, '::1:a00:1')
def test_ntoa11(self):
b = binascii.unhexlify(b'00000000000000000000ffff0a000001')
t = ntoa6(b)
self.assertEqual(t, '::ffff:10.0.0.1')
def test_ntoa12(self):
b = binascii.unhexlify(b'000000000000000000000000ffffffff')
t = ntoa6(b)
self.assertEqual(t, '::255.255.255.255')
def test_ntoa13(self):
b = binascii.unhexlify(b'00000000000000000000ffffffffffff')
t = ntoa6(b)
self.assertEqual(t, '::ffff:255.255.255.255')
def test_ntoa14(self):
b = binascii.unhexlify(b'0000000000000000000000000001ffff')
t = ntoa6(b)
self.assertEqual(t, '::0.1.255.255')
def test_bad_ntoa1(self):
def bad():
a = ntoa6('')
self.failUnlessRaises(ValueError, bad)
def test_bad_ntoa2(self):
def bad():
a = ntoa6('\x00' * 17)
self.failUnlessRaises(ValueError, bad)
def test_good_v4_aton(self):
pairs = [(b'1.2.3.4', b'\x01\x02\x03\x04'),
(b'255.255.255.255', b'\xff\xff\xff\xff'),
(b'0.0.0.0', b'\x00\x00\x00\x00')]
for (t, b) in pairs:
b1 = aton4(t)
t1 = ntoa4(b1)
self.assertEqual(b1, b)
self.assertEqual(t1, t)
def test_bad_v4_aton(self):
def make_bad(a):
def bad():
return aton4(a)
return bad
for addr in v4_bad_addrs:
print(addr)
self.failUnlessRaises(dns.exception.SyntaxError, make_bad(addr))
def test_bad_v6_aton(self):
addrs = ['+::0', '0::0::', '::0::', '1:2:3:4:5:6:7:8:9',
':::::::']
embedded = ['::' + x for x in v4_bad_addrs]
addrs.extend(embedded)
def make_bad(a):
def bad():
x = aton6(a)
return bad
for addr in addrs:
self.failUnlessRaises(dns.exception.SyntaxError, make_bad(addr))
def test_rfc5952_section_4_2_2(self):
addr = '2001:db8:0:1:1:1:1:1'
b1 = aton6(addr)
t1 = ntoa6(b1)
self.assertEqual(t1, addr)
def test_is_mapped(self):
t1 = '2001:db8:0:1:1:1:1:1'
t2 = '::ffff:127.0.0.1'
t3 = '1::ffff:127.0.0.1'
self.failIf(dns.ipv6.is_mapped(aton6(t1)))
self.failUnless(dns.ipv6.is_mapped(aton6(t2)))
self.failIf(dns.ipv6.is_mapped(aton6(t3)))
if __name__ == '__main__':
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
import functools
import logging
import os
import sys
import tempfile
import time
import traceback
import unittest
from builtins import range
import apache_beam as beam
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.portability import fn_api_runner
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker import statesampler
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import userstate
from apache_beam.transforms import window
if statesampler.FAST_SAMPLER:
DEFAULT_SAMPLING_PERIOD_MS = statesampler.DEFAULT_SAMPLING_PERIOD_MS
else:
DEFAULT_SAMPLING_PERIOD_MS = 0
class FnApiRunnerTest(unittest.TestCase):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(use_grpc=False))
def test_assert_that(self):
# TODO: figure out a way for fn_api_runner to parse and raise the
# underlying exception.
with self.assertRaisesRegexp(Exception, 'Failed assert'):
with self.create_pipeline() as p:
assert_that(p | beam.Create(['a', 'b']), equal_to(['a']))
def test_create(self):
with self.create_pipeline() as p:
assert_that(p | beam.Create(['a', 'b']), equal_to(['a', 'b']))
def test_pardo(self):
with self.create_pipeline() as p:
res = (p
| beam.Create(['a', 'bc'])
| beam.Map(lambda e: e * 2)
| beam.Map(lambda e: e + 'x'))
assert_that(res, equal_to(['aax', 'bcbcx']))
def test_pardo_metrics(self):
class MyDoFn(beam.DoFn):
def start_bundle(self):
self.count = beam.metrics.Metrics.counter('ns1', 'elements')
def process(self, element):
self.count.inc(element)
return [element]
class MyOtherDoFn(beam.DoFn):
def start_bundle(self):
self.count = beam.metrics.Metrics.counter('ns2', 'elementsplusone')
def process(self, element):
self.count.inc(element + 1)
return [element]
with self.create_pipeline() as p:
res = (p | beam.Create([1, 2, 3])
| 'mydofn' >> beam.ParDo(MyDoFn())
| 'myotherdofn' >> beam.ParDo(MyOtherDoFn()))
p.run()
if not MetricsEnvironment.METRICS_SUPPORTED:
self.skipTest('Metrics are not supported.')
counter_updates = [{'key': key, 'value': val}
for container in p.runner.metrics_containers()
for key, val in
container.get_updates().counters.items()]
counter_values = [update['value'] for update in counter_updates]
counter_keys = [update['key'] for update in counter_updates]
assert_that(res, equal_to([1, 2, 3]))
self.assertEqual(counter_values, [6, 9])
self.assertEqual(counter_keys, [
MetricKey('mydofn',
MetricName('ns1', 'elements')),
MetricKey('myotherdofn',
MetricName('ns2', 'elementsplusone'))])
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on Python 3. '
'TODO: BEAM-5692')
def test_pardo_side_outputs(self):
def tee(elem, *tags):
for tag in tags:
if tag in elem:
yield beam.pvalue.TaggedOutput(tag, elem)
with self.create_pipeline() as p:
xy = (p
| 'Create' >> beam.Create(['x', 'y', 'xy'])
| beam.FlatMap(tee, 'x', 'y').with_outputs())
assert_that(xy.x, equal_to(['x', 'xy']), label='x')
assert_that(xy.y, equal_to(['y', 'xy']), label='y')
def test_pardo_side_and_main_outputs(self):
def even_odd(elem):
yield elem
yield beam.pvalue.TaggedOutput('odd' if elem % 2 else 'even', elem)
with self.create_pipeline() as p:
ints = p | beam.Create([1, 2, 3])
named = ints | 'named' >> beam.FlatMap(
even_odd).with_outputs('even', 'odd', main='all')
assert_that(named.all, equal_to([1, 2, 3]), label='named.all')
assert_that(named.even, equal_to([2]), label='named.even')
assert_that(named.odd, equal_to([1, 3]), label='named.odd')
unnamed = ints | 'unnamed' >> beam.FlatMap(even_odd).with_outputs()
unnamed[None] | beam.Map(id) # pylint: disable=expression-not-assigned
assert_that(unnamed[None], equal_to([1, 2, 3]), label='unnamed.all')
assert_that(unnamed.even, equal_to([2]), label='unnamed.even')
assert_that(unnamed.odd, equal_to([1, 3]), label='unnamed.odd')
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_pardo_side_inputs(self):
def cross_product(elem, sides):
for side in sides:
yield elem, side
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create(['a', 'b', 'c'])
side = p | 'side' >> beam.Create(['x', 'y'])
assert_that(main | beam.FlatMap(cross_product, beam.pvalue.AsList(side)),
equal_to([('a', 'x'), ('b', 'x'), ('c', 'x'),
('a', 'y'), ('b', 'y'), ('c', 'y')]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_pardo_windowed_side_inputs(self):
with self.create_pipeline() as p:
# Now with some windowing.
pcoll = p | beam.Create(list(range(10))) | beam.Map(
lambda t: window.TimestampedValue(t, t))
# Intentionally choosing non-aligned windows to highlight the transition.
main = pcoll | 'WindowMain' >> beam.WindowInto(window.FixedWindows(5))
side = pcoll | 'WindowSide' >> beam.WindowInto(window.FixedWindows(7))
res = main | beam.Map(lambda x, s: (x, sorted(s)),
beam.pvalue.AsList(side))
assert_that(
res,
equal_to([
# The window [0, 5) maps to the window [0, 7).
(0, list(range(7))),
(1, list(range(7))),
(2, list(range(7))),
(3, list(range(7))),
(4, list(range(7))),
# The window [5, 10) maps to the window [7, 14).
(5, list(range(7, 10))),
(6, list(range(7, 10))),
(7, list(range(7, 10))),
(8, list(range(7, 10))),
(9, list(range(7, 10)))]),
label='windowed')
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_flattened_side_input(self):
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create([None])
side1 = p | 'side1' >> beam.Create([('a', 1)])
side2 = p | 'side2' >> beam.Create([('b', 2)])
side = (side1, side2) | beam.Flatten()
assert_that(
main | beam.Map(lambda a, b: (a, b), beam.pvalue.AsDict(side)),
equal_to([(None, {'a': 1, 'b': 2})]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_gbk_side_input(self):
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create([None])
side = p | 'side' >> beam.Create([('a', 1)]) | beam.GroupByKey()
assert_that(
main | beam.Map(lambda a, b: (a, b), beam.pvalue.AsDict(side)),
equal_to([(None, {'a': [1]})]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_multimap_side_input(self):
with self.create_pipeline() as p:
main = p | 'main' >> beam.Create(['a', 'b'])
side = (p | 'side' >> beam.Create([('a', 1), ('b', 2), ('a', 3)])
# TODO(BEAM-4782): Obviate the need for this map.
| beam.Map(lambda kv: (kv[0], kv[1])))
assert_that(
main | beam.Map(lambda k, d: (k, sorted(d[k])),
beam.pvalue.AsMultiMap(side)),
equal_to([('a', [1, 3]), ('b', [2])]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_pardo_unfusable_side_inputs(self):
def cross_product(elem, sides):
for side in sides:
yield elem, side
with self.create_pipeline() as p:
pcoll = p | beam.Create(['a', 'b'])
assert_that(
pcoll | beam.FlatMap(cross_product, beam.pvalue.AsList(pcoll)),
equal_to([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]))
with self.create_pipeline() as p:
pcoll = p | beam.Create(['a', 'b'])
derived = ((pcoll,) | beam.Flatten()
| beam.Map(lambda x: (x, x))
| beam.GroupByKey()
| 'Unkey' >> beam.Map(lambda kv: kv[0]))
assert_that(
pcoll | beam.FlatMap(cross_product, beam.pvalue.AsList(derived)),
equal_to([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on Python 3. '
'TODO: BEAM-5692')
def test_pardo_state_only(self):
index_state_spec = userstate.CombiningValueStateSpec(
'index', beam.coders.VarIntCoder(), sum)
# TODO(ccy): State isn't detected with Map/FlatMap.
class AddIndex(beam.DoFn):
def process(self, kv, index=beam.DoFn.StateParam(index_state_spec)):
k, v = kv
index.add(1)
yield k, v, index.read()
inputs = [('A', 'a')] * 2 + [('B', 'b')] * 3
expected = [('A', 'a', 1),
('A', 'a', 2),
('B', 'b', 1),
('B', 'b', 2),
('B', 'b', 3)]
with self.create_pipeline() as p:
assert_that(p | beam.Create(inputs) | beam.ParDo(AddIndex()),
equal_to(expected))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on Python 3. '
'TODO: BEAM-5692')
def test_pardo_timers(self):
timer_spec = userstate.TimerSpec('timer', userstate.TimeDomain.WATERMARK)
class TimerDoFn(beam.DoFn):
def process(self, element, timer=beam.DoFn.TimerParam(timer_spec)):
unused_key, ts = element
timer.set(ts)
timer.set(2 * ts)
@userstate.on_timer(timer_spec)
def process_timer(self):
yield 'fired'
with self.create_pipeline() as p:
actual = (
p
| beam.Create([('k1', 10), ('k2', 100)])
| beam.ParDo(TimerDoFn())
| beam.Map(lambda x, ts=beam.DoFn.TimestampParam: (x, ts)))
expected = [('fired', ts) for ts in (20, 200)]
assert_that(actual, equal_to(expected))
def test_group_by_key(self):
with self.create_pipeline() as p:
res = (p
| beam.Create([('a', 1), ('a', 2), ('b', 3)])
| beam.GroupByKey()
| beam.Map(lambda k_vs: (k_vs[0], sorted(k_vs[1]))))
assert_that(res, equal_to([('a', [1, 2]), ('b', [3])]))
def test_flatten(self):
with self.create_pipeline() as p:
res = (p | 'a' >> beam.Create(['a']),
p | 'bc' >> beam.Create(['b', 'c']),
p | 'd' >> beam.Create(['d'])) | beam.Flatten()
assert_that(res, equal_to(['a', 'b', 'c', 'd']))
def test_combine_per_key(self):
with self.create_pipeline() as p:
res = (p
| beam.Create([('a', 1), ('a', 2), ('b', 3)])
| beam.CombinePerKey(beam.combiners.MeanCombineFn()))
assert_that(res, equal_to([('a', 1.5), ('b', 3.0)]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on Python 3. '
'TODO: BEAM-5692')
def test_read(self):
# Can't use NamedTemporaryFile as a context
# due to https://bugs.python.org/issue14243
temp_file = tempfile.NamedTemporaryFile(delete=False)
try:
temp_file.write(b'a\nb\nc')
temp_file.close()
with self.create_pipeline() as p:
assert_that(p | beam.io.ReadFromText(temp_file.name),
equal_to(['a', 'b', 'c']))
finally:
os.unlink(temp_file.name)
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on Python 3. '
'TODO: BEAM-5692')
def test_windowing(self):
with self.create_pipeline() as p:
res = (p
| beam.Create([1, 2, 100, 101, 102])
| beam.Map(lambda t: window.TimestampedValue(('k', t), t))
| beam.WindowInto(beam.transforms.window.Sessions(10))
| beam.GroupByKey()
| beam.Map(lambda k_vs1: (k_vs1[0], sorted(k_vs1[1]))))
assert_that(res, equal_to([('k', [1, 2]), ('k', [100, 101, 102])]))
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3.')
def test_large_elements(self):
with self.create_pipeline() as p:
big = (p
| beam.Create(['a', 'a', 'b'])
| beam.Map(lambda x: (x, x * data_plane._DEFAULT_FLUSH_THRESHOLD)))
side_input_res = (
big
| beam.Map(lambda x, side: (x[0], side.count(x[0])),
beam.pvalue.AsList(big | beam.Map(lambda x: x[0]))))
assert_that(side_input_res,
equal_to([('a', 2), ('a', 2), ('b', 1)]), label='side')
gbk_res = (
big
| beam.GroupByKey()
| beam.Map(lambda x: x[0]))
assert_that(gbk_res, equal_to(['a', 'b']), label='gbk')
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on Python 3. '
'TODO: BEAM-5692')
def test_error_message_includes_stage(self):
with self.assertRaises(BaseException) as e_cm:
with self.create_pipeline() as p:
def raise_error(x):
raise RuntimeError('x')
# pylint: disable=expression-not-assigned
(p
| beam.Create(['a', 'b'])
| 'StageA' >> beam.Map(lambda x: x)
| 'StageB' >> beam.Map(lambda x: x)
| 'StageC' >> beam.Map(raise_error)
| 'StageD' >> beam.Map(lambda x: x))
message = e_cm.exception.args[0]
self.assertIn('StageC', message)
self.assertNotIn('StageB', message)
def test_error_traceback_includes_user_code(self):
def first(x):
return second(x)
def second(x):
return third(x)
def third(x):
raise ValueError('x')
try:
with self.create_pipeline() as p:
p | beam.Create([0]) | beam.Map(first) # pylint: disable=expression-not-assigned
except Exception: # pylint: disable=broad-except
message = traceback.format_exc()
else:
raise AssertionError('expected exception not raised')
self.assertIn('first', message)
self.assertIn('second', message)
self.assertIn('third', message)
def test_no_subtransform_composite(self):
class First(beam.PTransform):
def expand(self, pcolls):
return pcolls[0]
with self.create_pipeline() as p:
pcoll_a = p | 'a' >> beam.Create(['a'])
pcoll_b = p | 'b' >> beam.Create(['b'])
assert_that((pcoll_a, pcoll_b) | First(), equal_to(['a']))
def test_metrics(self):
p = self.create_pipeline()
if not isinstance(p.runner, fn_api_runner.FnApiRunner):
# This test is inherited by others that may not support the same
# internal way of accessing progress metrics.
self.skipTest('Metrics not supported.')
counter = beam.metrics.Metrics.counter('ns', 'counter')
distribution = beam.metrics.Metrics.distribution('ns', 'distribution')
gauge = beam.metrics.Metrics.gauge('ns', 'gauge')
pcoll = p | beam.Create(['a', 'zzz'])
# pylint: disable=expression-not-assigned
pcoll | 'count1' >> beam.FlatMap(lambda x: counter.inc())
pcoll | 'count2' >> beam.FlatMap(lambda x: counter.inc(len(x)))
pcoll | 'dist' >> beam.FlatMap(lambda x: distribution.update(len(x)))
pcoll | 'gauge' >> beam.FlatMap(lambda x: gauge.set(len(x)))
res = p.run()
res.wait_until_finish()
c1, = res.metrics().query(beam.metrics.MetricsFilter().with_step('count1'))[
'counters']
self.assertEqual(c1.committed, 2)
c2, = res.metrics().query(beam.metrics.MetricsFilter().with_step('count2'))[
'counters']
self.assertEqual(c2.committed, 4)
dist, = res.metrics().query(beam.metrics.MetricsFilter().with_step('dist'))[
'distributions']
gaug, = res.metrics().query(
beam.metrics.MetricsFilter().with_step('gauge'))['gauges']
self.assertEqual(
dist.committed.data, beam.metrics.cells.DistributionData(4, 2, 1, 3))
self.assertEqual(dist.committed.mean, 2.0)
self.assertEqual(gaug.committed.value, 3)
def test_non_user_metrics(self):
p = self.create_pipeline()
if not isinstance(p.runner, fn_api_runner.FnApiRunner):
# This test is inherited by others that may not support the same
# internal way of accessing progress metrics.
self.skipTest('Metrics not supported.')
pcoll = p | beam.Create(['a', 'zzz'])
# pylint: disable=expression-not-assigned
pcoll | 'MyStep' >> beam.FlatMap(lambda x: None)
res = p.run()
res.wait_until_finish()
result_metrics = res.monitoring_metrics()
all_metrics_via_montoring_infos = result_metrics.query()
def assert_counter_exists(metrics, namespace, name, step):
found = 0
metric_key = MetricKey(step, MetricName(namespace, name))
for m in metrics['counters']:
if m.key == metric_key:
found = found + 1
self.assertEqual(
1, found, "Did not find exactly 1 metric for %s." % metric_key)
urns = [
monitoring_infos.ELEMENT_COUNT_URN,
monitoring_infos.START_BUNDLE_MSECS_URN,
monitoring_infos.PROCESS_BUNDLE_MSECS_URN,
monitoring_infos.FINISH_BUNDLE_MSECS_URN,
monitoring_infos.TOTAL_MSECS_URN,
]
for urn in urns:
split = urn.split(':')
namespace = split[0]
name = ':'.join(split[1:])
assert_counter_exists(
all_metrics_via_montoring_infos, namespace, name, step='Create/Read')
assert_counter_exists(
all_metrics_via_montoring_infos, namespace, name, step='MyStep')
def test_progress_metrics(self):
p = self.create_pipeline()
if not isinstance(p.runner, fn_api_runner.FnApiRunner):
# This test is inherited by others that may not support the same
# internal way of accessing progress metrics.
self.skipTest('Progress metrics not supported.')
_ = (p
| beam.Create([0, 0, 0, 5e-3 * DEFAULT_SAMPLING_PERIOD_MS])
| beam.Map(time.sleep)
| beam.Map(lambda x: ('key', x))
| beam.GroupByKey()
| 'm_out' >> beam.FlatMap(lambda x: [
1, 2, 3, 4, 5,
beam.pvalue.TaggedOutput('once', x),
beam.pvalue.TaggedOutput('twice', x),
beam.pvalue.TaggedOutput('twice', x)]))
res = p.run()
res.wait_until_finish()
def has_mi_for_ptransform(monitoring_infos, ptransform):
for mi in monitoring_infos:
if ptransform in mi.labels['PTRANSFORM']:
return True
return False
try:
# TODO(ajamato): Delete this block after deleting the legacy metrics code.
# Test the DEPRECATED legacy metrics
pregbk_metrics, postgbk_metrics = list(
res._metrics_by_stage.values())
if 'Create/Read' not in pregbk_metrics.ptransforms:
# The metrics above are actually unordered. Swap.
pregbk_metrics, postgbk_metrics = postgbk_metrics, pregbk_metrics
self.assertEqual(
4,
pregbk_metrics.ptransforms['Create/Read']
.processed_elements.measured.output_element_counts['out'])
self.assertEqual(
4,
pregbk_metrics.ptransforms['Map(sleep)']
.processed_elements.measured.output_element_counts['None'])
self.assertLessEqual(
4e-3 * DEFAULT_SAMPLING_PERIOD_MS,
pregbk_metrics.ptransforms['Map(sleep)']
.processed_elements.measured.total_time_spent)
self.assertEqual(
1,
postgbk_metrics.ptransforms['GroupByKey/Read']
.processed_elements.measured.output_element_counts['None'])
      # The actual stage name ends up being something like 'm_out/lambda...'
m_out, = [
metrics for name, metrics in list(postgbk_metrics.ptransforms.items())
if name.startswith('m_out')]
self.assertEqual(
5,
m_out.processed_elements.measured.output_element_counts['None'])
self.assertEqual(
1,
m_out.processed_elements.measured.output_element_counts['once'])
self.assertEqual(
2,
m_out.processed_elements.measured.output_element_counts['twice'])
# Test the new MonitoringInfo monitoring format.
self.assertEqual(2, len(res._monitoring_infos_by_stage))
pregbk_mis, postgbk_mis = list(res._monitoring_infos_by_stage.values())
if not has_mi_for_ptransform(pregbk_mis, 'Create/Read'):
# The monitoring infos above are actually unordered. Swap.
pregbk_mis, postgbk_mis = postgbk_mis, pregbk_mis
def assert_has_monitoring_info(
monitoring_infos, urn, labels, value=None, ge_value=None):
# TODO(ajamato): Consider adding a matcher framework
found = 0
for m in monitoring_infos:
if m.labels == labels and m.urn == urn:
if (ge_value is not None and
m.metric.counter_data.int64_value >= ge_value):
found = found + 1
elif (value is not None and
m.metric.counter_data.int64_value == value):
found = found + 1
ge_value_str = {'ge_value' : ge_value} if ge_value else ''
value_str = {'value' : value} if value else ''
self.assertEqual(
1, found, "Found (%s) Expected only 1 monitoring_info for %s." %
(found, (urn, labels, value_str, ge_value_str),))
# pregbk monitoring infos
labels = {'PTRANSFORM' : 'Create/Read', 'TAG' : 'out'}
assert_has_monitoring_info(
pregbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=4)
labels = {'PTRANSFORM' : 'Map(sleep)', 'TAG' : 'None'}
assert_has_monitoring_info(
pregbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=4)
labels = {'PTRANSFORM' : 'Map(sleep)'}
assert_has_monitoring_info(
pregbk_mis, monitoring_infos.TOTAL_MSECS_URN,
labels, ge_value=4 * DEFAULT_SAMPLING_PERIOD_MS)
# postgbk monitoring infos
labels = {'PTRANSFORM' : 'GroupByKey/Read', 'TAG' : 'None'}
assert_has_monitoring_info(
postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=1)
labels = {'PTRANSFORM' : 'm_out', 'TAG' : 'None'}
assert_has_monitoring_info(
postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=5)
labels = {'PTRANSFORM' : 'm_out', 'TAG' : 'once'}
assert_has_monitoring_info(
postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=1)
labels = {'PTRANSFORM' : 'm_out', 'TAG' : 'twice'}
assert_has_monitoring_info(
postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=2)
except:
print(res._monitoring_infos_by_stage)
raise
class FnApiRunnerTestWithGrpc(FnApiRunnerTest):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(use_grpc=True))
class FnApiRunnerTestWithGrpcMultiThreaded(FnApiRunnerTest):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(
use_grpc=True,
sdk_harness_factory=functools.partial(
sdk_worker.SdkHarness, worker_count=2)))
class FnApiRunnerTestWithBundleRepeat(FnApiRunnerTest):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner(use_grpc=False, bundle_repeat=3))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for volume replication API code.
"""
import json
import mock
from oslo_config import cfg
import webob
from cinder import context
from cinder import test
from cinder.tests.api import fakes
from cinder.tests import utils as tests_utils
CONF = cfg.CONF
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeReplicationAPITestCase(test.TestCase):
"""Test Cases for replication API."""
def setUp(self):
super(VolumeReplicationAPITestCase, self).setUp()
self.ctxt = context.RequestContext('admin', 'fake', True)
self.volume_params = {
'host': CONF.host,
'size': 1}
def _get_resp(self, operation, volume_id, xml=False):
"""Helper for a replication action req for the specified volume_id."""
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
req.method = 'POST'
if xml:
body = '<os-%s-replica/>' % operation
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
req.body = body
else:
body = {'os-%s-replica' % operation: ''}
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
res = req.get_response(app())
return req, res
def test_promote_bad_id(self):
(req, res) = self._get_resp('promote', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 404, msg)
def test_promote_bad_id_xml(self):
(req, res) = self._get_resp('promote', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 404, msg)
def test_promote_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
def test_promote_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
for status in ['available']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 202, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status_xml(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
for status in ['available']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 202, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status(self,
_rpcapi_promote):
for status in ['error', 'copying', 'inactive']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
for status in ['active', 'active-stopped']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 202, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status_xml(self,
_rpcapi_promote):
for status in ['error', 'copying', 'inactive']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
for status in ['active', 'active-stopped']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 202, msg)
def test_reenable_bad_id(self):
(req, res) = self._get_resp('reenable', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 404, msg)
def test_reenable_bad_id_xml(self):
(req, res) = self._get_resp('reenable', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 404, msg)
def test_reenable_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
def test_reenable_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status(self,
_rpcapi_promote):
for status in ['active', 'copying']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
for status in ['inactive', 'active-stopped', 'error']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 202, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status_xml(self,
_rpcapi_promote):
for status in ['active', 'copying']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 400, msg)
for status in ['inactive', 'active-stopped', 'error']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(res.status_int, 202, msg)
|
|
import os
import re
import shutil
import pytest
from click.testing import CliRunner
from freezegun import freeze_time
from great_expectations import DataContext
from great_expectations.cli.v012 import cli
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str
from tests.cli.v012.test_cli import yaml
from tests.cli.v012.test_datasource_sqlite import (
_add_datasource_and_credentials_to_context,
)
from tests.cli.v012.test_init_pandas import _delete_and_recreate_dir
from tests.cli.v012.utils import assert_no_logging_messages_or_tracebacks
try:
from unittest import mock
except ImportError:
    import mock
@pytest.fixture
def titanic_sqlite_db_file(sa, tmp_path_factory):
temp_dir = str(tmp_path_factory.mktemp("foo_path"))
fixture_db_path = file_relative_path(__file__, "../../test_sets/titanic.db")
db_path = os.path.join(temp_dir, "titanic.db")
shutil.copy(fixture_db_path, db_path)
engine = sa.create_engine(f"sqlite:///{db_path}", pool_recycle=3600)
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return db_path
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
@freeze_time("09/26/2019 13:42:41")
def test_cli_init_on_new_project(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file, sa
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, "great_expectations")
database_path = os.path.join(project_dir, "titanic.db")
shutil.copy(titanic_sqlite_db_file, database_path)
engine = sa.create_engine(f"sqlite:///{database_path}", pool_recycle=3600)
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n\n2\n6\ntitanic\n{url}\n\n\n1\n{schema}\n{table}\nwarning\n\n\n\n".format(
url=engine.url, schema=default_schema, table=default_table
),
catch_exceptions=False,
)
stdout = result.output
assert len(stdout) < 6000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new Datasource a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Name the new Expectation Suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "Great Expectations is now set up" in stdout
context = DataContext(ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources()[0]["class_name"] == "SqlAlchemyDatasource"
assert context.list_datasources()[0]["name"] == "titanic"
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir)
config_path = os.path.join(project_dir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
# Profilers are v014+ specific
os.rmdir(os.path.join(ge_dir, "profilers"))
obs_tree = gen_directory_tree_str(ge_dir)
# Instead of monkey patching guids, just regex out the guids
guid_safe_obs_tree = re.sub(
r"[a-z0-9]{32}(?=\.(json|html))", "foobarbazguid", obs_tree
)
# print(guid_safe_obs_tree)
assert (
guid_safe_obs_tree
== """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
expectations/
.ge_store_backend_id
warning.json
plugins/
custom_data_docs/
renderers/
styles/
data_docs_custom_styles.css
views/
uncommitted/
config_variables.yml
data_docs/
local_site/
index.html
expectations/
warning.html
static/
fonts/
HKGrotesk/
HKGrotesk-Bold.otf
HKGrotesk-BoldItalic.otf
HKGrotesk-Italic.otf
HKGrotesk-Light.otf
HKGrotesk-LightItalic.otf
HKGrotesk-Medium.otf
HKGrotesk-MediumItalic.otf
HKGrotesk-Regular.otf
HKGrotesk-SemiBold.otf
HKGrotesk-SemiBoldItalic.otf
images/
favicon.ico
glossary_scroller.gif
iterative-dev-loop.png
logo-long-vector.svg
logo-long.png
short-logo-vector.svg
short-logo.png
validation_failed_unexpected_values.gif
styles/
data_docs_custom_styles_template.css
data_docs_default_styles.css
validations/
warning/
20190926T134241.000000Z/
20190926T134241.000000Z/
foobarbazguid.html
validations/
.ge_store_backend_id
warning/
20190926T134241.000000Z/
20190926T134241.000000Z/
foobarbazguid.json
"""
)
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_new_project_extra_whitespace_in_url(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file, sa
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, "great_expectations")
database_path = os.path.join(project_dir, "titanic.db")
shutil.copy(titanic_sqlite_db_file, database_path)
engine = sa.create_engine(f"sqlite:///{database_path}", pool_recycle=3600)
engine_url_with_added_whitespace = " " + str(engine.url) + " "
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n\n2\n6\ntitanic\n{url}\n\n\n1\n{schema}\n{table}\nwarning\n\n\n\n".format(
url=engine_url_with_added_whitespace,
schema=default_schema,
table=default_table,
),
catch_exceptions=False,
)
stdout = result.output
assert len(stdout) < 6000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new Datasource a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Name the new Expectation Suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "Great Expectations is now set up" in stdout
context = DataContext(ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir)
config_path = os.path.join(project_dir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_and_add_one(
mock_webbrowser, caplog, initialized_sqlite_project, titanic_sqlite_db_file, sa
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
_remove_all_datasources(ge_dir)
os.remove(os.path.join(ge_dir, "expectations", "warning.json"))
context = DataContext(ge_dir)
assert not context.list_expectation_suites()
runner = CliRunner(mix_stderr=False)
url = f"sqlite:///{titanic_sqlite_db_file}"
inspector = sa.inspect(sa.create_engine(url))
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n\n2\n6\nsqlite\n{url}\n\n\n1\n{schema}\n{table}\nmy_suite\n\n\n\n".format(
url=url, schema=default_schema, table=default_table
),
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/my_suite/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert (
"Next, we will configure database credentials and store them in the `sqlite` section"
in stdout
)
assert "What is the url/connection string for the sqlalchemy connection?" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Great Expectations connected to your database" in stdout
assert "This looks like an existing project that" not in stdout
config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML))
assert "sqlite" in config["datasources"].keys()
context = DataContext(ge_dir)
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "sqlite",
"module_name": "great_expectations.datasource",
"credentials": {"url": url},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
assert context.list_expectation_suites()[0].expectation_suite_name == "my_suite"
assert len(context.list_expectation_suites()) == 1
assert_no_logging_messages_or_tracebacks(caplog, result)
def _remove_all_datasources(ge_dir):
config_path = os.path.join(ge_dir, DataContext.GE_YML)
config = _load_config_file(config_path)
config["datasources"] = {}
with open(config_path, "w") as f:
yaml.dump(config, f)
context = DataContext(ge_dir)
assert context.list_datasources() == []
def _load_config_file(config_path):
assert os.path.isfile(config_path), "Config file is missing. Check path"
with open(config_path) as f:
read = f.read()
config = yaml.load(read)
assert isinstance(config, dict)
return config
@pytest.fixture
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def initialized_sqlite_project(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file, sa
):
"""This is an initialized project through the CLI."""
project_dir = str(tmp_path_factory.mktemp("my_rad_project"))
engine = sa.create_engine(f"sqlite:///{titanic_sqlite_db_file}", pool_recycle=3600)
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n\n2\n6\ntitanic\n{url}\n\n\n1\n{schema}\n{table}\nwarning\n\n\n\n".format(
url=engine.url, schema=default_schema, table=default_table
),
catch_exceptions=False,
)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert_no_logging_messages_or_tracebacks(caplog, result)
context = DataContext(os.path.join(project_dir, DataContext.GE_DIR))
assert isinstance(context, DataContext)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
return project_dir
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing(
mock_webbrowser,
caplog,
initialized_sqlite_project,
titanic_sqlite_db,
empty_sqlite_db,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
context = DataContext(ge_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource_and_credentials_to_context(
context, datasource_name, empty_sqlite_db
)
assert len(context.list_datasources()) == 2
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_no(
mock_webbrowser,
caplog,
initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_yes(
mock_webbrowser,
caplog,
initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/index.html".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_no_suite_create_one(
mock_webbrowser, caplog, initialized_sqlite_project, sa
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
uncommitted_dir = os.path.join(ge_dir, "uncommitted")
# mangle the setup to remove all traces of any suite
expectations_dir = os.path.join(ge_dir, "expectations")
data_docs_dir = os.path.join(uncommitted_dir, "data_docs")
validations_dir = os.path.join(uncommitted_dir, "validations")
_delete_and_recreate_dir(expectations_dir)
_delete_and_recreate_dir(data_docs_dir)
_delete_and_recreate_dir(validations_dir)
context = DataContext(ge_dir)
# get the datasource from data context
all_datasources = context.list_datasources()
datasource = all_datasources[0] if all_datasources else None
# create a sqlalchemy engine using the URL of existing datasource
engine = sa.create_engine(datasource.get("credentials", {}).get("url"))
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
assert context.list_expectation_suites() == []
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n1\n{schema}\n{table}\nsink_me\n\n\n\n".format(
os.path.join(project_dir, "data/Titanic.csv"),
schema=default_schema,
table=default_table,
),
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/sink_me/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Always know what to expect from your data" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "The following Data Docs sites will be built" in stdout
assert "Great Expectations is now set up" in stdout
assert "Error: invalid input" not in stdout
assert "This looks like an existing project that" not in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
context = DataContext(ge_dir)
assert len(context.list_expectation_suites()) == 1
|
|
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Senlin exception subclasses.
"""
import sys
from oslo_log import log as logging
import six
from senlin.common.i18n import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
LOG = logging.getLogger(__name__)
class SenlinException(Exception):
"""Base Senlin Exception.
To correctly use this class, inherit from it and define a 'msg_fmt'
property. That msg_fmt will get printed with the keyword arguments
provided to the constructor.
"""
message = _("An unknown exception occurred.")
def __init__(self, **kwargs):
self.kwargs = kwargs
try:
self.message = self.msg_fmt % kwargs
# if last char is '.', wipe out redundant '.'
if self.message[-1] == '.':
self.message = self.message.rstrip('.') + '.'
except KeyError:
# exc_info = sys.exc_info()
# if kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.items():
LOG.error("%s: %s", name, value) # noqa
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
# raise exc_info[0], exc_info[1], exc_info[2]
def __str__(self):
return six.text_type(self.message)
def __unicode__(self):
return six.text_type(self.message)
def __deepcopy__(self, memo):
return self.__class__(**self.kwargs)
class SIGHUPInterrupt(SenlinException):
msg_fmt = _("System SIGHUP signal received.")
class NotAuthenticated(SenlinException):
msg_fmt = _("You are not authenticated.")
class Forbidden(SenlinException):
msg_fmt = _("You are not authorized to complete this operation.")
class BadRequest(SenlinException):
msg_fmt = _("%(msg)s.")
class InvalidAPIVersionString(SenlinException):
msg_fmt = _("API Version String '%(version)s' is of invalid format. It "
"must be of format 'major.minor'.")
class MethodVersionNotFound(SenlinException):
msg_fmt = _("API version '%(version)s' is not supported on this method.")
class InvalidGlobalAPIVersion(SenlinException):
msg_fmt = _("Version '%(req_ver)s' is not supported by the API. Minimum "
"is '%(min_ver)s' and maximum is '%(max_ver)s'.")
class MultipleChoices(SenlinException):
msg_fmt = _("Multiple results found matching the query criteria "
"'%(arg)s'. Please be more specific.")
class ResourceNotFound(SenlinException):
"""Generic exception for resource not found.
The resource type here can be 'cluster', 'node', 'profile',
'policy', 'receiver', 'webhook', 'profile_type', 'policy_type',
'action', 'event' and so on.
"""
msg_fmt = _("The %(type)s '%(id)s' could not be found.")
@staticmethod
def enhance_msg(enhance, ex):
enhance_msg = ex.message[:4] + enhance + ' ' + ex.message[4:]
return enhance_msg
class ResourceInUse(SenlinException):
"""Generic exception for resource in use.
The resource type here can be 'cluster', 'node', 'profile',
'policy', 'receiver', 'webhook', 'profile_type', 'policy_type',
'action', 'event' and so on.
"""
msg_fmt = _("The %(type)s '%(id)s' cannot be deleted: %(reason)s.")
class ResourceIsLocked(SenlinException):
"""Generic exception for resource in use.
The resource type here can be 'cluster', 'node'.
"""
msg_fmt = _("%(action)s for %(type)s '%(id)s' cannot be completed "
"because it is already locked.")
class ProfileNotSpecified(SenlinException):
msg_fmt = _("Profile not specified.")
class ProfileOperationFailed(SenlinException):
msg_fmt = _("%(message)s")
class ProfileOperationTimeout(SenlinException):
msg_fmt = _("%(message)s")
class PolicyNotSpecified(SenlinException):
msg_fmt = _("Policy not specified.")
class PolicyBindingNotFound(SenlinException):
msg_fmt = _("The policy '%(policy)s' is not found attached to the "
"specified cluster '%(identity)s'.")
class PolicyTypeConflict(SenlinException):
msg_fmt = _("The policy with type '%(policy_type)s' already exists.")
class InvalidSpec(SenlinException):
msg_fmt = _("%(message)s")
class FeatureNotSupported(SenlinException):
msg_fmt = _("%(feature)s is not supported.")
class Error(SenlinException):
msg_fmt = "%(message)s"
def __init__(self, msg):
super(Error, self).__init__(message=msg)
class InvalidContentType(SenlinException):
msg_fmt = _("Invalid content type %(content_type)s")
class RequestLimitExceeded(SenlinException):
msg_fmt = _('Request limit exceeded: %(message)s')
class ActionInProgress(SenlinException):
msg_fmt = _("The %(type)s '%(id)s' is in status %(status)s.")
class ActionConflict(SenlinException):
msg_fmt = _("The %(type)s action for target %(target)s conflicts with "
"the following action(s): %(actions)s")
class ActionCooldown(SenlinException):
msg_fmt = _("The %(type)s action for cluster %(cluster)s cannot be "
"processed due to Policy %(policy_id)s cooldown still in "
"progress")
class ActionImmutable(SenlinException):
msg_fmt = _("Action (%(id)s) is in status (%(actual)s) while expected "
"status must be one of (%(expected)s).")
class NodeNotOrphan(SenlinException):
msg_fmt = _("%(message)s")
class InternalError(SenlinException):
"""A base class for internal exceptions in senlin.
The internal exception classes which inherit from :class:`SenlinException`
class should be translated to a user facing exception type if they need to
be made user visible.
"""
msg_fmt = _("%(message)s")
message = _('Internal error happened')
def __init__(self, **kwargs):
self.code = kwargs.pop('code', 500)
# If a "message" is not provided, or None or blank, use the default.
self.message = kwargs.pop('message', self.message) or self.message
super(InternalError, self).__init__(
code=self.code, message=self.message, **kwargs)
class EResourceBusy(InternalError):
# Internal exception, not to be exposed to end user.
msg_fmt = _("The %(type)s '%(id)s' is busy now.")
class TrustNotFound(InternalError):
# Internal exception, not to be exposed to end user.
msg_fmt = _("The trust for trustor '%(trustor)s' could not be found.")
class EResourceCreation(InternalError):
# Used when creating resources in other services
def __init__(self, **kwargs):
self.resource_id = kwargs.pop('resource_id', None)
super(EResourceCreation, self).__init__(
resource_id=self.resource_id, **kwargs)
msg_fmt = _("Failed in creating %(type)s: %(message)s.")
class EResourceUpdate(InternalError):
# Used when updating resources from other services
msg_fmt = _("Failed in updating %(type)s '%(id)s': %(message)s.")
class EResourceDeletion(InternalError):
# Used when deleting resources from other services
msg_fmt = _("Failed in deleting %(type)s '%(id)s': %(message)s.")
class EServerNotFound(InternalError):
    # Used when a server created in another service cannot be found
    msg_fmt = _("Failed in finding %(type)s '%(id)s': %(message)s.")
class EResourceOperation(InternalError):
"""Generic exception for resource fail operation.
The op here can be 'recovering','rebuilding', 'checking' and
so on. And the op 'creating', 'updating' and 'deleting' we can
use separately class `EResourceCreation`,`EResourceUpdate` and
`EResourceDeletion`.
The type here is resource's driver type.It can be 'server',
'stack', 'container' and so on.
The id is resource's id.
The message here can be message from class 'ResourceNotFound',
'ResourceInUse' and so on, or developer can specified message.
"""
# Used when operating resources from other services
msg_fmt = _("Failed in %(op)s %(type)s '%(id)s': %(message)s.")
class ESchema(InternalError):
msg_fmt = _("%(message)s")
class InvalidPlugin(InternalError):
msg_fmt = _("%(message)s")
class PolicyNotAttached(InternalError):
msg_fmt = _("The policy '%(policy)s' is not attached to the specified "
"cluster '%(cluster)s'.")
class HTTPExceptionDisguise(Exception):
"""Disguises HTTP exceptions.
The purpose is to let them be handled by the webob fault application
in the wsgi pipeline.
"""
def __init__(self, exception):
self.exc = exception
self.tb = sys.exc_info()[2]
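# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how msg_fmt
# interpolation in the classes above behaves, assuming the default setting
# _FATAL_EXCEPTION_FORMAT_ERRORS = False.
#
#   exc = InvalidGlobalAPIVersion(req_ver='2.9', min_ver='1.0', max_ver='1.4')
#   str(exc)
#   # "Version '2.9' is not supported by the API. Minimum is '1.0' and
#   #  maximum is '1.4'."
#
#   # A missing keyword argument triggers the KeyError branch: the mismatch is
#   # logged rather than raised, and the instance keeps the class default
#   # message ("An unknown exception occurred.").
#   BadRequest()
# --------------------------------------------------------------------------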
|
|
import csv
import matplotlib.pyplot as plt
import numpy as np
from src.algorithms.slp import haversine, median
from src.utils.geo import GeoCoord
class EstimatorCurve:
'''
The EstimatorCurve class is used to assess the confidence of a predicted location for SLP.
Attributes:
w_stdev (numpy arr): A two dimensional numpy array representing the estimator curve. The x
axis is the standard deviations and y axis is the probability. The curve is a CDF. This
curve is generated from known locations where at least two neighbors are at different
locations.
        wo_stdev (numpy arr): A two dimensional numpy array representing the estimator curve for
            estimates whose dispersion standard deviation is zero; its x axis is distance rather
            than standard deviations.
'''
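    # Illustrative note (an assumption inferred from build_curve below, not part
    # of the original documentation): each curve is expected to look roughly like
    #   np.array([[-1.2, 0.05],
    #             [ 0.0, 0.48],
    #             [ 2.5, 0.95]])
    # with column 0 holding the sorted x values (stdevs, or distances for
    # wo_stdev) and column 1 the cumulative percentage in [0, 1).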
def __init__(self, w_stdev, wo_stdev):
self.w_stdev = w_stdev
self.wo_stdev = wo_stdev
def predict_probability_area(self, upper_bound, lower_bound, estimated_loc):
'''
Given a prediction and a bounding box this will return a confidence range
for that prediction
Args:
upper_bound (geoCoord): bounding box top right geoCoord
lower_bound (geoCoord): bounding box bottom left geoCoord
estimated_loc (LocEstimate): geoCoord of the estimated location
Returns:
Probability Tuple(Tuple(float,float)): A probability range tuple (min probability, max probability)
'''
geo = estimated_loc.geo_coord
top_dist = haversine(geo, GeoCoord(upper_bound.lat, geo.lon))
bottom_dist = haversine(geo, GeoCoord(lower_bound.lat, geo.lon))
r_dist = haversine(geo, GeoCoord(geo.lat, upper_bound.lon))
l_dist = haversine(geo, GeoCoord(geo.lat, lower_bound.lon))
min_dist = min([top_dist, bottom_dist, r_dist, l_dist])
max_dist = max([top_dist, bottom_dist, r_dist, l_dist])
#min_prob = self.lookup( (min_dist- med_error)/std_dev)
#max_prob = self.lookup( (max_dist - med_error)/ std_dev)
return (self.lookup((min_dist-estimated_loc.dispersion)/estimated_loc.dispersion_std_dev),\
self.lookup((max_dist-estimated_loc.dispersion)/estimated_loc.dispersion_std_dev))
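    # Illustrative note (assumption drawn from the return expression above): each
    # bounding-box edge distance is converted to standard deviations as
    #   (dist - dispersion) / dispersion_std_dev
    # before the CDF lookup, so the returned (min, max) pair brackets the
    # probability that the true point lies inside the box.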
@staticmethod
def load_from_rdds(locs_known, edges, desired_samples=1000, dispersion_threshold=150, neighbor_threshold=3):
'''
Creates an EstimatorCurve
Args:
locs_known (rdd of LocEstimate): RDD of locations that are known
            edges (rdd of (src_id, (dest_id, weight))): RDD of edges in the network
            desired_samples (int): Limit the curve to just a sample of data
            dispersion_threshold (int): Exclude estimates whose dispersion exceeds this value
            neighbor_threshold (int): Minimum number of neighbors required for an estimate to be used
Returns:
EstimatorCurve: A new EstimatorCurve representing the known input data
'''
        # Keep only edges whose destination location is known, so each estimate can be
        # compared against ground truth
known_edges = edges.keyBy(lambda (src_id, (dst_id, weight)): dst_id)\
.leftOuterJoin(locs_known)\
.flatMap(lambda (dst_id, (edge, loc_known)): [edge] if loc_known is not None else [] )
medians = known_edges.join(locs_known)\
.map(lambda (src_id, ((dst_id, weight), src_loc)) : (dst_id, (src_loc, weight)))\
.groupByKey()\
.filter(lambda (src_id, neighbors) : len(neighbors) >= neighbor_threshold)\
.mapValues(lambda neighbors :\
median(haversine, [loc for loc,w in neighbors], [w for loc,w in neighbors]))\
.join(locs_known)\
.mapValues(lambda (found_loc, known_loc) :\
(found_loc, known_loc, haversine(known_loc.geo_coord, found_loc.geo_coord)))\
.filter(lambda (src_id, (found_loc, known_loc, dist)) : found_loc.dispersion < dispersion_threshold)
#some medians might have std_devs of zero
close_locs = medians.filter(lambda (src_id, (found_loc, known_loc, dist)) : found_loc.dispersion_std_dev == 0)
#remaining_locs = medians.filter(lambda (src_id, (found_loc, known_loc, dist)) : found_loc.dispersion_std_dev != 0)
values = medians.map(lambda (src_id, (found_loc, known_loc, dist)) :\
(src_id, ((dist-found_loc.dispersion)/found_loc.dispersion_std_dev if found_loc.dispersion_std_dev != 0 else 0)))\
.values()
values_wo_stdev = close_locs.map(lambda (src_id, (found_loc, known_loc, dist)): (src_id, dist))\
.values()
return EstimatorCurve(EstimatorCurve.build_curve(values, desired_samples),\
EstimatorCurve.build_curve(values_wo_stdev, desired_samples))
@staticmethod
def build_curve(vals, desired_samples):
'''
        Static helper method for building the curve from a set of stdev samples
        Args:
            vals (rdd of floats): The rdd containing the distance between the estimated location
                and the actual location, expressed in standard deviations from the dispersion
desired_samples (int): For larger RDDs it is more efficient to take a sample for
the collect
Returns:
curve (numpy.ndarray): two dimensional array representing the curve.
Column 0 is the sorted stdevs and column 1 is the percentage for the CDF.
'''
cnt = vals.count()
        sample = vals
if(cnt > desired_samples):
sample = vals.sample(False, desired_samples/float(cnt), 45)
print("Before sample: ", cnt, " records")
cnt = sample.count()
print("Sample count: ", cnt)
return np.column_stack((np.sort(sample.collect()), np.arange(cnt)/float(cnt)))
def lookup(self, val, axis=0):
return EstimatorCurve.lookup_static(self.w_stdev, val, axis)
@staticmethod
def lookup_static(table, val, axis=0):
'''
        Looks up the closest stdev by subtracting the value from the lookup table, taking the
        absolute value, and finding the entry closest to zero by sorting and taking the first element
        Args:
            table (numpy.ndarray): the two-column curve to search
            val (float): the stdev (axis=0) or percentage (axis=1) to look up
            axis (int): which column of the table to match against
Returns:
CDF (float) : Percentage of actual locations found to be within the input stdev
'''
max_v = np.max(table[:, axis])
min_v = np.min(table[:, axis])
if (val < max_v and val > min_v) :
if(axis==0):
arr = np.absolute(table-np.array([val,0]))
elif(axis == 1):
arr = np.absolute(table-np.array([0,val]))
            else:
                raise ValueError("Axis must be either 0 or 1")
return arr[arr[:,axis].argsort()][0][(axis+1)%2]
elif val < min_v:
return 0
else:
return 1
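    # Minimal sketch of lookup_static on a toy table (illustrative values only,
    # not taken from the original code):
    #   table = np.array([[0.0, 0.0], [1.0, 0.5], [2.0, 1.0]])
    #   EstimatorCurve.lookup_static(table, 1.1)    # -> 0.5, CDF of the closest stdev row
    #   EstimatorCurve.lookup_static(table, -5.0)   # -> 0, below the curve's minimum
    #   EstimatorCurve.lookup_static(table, 99.0)   # -> 1, above the curve's maximum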
def validator(self, sc, eval_rdd):
'''
        Validates the curve by plotting actual versus predicted percentages
        Args:
            sc (SparkContext): Spark context used to broadcast the curve
            eval_rdd (rdd of (src_id, (dist, loc_estimate))): the result of the evaluator function
'''
b_curve = sc.broadcast(self.w_stdev)
x_vals = np.arange(.1,1,.05)
local = eval_rdd.map(lambda (src_id, (dist, loc_estimate)) :\
[1 if EstimatorCurve.lookup_static(b_curve.value, pct, axis=1) > (dist - loc_estimate.dispersion)/loc_estimate.dispersion_std_dev\
else 0 for pct in x_vals])\
.collect()
y_vals = np.sum(np.array([np.array(xi) for xi in local]), axis=0)/float(len(local))
print("sum of difference of squares: %s" % np.sum(np.sqrt((y_vals - x_vals)**2)))
plt.plot(x_vals, x_vals)
plt.plot(x_vals, y_vals)
plt.show()
def confidence_estimation_viewer(self, sc, eval_rdd):
'''
Displays a plot of the estimated and actual probability that the true point is within an array of radius values
Args:
            sc (SparkContext): Spark context used to broadcast the curve
            eval_rdd (rdd of (src_id, (dist, loc_estimate))): the result of the evaluator function
'''
test_radius = [0.5,1,5,10,25,50,75,100,150,200,250,300,400,500,600,700,800,900,1000]
b_curve = sc.broadcast(self.w_stdev)
actual_pcts = eval_rdd.map(lambda (src_id, (dist, loc_estimate)) : \
[1 if dist <= radius else 0 for radius in test_radius]).collect()
y_vals_act_pct = np.sum(np.array([np.array(xi) for xi in actual_pcts]), axis=0)/float(len(actual_pcts))
predict_pcts = eval_rdd.map(lambda (src_id, (dist, loc_estimate)) : \
            [EstimatorCurve.lookup_static(b_curve.value, (radius-loc_estimate.dispersion)/loc_estimate.dispersion_std_dev if loc_estimate.dispersion_std_dev != 0 else 0, axis=0)\
for radius in test_radius]).collect()
y_vals_pred_pct = np.sum(np.array([np.array(xi) for xi in predict_pcts]), axis=0)/float(len(predict_pcts))
act = plt.plot(test_radius, y_vals_act_pct, 'b', label="Actual")
pred = plt.plot(test_radius, y_vals_pred_pct, 'r', label="Predicted")
plt.xlabel("Radius")
plt.ylabel("Percentage Within")
plt.title("Percentage Within Radius")
plt.legend(loc=4)
plt.show()
def plot(self, w_stdev_lim=10, wo_stdev_lim=1000):
'''
Plots both the stdev curve and the distance curve for when the stdev
is 0
Args:
w_stdev_lim(int) : x axis limit for the plot
wo_stdev_lim(int) : x axis limit for the plot
'''
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(121)
ax1.plot(self.w_stdev[:,0], self.w_stdev[:,1], label="stdev>0")
ax1.set_xlim([-10,w_stdev_lim])
ax1.set_xlabel("standard deviation")
ax1.set_ylabel("percentage (CDF)")
ax1.legend()
ax2 = fig.add_subplot(122)
ax2.plot(self.wo_stdev[:,0], self.wo_stdev[:,1], label="stddev==0")
ax2.set_xlim([0,wo_stdev_lim])
ax2.set_xlabel("distance")
ax2.set_ylabel("percentage (CDF)")
ax2.legend()
plt.show()
def save(self, name="estimator"):
'''
Saves the EstimatorCurve as a csv
Args:
name(string): A prefix name for the filename. Two CSVs will be created-- one for
when the stdev is 0, and one for when it is greater than 0
'''
np.savetxt(open(name+"_curve.csv",'w'), self.w_stdev, delimiter=",")
np.savetxt(open(name+"_curve_zero_stdev.csv", "w"), self.wo_stdev, delimiter=",")
print("Saved estimator curve as \'%s.curve\'" % name)
print("Saved estimator curve with 0 stdev as \'%s.curve_zero_stdev\'" % name)
@staticmethod
def load_from_file(name="estimator"):
'''
Loads an Estimator curve from csv files
Args:
name(string): prefix name for the two CSV files
'''
return EstimatorCurve(np.loadtxt(open(name+"_curve.csv"), delimiter=","),\
np.loadtxt(open(name+"_curve_zero_stdev.csv"), delimiter=","))
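# Illustrative end-to-end sketch (assumes an existing SparkContext plus
# `locs_known` and `edges` RDDs; not part of the original module):
#
#   curve = EstimatorCurve.load_from_rdds(locs_known, edges, desired_samples=1000)
#   curve.save(name="estimator")          # writes estimator_curve.csv and
#                                         # estimator_curve_zero_stdev.csv
#   curve = EstimatorCurve.load_from_file(name="estimator")
#   curve.lookup(1.5)                     # CDF value for 1.5 standard deviations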
|
|
import mock
from oslo.config import cfg
import testtools
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron import context
from neutron.db import api as db
from neutron.db import quota_db
from neutron import manager
from neutron.plugins.linuxbridge.db import l2network_db_v2
from neutron import quota
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
TARGET_PLUGIN = ('neutron.plugins.linuxbridge.lb_neutron_plugin'
'.LinuxBridgePluginV2')
_get_path = test_api_v2._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
def setUp(self):
super(QuotaExtensionTestCase, self).setUp()
# Ensure 'stale' patched copies of the plugin are never returned
manager.NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
# Create the default configurations
args = ['--config-file', test_extensions.etcdir('neutron.conf.test')]
config.parse(args=args)
# Update the plugin and extensions path
cfg.CONF.set_override('core_plugin', TARGET_PLUGIN)
cfg.CONF.set_override(
'quota_items',
['network', 'subnet', 'port', 'extra1'],
group='QUOTAS')
quota.QUOTAS = quota.QuotaEngine()
quota.register_resources_from_config()
self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
self.plugin = self._plugin_patcher.start()
self.plugin.return_value.supported_extension_aliases = ['quotas']
        # QUOTAS registers the items from conf when starting.
        # extra1 is added later, so it has to be registered manually.
quota.QUOTAS.register_resource_by_name('extra1')
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
l2network_db_v2.initialize()
app = config.load_paste_app('extensions_test_app')
ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self.api = webtest.TestApp(ext_middleware)
def tearDown(self):
self._plugin_patcher.stop()
self.api = None
self.plugin = None
db.clear_db()
cfg.CONF.reset()
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
super(QuotaExtensionTestCase, self).tearDown()
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.db.quota_db.DbQuotaDriver',
group='QUOTAS')
super(QuotaExtensionDbTestCase, self).setUp()
def test_quotas_loaded_right(self):
res = self.api.get(_get_path('quotas', fmt=self.fmt))
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
self.assertEqual(200, res.status_int)
def test_quotas_default_values(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
self.assertEqual(-1, quota['quota']['extra1'])
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_show_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_tenant(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_list_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
def test_list_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_with_non_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_negative_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_to_unlimited(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_exceeding_current_limit(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_with_non_support_resource_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_update_attributes(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['extra1'])
def test_delete_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(204, res.status_int)
def test_delete_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_quotas_loaded_bad_returns_404(self):
try:
res = self.api.get(_get_path('quotas'), expect_errors=True)
self.assertEqual(404, res.status_int)
except Exception:
pass
def test_quotas_limit_check(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=tenant_id,
fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=4)
def test_quotas_limit_check_with_invalid_quota_value(self):
tenant_id = 'tenant_id1'
with testtools.ExpectedException(exceptions.InvalidQuotaValue):
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=-2)
def test_quotas_get_tenant_from_request_context(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
env = {'neutron.context': context.Context('', '',
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(400, res.status_int)
class QuotaExtensionDbTestCaseXML(QuotaExtensionDbTestCase):
fmt = 'xml'
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
fmt = 'json'
def test_quotas_default_values(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
self.assertEqual(-1, quota['quota']['extra1'])
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
def test_show_quotas_without_admin_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_forbidden(self):
tenant_id = 'tenant_id1'
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quotas_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
class QuotaExtensionCfgTestCaseXML(QuotaExtensionCfgTestCase):
fmt = 'xml'
class TestDbQuotaDriver(base.BaseTestCase):
"""Test for neutron.db.quota_db.DbQuotaDriver."""
def test_get_tenant_quotas_arg(self):
"""Call neutron.db.quota_db.DbQuotaDriver._get_quotas."""
driver = quota_db.DbQuotaDriver()
ctx = context.Context('', 'bar')
foo_quotas = {'network': 5}
default_quotas = {'network': 10}
target_tenant = 'foo'
with mock.patch.object(quota_db.DbQuotaDriver,
'get_tenant_quotas',
return_value=foo_quotas) as get_tenant_quotas:
quotas = driver._get_quotas(ctx,
target_tenant,
default_quotas,
['network'])
self.assertEqual(quotas, foo_quotas)
get_tenant_quotas.assert_called_once_with(ctx,
default_quotas,
target_tenant)
|
|
from django import template
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_text
import comments
register = template.Library()
class BaseCommentNode(template.Node):
"""
Base helper class (abstract) for handling the get_comment_* template tags.
Looks a bit strange, but the subclasses below should make this a bit more
obvious.
"""
@classmethod
def handle_token(cls, parser, token):
"""Class method to parse get_comment_list/count/form and return a Node."""
tokens = token.split_contents()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% get_whatever for obj as varname %}
if len(tokens) == 5:
if tokens[3] != 'as':
raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0])
return cls(
object_expr = parser.compile_filter(tokens[2]),
as_varname = tokens[4],
)
# {% get_whatever for app.model pk as varname %}
elif len(tokens) == 6:
if tokens[4] != 'as':
raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0])
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3]),
as_varname = tokens[5]
)
else:
raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0])
@staticmethod
def lookup_content_type(token, tagname):
try:
app, model = token.split('.')
return ContentType.objects.get_by_natural_key(app, model)
except ValueError:
raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname)
except ContentType.DoesNotExist:
raise template.TemplateSyntaxError("%r tag has non-existant content-type: '%s.%s'" % (tagname, app, model))
def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, comment=None):
if ctype is None and object_expr is None:
raise template.TemplateSyntaxError("Comment nodes must be given either a literal object or a ctype and object pk.")
self.comment_model = comments.get_model()
self.as_varname = as_varname
self.ctype = ctype
self.object_pk_expr = object_pk_expr
self.object_expr = object_expr
self.comment = comment
def render(self, context):
qs = self.get_query_set(context)
context[self.as_varname] = self.get_context_value_from_queryset(context, qs)
return ''
def get_query_set(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if not object_pk:
return self.comment_model.objects.none()
qs = self.comment_model.objects.filter(
content_type = ctype,
object_pk = smart_text(object_pk),
site__pk = settings.SITE_ID,
)
# The is_public and is_removed fields are implementation details of the
# built-in comment model's spam filtering system, so they might not
# be present on a custom comment model subclass. If they exist, we
# should filter on them.
field_names = [f.name for f in self.comment_model._meta.fields]
if 'is_public' in field_names:
qs = qs.filter(is_public=True)
if getattr(settings, 'COMMENTS_HIDE_REMOVED', True) and 'is_removed' in field_names:
qs = qs.filter(is_removed=False)
return qs
def get_target_ctype_pk(self, context):
if self.object_expr:
try:
obj = self.object_expr.resolve(context)
except template.VariableDoesNotExist:
return None, None
return ContentType.objects.get_for_model(obj), obj.pk
else:
return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True)
def get_context_value_from_queryset(self, context, qs):
"""Subclasses should override this."""
raise NotImplementedError
class CommentListNode(BaseCommentNode):
"""Insert a list of comments into the context."""
def get_context_value_from_queryset(self, context, qs):
return list(qs)
class CommentCountNode(BaseCommentNode):
"""Insert a count of comments into the context."""
def get_context_value_from_queryset(self, context, qs):
return qs.count()
class CommentFormNode(BaseCommentNode):
"""Insert a form for the comment model into the context."""
def get_form(self, context):
obj = self.get_object(context)
if obj:
return comments.get_form()(obj)
else:
return None
def get_object(self, context):
if self.object_expr:
try:
return self.object_expr.resolve(context)
except template.VariableDoesNotExist:
return None
else:
object_pk = self.object_pk_expr.resolve(context,
ignore_failures=True)
return self.ctype.get_object_for_this_type(pk=object_pk)
def render(self, context):
context[self.as_varname] = self.get_form(context)
return ''
class RenderCommentFormNode(CommentFormNode):
"""Render the comment form directly"""
@classmethod
def handle_token(cls, parser, token):
"""Class method to parse render_comment_form and return a Node."""
tokens = token.split_contents()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% render_comment_form for obj %}
if len(tokens) == 3:
return cls(object_expr=parser.compile_filter(tokens[2]))
# {% render_comment_form for app.model pk %}
elif len(tokens) == 4:
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3])
)
def render(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
template_search_list = [
"comments/%s/%s/form.html" % (ctype.app_label, ctype.model),
"comments/%s/form.html" % ctype.app_label,
"comments/form.html"
]
context.push()
formstr = render_to_string(template_search_list, {"form" : self.get_form(context)}, context)
context.pop()
return formstr
else:
return ''
class RenderCommentListNode(CommentListNode):
"""Render the comment list directly"""
@classmethod
def handle_token(cls, parser, token):
"""Class method to parse render_comment_list and return a Node."""
tokens = token.split_contents()
if tokens[1] != 'for':
raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
# {% render_comment_list for obj %}
if len(tokens) == 3:
return cls(object_expr=parser.compile_filter(tokens[2]))
# {% render_comment_list for app.model pk %}
elif len(tokens) == 4:
return cls(
ctype = BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
object_pk_expr = parser.compile_filter(tokens[3])
)
def render(self, context):
ctype, object_pk = self.get_target_ctype_pk(context)
if object_pk:
template_search_list = [
"comments/%s/%s/list.html" % (ctype.app_label, ctype.model),
"comments/%s/list.html" % ctype.app_label,
"comments/list.html"
]
qs = self.get_query_set(context)
context.push()
liststr = render_to_string(template_search_list, {
"comment_list" : self.get_context_value_from_queryset(context, qs)
}, context)
context.pop()
return liststr
else:
return ''
# We could just register each classmethod directly, but then we'd lose out on
# the automagic docstrings-into-admin-docs tricks. So each node gets a cute
# wrapper function that just exists to hold the docstring.
@register.tag
def get_comment_count(parser, token):
"""
Gets the comment count for the given params and populates the template
context with a variable containing that value, whose name is defined by the
'as' clause.
Syntax::
{% get_comment_count for [object] as [varname] %}
{% get_comment_count for [app].[model] [object_id] as [varname] %}
Example usage::
{% get_comment_count for event as comment_count %}
{% get_comment_count for calendar.event event.id as comment_count %}
{% get_comment_count for calendar.event 17 as comment_count %}
"""
return CommentCountNode.handle_token(parser, token)
@register.tag
def get_comment_list(parser, token):
"""
Gets the list of comments for the given params and populates the template
context with a variable containing that value, whose name is defined by the
'as' clause.
Syntax::
{% get_comment_list for [object] as [varname] %}
{% get_comment_list for [app].[model] [object_id] as [varname] %}
Example usage::
{% get_comment_list for event as comment_list %}
{% for comment in comment_list %}
...
{% endfor %}
"""
return CommentListNode.handle_token(parser, token)
@register.tag
def render_comment_list(parser, token):
"""
Render the comment list (as returned by ``{% get_comment_list %}``)
through the ``comments/list.html`` template
Syntax::
{% render_comment_list for [object] %}
{% render_comment_list for [app].[model] [object_id] %}
Example usage::
{% render_comment_list for event %}
"""
return RenderCommentListNode.handle_token(parser, token)
@register.tag
def get_comment_form(parser, token):
"""
Get a (new) form object to post a new comment.
Syntax::
{% get_comment_form for [object] as [varname] %}
{% get_comment_form for [app].[model] [object_id] as [varname] %}
"""
return CommentFormNode.handle_token(parser, token)
@register.tag
def render_comment_form(parser, token):
"""
Render the comment form (as returned by ``{% get_comment_form %}``) through
the ``comments/form.html`` template.
Syntax::
{% render_comment_form for [object] %}
{% render_comment_form for [app].[model] [object_id] %}
"""
return RenderCommentFormNode.handle_token(parser, token)
@register.simple_tag
def comment_form_target():
"""
Get the target URL for the comment form.
Example::
<form action="{% comment_form_target %}" method="post">
"""
return comments.get_form_target()
@register.simple_tag
def get_comment_permalink(comment, anchor_pattern=None):
"""
Get the permalink for a comment, optionally specifying the format of the
named anchor to be appended to the end of the URL.
Example::
{% get_comment_permalink comment "#c%(id)s-by-%(user_name)s" %}
"""
if anchor_pattern:
return comment.get_absolute_url(anchor_pattern)
return comment.get_absolute_url()
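# Illustrative template usage for the tags registered above (the "blog.entry"
# app/model pair and the {% load %} label are assumptions, not part of this module):
#
#   {% load comments %}
#   {% get_comment_count for blog.entry entry.id as comment_count %}
#   <p>{{ comment_count }} comment{{ comment_count|pluralize }}</p>
#   {% render_comment_list for entry %}
#   {% render_comment_form for entry %}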
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from copy import deepcopy
from typing import Dict, List
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLBatchPredictOperator,
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLDeployModelOperator,
AutoMLGetModelOperator,
AutoMLImportDataOperator,
AutoMLListDatasetOperator,
AutoMLPredictOperator,
AutoMLTablesListColumnSpecsOperator,
AutoMLTablesListTableSpecsOperator,
AutoMLTablesUpdateDatasetOperator,
AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_DATASET_BUCKET = os.environ.get(
"GCP_AUTOML_DATASET_BUCKET", "gs://INVALID BUCKET NAME/bank-marketing.csv"
)
TARGET = os.environ.get("GCP_AUTOML_TARGET", "Deposit")
# Example values
MODEL_ID = "TBL123456"
DATASET_ID = "TBL123456"
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
# Example dataset
DATASET = {
"display_name": "test_set",
"tables_dataset_metadata": {"target_column_spec_id": ""},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_DATASET_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
def get_target_column_spec(columns_specs: List[Dict], column_name: str) -> str:
"""
Using column name returns spec of the column.
"""
for column in columns_specs:
if column["display_name"] == column_name:
return extract_object_id(column)
raise Exception(f"Unknown target column: {column_name}")
# Example DAG to create dataset, train model_id and deploy it.
with models.DAG(
"example_create_and_deploy",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
user_defined_macros={
"get_target_column_spec": get_target_column_spec,
"target": TARGET,
"extract_object_id": extract_object_id,
},
tags=['example'],
) as create_deploy_dag:
# [START howto_operator_automl_create_dataset]
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task",
dataset=DATASET,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
dataset_id = "{{ task_instance.xcom_pull('create_dataset_task', key='dataset_id') }}"
# [END howto_operator_automl_create_dataset]
MODEL["dataset_id"] = dataset_id
# [START howto_operator_automl_import_data]
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
# [END howto_operator_automl_import_data]
# [START howto_operator_automl_specs]
list_tables_spec_task = AutoMLTablesListTableSpecsOperator(
task_id="list_tables_spec_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_automl_specs]
# [START howto_operator_automl_column_specs]
list_columns_spec_task = AutoMLTablesListColumnSpecsOperator(
task_id="list_columns_spec_task",
dataset_id=dataset_id,
table_spec_id="{{ extract_object_id(task_instance.xcom_pull('list_tables_spec_task')[0]) }}",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_automl_column_specs]
# [START howto_operator_automl_update_dataset]
update = deepcopy(DATASET)
update["name"] = '{{ task_instance.xcom_pull("create_dataset_task")["name"] }}'
update["tables_dataset_metadata"][ # type: ignore
"target_column_spec_id"
] = "{{ get_target_column_spec(task_instance.xcom_pull('list_columns_spec_task'), target) }}"
update_dataset_task = AutoMLTablesUpdateDatasetOperator(
task_id="update_dataset_task",
dataset=update,
location=GCP_AUTOML_LOCATION,
)
# [END howto_operator_automl_update_dataset]
# [START howto_operator_automl_create_model]
create_model_task = AutoMLTrainModelOperator(
task_id="create_model_task",
model=MODEL,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
model_id = "{{ task_instance.xcom_pull('create_model_task', key='model_id') }}"
# [END howto_operator_automl_create_model]
# [START howto_operator_automl_delete_model]
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_automl_delete_model]
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
(
create_dataset_task # noqa
>> import_dataset_task # noqa
>> list_tables_spec_task # noqa
>> list_columns_spec_task # noqa
>> update_dataset_task # noqa
>> create_model_task # noqa
>> delete_model_task # noqa
>> delete_datasets_task # noqa
)
# Example DAG for AutoML datasets operations
with models.DAG(
"example_automl_dataset",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
user_defined_macros={"extract_object_id": extract_object_id},
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task",
dataset=DATASET,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
dataset_id = '{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
list_tables_spec_task = AutoMLTablesListTableSpecsOperator(
task_id="list_tables_spec_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
list_columns_spec_task = AutoMLTablesListColumnSpecsOperator(
task_id="list_columns_spec_task",
dataset_id=dataset_id,
table_spec_id="{{ extract_object_id(task_instance.xcom_pull('list_tables_spec_task')[0]) }}",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [START howto_operator_list_dataset]
list_datasets_task = AutoMLListDatasetOperator(
task_id="list_datasets_task",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_list_dataset]
# [START howto_operator_delete_dataset]
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id="{{ task_instance.xcom_pull('list_datasets_task', key='dataset_id_list') | list }}",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_delete_dataset]
(
create_dataset_task # noqa
>> import_dataset_task # noqa
>> list_tables_spec_task # noqa
>> list_columns_spec_task # noqa
>> list_datasets_task # noqa
>> delete_datasets_task # noqa
)
with models.DAG(
"example_gcp_get_deploy",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
tags=["example"],
) as get_deploy_dag:
# [START howto_operator_get_model]
get_model_task = AutoMLGetModelOperator(
task_id="get_model_task",
model_id=MODEL_ID,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_get_model]
# [START howto_operator_deploy_model]
deploy_model_task = AutoMLDeployModelOperator(
task_id="deploy_model_task",
model_id=MODEL_ID,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_deploy_model]
with models.DAG(
"example_gcp_predict",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
tags=["example"],
) as predict_dag:
# [START howto_operator_prediction]
predict_task = AutoMLPredictOperator(
task_id="predict_task",
model_id=MODEL_ID,
payload={}, # Add your own payload, the used model_id must be deployed
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_prediction]
# [START howto_operator_batch_prediction]
batch_predict_task = AutoMLBatchPredictOperator(
task_id="batch_predict_task",
model_id=MODEL_ID,
input_config={}, # Add your config
output_config={}, # Add your config
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_batch_prediction]
|
|
# ___ __ ___ _ _ ___
# || \/ | ||=|| \\// ||=||
# || | || || // || ||
# Ignore warnings for yaml usage.
import warnings
import ruamel.yaml
warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
import email.utils
import time
from datetime import datetime as Datetime
import pytz
import humanize
import dateparser
import iso8601
import dateutil.parser
from tzlocal import get_localzone
EPOCH_START = (1970, 1, 1)
class MayaDT(object):
"""The Maya Datetime object."""
def __init__(self, epoch):
super(MayaDT, self).__init__()
self._epoch = epoch
def __repr__(self):
return '<MayaDT epoch={}>'.format(self._epoch)
def __format__(self, *args, **kwargs):
"""Return's the datetime's format"""
return format(self.datetime(), *args, **kwargs)
# Timezone Crap
# -------------
@property
def timezone(self):
"""Returns the UTC tzinfo name. It's always UTC. Always."""
return 'UTC'
@property
def _tz(self):
"""Returns the UTC tzinfo object."""
return pytz.timezone(self.timezone)
@property
def local_timezone(self):
"""Returns the name of the local timezone, for informational purposes."""
return self._local_tz.zone
@property
def _local_tz(self):
"""Returns the local timezone."""
return get_localzone()
@staticmethod
def __dt_to_epoch(dt):
"""Converts a datetime into an epoch."""
# Assume UTC if no timezone info is provided.
if dt.tzinfo is None:
dt = dt.replace(tzinfo=pytz.utc)
epoch_start = Datetime(*EPOCH_START, tzinfo=pytz.timezone('UTC'))
return (dt - epoch_start).total_seconds()
# Importers
# ---------
@classmethod
def from_datetime(klass, dt):
"""Returns MayaDT instance from datetime."""
return klass(klass.__dt_to_epoch(dt))
@classmethod
def from_iso8601(klass, string):
"""Returns MayaDT instance from iso8601 string."""
dt = iso8601.parse_date(string)
return klass.from_datetime(dt)
@staticmethod
def from_rfc2822(string):
"""Returns MayaDT instance from rfc2822 string."""
return parse(string)
# Exporters
# ---------
def datetime(self, to_timezone=None, naive=False):
"""Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {string} -- timezone to convert to (default: None/UTC)
naive {boolean} -- if True, the tzinfo is simply dropped (default: False)
"""
if to_timezone:
dt = self.datetime().astimezone(pytz.timezone(to_timezone))
else:
dt = Datetime.utcfromtimestamp(self._epoch)
# utcfromtimestamp() returns a naive datetime, so attach UTC explicitly.
dt = dt.replace(tzinfo=self._tz)
# Strip the timezone info if requested to do so.
if naive:
return dt.replace(tzinfo=None)
else:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self._tz)
return dt
def iso8601(self):
"""Returns an ISO 8601 representation of the MayaDT."""
# Get a timezone-naive datetime.
dt = self.datetime(naive=True)
return '{}Z'.format(dt.isoformat())
def rfc2822(self):
"""Returns an RFC 2822 representation of the MayaDT."""
return email.utils.formatdate(self.epoch, usegmt=True)
# Properties
# ----------
@property
def year(self):
return self.datetime().year
@property
def month(self):
return self.datetime().month
@property
def day(self):
return self.datetime().day
@property
def hour(self):
return self.datetime().hour
@property
def minute(self):
return self.datetime().minute
@property
def second(self):
return self.datetime().second
@property
def microsecond(self):
return self.datetime().microsecond
@property
def epoch(self):
return self._epoch
# Human Slang Extras
# ------------------
def slang_date(self):
""""Returns human slang representation of date."""
return humanize.naturaldate(self.datetime())
def slang_time(self):
""""Returns human slang representation of time."""
dt = self.datetime(naive=True, to_timezone=self.local_timezone)
return humanize.naturaltime(dt)
def now():
"""Returns a MayaDT instance for this exact moment."""
epoch = time.time()
return MayaDT(epoch=epoch)
def when(string, timezone='UTC'):
""""Returns a MayaDT instance for the human moment specified.
Powered by dateparser. Useful for scraping websites.
Examples:
'next week', 'now', 'tomorrow', '300 years ago', 'August 14, 2015'
Keyword Arguments:
string -- string to be parsed
timezone -- timezone referenced from (default: 'UTC')
"""
dt = dateparser.parse(string, settings={'TIMEZONE': timezone, 'RETURN_AS_TIMEZONE_AWARE': True, 'TO_TIMEZONE': 'UTC'})
if dt is None:
raise ValueError('invalid datetime input specified.')
return MayaDT.from_datetime(dt)
def parse(string, day_first=False):
""""Returns a MayaDT instance for the machine-produced moment specified.
Powered by dateutil. Accepts most known formats. Useful for working with data.
Keyword Arguments:
string -- string to be parsed
day_first -- if true, the first value (e.g. 01/05/2016) is parsed as day (default: False)
"""
dt = dateutil.parser.parse(string, dayfirst=day_first)
return MayaDT.from_datetime(dt)
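# --- Illustrative usage (an assumed example, not part of the original module) ---
# A minimal sketch of the importers and exporters above; it assumes the
# third-party dependencies imported at the top of this file are installed.
if __name__ == '__main__':
    d = parse('2016-02-01T12:00:00Z')     # machine-produced timestamp
    print(d.iso8601())                    # 2016-02-01T12:00:00Z
    print(d.rfc2822())                    # Mon, 01 Feb 2016 12:00:00 GMT
    print(now().epoch)                    # seconds since the Unix epoch, as a float
    print(when('tomorrow').slang_date())  # e.g. 'tomorrow'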
|
|
"""
This module contains the functions that create most of data files for
a particular region of New Zealand.
It assumes that you have the created the following files for a region
<region> already::
|- data/
|- processed/
|- rents.csv
|- rental_areas.geojson
|- rental_area_points.geojson
|- <region>/
|- walking_commutes.csv
|- bicycling_commutes.csv
|- driving_commutes.csv
|- transit_commutes.csv
TODO:
- Add automated tests
"""
from typing import Optional, List
import json
import os
from pathlib import Path
import datetime as dt
import numpy as np
import pandas as pd
import geopandas as gpd
ROOT = Path(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
DATA_DIR = ROOT/'docs'/'data'
CRS_NZGD49 = 'epsg:27200'
CRS_NZTM = 'epsg:2193'
CRS_WGS84 = 'epsg:4326'
REGIONS = [
'auckland',
'canterbury',
'wellington',
]
MODES = [
'walking',
'bicycling',
'driving',
'transit',
]
# Cost in NZD/km. Get transit costs from an origin-destination matrix.
COST_BY_MODE = {
'walking': 0,
'bicycling': 0,
'driving': 0.274,
'transit': 0,
}
def get_secret(key, secrets_path=ROOT/'secrets.json'):
"""
Open the JSON file at ``secrets_path``, and return the value
corresponding to the given key.
"""
secrets_path = Path(secrets_path)
with secrets_path.open() as src:
secrets = json.load(src)
return secrets[key]
def get_path(key, region=None):
"""
Return the path (Path object) of the file corresponding to the given
key (string) and region (string).
"""
path = DATA_DIR/'processed'
error = ValueError('Invalid key-region pair ({!s}, {!s})'.format(
key, region))
if region is None:
if key == 'property_titles':
# Special case using collected data
path = DATA_DIR/'collected'/'nz-cadastral-titles-jan-10.gpkg'
elif key == 'au2001_csv':
path /= 'au2001.csv'
elif key == 'au2001':
path /= 'au2001.geojson'
elif key == 'rental_areas_csv':
path /= 'rental_areas.csv'
elif key == 'rental_areas':
path /= 'rental_areas.geojson'
elif key == 'rental_points':
path /= 'rental_points.geojson'
elif key == 'rents':
path /= 'rents.csv'
else:
raise error
else:
path /= region
if key == 'rental_areas':
path /= 'rental_areas.geojson'
elif key == 'rental_points':
path /= 'rental_points.geojson'
elif key == 'rents':
path /= 'rents.csv'
elif key == 'rents_json':
path /= 'rents.json'
elif key == 'commutes_walking':
path /= 'commutes_walking.csv'
elif key == 'commutes_bicycling':
path /= 'commutes_bicycling.csv'
elif key == 'commutes_driving':
path /= 'commutes_driving.csv'
elif key == 'commutes_transit':
path /= 'commutes_transit.csv'
elif key == 'transit_costs':
path /= 'transit_costs.csv'
elif key == 'commute_costs':
path /= 'commute_costs.json'
else:
raise error
return path
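# Illustrative results (assumed examples; paths shown relative to the repository root):
#   get_path('rents')                        -> docs/data/processed/rents.csv
#   get_path('rents', 'auckland')            -> docs/data/processed/auckland/rents.csv
#   get_path('commutes_walking', 'auckland') -> docs/data/processed/auckland/commutes_walking.csv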
def get_data(key, region=None):
"""
Return the data corresponding to the given key (string) and
region (string).
"""
path = get_path(key, region)
if not path.exists():
raise ValueError('Data does not exist for key-region pair'
' ({!s}, {!s})'.format(key, region))
s = path.suffix
if s == '.csv':
result = pd.read_csv(path, dtype={'au2001': str})
elif s in ['.geojson', '.gpkg']:
result = gpd.read_file(str(path))
elif s == '.json':
with path.open() as src:
result = json.load(src)
return result
def get_latest_quarters(n: int, *, from_today=False) -> List[str]:
"""
Return a list of the latest ``n`` rental data quarters as
YYYY-MM-DD datestrings sorted chronologically.
Each quarter will be of the form YYYY-03-01, YYYY-06-01,
YYYY-09-01, or YYYY-12-01.
If ``from_today``, get the latest quarters theoretically possible from
today's date; otherwise, get them from the rental data.
"""
if from_today:
quarters = [q.strftime('%Y-%m') + '-01' for q in
pd.date_range(end=dt.datetime.now(), freq='Q', periods=n)]
else:
quarters = get_data('rents')['quarter'].unique()[-n:].tolist()
return quarters
def aggregate_rents(rents, date=None, groupby_cols=('rental_area',
'num_bedrooms')):
"""
Given a DataFrame of rents, group the rents by the given groupby
columns, recomputing the counts and means.
Return the resulting data frame, which has the following columns:
- the columns in ``groupby_cols``
- ``'territory'``
- ``'region'``
- ``'rent_count'``
- ``'rent_mean'``
- ``'rent_geo_mean'``
If a date (YYYY-MM-DD date string) is given, then first slice the
rents to calendar quarters equal to or later than the date.
"""
if date is not None:
f = rents.loc[lambda x: x.quarter >= date].copy()
else:
f = rents.copy()
def my_agg(group):
d = {}
if 'territory' not in groupby_cols:
d['territory'] = group['territory'].iat[0]
if 'region' not in groupby_cols:
d['region'] = group['region'].iat[0]
d['rent_count'] = group['rent_count'].sum()
d['rent_mean'] = (group['rent_mean']*group['rent_count']).sum()/\
d['rent_count']
if d['rent_count']:
d['rent_geo_mean'] = (group['rent_geo_mean']**(
group['rent_count']/d['rent_count'])).prod()
else:
d['rent_geo_mean'] = np.nan
return pd.Series(d)
return (
f
.groupby(list(groupby_cols))
.apply(my_agg)
.reset_index()
)
def nan_to_none(df):
"""
Replace the NaN values in the given DataFrame with None and return
the resulting DataFrame.
"""
return df.where((pd.notnull(df)), None)
def build_json_rents(rents):
"""
Given a DataFrame of rents of the form output by
:func:`get_data` with key ``'rents'``, aggregate the rents by rental area
and number of bedrooms ('1', '2', '3', or '4'), and return the
result as a dictionary of the form
rental area -> num_bedrooms -> rent geometric mean.
Some of the mean rents could be ``None``.
"""
f = aggregate_rents(rents)
# Drop 5+ bedrooms and round to nearest dollar
f = f[f['num_bedrooms'] != '5+'].copy().round()
# Replace NaN with None to make JSON-compatible
f = nan_to_none(f)
# Save to dictionary of form rental area -> num_bedrooms -> rent geo mean
d = {area: dict(g[['num_bedrooms', 'rent_geo_mean']].values)
for area, g in f.groupby('rental_area')}
return d
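# --- Illustrative usage (an assumed example, not part of the original module) ---
# A minimal sketch of aggregate_rents/build_json_rents on a hand-made DataFrame
# using the column layout described above; real data comes from get_data('rents').
if __name__ == '__main__':
    rents = pd.DataFrame({
        'rental_area': ['A', 'A', 'B'],
        'num_bedrooms': ['2', '2', '3'],
        'territory': ['T1', 'T1', 'T2'],
        'region': ['auckland', 'auckland', 'auckland'],
        'quarter': ['2018-03-01', '2018-06-01', '2018-06-01'],
        'rent_count': [10, 30, 5],
        'rent_mean': [400.0, 420.0, 380.0],
        'rent_geo_mean': [395.0, 415.0, 375.0],
    })
    print(aggregate_rents(rents, date='2018-06-01'))
    print(build_json_rents(rents))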
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._sourcemodbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of SourceMod functions.
It is able to re-generate itself.
Do not edit the FUNCTIONS list by hand.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
FUNCTIONS = ['TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
'AddToTopMenu',
'GetTopMenuInfoString',
'GetTopMenuObjName',
'RemoveFromTopMenu',
'DisplayTopMenu',
'FindTopMenuCategory',
'OnAdminMenuCreated',
'OnAdminMenuReady',
'GetAdminTopMenu',
'AddTargetsToMenu',
'AddTargetsToMenu2',
'RedisplayAdminMenu',
'TEHook',
'AddTempEntHook',
'RemoveTempEntHook',
'TE_Start',
'TE_IsValidProp',
'TE_WriteNum',
'TE_ReadNum',
'TE_WriteFloat',
'TE_ReadFloat',
'TE_WriteVector',
'TE_ReadVector',
'TE_WriteAngles',
'TE_WriteFloatArray',
'TE_Send',
'TE_WriteEncodedEnt',
'TE_SendToAll',
'TE_SendToClient',
'CreateKeyValues',
'KvSetString',
'KvSetNum',
'KvSetUInt64',
'KvSetFloat',
'KvSetColor',
'KvSetVector',
'KvGetString',
'KvGetNum',
'KvGetFloat',
'KvGetColor',
'KvGetUInt64',
'KvGetVector',
'KvJumpToKey',
'KvJumpToKeySymbol',
'KvGotoFirstSubKey',
'KvGotoNextKey',
'KvSavePosition',
'KvDeleteKey',
'KvDeleteThis',
'KvGoBack',
'KvRewind',
'KvGetSectionName',
'KvSetSectionName',
'KvGetDataType',
'KeyValuesToFile',
'FileToKeyValues',
'KvSetEscapeSequences',
'KvNodesInStack',
'KvCopySubkeys',
'KvFindKeyById',
'KvGetNameSymbol',
'KvGetSectionSymbol',
'TE_SetupSparks',
'TE_SetupSmoke',
'TE_SetupDust',
'TE_SetupMuzzleFlash',
'TE_SetupMetalSparks',
'TE_SetupEnergySplash',
'TE_SetupArmorRicochet',
'TE_SetupGlowSprite',
'TE_SetupExplosion',
'TE_SetupBloodSprite',
'TE_SetupBeamRingPoint',
'TE_SetupBeamPoints',
'TE_SetupBeamLaser',
'TE_SetupBeamRing',
'TE_SetupBeamFollow',
'HookEvent',
'HookEventEx',
'UnhookEvent',
'CreateEvent',
'FireEvent',
'CancelCreatedEvent',
'GetEventBool',
'SetEventBool',
'GetEventInt',
'SetEventInt',
'GetEventFloat',
'SetEventFloat',
'GetEventString',
'SetEventString',
'GetEventName',
'SetEventBroadcast',
'GetUserMessageId',
'GetUserMessageName',
'StartMessage',
'StartMessageEx',
'EndMessage',
'MsgHook',
'MsgPostHook',
'HookUserMessage',
'UnhookUserMessage',
'StartMessageAll',
'StartMessageOne',
'InactivateClient',
'ReconnectClient',
'GetMaxEntities',
'GetEntityCount',
'IsValidEntity',
'IsValidEdict',
'IsEntNetworkable',
'CreateEdict',
'RemoveEdict',
'GetEdictFlags',
'SetEdictFlags',
'GetEdictClassname',
'GetEntityNetClass',
'ChangeEdictState',
'GetEntData',
'SetEntData',
'GetEntDataFloat',
'SetEntDataFloat',
'GetEntDataEnt2',
'SetEntDataEnt2',
'GetEntDataVector',
'SetEntDataVector',
'GetEntDataString',
'SetEntDataString',
'FindSendPropOffs',
'FindSendPropInfo',
'FindDataMapOffs',
'GetEntSendPropOffs',
'GetEntProp',
'SetEntProp',
'GetEntPropFloat',
'SetEntPropFloat',
'GetEntPropEnt',
'SetEntPropEnt',
'GetEntPropVector',
'SetEntPropVector',
'GetEntPropString',
'SetEntPropString',
'GetEntPropArraySize',
'GetEntDataArray',
'SetEntDataArray',
'GetEntityClassname',
'float',
'FloatMul',
'FloatDiv',
'FloatAdd',
'FloatSub',
'FloatFraction',
'RoundToZero',
'RoundToCeil',
'RoundToFloor',
'RoundToNearest',
'FloatCompare',
'SquareRoot',
'Pow',
'Exponential',
'Logarithm',
'Sine',
'Cosine',
'Tangent',
'FloatAbs',
'ArcTangent',
'ArcCosine',
'ArcSine',
'ArcTangent2',
'RoundFloat',
'operator%',
'DegToRad',
'RadToDeg',
'GetURandomInt',
'GetURandomFloat',
'SetURandomSeed',
'SetURandomSeedSimple',
'RemovePlayerItem',
'GivePlayerItem',
'GetPlayerWeaponSlot',
'IgniteEntity',
'ExtinguishEntity',
'TeleportEntity',
'ForcePlayerSuicide',
'SlapPlayer',
'FindEntityByClassname',
'GetClientEyeAngles',
'CreateEntityByName',
'DispatchSpawn',
'DispatchKeyValue',
'DispatchKeyValueFloat',
'DispatchKeyValueVector',
'GetClientAimTarget',
'GetTeamCount',
'GetTeamName',
'GetTeamScore',
'SetTeamScore',
'GetTeamClientCount',
'SetEntityModel',
'GetPlayerDecalFile',
'GetServerNetStats',
'EquipPlayerWeapon',
'ActivateEntity',
'SetClientInfo',
'SetClientListeningFlags',
'GetClientListeningFlags',
'SetListenOverride',
'GetListenOverride',
'IsClientMuted',
'TR_GetPointContents',
'TR_GetPointContentsEnt',
'TR_TraceRay',
'TR_TraceHull',
'TR_TraceRayFilter',
'TR_TraceHullFilter',
'TR_TraceRayEx',
'TR_TraceHullEx',
'TR_TraceRayFilterEx',
'TR_TraceHullFilterEx',
'TR_GetFraction',
'TR_GetEndPosition',
'TR_GetEntityIndex',
'TR_DidHit',
'TR_GetHitGroup',
'TR_GetPlaneNormal',
'TR_PointOutsideWorld',
'SortIntegers',
'SortFloats',
'SortStrings',
'SortFunc1D',
'SortCustom1D',
'SortCustom2D',
'SortADTArray',
'SortFuncADTArray',
'SortADTArrayCustom',
'CompileRegex',
'MatchRegex',
'GetRegexSubString',
'SimpleRegexMatch',
'TF2_GetPlayerClass',
'TF2_SetPlayerClass',
'TF2_GetPlayerResourceData',
'TF2_SetPlayerResourceData',
'TF2_RemoveWeaponSlot',
'TF2_RemoveAllWeapons',
'TF2_IsPlayerInCondition',
'TF2_GetObjectType',
'TF2_GetObjectMode',
'NominateMap',
'RemoveNominationByMap',
'RemoveNominationByOwner',
'GetExcludeMapList',
'GetNominatedMapList',
'CanMapChooserStartVote',
'InitiateMapChooserVote',
'HasEndOfMapVoteFinished',
'EndOfMapVoteEnabled',
'OnNominationRemoved',
'OnMapVoteStarted',
'CreateTimer',
'KillTimer',
'TriggerTimer',
'GetTickedTime',
'GetMapTimeLeft',
'GetMapTimeLimit',
'ExtendMapTimeLimit',
'GetTickInterval',
'OnMapTimeLeftChanged',
'IsServerProcessing',
'CreateDataTimer',
'ByteCountToCells',
'CreateArray',
'ClearArray',
'CloneArray',
'ResizeArray',
'GetArraySize',
'PushArrayCell',
'PushArrayString',
'PushArrayArray',
'GetArrayCell',
'GetArrayString',
'GetArrayArray',
'SetArrayCell',
'SetArrayString',
'SetArrayArray',
'ShiftArrayUp',
'RemoveFromArray',
'SwapArrayItems',
'FindStringInArray',
'FindValueInArray',
'ProcessTargetString',
'ReplyToTargetError',
'MultiTargetFilter',
'AddMultiTargetFilter',
'RemoveMultiTargetFilter',
'OnBanClient',
'OnBanIdentity',
'OnRemoveBan',
'BanClient',
'BanIdentity',
'RemoveBan',
'CreateTrie',
'SetTrieValue',
'SetTrieArray',
'SetTrieString',
'GetTrieValue',
'GetTrieArray',
'GetTrieString',
'RemoveFromTrie',
'ClearTrie',
'GetTrieSize',
'GetFunctionByName',
'CreateGlobalForward',
'CreateForward',
'GetForwardFunctionCount',
'AddToForward',
'RemoveFromForward',
'RemoveAllFromForward',
'Call_StartForward',
'Call_StartFunction',
'Call_PushCell',
'Call_PushCellRef',
'Call_PushFloat',
'Call_PushFloatRef',
'Call_PushArray',
'Call_PushArrayEx',
'Call_PushString',
'Call_PushStringEx',
'Call_Finish',
'Call_Cancel',
'NativeCall',
'CreateNative',
'ThrowNativeError',
'GetNativeStringLength',
'GetNativeString',
'SetNativeString',
'GetNativeCell',
'GetNativeCellRef',
'SetNativeCellRef',
'GetNativeArray',
'SetNativeArray',
'FormatNativeString',
'OnRebuildAdminCache',
'DumpAdminCache',
'AddCommandOverride',
'GetCommandOverride',
'UnsetCommandOverride',
'CreateAdmGroup',
'FindAdmGroup',
'SetAdmGroupAddFlag',
'GetAdmGroupAddFlag',
'GetAdmGroupAddFlags',
'SetAdmGroupImmuneFrom',
'GetAdmGroupImmuneCount',
'GetAdmGroupImmuneFrom',
'AddAdmGroupCmdOverride',
'GetAdmGroupCmdOverride',
'RegisterAuthIdentType',
'CreateAdmin',
'GetAdminUsername',
'BindAdminIdentity',
'SetAdminFlag',
'GetAdminFlag',
'GetAdminFlags',
'AdminInheritGroup',
'GetAdminGroupCount',
'GetAdminGroup',
'SetAdminPassword',
'GetAdminPassword',
'FindAdminByIdentity',
'RemoveAdmin',
'FlagBitsToBitArray',
'FlagBitArrayToBits',
'FlagArrayToBits',
'FlagBitsToArray',
'FindFlagByName',
'FindFlagByChar',
'FindFlagChar',
'ReadFlagString',
'CanAdminTarget',
'CreateAuthMethod',
'SetAdmGroupImmunityLevel',
'GetAdmGroupImmunityLevel',
'SetAdminImmunityLevel',
'GetAdminImmunityLevel',
'FlagToBit',
'BitToFlag',
'ServerCommand',
'ServerCommandEx',
'InsertServerCommand',
'ServerExecute',
'ClientCommand',
'FakeClientCommand',
'FakeClientCommandEx',
'PrintToServer',
'PrintToConsole',
'ReplyToCommand',
'GetCmdReplySource',
'SetCmdReplySource',
'IsChatTrigger',
'ShowActivity2',
'ShowActivity',
'ShowActivityEx',
'FormatActivitySource',
'SrvCmd',
'RegServerCmd',
'ConCmd',
'RegConsoleCmd',
'RegAdminCmd',
'GetCmdArgs',
'GetCmdArg',
'GetCmdArgString',
'CreateConVar',
'FindConVar',
'ConVarChanged',
'HookConVarChange',
'UnhookConVarChange',
'GetConVarBool',
'SetConVarBool',
'GetConVarInt',
'SetConVarInt',
'GetConVarFloat',
'SetConVarFloat',
'GetConVarString',
'SetConVarString',
'ResetConVar',
'GetConVarDefault',
'GetConVarFlags',
'SetConVarFlags',
'GetConVarBounds',
'SetConVarBounds',
'GetConVarName',
'QueryClientConVar',
'GetCommandIterator',
'ReadCommandIterator',
'CheckCommandAccess',
'CheckAccess',
'IsValidConVarChar',
'GetCommandFlags',
'SetCommandFlags',
'FindFirstConCommand',
'FindNextConCommand',
'SendConVarValue',
'AddServerTag',
'RemoveServerTag',
'CommandListener',
'AddCommandListener',
'RemoveCommandListener',
'TF2_IgnitePlayer',
'TF2_RespawnPlayer',
'TF2_RegeneratePlayer',
'TF2_AddCondition',
'TF2_RemoveCondition',
'TF2_SetPlayerPowerPlay',
'TF2_DisguisePlayer',
'TF2_RemovePlayerDisguise',
'TF2_StunPlayer',
'TF2_MakeBleed',
'TF2_GetResourceEntity',
'TF2_GetClass',
'TF2_CalcIsAttackCritical',
'TF2_OnIsHolidayActive',
'TF2_IsPlayerInDuel',
'TF2_OnConditionAdded',
'TF2_OnConditionRemoved',
'TF2_OnWaitingForPlayersStart',
'TF2_OnWaitingForPlayersEnd',
'SQL_Connect',
'SQL_DefConnect',
'SQL_ConnectCustom',
'SQLite_UseDatabase',
'SQL_CheckConfig',
'SQL_GetDriver',
'SQL_ReadDriver',
'SQL_GetDriverIdent',
'SQL_GetDriverProduct',
'SQL_GetAffectedRows',
'SQL_GetInsertId',
'SQL_GetError',
'SQL_EscapeString',
'SQL_QuoteString',
'SQL_FastQuery',
'SQL_Query',
'SQL_PrepareQuery',
'SQL_FetchMoreResults',
'SQL_HasResultSet',
'SQL_GetRowCount',
'SQL_GetFieldCount',
'SQL_FieldNumToName',
'SQL_FieldNameToNum',
'SQL_FetchRow',
'SQL_MoreRows',
'SQL_Rewind',
'SQL_FetchString',
'SQL_FetchFloat',
'SQL_FetchInt',
'SQL_IsFieldNull',
'SQL_FetchSize',
'SQL_BindParamInt',
'SQL_BindParamFloat',
'SQL_BindParamString',
'SQL_Execute',
'SQL_LockDatabase',
'SQL_UnlockDatabase',
'SQLTCallback',
'SQL_IsSameConnection',
'SQL_TConnect',
'SQL_TQuery',
'CloseHandle',
'CloneHandle',
'MenuHandler',
'CreateMenu',
'DisplayMenu',
'DisplayMenuAtItem',
'AddMenuItem',
'InsertMenuItem',
'RemoveMenuItem',
'RemoveAllMenuItems',
'GetMenuItem',
'GetMenuSelectionPosition',
'GetMenuItemCount',
'SetMenuPagination',
'GetMenuPagination',
'GetMenuStyle',
'SetMenuTitle',
'GetMenuTitle',
'CreatePanelFromMenu',
'GetMenuExitButton',
'SetMenuExitButton',
'GetMenuExitBackButton',
'SetMenuExitBackButton',
'SetMenuNoVoteButton',
'CancelMenu',
'GetMenuOptionFlags',
'SetMenuOptionFlags',
'IsVoteInProgress',
'CancelVote',
'VoteMenu',
'VoteMenuToAll',
'VoteHandler',
'SetVoteResultCallback',
'CheckVoteDelay',
'IsClientInVotePool',
'RedrawClientVoteMenu',
'GetMenuStyleHandle',
'CreatePanel',
'CreateMenuEx',
'GetClientMenu',
'CancelClientMenu',
'GetMaxPageItems',
'GetPanelStyle',
'SetPanelTitle',
'DrawPanelItem',
'DrawPanelText',
'CanPanelDrawFlags',
'SetPanelKeys',
'SendPanelToClient',
'GetPanelTextRemaining',
'GetPanelCurrentKey',
'SetPanelCurrentKey',
'RedrawMenuItem',
'InternalShowMenu',
'GetMenuVoteInfo',
'IsNewVoteAllowed',
'PrefetchSound',
'EmitAmbientSound',
'FadeClientVolume',
'StopSound',
'EmitSound',
'EmitSentence',
'GetDistGainFromSoundLevel',
'AmbientSHook',
'NormalSHook',
'AddAmbientSoundHook',
'AddNormalSoundHook',
'RemoveAmbientSoundHook',
'RemoveNormalSoundHook',
'EmitSoundToClient',
'EmitSoundToAll',
'ATTN_TO_SNDLEVEL',
'strlen',
'StrContains',
'strcmp',
'strncmp',
'StrEqual',
'strcopy',
'Format',
'FormatEx',
'VFormat',
'StringToInt',
'StringToIntEx',
'IntToString',
'StringToFloat',
'StringToFloatEx',
'FloatToString',
'BreakString',
'TrimString',
'SplitString',
'ReplaceString',
'ReplaceStringEx',
'GetCharBytes',
'IsCharAlpha',
'IsCharNumeric',
'IsCharSpace',
'IsCharMB',
'IsCharUpper',
'IsCharLower',
'StripQuotes',
'CharToUpper',
'CharToLower',
'FindCharInString',
'StrCat',
'ExplodeString',
'ImplodeStrings',
'GetVectorLength',
'GetVectorDistance',
'GetVectorDotProduct',
'GetVectorCrossProduct',
'NormalizeVector',
'GetAngleVectors',
'GetVectorAngles',
'GetVectorVectors',
'AddVectors',
'SubtractVectors',
'ScaleVector',
'NegateVector',
'MakeVectorFromPoints',
'BaseComm_IsClientGagged',
'BaseComm_IsClientMuted',
'BaseComm_SetClientGag',
'BaseComm_SetClientMute',
'FormatUserLogText',
'FindPluginByFile',
'FindTarget',
'AcceptEntityInput',
'SetVariantBool',
'SetVariantString',
'SetVariantInt',
'SetVariantFloat',
'SetVariantVector3D',
'SetVariantPosVector3D',
'SetVariantColor',
'SetVariantEntity',
'GameRules_GetProp',
'GameRules_SetProp',
'GameRules_GetPropFloat',
'GameRules_SetPropFloat',
'GameRules_GetPropEnt',
'GameRules_SetPropEnt',
'GameRules_GetPropVector',
'GameRules_SetPropVector',
'GameRules_GetPropString',
'GameRules_SetPropString',
'GameRules_GetRoundState',
'OnClientConnect',
'OnClientConnected',
'OnClientPutInServer',
'OnClientDisconnect',
'OnClientDisconnect_Post',
'OnClientCommand',
'OnClientSettingsChanged',
'OnClientAuthorized',
'OnClientPreAdminCheck',
'OnClientPostAdminFilter',
'OnClientPostAdminCheck',
'GetMaxClients',
'GetClientCount',
'GetClientName',
'GetClientIP',
'GetClientAuthString',
'GetClientUserId',
'IsClientConnected',
'IsClientInGame',
'IsClientInKickQueue',
'IsClientAuthorized',
'IsFakeClient',
'IsClientSourceTV',
'IsClientReplay',
'IsClientObserver',
'IsPlayerAlive',
'GetClientInfo',
'GetClientTeam',
'SetUserAdmin',
'GetUserAdmin',
'AddUserFlags',
'RemoveUserFlags',
'SetUserFlagBits',
'GetUserFlagBits',
'CanUserTarget',
'RunAdminCacheChecks',
'NotifyPostAdminCheck',
'CreateFakeClient',
'SetFakeClientConVar',
'GetClientHealth',
'GetClientModel',
'GetClientWeapon',
'GetClientMaxs',
'GetClientMins',
'GetClientAbsAngles',
'GetClientAbsOrigin',
'GetClientArmor',
'GetClientDeaths',
'GetClientFrags',
'GetClientDataRate',
'IsClientTimingOut',
'GetClientTime',
'GetClientLatency',
'GetClientAvgLatency',
'GetClientAvgLoss',
'GetClientAvgChoke',
'GetClientAvgData',
'GetClientAvgPackets',
'GetClientOfUserId',
'KickClient',
'KickClientEx',
'ChangeClientTeam',
'GetClientSerial',
'GetClientFromSerial',
'FindStringTable',
'GetNumStringTables',
'GetStringTableNumStrings',
'GetStringTableMaxStrings',
'GetStringTableName',
'FindStringIndex',
'ReadStringTable',
'GetStringTableDataLength',
'GetStringTableData',
'SetStringTableData',
'AddToStringTable',
'LockStringTables',
'AddFileToDownloadsTable',
'GetEntityFlags',
'SetEntityFlags',
'GetEntityMoveType',
'SetEntityMoveType',
'GetEntityRenderMode',
'SetEntityRenderMode',
'GetEntityRenderFx',
'SetEntityRenderFx',
'SetEntityRenderColor',
'GetEntityGravity',
'SetEntityGravity',
'SetEntityHealth',
'GetClientButtons',
'EntityOutput',
'HookEntityOutput',
'UnhookEntityOutput',
'HookSingleEntityOutput',
'UnhookSingleEntityOutput',
'SMC_CreateParser',
'SMC_ParseFile',
'SMC_GetErrorString',
'SMC_ParseStart',
'SMC_SetParseStart',
'SMC_ParseEnd',
'SMC_SetParseEnd',
'SMC_NewSection',
'SMC_KeyValue',
'SMC_EndSection',
'SMC_SetReaders',
'SMC_RawLine',
'SMC_SetRawLine',
'BfWriteBool',
'BfWriteByte',
'BfWriteChar',
'BfWriteShort',
'BfWriteWord',
'BfWriteNum',
'BfWriteFloat',
'BfWriteString',
'BfWriteEntity',
'BfWriteAngle',
'BfWriteCoord',
'BfWriteVecCoord',
'BfWriteVecNormal',
'BfWriteAngles',
'BfReadBool',
'BfReadByte',
'BfReadChar',
'BfReadShort',
'BfReadWord',
'BfReadNum',
'BfReadFloat',
'BfReadString',
'BfReadEntity',
'BfReadAngle',
'BfReadCoord',
'BfReadVecCoord',
'BfReadVecNormal',
'BfReadAngles',
'BfGetNumBytesLeft',
'CreateProfiler',
'StartProfiling',
'StopProfiling',
'GetProfilerTime',
'OnPluginStart',
'AskPluginLoad2',
'OnPluginEnd',
'OnPluginPauseChange',
'OnGameFrame',
'OnMapStart',
'OnMapEnd',
'OnConfigsExecuted',
'OnAutoConfigsBuffered',
'OnAllPluginsLoaded',
'GetMyHandle',
'GetPluginIterator',
'MorePlugins',
'ReadPlugin',
'GetPluginStatus',
'GetPluginFilename',
'IsPluginDebugging',
'GetPluginInfo',
'FindPluginByNumber',
'SetFailState',
'ThrowError',
'GetTime',
'FormatTime',
'LoadGameConfigFile',
'GameConfGetOffset',
'GameConfGetKeyValue',
'GetSysTickCount',
'AutoExecConfig',
'RegPluginLibrary',
'LibraryExists',
'GetExtensionFileStatus',
'OnLibraryAdded',
'OnLibraryRemoved',
'ReadMapList',
'SetMapListCompatBind',
'OnClientFloodCheck',
'OnClientFloodResult',
'CanTestFeatures',
'GetFeatureStatus',
'RequireFeature',
'LoadFromAddress',
'StoreToAddress',
'CreateStack',
'PushStackCell',
'PushStackString',
'PushStackArray',
'PopStackCell',
'PopStackString',
'PopStackArray',
'IsStackEmpty',
'PopStack',
'OnPlayerRunCmd',
'BuildPath',
'OpenDirectory',
'ReadDirEntry',
'OpenFile',
'DeleteFile',
'ReadFileLine',
'ReadFile',
'ReadFileString',
'WriteFile',
'WriteFileString',
'WriteFileLine',
'ReadFileCell',
'WriteFileCell',
'IsEndOfFile',
'FileSeek',
'FilePosition',
'FileExists',
'RenameFile',
'DirExists',
'FileSize',
'FlushFile',
'RemoveDir',
'CreateDirectory',
'GetFileTime',
'LogToOpenFile',
'LogToOpenFileEx',
'SetNextMap',
'GetNextMap',
'ForceChangeLevel',
'GetMapHistorySize',
'GetMapHistory',
'GeoipCode2',
'GeoipCode3',
'GeoipCountry',
'MarkNativeAsOptional',
'RegClientCookie',
'FindClientCookie',
'SetClientCookie',
'GetClientCookie',
'SetAuthIdCookie',
'AreClientCookiesCached',
'OnClientCookiesCached',
'CookieMenuHandler',
'SetCookiePrefabMenu',
'SetCookieMenuItem',
'ShowCookieMenu',
'GetCookieIterator',
'ReadCookieIterator',
'GetCookieAccess',
'GetClientCookieTime',
'LoadTranslations',
'SetGlobalTransTarget',
'GetClientLanguage',
'GetServerLanguage',
'GetLanguageCount',
'GetLanguageInfo',
'SetClientLanguage',
'GetLanguageByCode',
'GetLanguageByName',
'CS_OnBuyCommand',
'CS_OnCSWeaponDrop',
'CS_OnGetWeaponPrice',
'CS_OnTerminateRound',
'CS_RespawnPlayer',
'CS_SwitchTeam',
'CS_DropWeapon',
'CS_TerminateRound',
'CS_GetTranslatedWeaponAlias',
'CS_GetWeaponPrice',
'CS_GetClientClanTag',
'CS_SetClientClanTag',
'LogToGame',
'SetRandomSeed',
'GetRandomFloat',
'GetRandomInt',
'IsMapValid',
'IsDedicatedServer',
'GetEngineTime',
'GetGameTime',
'GetGameTickCount',
'GetGameDescription',
'GetGameFolderName',
'GetCurrentMap',
'PrecacheModel',
'PrecacheSentenceFile',
'PrecacheDecal',
'PrecacheGeneric',
'IsModelPrecached',
'IsDecalPrecached',
'IsGenericPrecached',
'PrecacheSound',
'IsSoundPrecached',
'CreateDialog',
'GuessSDKVersion',
'PrintToChat',
'PrintToChatAll',
'PrintCenterText',
'PrintCenterTextAll',
'PrintHintText',
'PrintHintTextToAll',
'ShowVGUIPanel',
'CreateHudSynchronizer',
'SetHudTextParams',
'SetHudTextParamsEx',
'ShowSyncHudText',
'ClearSyncHud',
'ShowHudText',
'ShowMOTDPanel',
'DisplayAskConnectBox',
'EntIndexToEntRef',
'EntRefToEntIndex',
'MakeCompatEntRef',
'SetClientViewEntity',
'SetLightStyle',
'GetClientEyePosition',
'CreateDataPack',
'WritePackCell',
'WritePackFloat',
'WritePackString',
'ReadPackCell',
'ReadPackFloat',
'ReadPackString',
'ResetPack',
'GetPackPosition',
'SetPackPosition',
'IsPackReadable',
'LogMessage',
'LogMessageEx',
'LogToFile',
'LogToFileEx',
'LogAction',
'LogError',
'OnLogAction',
'GameLogHook',
'AddGameLogHook',
'RemoveGameLogHook',
'FindTeamByName',
'StartPrepSDKCall',
'PrepSDKCall_SetVirtual',
'PrepSDKCall_SetSignature',
'PrepSDKCall_SetFromConf',
'PrepSDKCall_SetReturnInfo',
'PrepSDKCall_AddParameter',
'EndPrepSDKCall',
'SDKCall']
if __name__ == '__main__':
import pprint
import re
import sys
import urllib
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
for i in range(len(sys.path)-1, -1, -1):
if sys.path[i].endswith('/lexers'):
del sys.path[i]
def get_version():
f = urllib.urlopen('http://docs.sourcemod.net/api/index.php')
r = re.compile(r'SourceMod v\.<b>([\d\.]+)</td>')
for line in f:
m = r.search(line)
if m is not None:
return m.groups()[0]
def get_sm_functions():
f = urllib.urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def regenerate(filename, natives):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('FUNCTIONS = [')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('FUNCTIONS = %s\n\n' % pprint.pformat(natives))
f.write(footer)
f.close()
def run():
version = get_version()
print '> Downloading function index for SourceMod %s' % version
functions = get_sm_functions()
print '> %d functions found:' % len(functions)
functionlist = []
for full_function_name in functions:
print '>> %s' % full_function_name
functionlist.append(full_function_name)
regenerate(__file__, functionlist)
run()
|
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009, Jean-Michel Sizun
# Copyright 2009 Frank Scholz <coherence@beebits.net>
import os.path
import time
from twisted.internet import threads
from twisted.web import server, static
from twisted.web.error import PageRedirect
from coherence.upnp.core.utils import ReverseProxyUriResource
from twisted.internet import task
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore, BackendItem, Container, LazyContainer, \
AbstractBackendStore
from coherence import log
from urlparse import urlsplit
import gdata.photos.service
import gdata.media
import gdata.geo
class PicasaProxy(ReverseProxyUriResource):
def __init__(self, uri):
ReverseProxyUriResource.__init__(self, uri)
def render(self, request):
if request.received_headers.has_key('referer'):
del request.received_headers['referer']
return ReverseProxyUriResource.render(self, request)
class PicasaPhotoItem(BackendItem):
def __init__(self, photo):
#print photo
self.photo = photo
self.name = photo.summary.text
if self.name is None:
self.name = photo.title.text
self.duration = None
self.size = None
self.mimetype = photo.content.type
self.description = photo.summary.text
self.date = None
self.item = None
self.photo_url = photo.content.src
self.thumbnail_url = photo.media.thumbnail[0].url
self.url = None
self.location = PicasaProxy(self.photo_url)
def replace_by(self, item):
#print photo
self.photo = item.photo
self.name = self.photo.summary.text
if self.name is None:
self.name = self.photo.title.text
self.mimetype = self.photo.content.type
self.description = self.photo.summary.text
self.photo_url = self.photo.content.src
self.thumbnail_url = self.photo.media.thumbnail[0].url
self.location = PicasaProxy(self.photo_url)
return True
def get_item(self):
if self.item == None:
upnp_id = self.get_id()
upnp_parent_id = self.parent.get_id()
self.item = DIDLLite.Photo(upnp_id,upnp_parent_id,self.name)
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
self.item.res.append(res)
self.item.childCount = 0
return self.item
def get_path(self):
return self.url
def get_id(self):
return self.storage_id
class PicasaStore(AbstractBackendStore):
logCategory = 'picasa_store'
implements = ['MediaServer']
description = ('Picasa Web Albums', 'connects to the Picasa Web Albums service and exposes the featured photos and albums for a given user.', None)
options = [{'option':'name', 'text':'Server Name:', 'type':'string','default':'my media','help': 'the name under which this MediaServer shows up on other UPnP clients'},
{'option':'version','text':'UPnP Version:','type':'int','default':2,'enum': (2,1),'help': 'the highest UPnP version this MediaServer shall support','level':'advance'},
{'option':'uuid','text':'UUID Identifier:','type':'string','help':'the unique (UPnP) identifier for this MediaServer, usually automatically set','level':'advance'},
{'option':'refresh','text':'Refresh period','type':'string'},
{'option':'login','text':'User ID:','type':'string','group':'User Account'},
{'option':'password','text':'Password:','type':'string','group':'User Account'},
]
def __init__(self, server, **kwargs):
AbstractBackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name','Picasa Web Albums')
self.refresh = int(kwargs.get('refresh',60))*60
self.login = kwargs.get('userid',kwargs.get('login',''))
self.password = kwargs.get('password','')
rootContainer = Container(None, self.name)
self.set_root_item(rootContainer)
self.AlbumsContainer = LazyContainer(rootContainer, 'My Albums', None, self.refresh, self.retrieveAlbums)
rootContainer.add_child(self.AlbumsContainer)
self.FeaturedContainer = LazyContainer(rootContainer, 'Featured photos', None, self.refresh, self.retrieveFeaturedPhotos)
rootContainer.add_child(self.FeaturedContainer)
self.init_completed()
def __repr__(self):
return self.__class__.__name__
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:*,'
'http-get:*:image/gif:*,'
'http-get:*:image/png:*',
default=True)
self.wmc_mapping = {'16': self.get_root_id()}
self.gd_client = gdata.photos.service.PhotosService()
self.gd_client.email = self.login
self.gd_client.password = self.password
self.gd_client.source = 'Coherence UPnP backend'
if len(self.login) > 0:
d = threads.deferToThread(self.gd_client.ProgrammaticLogin)
def retrieveAlbums(self, parent=None):
albums = threads.deferToThread(self.gd_client.GetUserFeed)
def gotAlbums(albums):
if albums is None:
print "Unable to retrieve albums"
return
for album in albums.entry:
title = album.title.text
album_id = album.gphoto_id.text
item = LazyContainer(parent, title, album_id, self.refresh, self.retrieveAlbumPhotos, album_id=album_id)
parent.add_child(item, external_id=album_id)
def gotError(error):
print "ERROR: %s" % error
albums.addCallbacks(gotAlbums, gotError)
return albums
def retrieveFeedPhotos (self, parent=None, feed_uri=''):
#print feed_uri
photos = threads.deferToThread(self.gd_client.GetFeed, feed_uri)
def gotPhotos(photos):
if photos is None:
print "Unable to retrieve photos for feed %s" % feed_uri
return
for photo in photos.entry:
photo_id = photo.gphoto_id.text
item = PicasaPhotoItem(photo)
item.parent = parent
parent.add_child(item, external_id=photo_id)
def gotError(error):
print "ERROR: %s" % error
photos.addCallbacks(gotPhotos, gotError)
return photos
def retrieveAlbumPhotos (self, parent=None, album_id=''):
album_feed_uri = '/data/feed/api/user/%s/albumid/%s?kind=photo' % (self.login, album_id)
return self.retrieveFeedPhotos(parent, album_feed_uri)
def retrieveFeaturedPhotos (self, parent=None):
feed_uri = 'http://picasaweb.google.com/data/feed/api/featured'
return self.retrieveFeedPhotos(parent, feed_uri)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_volume
version_added: '2.4'
short_description: Manage volumes on Pure Storage FlashArrays
description:
- Create, delete or extend the capacity of a volume on Pure Storage FlashArray.
author:
- Simon Dodsley (@sdodsley)
options:
name:
description:
- The name of the volume.
required: true
target:
description:
- The name of the target volume, if copying.
state:
description:
- Define whether the volume should exist or not.
default: present
choices: [ absent, present ]
eradicate:
description:
- Define whether to eradicate the volume on delete or leave in trash.
type: bool
default: 'no'
overwrite:
description:
- Define whether to overwrite a target volume if it already exists.
type: bool
default: 'no'
size:
description:
- Volume size in M, G, T or P units.
extends_documentation_fragment:
- purestorage
'''
EXAMPLES = r'''
- name: Create new volume named foo
purefa_volume:
name: foo
size: 1T
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Extend the size of an existing volume named foo
purefa_volume:
name: foo
size: 2T
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Delete and eradicate volume named foo
purefa_volume:
name: foo
eradicate: yes
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
- name: Create clone of volume bar named foo
purefa_volume:
name: foo
target: bar
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Overwrite volume bar with volume foo
purefa_volume:
name: foo
target: bar
overwrite: yes
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
'''
RETURN = r'''
'''
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def human_to_bytes(size):
"""Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
"""
bytes = size[:-1]
unit = size[-1]
if bytes.isdigit():
bytes = int(bytes)
if unit == 'P':
bytes *= 1125899906842624
elif unit == 'T':
bytes *= 1099511627776
elif unit == 'G':
bytes *= 1073741824
elif unit == 'M':
bytes *= 1048576
else:
bytes = 0
else:
bytes = 0
return bytes
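# A minimal sketch of human_to_bytes() behaviour, assuming the unit suffixes
# handled above (P/T/G/M); anything else falls through to 0:
#   human_to_bytes('1T')  -> 1099511627776
#   human_to_bytes('2G')  -> 2147483648
#   human_to_bytes('512') -> 0   (no recognized unit suffix)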
def get_volume(module, array):
"""Return Volume or None"""
try:
return array.get_volume(module.params['name'])
except:
return None
def get_target(module, array):
"""Return Volume or None"""
try:
return array.get_volume(module.params['target'])
except:
return None
def create_volume(module, array):
"""Create Volume"""
size = module.params['size']
if not module.check_mode:
array.create_volume(module.params['name'], size)
module.exit_json(changed=True)
def copy_from_volume(module, array):
"""Create Volume Clone"""
changed = False
tgt = get_target(module, array)
if tgt is None:
changed = True
if not module.check_mode:
array.copy_volume(module.params['name'],
module.params['target'])
elif tgt is not None and module.params['overwrite']:
changed = True
if not module.check_mode:
array.copy_volume(module.params['name'],
module.params['target'],
overwrite=module.params['overwrite'])
module.exit_json(changed=changed)
def update_volume(module, array):
"""Update Volume"""
changed = True
vol = array.get_volume(module.params['name'])
if human_to_bytes(module.params['size']) > vol['size']:
if not module.check_mode:
array.extend_volume(module.params['name'], module.params['size'])
else:
changed = False
module.exit_json(changed=changed)
def delete_volume(module, array):
""" Delete Volume"""
if not module.check_mode:
array.destroy_volume(module.params['name'])
if module.params['eradicate']:
array.eradicate_volume(module.params['name'])
module.exit_json(changed=True)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
target=dict(type='str'),
overwrite=dict(type='bool', default=False),
eradicate=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
size=dict(type='str'),
))
mutually_exclusive = [['size', 'target']]
module = AnsibleModule(argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
if not HAS_PURESTORAGE:
module.fail_json(msg='purestorage sdk is required for this module in volume')
size = module.params['size']
state = module.params['state']
array = get_system(module)
volume = get_volume(module, array)
target = get_target(module, array)
if state == 'present' and not volume and size:
create_volume(module, array)
elif state == 'present' and volume and size:
update_volume(module, array)
elif state == 'present' and volume and target:
copy_from_volume(module, array)
elif state == 'present' and volume and not target:
copy_from_volume(module, array)
elif state == 'absent' and volume:
delete_volume(module, array)
elif state == 'present' and not volume or not size:
module.exit_json(changed=False)
elif state == 'absent' and not volume:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017, ColoredInsaneAsylums
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# DETAILS:
# File Name: derivatives.py
# Description: This file contains source code for the core functionality of deriving files from the original file.
#
# Creator: Milind Siddhanti (milindsiddhanti at utexas dot edu)
#
# IMPORT NEEDED MODULES
import csv
import sys
import re
import os
from datetime import datetime
from time import localtime, time, strftime
from subprocess import PIPE, Popen
from metadatautilspkg.globalvars import *
from metadatautilspkg.errorcodes import *
from metadatautilspkg.dbfunctions import *
from metadatautilspkg.premis import *
from metadatautilspkg.metadatautils import *
def main():
# Verify whether ImageMagick is installed in the system or not. Throws error if ImageMagick is not installed.
output, error, exitcode = runCmd('identify -version')
imgversion = output.decode('utf-8').split('\n')
# remove null values from the list
while '' in imgversion:
imgversion.remove('')
imgver = []
for str in imgversion:
if ': ' in str:
imgver.append(str)
else:
globalvars.derivativeErrorList.append([errorcodes.ERROR_INSTALL_IMAGEMAGICK["message"]])
print_error(errorcodes.ERROR_INSTALL_IMAGEMAGICK["message"])
errorCSV()
exit(errorcodes.ERROR_INSTALL_IMAGEMAGICK["code"])
version = {}
for item in imgver:
key, value = item.split(": ")
key = key.strip(" ")
value = value.strip(" ")
version[key] = value
if 'Version' in version:
ver = version['Version']
if 'ImageMagick' not in ver:
globalvars.derivativeErrorList.append([errorcodes.ERROR_INSTALL_IMAGEMAGICK["message"]])
print_error(errorcodes.ERROR_INSTALL_IMAGEMAGICK["message"])
errorCSV()
exit(errorcodes.ERROR_INSTALL_IMAGEMAGICK["code"])
else:
globalvars.derivativeErrorList.append([errorcodes.ERROR_INSTALL_IMAGEMAGICK["message"]])
print_error(errorcodes.ERROR_INSTALL_IMAGEMAGICK["message"])
errorCSV()
exit(errorcodes.ERROR_INSTALL_IMAGEMAGICK["code"])
argParser = defineCommandLineOptions()
parseCommandLineArgs(argParser, sys.argv[1:])
print_info("quiet mode: ", globalvars.quietMode)
if globalvars.batchMode == True: # Batch mode. Read and validate CSV file.
# Read CSV file contents into globalvars.technicalList.
try:
# Open the CSV file in read-only mode.
csvFileHandle = open (globalvars.csvFile, "r")
except IOError as ioErrorCsvRead:
print_error(ioErrorCsvRead)
print_error(errorcodes.ERROR_CANNOT_OPEN_CSV_FILE["message"])
exit(errorcodes.ERROR_CANNOT_OPEN_CSV_FILE["code"])
# CSV file successfully opened.
csvReader = csv.reader(csvFileHandle) # Create an iterable object from the
# CSV file using csv.reader().
# Extract the first row to check if it is a header.
firstRow = next(csvReader, None)
print_info("Checking the header row. Header: {}".format(firstRow))
if len(firstRow) == 0: # This also serves as a check for an empty CSV file
print_error(errorcodes.ERROR_INVALID_HEADER_ROW["message"])
globalvars.derivativeErrorList.append([errorcodes.ERROR_INVALID_HEADER_ROW["message"]])
errorCSV()
exit(errorcodes.ERROR_INVALID_HEADER_ROW["code"])
# This for loop reads and checks the format (i.e., presence of at least two
# columns per row) of the CSV file, and populates 'globalvars.derivativeList'
rowNum = 1
for row in csvReader:
globalvars.derivativeList.append(row)
rowNum += 1
csvFileHandle.close() # Close the CSV file as it will not be needed from this point on.
print_info("Number of folder path(s) read from the CSV: {}".format(len(globalvars.derivativeList)))
# READ-IN THE LABEL DICTIONARY
globalvars.labels = readLabelDictionary()
print_info("The following labels will be used for labeling metadata items in the database records:")
print_info(globalvars.labels)
# READ-IN THE CONTROLLED VOCABULARY
globalvars.vocab = readControlledVocabulary()
# CREATE DATABASE CONNECTION
dbParams = init_db() # TODO: there needs to be a check to determine if the
# database connection was successful or not.
globalvars.dbHandle = dbParams["handle"]
globalvars.dbCollection = dbParams["collection_name"]
# PROCESS ALL RECORDS
for row in globalvars.derivativeList:
filePath = row[0]
print_info("filepath Info Data: {}".format(filePath))
if os.path.isdir(filePath) != True:
globalvars.derivativeErrorList.append([errorcodes.ERROR_CANNOT_FIND_DIRECTORY["message"].format(filePath)])
print_error(errorcodes.ERROR_CANNOT_FIND_DIRECTORY["message"].format(filePath))
errorCSV()
exit(errorcodes.ERROR_CANNOT_FIND_DIRECTORY["code"])
else:
derivativeFile = derivativeRecord(filePath)
def errorCSV():
# WRITE ALL ROWS THAT COULD NOT BE PROCESSED TO A CSV FILE
if len(globalvars.derivativeErrorList) > 0:
errorsCSVFileName = ("derivative_profile_errors_" + strftime("%Y-%m-%d_%H%M%S", localtime(time())) + ".csv")
try:
errorsCSVFileHandle = open(errorsCSVFileName, 'w')
except IOError as ioErrorCsvWrite:
print_error(ioErrorCsvWrite)
print_error(errorcodes.ERROR_CANNOT_WRITE_CSV_FILE["message"])
exit (errorcodes.ERROR_CANNOT_WRITE_CSV_FILE["code"])
csvWriter = csv.writer(errorsCSVFileHandle, delimiter=',', quotechar='"', lineterminator='\n')
for row in globalvars.derivativeErrorList:
csvWriter.writerow(row)
errorsCSVFileHandle.close()
print_error("Errors were encountered and have been written to the following file: {}.".format(errorsCSVFileName))
def defineCommandLineOptions():
#PARSE AND VALIDATE COMMAND-LINE OPTIONS
argParser = argparse.ArgumentParser(description="Migrate Files for Preservation")
argParser.add_argument('-f', '--file', nargs=1, default=False, metavar='CSVPATH', help='CSVPATH is the path to the CSV file to be used with the -f option.')
argParser.add_argument('-q', '--quiet', action='store_true', help='Enable this option to suppress all logging, except critical error messages.')
argParser.add_argument('-s', '--sourcefiletype', nargs=1, default=False, metavar='SOURCEFILETYPE', help='SOURCEFILETYPE is the file extension of the original files from which derivatives are created.')
argParser.add_argument('-c', '--destfiletype', nargs=1, default=False, metavar='DESTFILETYPE', help='DESTFILETYPE is the file extension of the derivative files to be created.')
argParser.add_argument('-r', '--resize', nargs=1, default=False, metavar='RESIZEDIM', help='RESIZEDIM is the pixel dimension used to resize the derivative images.')
return argParser
def parseCommandLineArgs(argParser, args):
parsedArgs = argParser.parse_args(args)
if len(args) == 0:
print_error(errorcodes.ERROR_INVALID_ARGUMENT_STRING["message"])
argParser.print_help()
exit(errorcodes.ERROR_INVALID_ARGUMENT_STRING["code"])
globalvars.quietMode = parsedArgs.quiet
if parsedArgs.file:
globalvars.batchMode = True
globalvars.csvFile = parsedArgs.file[0]
else:
print_error(errorcodes.ERROR_FILE_ARGUMENT["message"])
globalvars.derivativeErrorList.append([errorcodes.ERROR_FILE_ARGUMENT["message"]])
errorCSV()
exit(errorcodes.ERROR_FILE_ARGUMENT["code"])
if parsedArgs.sourcefiletype:
globalvars.sourcefiletype = parsedArgs.sourcefiletype[0]
else:
print_error(errorcodes.ERROR_SOURCETYPE["message"])
globalvars.derivativeErrorList.append([errorcodes.ERROR_SOURCETYPE["message"]])
errorCSV()
exit(errorcodes.ERROR_SOURCETYPE["code"])
if parsedArgs.destfiletype:
globalvars.destfiletype = parsedArgs.destfiletype[0]
else:
globalvars.destfiletype = globalvars.sourcefiletype
if parsedArgs.resize:
globalvars.resize = parsedArgs.resize[0]
if((globalvars.destfiletype == "") and (globalvars.resize == "")):
print_error(errorcodes.ERROR_DESTTYPE_RESIZE["message"])
globalvars.derivativeErrorList.append([errorcodes.ERROR_DESTTYPE_RESIZE["message"]])
errorCSV()
exit(errorcodes.ERROR_DESTTYPE_RESIZE["code"])
def runCmd(cmd):
"""
This method runs a command and returns a list
with the contents of its stdout and stderr and
the exit code of the command.
"""
shell_cmd = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(handleChildStdin,handleChildStdout,handleChildStderr) = (shell_cmd.stdin, shell_cmd.stdout, shell_cmd.stderr)
childStdout = handleChildStdout.read()
childStderr = handleChildStderr.read()
shell_cmd.wait()
return [childStdout, childStderr, shell_cmd.returncode]
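# Hedged usage sketch for runCmd(): the command string is run through a shell and
# the result is [stdout_bytes, stderr_bytes, exit_code], e.g.
#   output, error, exitcode = runCmd('identify -version')
#   # output is a bytes object, so decode it before splitting, as main() does.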
def derivativeRecord(filePath):
if((globalvars.destfiletype == "")):
print("Source filetype '{}' and re-dimension value '{}' as given in the input command."
.format(globalvars.sourcefiletype, globalvars.resize))
elif((globalvars.resize == "")):
print("Source filetype '{}' and destination filetype '{}' as given in the input command."
.format(globalvars.sourcefiletype, globalvars.destfiletype))
else:
print("Source filetype '{}', destination filetype '{}' and re-dimension value '{}' as given in the input command."
.format(globalvars.sourcefiletype, globalvars.destfiletype, globalvars.resize))
for path, subdirs, files in os.walk(filePath):
for name in files:
derRes = ""
queryName = name.split(".")[0]
derFileName = "_".join([queryName, globalvars.resize])
derFileNameExt = ".".join([derFileName, globalvars.destfiletype])
if derFileNameExt in files:
print_error(errorcodes.ERROR_FILE_EXISTS["message"])
globalvars.derivativeErrorList.append([errorcodes.ERROR_FILE_EXISTS["message"].format(derFileNameExt)])
errorCSV()
exit(errorcodes.ERROR_FILE_EXISTS["code"])
else:
records = globalvars.dbHandle[globalvars.dbCollection].find({"_id": queryName})
records = [record for record in records]
if(len(records) > 0):
for document in records:
if "technical" in document:
xRes = document['technical']['image']['xResolution']
yRes = document['technical']['image']['yResolution']
width = document['technical']['image']['width']
height = document['technical']['image']['length']
if(xRes >= yRes):
derRes = "x".join([globalvars.resize, yRes])
else:
derRes = "x".join([xRes, globalvars.resize])
fullPath = os.path.sep.join([os.path.abspath(filePath), name])
derivedFilePath = os.path.sep.join([os.path.abspath(filePath), derFileNameExt])
# execute the command "convert <original_filePath> -resize 64x64 <derived_filePath>" to generate derivative image.
commandInput = " ".join(['convert', fullPath, '-resize', derRes, derivedFilePath])
output, error, exitcode = runCmd(commandInput)
migration = createMigrationEvent(globalvars.destfiletype, derRes, width, height, derFileNameExt)
print_info("The following record has been initialized for the file: '{}': {}".format(derFileNameExt, migration))
document['premis']['eventList'].append(migration)
dbUpdatePremisProfile = updateRecordInDB(queryName, document)
if __name__ == "__main__":
main()
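# Hypothetical invocation, assuming a CSV of folder paths and the option names
# defined in defineCommandLineOptions() above (-f/--file, -s/--sourcefiletype,
# -c/--destfiletype, -r/--resize):
#   python derivatives.py -f folders.csv -s tif -c jpg -r 1024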
|
|
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from DistributedMinigame import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.safezone import Walk
from toontown.toonbase import ToontownTimer
from direct.gui import OnscreenText
import MinigameAvatarScorePanel
from direct.distributed import DistributedSmoothNode
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
import TagGameGlobals
import Trajectory
class DistributedTagGame(DistributedMinigame):
DURATION = TagGameGlobals.DURATION
IT_SPEED_INCREASE = 1.3
IT_ROT_INCREASE = 1.3
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTagGame', [State.State('off', self.enterOff, self.exitOff, ['play']), State.State('play', self.enterPlay, self.exitPlay, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.walkStateData = Walk.Walk('walkDone')
self.scorePanels = []
self.initialPositions = ((0, 10, 0, 180, 0, 0),
(10, 0, 0, 90, 0, 0),
(0, -10, 0, 0, 0, 0),
(-10, 0, 0, -90, 0, 0))
base.localAvatar.isIt = 0
self.modelCount = 4
def getTitle(self):
return TTLocalizer.TagGameTitle
def getInstructions(self):
return TTLocalizer.TagGameInstructions
def getMaxDuration(self):
return self.DURATION
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.itText = OnscreenText.OnscreenText('itText', fg=(0.95, 0.95, 0.65, 1), scale=0.14, font=ToontownGlobals.getSignFont(), pos=(0.0, -0.8), wordwrap=15, mayChange=1)
self.itText.hide()
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.ground = loader.loadModel('phase_4/models/minigames/tag_arena')
self.music = base.loadMusic('phase_4/audio/bgm/MG_toontag.ogg')
self.tagSfx = base.loadSfx('phase_4/audio/sfx/MG_Tag_C.ogg')
self.itPointer = loader.loadModel('phase_4/models/minigames/bboard-pointer')
self.tracks = []
self.IT = None
return
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
self.ignoreAll()
del self.tracks
del self.IT
self.sky.removeNode()
del self.sky
self.itPointer.removeNode()
del self.itPointer
self.ground.removeNode()
del self.ground
del self.music
del self.tagSfx
self.itText.cleanup()
del self.itText
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.onstage(self)
self.ground.reparentTo(render)
self.sky.reparentTo(render)
myPos = self.avIdList.index(self.localAvId)
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.reparentTo(render)
base.localAvatar.loop('neutral')
camera.reparentTo(render)
camera.setPosHpr(0, -24, 16, 0, -30, 0)
base.camLens.setFar(450.0)
base.transitions.irisIn(0.4)
NametagGlobals.setWant2dNametags(True)
DistributedSmoothNode.activateSmoothing(1, 1)
self.IT = None
return
def offstage(self):
self.notify.debug('offstage')
DistributedSmoothNode.activateSmoothing(1, 0)
NametagGlobals.setWant2dNametags(False)
DistributedMinigame.offstage(self)
self.sky.reparentTo(hidden)
self.ground.reparentTo(hidden)
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
self.itText.hide()
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for avId in self.avIdList:
self.acceptTagEvent(avId)
myPos = self.avIdList.index(self.localAvId)
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avatar = self.getAvatar(avId)
if avatar:
avatar.startSmooth()
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.d_clearSmoothing()
base.localAvatar.sendCurrentPosition()
base.localAvatar.b_setAnimState('neutral', 1)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.gameFSM.request('play')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterPlay(self):
self.notify.debug('enterPlay')
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avName = self.getAvatarName(avId)
scorePanel = MinigameAvatarScorePanel.MinigameAvatarScorePanel(avId, avName)
scorePanel.setPos(-0.213, 0.0, 0.28 * i + 0.66)
scorePanel.reparentTo(base.a2dBottomRight)
self.scorePanels.append(scorePanel)
base.setCellsActive(base.rightCells, 0)
self.walkStateData.enter()
self.walkStateData.fsm.request('walking')
if base.localAvatar.isIt:
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.setTime(self.DURATION)
self.timer.countdown(self.DURATION, self.timerExpired)
base.playMusic(self.music, looping=1, volume=0.9)
base.localAvatar.setIdealCameraPos(Point3(0, -24, 8))
def exitPlay(self):
for task in self.tracks:
task.finish()
self.tracks = []
for avId in self.avIdList:
toon = self.getAvatar(avId)
if toon:
toon.getGeomNode().clearMat()
toon.scale = 1.0
toon.rescaleToon()
self.walkStateData.exit()
self.music.stop()
self.timer.destroy()
del self.timer
for panel in self.scorePanels:
panel.cleanup()
self.scorePanels = []
base.setCellsActive(base.rightCells, 1)
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed)
self.itPointer.reparentTo(hidden)
base.localAvatar.cameraIndex = 0
base.localAvatar.setCameraPositionByIndex(0)
def timerExpired(self):
self.notify.debug('local timer expired')
self.gameOver()
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def setIt(self, avId):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() != 'play':
self.notify.debug('Ignoring setIt after done playing')
return
self.itText.show()
self.notify.debug(str(avId) + ' is now it')
if avId == self.localAvId:
self.itText.setText(TTLocalizer.TagGameYouAreIt)
base.localAvatar.isIt = 1
base.localAvatar.controlManager.setSpeeds(OTPGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonJumpForce, OTPGlobals.ToonReverseSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
else:
self.itText.setText(TTLocalizer.TagGameSomeoneElseIsIt % self.getAvatarName(avId))
base.localAvatar.isIt = 0
base.localAvatar.setWalkSpeedNormal()
avatar = self.getAvatar(avId)
if avatar:
self.itPointer.reparentTo(avatar)
self.itPointer.setZ(avatar.getHeight())
base.playSfx(self.tagSfx)
toon = self.getAvatar(avId)
duration = 0.6
if not toon:
return
spinTrack = LerpHprInterval(toon.getGeomNode(), duration, Point3(0, 0, 0), startHpr=Point3(-5.0 * 360.0, 0, 0), blendType='easeOut')
growTrack = Parallel()
gs = 2.5
for hi in xrange(toon.headParts.getNumPaths()):
head = toon.headParts[hi]
growTrack.append(LerpScaleInterval(head, duration, Point3(gs, gs, gs)))
def bounceFunc(t, trajectory, node = toon.getGeomNode()):
node.setZ(trajectory.calcZ(t))
def bounceCleanupFunc(node = toon.getGeomNode(), z = toon.getGeomNode().getZ()):
node.setZ(z)
bounceTrack = Sequence()
startZ = toon.getGeomNode().getZ()
tLen = 0
zVel = 30
decay = 0.6
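# Chain successive parabolic hops: each Trajectory launches from startZ with the
# current zVel, runs until calcTimeOfImpactOnPlane() says it lands back on the
# startZ plane, and each later hop's launch velocity is damped by 'decay'.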
while tLen < duration:
trajectory = Trajectory.Trajectory(0, Point3(0, 0, startZ), Point3(0, 0, zVel), gravMult=5.0)
dur = trajectory.calcTimeOfImpactOnPlane(startZ)
if dur <= 0:
break
bounceTrack.append(LerpFunctionInterval(bounceFunc, fromData=0.0, toData=dur, duration=dur, extraArgs=[trajectory]))
tLen += dur
zVel *= decay
bounceTrack.append(Func(bounceCleanupFunc))
tagTrack = Sequence(Func(toon.animFSM.request, 'off'), Parallel(spinTrack, growTrack, bounceTrack), Func(toon.animFSM.request, 'Happy'))
self.tracks.append(tagTrack)
tagTrack.start()
if self.IT:
it = self.getAvatar(self.IT)
shrinkTrack = Parallel()
for hi in xrange(it.headParts.getNumPaths()):
head = it.headParts[hi]
scale = ToontownGlobals.toonHeadScales[it.style.getAnimal()]
shrinkTrack.append(LerpScaleInterval(head, duration, scale))
self.tracks.append(shrinkTrack)
shrinkTrack.start()
self.IT = avId
def acceptTagEvent(self, avId):
self.accept('enterdistAvatarCollNode-' + str(avId), self.sendTagIfIt, [avId])
def sendTagIfIt(self, avId, collisionEntry):
if base.localAvatar.isIt:
self.notify.debug('Tagging ' + str(avId))
self.sendUpdate('tag', [avId])
else:
self.notify.debug('Bumped ' + str(avId))
def setTreasureScore(self, scores):
if not self.hasLocalToon:
return
self.notify.debug('setTreasureScore: %s' % scores)
for i in xrange(len(self.scorePanels)):
self.scorePanels[i].setScore(scores[i])
|
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import base64
import binascii
import cgi
import collections
import html
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import (
RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent,
)
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser:
"""
A rfc2388 multipart/form-data parser.
``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
# Content-Type should contain multipart and the boundary information.
content_type = META.get('CONTENT_TYPE', '')
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
try:
ctypes, opts = parse_header(content_type.encode('ascii'))
except UnicodeEncodeError:
raise MultiPartParserError('Invalid non-ASCII Content-Type in multipart: %s' % force_str(content_type))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % force_str(boundary))
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, str):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Return a tuple containing the POST and FILES dictionary, respectively.
"""
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(
self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding,
)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict(mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
# Number of bytes that have been read.
num_bytes_read = 0
# To count the number of keys in the request.
num_post_keys = 0
# To limit the amount of data read from the request.
read_size = None
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_str(field_name, encoding, errors='replace')
if item_type == FIELD:
# Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
num_post_keys += 1
if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
# Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read(size=read_size)
num_bytes_read += len(raw_data)
try:
data = base64.b64decode(raw_data)
except binascii.Error:
data = raw_data
else:
data = field_stream.read(size=read_size)
num_bytes_read += len(data)
# Add two here to make the check consistent with the
# x-www-form-urlencoded check that includes '&='.
num_bytes_read += len(field_name) + 2
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
self._post.appendlist(field_name, force_str(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if file_name:
file_name = force_str(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(html.unescape(file_name))
if not file_name:
continue
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(
field_name, file_name, content_type,
content_length, charset, content_type_extra,
)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always decode base64 chunks by multiple of 4,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as exc:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data.") from exc
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[i])
counters[i] += chunk_length
if chunk is None:
# Don't continue if the chunk received by
# the handler is None.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
# any() shortcircuits if a handler's upload_complete() returns a value.
any(handler.upload_complete() for handler in handlers)
self._post._mutable = False
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_str(old_field_name, self._encoding, errors='replace'), file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
class LazyStream:
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
try:
chunk = next(self)
except StopIteration:
return
else:
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
return b''.join(parts())
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
Return whatever chunk is conveniently returned from the iterator.
Useful to avoid unnecessary bookkeeping if performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replace the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Place bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = bytes + self._leftover
def _update_unget_history(self, num_bytes):
"""
Update the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([
current_number for current_number in self._unget_history
if current_number == num_bytes
])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
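# A small sketch of the LazyStream read()/unget() contract described above, with a
# hypothetical producer (not part of the parser's own flow):
#   stream = LazyStream(iter([b'hello world']))
#   stream.read(5)          -> b'hello'   (the unread b' world' is stashed)
#   stream.unget(b'hello')
#   stream.read()           -> b'hello world'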
class ChunkIter:
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, yield chunks of read operations from that object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter:
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter:
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data):
"""
Find a multipart boundary in data.
Should no boundary exist in the data, return None. Otherwise, return
a tuple containing the indices of the following:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
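# Worked example for _find_boundary(), assuming boundary b'--sep':
#   data = b'field-data\r\n--sep\r\nnext part'
#   b'--sep' starts at index 12, so next = 17; the preceding CRLF is backed over,
#   giving end = 10 -- the caller keeps data[:10] (b'field-data') and ungets
#   data[17:] (b'\r\nnext part').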
def exhaust(stream_or_iterable):
"""Exhaust an iterator or stream."""
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
collections.deque(iterator, maxlen=0) # consume iterator quickly.
def parse_boundary_stream(stream, max_header_size):
"""
Parse one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser:
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
"""
Parse the header into a key-value.
Input (line): bytes, output: str for key/name, bytes for values which
will be decoded later.
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count(b"'") == 2:
has_encoding = True
value = p[i + 1:].strip()
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
if has_encoding:
encoding, lang, value = value.split(b"'")
value = unquote(value.decode(), encoding=encoding.decode())
pdict[name] = value
return key, pdict
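# Sketch of parse_header() on a typical part header line (hypothetical input):
#   parse_header(b'content-disposition: form-data; name="file"; filename="a.txt"')
#   -> ('content-disposition: form-data', {'name': b'file', 'filename': b'a.txt'})
# The _parse_header() helper in parse_boundary_stream() then splits the key on ':'.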
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
|
'''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.shortcuts import render, redirect
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from django.core.servers.basehttp import FileWrapper
from django.utils import timezone
from django.contrib import messages
from utils.render_helper import render_index
from utils.user_info import validate_user, has_problem_auth, has_problem_ownership
from users.models import User
from problem.models import Problem, Tag, Testcase
from problem.forms import ProblemForm, TagForm, TagFilter
from utils import log_info, config_info
from problem.problem_info import *
from utils import log_info
from utils.render_helper import render_index, get_current_page
from utils.rejudge import rejudge_problem
import os
import json
logger = log_info.get_logger()
# Create your views here.
def problem(request):
user = validate_user(request.user)
can_add_problem = user.has_subjudge_auth()
filter_type = request.GET.get('filter')
tag_filter = TagFilter(request.GET)
if tag_filter.is_valid():
tag_name = tag_filter.cleaned_data['tag_name']
if filter_type == 'mine':
problem_list = get_owner_problem_list(user)
mine = True
else:
problem_list = get_problem_list(user)
mine = False
if tag_name:
problem_list = problem_list.filter(tags__tag_name=tag_name)
for p in problem_list:
p.in_contest = check_in_contest(p)
problems = get_current_page(request, problem_list, 15)
for p in problems:
if p.total_submission != 0:
p.pass_rate = float(p.ac_count) / float(p.total_submission) * 100.0
p.not_pass_rate = 100.0 - p.pass_rate
p.pass_rate = "%.2f" % (p.pass_rate)
p.not_pass_rate = "%.2f" % (p.not_pass_rate)
else:
p.no_submission = True
else:
problems = []
mine = False
return render_index(request, 'problem/panel.html',
{'all_problem': problems, 'mine': mine,
'can_add_problem': can_add_problem, 'tag_filter': tag_filter})
def detail(request, pid):
user = validate_user(request.user)
tag_form = TagForm()
try:
problem = Problem.objects.get(pk=pid)
if not has_problem_auth(user, problem):
logger.warning("%s has no permission to see problem %d" % (user, problem.pk))
raise PermissionDenied()
except Problem.DoesNotExist:
logger.warning('problem %s not found' % (pid))
raise Http404('problem %s does not exist' % (pid))
problem.testcase = get_testcase(problem)
problem = verify_problem_code(problem)
return render_index(request, 'problem/detail.html', {'problem': problem, 'tag_form': tag_form})
@login_required
def new(request):
if request.method == "POST":
if 'pname' in request.POST and request.POST['pname'].strip() != "":
p = Problem(pname=request.POST['pname'], owner=request.user)
p.save()
logger.info("problem %s created by %s" % (p.pk, request.user))
return redirect("/problem/%d/edit/" % p.pk)
return redirect("/problem/")
@login_required
def edit(request, pid=None):
tag_form = TagForm()
try:
problem = Problem.objects.get(pk=pid)
if not request.user.has_admin_auth() and request.user != problem.owner:
logger.warning("user %s has no permission to edit problem %s" % (request.user, pid))
raise PermissionDenied()
except Problem.DoesNotExist:
logger.warning("problem %s does not exist" % (pid))
raise Http404("problem %s does not exist" % (pid))
testcase = get_testcase(problem)
tags = problem.tags.all()
if request.method == 'GET':
form = ProblemForm(instance=problem)
if request.method == 'POST':
form = ProblemForm(request.POST, request.FILES, instance=problem)
if form.is_valid():
problem = form.save()
problem.sample_in = request.POST['sample_in']
problem.sample_out = request.POST['sample_out']
problem.save()
file_ex = get_problem_file_extension(problem)
if "special_judge_code" in request.FILES:
with open('%s%s%s' % (SPECIAL_PATH, problem.pk, file_ex), 'w') as t_in:
for chunk in request.FILES['special_judge_code'].chunks():
t_in.write(chunk)
if "partial_judge_code" in request.FILES:
with open('%s%s%s' % (PARTIAL_PATH, problem.pk, file_ex), 'w') as t_in:
for chunk in request.FILES['partial_judge_code'].chunks():
t_in.write(chunk)
if "partial_judge_header" in request.FILES:
with open('%s%s.h' % (PARTIAL_PATH, problem.pk), 'w') as t_in:
for chunk in request.FILES['partial_judge_header'].chunks():
t_in.write(chunk)
logger.info('edit problem, pid = %d by %s' % (problem.pk, request.user))
messages.success(request, 'problem %d edited' % problem.pk)
return redirect('/problem/%d' % (problem.pk))
file_ex = get_problem_file_extension(problem)
problem = verify_problem_code(problem)
return render_index(request, 'problem/edit.html',
{'form': form, 'problem': problem,
'tags': tags, 'tag_form': tag_form,
'testcase': testcase,
'path': {
'TESTCASE_PATH': TESTCASE_PATH,
'SPECIAL_PATH': SPECIAL_PATH,
'PARTIAL_PATH': PARTIAL_PATH, }
})
@login_required
def tag(request, pid):
if request.method == "POST":
tag = request.POST['tag_name']
try:
problem = Problem.objects.get(pk=pid)
except Problem.DoesNotExist:
logger.warning("problem %s does not exist" % (pid))
raise Http404("problem %s does not exist" % (pid))
if not problem.tags.filter(tag_name=tag).exists():
new_tag, created = Tag.objects.get_or_create(tag_name=tag)
problem.tags.add(new_tag)
problem.save()
logger.info("add new tag '%s' to problem %s by %s" % (tag, pid, request.user))
return HttpResponse(json.dumps({'tag_id': new_tag.pk}),
content_type="application/json")
return HttpResponseBadRequest()
return HttpResponse()
@login_required
def delete_tag(request, pid, tag_id):
try:
problem = Problem.objects.get(pk=pid)
tag = Tag.objects.get(pk=tag_id)
except Problem.DoesNotExist:
logger.warning("problem %s does not exist" % (pid))
raise Http404("problem %s does not exist" % (pid))
except Tag.DoesNotExist:
logger.warning("tag %s does not exist" % (tag_id))
raise Http404("tag %s does not exist" % (tag_id))
if not request.user.has_admin_auth() and request.user != problem.owner:
raise PermissionDenied()
logger.info("tag %s deleted by %s" % (tag.tag_name, request.user))
problem.tags.remove(tag)
return HttpResponse()
@login_required
def testcase(request, pid, tid=None):
if request.method == 'POST':
try:
problem = Problem.objects.get(pk=pid)
except Problem.DoesNotExist:
logger.warning("problem %s does not exist" % (pid))
raise Http404("problem %s does not exist" % (pid))
if tid == None:
testcase = Testcase()
testcase.problem = problem
else:
try:
testcase = Testcase.objects.get(pk=tid)
except Testcase.DoesNotExist:
logger.warning("testcase %s does not exist" % (tid))
raise Http404("testcase %s does not exist" % (tid))
if testcase.problem != problem:
logger.warning("testcase %s does not belong to problem %s" % (tid, pid))
raise Http404("testcase %s does not belong to problem %s" % (tid, pid))
has_message = False
if 'time_limit' in request.POST:
testcase.time_limit = request.POST['time_limit']
testcase.memory_limit = request.POST['memory_limit']
testcase.save()
logger.info("testcase saved, tid = %s by %s" % (testcase.pk, request.user))
messages.success(request, "testcase %s saved" % testcase.pk)
has_message = True
if 't_in' in request.FILES:
TESTCASE_PATH = config_info.get_config('path', 'testcase_path')
try:
with open('%s%s.in' % (TESTCASE_PATH, testcase.pk), 'w') as t_in:
for chunk in request.FILES['t_in'].chunks():
t_in.write(chunk.replace('\r\n', '\n'))
logger.info("testcase %s.in saved by %s" % (testcase.pk, request.user))
with open('%s%s.out' % (TESTCASE_PATH, testcase.pk), 'w') as t_out:
for chunk in request.FILES['t_out'].chunks():
t_out.write(chunk.replace('\r\n', '\n'))
logger.info("testcase %s.out saved by %s" % (testcase.pk, request.user))
if not has_message:
messages.success(request, "testcase %s saved" % testcase.pk)
except (IOError, OSError):
logger.error("saving testcase error")
return HttpResponse(json.dumps({'tid': testcase.pk}),
content_type="application/json")
return HttpResponse()
@login_required
def delete_testcase(request, pid, tid):
try:
problem = Problem.objects.get(pk=pid)
testcase = Testcase.objects.get(pk=tid)
except Problem.DoesNotExist:
logger.warning("problem %s does not exist" % (pid))
raise Http404("problem %s does not exist" % (pid))
except Testcase.DoesNotExist:
logger.warning("testcase %s does not exist" % (tid))
raise Http404("testcase %s does not exist" % (tid))
if not request.user.has_admin_auth() and request.user != problem.owner:
raise PermissionDenied
logger.info("testcase %d deleted" % (testcase.pk))
try:
os.remove('%s%d.in' % (TESTCASE_PATH, testcase.pk))
os.remove('%s%d.out' % (TESTCASE_PATH, testcase.pk))
except (IOError, OSError):
logger.error("remove testcase %s error" % (testcase.pk))
logger.info("testcase %d deleted by %s" % (testcase.pk, request.user))
messages.success(request, "testcase %s deleted" % testcase.pk)
testcase.delete()
return HttpResponse()
@login_required
def delete_problem(request, pid):
try:
problem = Problem.objects.get(pk=pid)
except Problem.DoesNotExist:
logger.warning("problem %s does not exist" % (pid))
raise Http404("problem %s does not exist" % (pid))
if not request.user.has_admin_auth() and request.user != problem.owner:
raise PermissionDenied
logger.info("problem %d deleted by %s" % (problem.pk, request.user))
messages.success(request, "problem %d deleted" % problem.pk)
problem.delete()
return redirect('/problem/')
def preview(request):
problem = Problem()
problem.pname = request.POST['pname']
problem.description = request.POST['description']
problem.input = request.POST['input']
problem.output = request.POST['output']
problem.sample_in = request.POST['sample_in']
problem.sample_out = request.POST['sample_out']
problem.tag = request.POST['tags'].split(',')
return render_index(request, 'problem/preview.html', {'problem': problem, 'preview': True})
def download_testcase(request, filename):
try:
f = open(TESTCASE_PATH+filename, "r")
except IOError:
raise Http404()
response = HttpResponse(FileWrapper(f), content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=' + filename
return response
def download_partial(request, filename):
try:
f = open(PARTIAL_PATH+filename, "r")
except IOError:
raise Http404()
response = HttpResponse(FileWrapper(f), content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=' + filename
return response
def download_special(request, filename):
try:
f = open(SPECIAL_PATH+filename, "r")
except IOError:
raise Http404()
response = HttpResponse(FileWrapper(f), content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=' + filename
return response
@login_required
def rejudge(request):
pid = request.GET.get('pid')
if pid:
try:
problem = Problem.objects.get(pk=pid)
if not has_problem_ownership(request.user, problem) and \
not request.user.has_admin_auth():
logger.warning("%s has no permission to rejudge problem %d"
% (request.user, problem.pk))
raise PermissionDenied()
rejudge_problem(problem)
logger.info("problem %s rejudged" % problem.pk)
messages.success(request, 'problem %s rejudged' % problem.pk)
except Problem.DoesNotExist:
raise Http404()
return redirect('/problem/')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright (c) 2016 Wojciech Migda
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
# Filename: OptimizedOffsetRegressor.py
#
# Description:
# Regressor implementing optimized offsets in a scikit-learn fashion.
# Based on scripts on Kaggle
#
# Authors:
# Wojciech Migda
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2016-01-23 wm Initial version
#
################################################################################
"""
from __future__ import print_function
__author__ = 'Wojciech Migda'
__date__ = '2016-01-23'
__version__ = '0.0.1'
__all__ = [
'OptimizedOffsetRegressor'
]
from sklearn.base import BaseEstimator, RegressorMixin
class OptimizedOffsetRegressor(BaseEstimator, RegressorMixin):
def __init__(self, n_jobs=-1, offset_scale=1.0, n_buckets=2, initial_offsets=None, scoring='accuracy'):
self.n_jobs = int(n_jobs)
self.offset_scale = float(offset_scale)
self.n_buckets = int(n_buckets)
if initial_offsets is None:
self.initial_offsets_ = [-0.5] * self.n_buckets
pass
else:
self.initial_offsets_ = list(initial_offsets)
assert(len(self.initial_offsets_) == self.n_buckets)
pass
from sklearn.metrics import get_scorer
self.scoring = get_scorer(scoring)
pass
def __call__(self, args):
return self.OffsetMinimizer_(args)
def apply_offset(self, data, bin_offset, sv):
mask = data[0].astype(int) == sv
data[1, mask] = data[0, mask] + bin_offset
return data
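# Sketch of apply_offset() semantics (hypothetical values): data holds rows of
# [raw predictions, adjusted predictions, true labels]. With sv=2 and
# bin_offset=0.3, every column whose int(raw prediction) == 2 gets its adjusted
# row set to raw + 0.3; all other columns are left untouched.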
def OffsetMinimizer_(self, args):
def apply_offset_and_score(data, bin_offset, sv):
data = self.apply_offset(data, bin_offset, sv)
return self.scoring(data[1], data[2])
j, data, offset0 = args
from scipy.optimize import fmin_powell
return fmin_powell(lambda x: apply_offset_and_score(data, x, j), offset0, disp=True)
def fit(self, X, y):
from multiprocessing import Pool
pool = Pool(processes=None if self.n_jobs == -1 else self.n_jobs)
from numpy import vstack
self.data_ = vstack((X, X, y))
for j in range(self.n_buckets):
self.data_ = self.apply_offset(self.data_, self.initial_offsets_[j], j)
from numpy import array
self.offsets_ = array(pool.map(self,
zip(range(self.n_buckets),
[self.data_] * self.n_buckets,
self.initial_offsets_)))
# self.offsets_ = array(map(self,
# zip(range(self.n_buckets),
# [self.data_] * self.n_buckets,
# self.initial_offsets_)))
return self
def predict(self, X):
from numpy import vstack
data = vstack((X, X))
for j in range(self.n_buckets):
data = self.apply_offset(data, self.offsets_[j], j)
return data[1]
pass
class DigitizedOptimizedOffsetRegressor(BaseEstimator, RegressorMixin):
def __init__(self,
n_jobs=-1,
offset_scale=1.0,
n_buckets=2,
initial_params=None,
minimizer='BFGS',
basinhopping=False,
scoring='accuracy'):
from numpy import array
self.n_jobs = int(n_jobs)
self.offset_scale = float(offset_scale)
self.n_buckets = int(n_buckets)
if initial_params is None:
#self.initial_offsets_ = [-0.5] * self.n_buckets
pass
else:
self.params = array(initial_params)
#assert(len(self.initial_offsets_) == self.n_buckets)
pass
self.minimizer = minimizer
self.basinhopping = basinhopping
from sklearn.metrics import get_scorer
self.scoring = get_scorer(scoring)
pass
def apply_params(self, params, data):
from numpy import digitize
offsets = params[:self.n_buckets][::-1]
# both give #40: 0.67261
#splits = [1., 2., 3., 4., 5., 6., 7.]
#response = digitize(data[0], splits)
#splits = [2., 3., 4., 5., 6., 7., 8.]
#response = digitize(data[0], splits) + 1
from numpy import linspace
splits = linspace(0, 7, self.n_buckets + 1)[1:-1] + 1
#print(splits)
response = digitize(data[0], splits)
#from numpy import bincount
#print(bincount(response))
for i, off in enumerate(offsets):
mask = response == i
data[1, mask] = data[0, mask] + offsets[i]
return data
def apply_params_and_score(self, params, data):
data = self.apply_params(params, data)
return self.scoring(data[1], data[2])
#return -self.scoring(data[1], data[2]) ** 2
def fit(self, X, y):
from numpy import vstack
data = vstack((X, X, y))
from scipy.optimize import minimize, approx_fprime
minimizer_kwargs = {
'args': (data,),
'method': self.minimizer,
'jac': lambda x, args:
approx_fprime(x, self.apply_params_and_score, 0.05, args),
'tol': 1e-4,
'options': {'disp': True}
}
if not self.basinhopping:
# from sys import path as sys_path
# sys_path.insert(0, './hyperopt')
# from hyperopt import fmin, tpe, hp
# space = {i: hp.uniform(str(i), -4, 4) for i in range(self.n_buckets)}
# #from hyperopt import Trials
# #trials = Trials()
# best = fmin(fn=lambda space: self.apply_params_and_score([space[i] for i in range(self.n_buckets)], data),
# space=space,
# algo=tpe.suggest,
# max_evals=1000,
# #trials=trials
# )
# print(best, self.apply_params_and_score([best[str(i)] for i in range(self.n_buckets)], data))
optres = minimize(
self.apply_params_and_score,
self.params,
**minimizer_kwargs)
pass
else:
from scipy.optimize import basinhopping
optres = basinhopping(
self.apply_params_and_score,
self.params,
niter=100,
T=0.05,
stepsize=0.10,
minimizer_kwargs=minimizer_kwargs)
minimizer_kwargs['method'] = 'BFGS'
optres = minimize(
self.apply_params_and_score,
optres.x,
**minimizer_kwargs)
pass
print(optres)
self.params = optres.x
return self
def predict(self, X):
from numpy import vstack
data = vstack((X, X))
params = self.params.copy()
params[:self.n_buckets] = self.offset_scale * params[:self.n_buckets]
data = self.apply_params(params, data)
return data[1]
pass
class FullDigitizedOptimizedOffsetRegressor(BaseEstimator, RegressorMixin):
def __init__(self,
n_jobs=-1,
offset_scale=1.0,
n_buckets=2,
initial_params=None,
minimizer='BFGS',
basinhopping=False,
scoring='accuracy'):
from numpy import array
self.n_jobs = int(n_jobs)
self.offset_scale = float(offset_scale)
self.n_buckets = int(n_buckets)
if initial_params is None:
#self.initial_offsets_ = [-0.5] * self.n_buckets
pass
else:
self.params = array(initial_params)
#assert(len(self.initial_offsets_) == self.n_buckets)
pass
self.minimizer = minimizer
self.basinhopping = basinhopping
from sklearn.metrics import get_scorer
self.scoring = get_scorer(scoring)
pass
def apply_params(self, params, data):
from numpy import digitize
offsets = params[:self.n_buckets]
splits = sorted(list(params[self.n_buckets:2 * self.n_buckets - 1]))
response = digitize(data[0], splits)
for i, off in enumerate(offsets):
mask = response == i
data[1, mask] = data[0, mask] + offsets[i]
return data
def apply_params_and_score(self, params, data):
data = self.apply_params(params, data)
return self.scoring(data[1], data[2])
def fit(self, X, y):
from numpy import vstack
data = vstack((X, X, y))
from scipy.optimize import minimize, approx_fprime
minimizer_kwargs = {
'args': (data,),
'method': self.minimizer,
'jac': lambda x, args:
approx_fprime(x, self.apply_params_and_score, 0.05, args),
'tol': 3e-2 if self.minimizer == 'BFGS' else 1e-4,
'options': {'disp': True}
}
if not self.basinhopping:
optres = minimize(
self.apply_params_and_score,
self.params,
**minimizer_kwargs)
pass
else:
from scipy.optimize import basinhopping
optres = basinhopping(
self.apply_params_and_score,
self.params,
niter=250,
T=0.05,
stepsize=0.10,
minimizer_kwargs=minimizer_kwargs)
minimizer_kwargs['method'] = 'BFGS'
minimizer_kwargs['tol'] = 1e-2
minimizer_kwargs['jac'] = lambda x, args: \
approx_fprime(x, self.apply_params_and_score, 0.01, args)
optres = minimize(
self.apply_params_and_score,
optres.x,
**minimizer_kwargs)
pass
print(optres)
self.params = optres.x
return self
def predict(self, X):
from numpy import vstack
data = vstack((X, X))
params = self.params.copy()
params[:self.n_buckets] = self.offset_scale * params[:self.n_buckets]
data = self.apply_params(params, data)
return data[1]
pass
if __name__ == "__main__":
pass
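    # Minimal usage sketch (not part of the original script): exercise
    # apply_offset on synthetic predictions. fit() is not demonstrated here
    # because it assumes the scoring callable accepts (predictions, targets)
    # directly, which depends on how the scorer is configured.
    from numpy import array, vstack
    raw_predictions = array([0.2, 1.4, 0.9, 1.8])
    targets = array([0, 1, 1, 2])
    reg = OptimizedOffsetRegressor(n_buckets=2, initial_offsets=[0.0, 0.0])
    data = vstack((raw_predictions, raw_predictions, targets))
    # Shift predictions whose integer part falls into bucket 1 by +0.25.
    shifted = reg.apply_offset(data, 0.25, 1)
    print(shifted[1])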
|
|
import numpy
from gtfspy.routing.label import LabelTimeWithBoardingsCount, merge_pareto_frontiers, compute_pareto_front, \
LabelVehLegCount, LabelTime, LabelTimeBoardingsAndRoute, LabelTimeAndRoute
from gtfspy.routing.connection import Connection
class NodeProfileMultiObjective:
"""
In the multi-objective connection scan algorithm,
each stop has a profile entry containing all Pareto-optimal entries.
"""
def __init__(self,
dep_times=None,
walk_to_target_duration=float('inf'),
label_class=LabelTimeWithBoardingsCount,
transit_connection_dep_times=None,
closest_target=None,
node_id=None):
"""
Parameters
----------
dep_times
walk_to_target_duration
label_class: label class to be used
transit_connection_dep_times:
if not given, all connections are assumed to be real connections
closest_target: int, optional
stop_I of the closest target if within walking distance (and Routes are recorded)
"""
if dep_times is None:
dep_times = []
n_dep_times = len(dep_times)
assert n_dep_times == len(set(dep_times)), "There should be no duplicate departure times"
self._departure_times = list(reversed(sorted(dep_times)))
self.dep_times_to_index = dict(zip(self._departure_times, range(len(self._departure_times))))
self._label_bags = [[]] * len(self._departure_times)
self._walk_to_target_duration = walk_to_target_duration
self._min_dep_time = float('inf')
self.label_class = label_class
self.closest_target = closest_target
if self.label_class == LabelTimeBoardingsAndRoute and self._walk_to_target_duration < float('inf'):
assert (self.closest_target is not None)
if transit_connection_dep_times is not None:
self._connection_dep_times = transit_connection_dep_times
else:
self._connection_dep_times = dep_times
assert (isinstance(self._connection_dep_times, (list, numpy.ndarray)))
self._closed = False
self._finalized = False
self._final_pareto_optimal_labels = None
self._real_connection_labels = None
self.node_id = node_id
def _check_dep_time_is_valid(self, dep_time):
"""
A simple check that connections are processed in descending order of departure time
and that no departure time has been "skipped".
Parameters
----------
dep_time
Returns
-------
None
"""
assert dep_time <= self._min_dep_time, "Labels should be entered in decreasing order of departure time."
dep_time_index = self.dep_times_to_index[dep_time]
if self._min_dep_time < float('inf'):
min_dep_index = self.dep_times_to_index[self._min_dep_time]
assert min_dep_index == dep_time_index or (min_dep_index == dep_time_index - 1), \
"dep times should be ordered sequentially"
else:
assert dep_time_index == 0, "first dep_time index should be zero (ensuring that all connections are properly handled)"
self._min_dep_time = dep_time
def get_walk_to_target_duration(self):
"""
Get walking distance to target node.
Returns
-------
walk_to_target_duration: float
"""
return self._walk_to_target_duration
def update(self, new_labels, departure_time_backup=None):
"""
Update the profile with the new labels.
Each new label should have the same departure_time.
Parameters
----------
new_labels: list[LabelTime]
Returns
-------
added: bool
whether the labels were merged into the Pareto frontier (currently always True)
"""
if self._closed:
raise RuntimeError("Profile is closed, no updates can be made")
try:
departure_time = next(iter(new_labels)).departure_time
except StopIteration:
departure_time = departure_time_backup
self._check_dep_time_is_valid(departure_time)
for new_label in new_labels:
assert (new_label.departure_time == departure_time)
dep_time_index = self.dep_times_to_index[departure_time]
if dep_time_index > 0:
# Departure time is modified in order to not pass on labels which are not Pareto-optimal when departure time is ignored.
mod_prev_labels = [label.get_copy_with_specified_departure_time(departure_time) for label
in self._label_bags[dep_time_index - 1]]
else:
mod_prev_labels = list()
mod_prev_labels += self._label_bags[dep_time_index]
walk_label = self._get_label_to_target(departure_time)
if walk_label:
new_labels = new_labels + [walk_label]
new_frontier = merge_pareto_frontiers(new_labels, mod_prev_labels)
self._label_bags[dep_time_index] = new_frontier
return True
def evaluate(self, dep_time, first_leg_can_be_walk=True, connection_arrival_time=None):
"""
Get the pareto_optimal set of Labels, given a departure time.
Parameters
----------
dep_time : float, int
time in unix seconds
first_leg_can_be_walk : bool, optional
whether walking to the target may be included in the profile
(i.e. whether this function is called when scanning a pseudo-connection:
"double" walks are not allowed)
connection_arrival_time: float, int, optional
used for computing the walking label if dep_time (i.e. connection.arrival_stop_next_departure_time) is infinity
Returns
-------
pareto_optimal_labels : set
Set of Labels
"""
walk_labels = list()
# walk label towards target
if first_leg_can_be_walk and self._walk_to_target_duration != float('inf'):
# add walk_label
if connection_arrival_time is not None:
walk_labels.append(self._get_label_to_target(connection_arrival_time))
else:
walk_labels.append(self._get_label_to_target(dep_time))
# if dep time is larger than the largest dep time -> only walk labels are possible
if dep_time in self.dep_times_to_index:
assert (dep_time != float('inf'))
index = self.dep_times_to_index[dep_time]
labels = self._label_bags[index]
pareto_optimal_labels = merge_pareto_frontiers(labels, walk_labels)
else:
pareto_optimal_labels = walk_labels
if not first_leg_can_be_walk:
pareto_optimal_labels = [label for label in pareto_optimal_labels if not label.first_leg_is_walk]
return pareto_optimal_labels
def _get_label_to_target(self, departure_time):
if departure_time != float('inf') and self._walk_to_target_duration != float('inf'):
if self._walk_to_target_duration == 0:
first_leg_is_walk = False
else:
first_leg_is_walk = True
if self.label_class == LabelTimeBoardingsAndRoute or self.label_class == LabelTimeAndRoute:
if self._walk_to_target_duration > 0:
walk_connection = Connection(self.node_id,
self.closest_target,
departure_time,
departure_time + self._walk_to_target_duration,
Connection.WALK_TRIP_ID,
Connection.WALK_SEQ,
is_walk=True
)
else:
walk_connection = None
if self.label_class == LabelTimeAndRoute:
label = self.label_class(departure_time=float(departure_time),
arrival_time_target=float(departure_time + self._walk_to_target_duration),
movement_duration=self._walk_to_target_duration,
first_leg_is_walk=first_leg_is_walk,
connection=walk_connection)
else:
label = self.label_class(departure_time=float(departure_time),
arrival_time_target=float(departure_time + self._walk_to_target_duration),
movement_duration=self._walk_to_target_duration,
n_boardings=0,
first_leg_is_walk=first_leg_is_walk,
connection=walk_connection)
else:
label = self.label_class(departure_time=float(departure_time),
arrival_time_target=float(departure_time + self._walk_to_target_duration),
n_boardings=0,
first_leg_is_walk=first_leg_is_walk)
return label
else:
return None
def get_labels_for_real_connections(self):
self._closed = True
if self._real_connection_labels is None:
self._compute_real_connection_labels()
return self._real_connection_labels
def get_final_optimal_labels(self):
"""
Get pareto-optimal labels.
Returns
-------
"""
assert self._finalized, "finalize() first!"
return self._final_pareto_optimal_labels
def finalize(self, neighbor_label_bags=None, walk_durations=None, departure_arrival_stop_pairs=None):
"""
Parameters
----------
neighbor_label_bags: list
each list element is a list of labels corresponding to a neighboring node
(note: only labels with first connection being a departure should be included)
walk_durations: list
departure_arrival_stop_pairs: list of tuples
Returns
-------
None
"""
assert (not self._finalized)
if self._final_pareto_optimal_labels is None:
self._compute_real_connection_labels()
if neighbor_label_bags is not None:
assert (len(walk_durations) == len(neighbor_label_bags))
self._compute_final_pareto_optimal_labels(neighbor_label_bags,
walk_durations,
departure_arrival_stop_pairs)
else:
self._final_pareto_optimal_labels = self._real_connection_labels
self._finalized = True
self._closed = True
def _compute_real_connection_labels(self):
pareto_optimal_labels = []
# do not take those bags whose first event is a pseudo-connection
for dep_time in self._connection_dep_times:
index = self.dep_times_to_index[dep_time]
pareto_optimal_labels.extend([label for label in self._label_bags[index] if not label.first_leg_is_walk])
if self.label_class == LabelTimeWithBoardingsCount or self.label_class == LabelTime \
or self.label_class == LabelTimeBoardingsAndRoute:
pareto_optimal_labels = [label for label in pareto_optimal_labels
if label.duration() < self._walk_to_target_duration]
if self.label_class == LabelVehLegCount and self._walk_to_target_duration < float('inf'):
pareto_optimal_labels.append(LabelVehLegCount(0))
self._real_connection_labels = [label.get_copy() for label in compute_pareto_front(pareto_optimal_labels,
finalization=True)]
def _compute_final_pareto_optimal_labels(self, neighbor_label_bags, walk_durations, departure_arrival_stops):
labels_from_neighbors = []
for i, (label_bag, walk_duration) in enumerate(zip(neighbor_label_bags, walk_durations)):
for label in label_bag:
if self.label_class == LabelTimeBoardingsAndRoute or self.label_class == LabelTimeAndRoute:
departure_arrival_tuple = departure_arrival_stops[i]
departure_time = label.departure_time - walk_duration
arrival_time = label.departure_time
connection = Connection(departure_arrival_tuple[0],
departure_arrival_tuple[1],
departure_time,
arrival_time,
Connection.WALK_TRIP_ID,
Connection.WALK_SEQ,
is_walk=True)
labels_from_neighbors.append(label.get_copy_with_walk_added(walk_duration, connection))
else:
labels_from_neighbors.append(label.get_copy_with_walk_added(walk_duration))
pareto_front = compute_pareto_front(self._real_connection_labels +
labels_from_neighbors,
finalization=True)
if pareto_front and hasattr(pareto_front[0], "duration"):
self._final_pareto_optimal_labels = list(filter(lambda label: label.duration() < self._walk_to_target_duration, pareto_front))
else:
self._final_pareto_optimal_labels = pareto_front
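if __name__ == "__main__":
    # Minimal sketch (not part of gtfspy): a profile for a stop within a
    # 10-minute walk of the target and with no transit departures.
    # evaluate() then returns a single pure-walk label; the departure time
    # used here is an arbitrary example value (07:00 in seconds).
    profile = NodeProfileMultiObjective(dep_times=[], walk_to_target_duration=600)
    walk_labels = profile.evaluate(dep_time=7 * 3600)
    print(walk_labels)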
|
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
ENV = {
'__actions': {
'std.http': {
'auth': 'librarian:password123',
'timeout': 30,
}
}
}
EXPECTED_ENV_AUTH = ('librarian', 'password123')
WORKFLOW1 = """
---
version: "2.0"
wf1:
tasks:
task1:
action: std.http url="https://api.library.org/books"
publish:
result: <% $ %>
"""
WORKFLOW2 = """
---
version: "2.0"
wf2:
tasks:
task1:
action: std.http url="https://api.library.org/books" timeout=60
publish:
result: <% $ %>
"""
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %>
publish:
result: <% $ %>
"""
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %> timeout=60
publish:
result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1)
wf_ex = self.engine.start_workflow('wf1', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET',
'https://api.library.org/books',
params=None,
data=None,
json=None,
headers=None,
cookies=None,
allow_redirects=None,
proxies=None,
verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=ENV['__actions']['std.http']['timeout']
)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2)
wf_ex = self.engine.start_workflow('wf2', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, json=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=60
)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf1_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
json=None, headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=ENV['__actions']['std.http']['timeout'])
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf2_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
json=None, headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=60)
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
|
|
import atexit
from contextlib import contextmanager
from selenium.common.exceptions import (
NoAlertPresentException,
NoSuchWindowException,
TimeoutException,
UnexpectedAlertPresentException,
WebDriverException)
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from time import sleep
import capybara
from capybara.driver.base import Base
from capybara.exceptions import ExpectationNotMet, ModalNotFound
from capybara.helpers import desc, Timer, toregex
from capybara.selenium.browser import get_browser
from capybara.selenium.node import Node
from capybara.utils import cached_property, isregex
class Driver(Base):
"""
A Capybara driver that uses Selenium WebDriver to drive a real browser.
Args:
app (object): The WSGI-compliant app to drive.
browser (str, optional): The name of the browser to use. Defaults to "firefox".
clear_local_storage (bool, optional): Whether to clear local storage on reset.
Defaults to False.
clear_session_storage (bool, optional): Whether to clear session storage on
reset. Defaults to False.
desired_capabilities (Dict[str, str | bool], optional): Desired
capabilities of the underlying browser. Defaults to the standard
capabilities that Selenium provides for the chosen browser.
options: Arbitrary keyword arguments for the underlying Selenium driver.
"""
def __init__(
self,
app,
browser="firefox",
clear_local_storage=False,
clear_session_storage=False,
desired_capabilities=None,
**options
):
self.app = app
self._browser_name = browser
self._clear_local_storage = clear_local_storage
self._clear_session_storage = clear_session_storage
self._desired_capabilities = desired_capabilities
self._options = options
self._frame_handles = []
@property
def needs_server(self):
return True
@cached_property
def browser(self):
capabilities = self._desired_capabilities
if self._firefox:
capabilities = (capabilities or DesiredCapabilities.FIREFOX).copy()
# Auto-accept unload alerts triggered by navigating away.
if capabilities.get("marionette"):
capabilities["unhandledPromptBehavior"] = "dismiss"
else:
capabilities["unexpectedAlertBehaviour"] = "ignore"
browser = get_browser(self._browser_name, capabilities=capabilities, **self._options)
atexit.register(browser.quit)
return browser
@property
def current_url(self):
return self.browser.current_url
@property
def title(self):
return self.browser.title
@property
def html(self):
return self.browser.page_source
@property
def text(self):
return self.browser.text
def switch_to_frame(self, frame):
if frame == "top":
self._frame_handles = []
self.browser.switch_to.default_content()
elif frame == "parent":
self._frame_handles.pop()
self.browser.switch_to.default_content()
for frame_handle in self._frame_handles:
self.browser.switch_to.frame(frame_handle)
else:
self._frame_handles.append(frame.native)
self.browser.switch_to.frame(frame.native)
@property
def current_window_handle(self):
return self.browser.current_window_handle
def window_size(self, handle):
with self._window(handle):
size = self.browser.get_window_size()
return [size["width"], size["height"]]
def resize_window_to(self, handle, width, height):
with self._window(handle):
try:
self.browser.set_window_size(width, height)
except WebDriverException as e:
if self._chrome and "failed to change window state" in str(e):
# Chromedriver doesn't wait long enough for the window state to change when
# coming out of fullscreen and raises an unnecessary error. Wait a bit and try again.
sleep(0.5)
self.browser.set_window_size(width, height)
else:
raise
def maximize_window(self, handle):
with self._window(handle):
self.browser.maximize_window()
def fullscreen_window(self, handle):
with self._window(handle):
self.browser.fullscreen_window()
def close_window(self, handle):
with self._window(handle):
self.browser.close()
@property
def window_handles(self):
return self.browser.window_handles
def open_new_window(self):
self.browser.execute_script("window.open();")
def switch_to_window(self, handle):
self.browser.switch_to.window(handle)
@property
def no_such_window_error(self):
return NoSuchWindowException
def visit(self, url):
self.browser.get(url)
def refresh(self):
try:
with self.accept_modal(None, wait=0.1):
self.browser.refresh()
except ModalNotFound:
pass
def go_back(self):
self.browser.back()
def go_forward(self):
self.browser.forward()
def execute_script(self, script, *args):
args = [arg.native if isinstance(arg, Node) else arg for arg in args]
return self.browser.execute_script(script, *args)
def evaluate_script(self, script, *args):
result = self.execute_script("return {0}".format(script.strip()), *args)
return self._wrap_element_script_result(result)
def evaluate_async_script(self, script, *args):
self.browser.set_script_timeout(capybara.default_max_wait_time)
args = [arg.native if isinstance(arg, Node) else arg for arg in args]
result = self.browser.execute_async_script(script, *args)
return self._wrap_element_script_result(result)
def save_screenshot(self, path, **kwargs):
self.browser.get_screenshot_as_file(path)
@contextmanager
def accept_modal(self, modal_type, text=None, response=None, wait=None):
yield
modal = self._find_modal(text=text, wait=wait)
if response:
modal.send_keys(response)
modal.accept()
@contextmanager
def dismiss_modal(self, modal_type, text=None, wait=None):
yield
modal = self._find_modal(text=text, wait=wait)
modal.dismiss()
def reset(self):
# Avoid starting the browser just to reset the session.
if "browser" in self.__dict__:
navigated = False
timer = Timer(10)
while True:
try:
# Only trigger a navigation if we haven't done it already,
# otherwise it can trigger an endless series of unload modals.
if not navigated:
self.browser.delete_all_cookies()
self._clear_storage()
self.browser.get("about:blank")
navigated = True
while True:
try:
next(self._find_xpath("/html/body/*"))
except StopIteration:
break
if timer.expired:
raise ExpectationNotMet("Timed out waiting for Selenium session reset")
sleep(0.05)
break
except UnexpectedAlertPresentException:
# This error is thrown if an unhandled alert is on the page.
try:
self.browser.switch_to.alert.accept()
# Allow time for the modal to be handled.
sleep(0.25)
except NoAlertPresentException:
# The alert is now gone.
if self.browser.current_url != "about:blank":
# If navigation has not occurred, Firefox may have dismissed the alert
# before we could accept it.
# Try to navigate again, anticipating the alert this time.
try:
self.browser.get("about:blank")
sleep(0.1) # Wait for the alert.
self.browser.switch_to.alert.accept()
except NoAlertPresentException:
# No alert appeared this time.
pass
# Try cleaning up the browser again.
continue
for handle in self.window_handles:
if handle != self.current_window_handle:
self.close_window(handle)
@property
def invalid_element_errors(self):
return (WebDriverException,)
@property
def _marionette(self):
return self._firefox and self.browser.w3c
@property
def _chrome(self):
return self._browser_name == "chrome"
@property
def _firefox(self):
return self._browser_name in ["ff", "firefox"]
def _clear_storage(self):
if "browser" in self.__dict__:
if self._clear_local_storage:
self.execute_script("window.localStorage.clear()")
if self._clear_session_storage:
self.execute_script("window.sessionStorage.clear()")
def _find_css(self, css):
return (Node(self, element) for element in self.browser.find_elements_by_css_selector(css))
def _find_xpath(self, xpath):
return (Node(self, element) for element in self.browser.find_elements_by_xpath(xpath))
def _find_modal(self, text=None, wait=None):
wait = wait or capybara.default_max_wait_time
try:
alert = WebDriverWait(self.browser, wait).until(EC.alert_is_present())
regexp = toregex(text)
if not regexp.search(alert.text):
qualifier = "matching" if isregex(text) else "with"
raise ModalNotFound("Unable to find modal dialog {0} {1}".format(qualifier, desc(text)))
return alert
except TimeoutException:
raise ModalNotFound("Unable to find modal dialog")
@contextmanager
def _window(self, handle):
original_handle = self.current_window_handle
if handle == original_handle:
yield
else:
self.switch_to_window(handle)
try:
yield
finally:
self.switch_to_window(original_handle)
def _wrap_element_script_result(self, arg):
if isinstance(arg, list):
return [self._wrap_element_script_result(e) for e in arg]
elif isinstance(arg, dict):
return {k: self._wrap_element_script_result(v) for k, v in iter(arg.items())}
elif isinstance(arg, WebElement):
return Node(self, arg)
else:
return arg
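if __name__ == "__main__":
    # Minimal usage sketch (assumptions: Firefox plus a matching geckodriver
    # are installed and the target URL is reachable; the WSGI app argument
    # is not needed for a plain visit()).
    driver = Driver(app=None)
    driver.visit("https://example.com/")
    print(driver.title)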
|
|
import unittest
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from operator import attrgetter, itemgetter
from uuid import UUID
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
BinaryField, BooleanField, Case, Count, DecimalField, F,
GenericIPAddressField, IntegerField, Max, Min, Q, Sum, TextField, Value,
When,
)
from django.test import SimpleTestCase, TestCase
from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel
try:
from PIL import Image
except ImportError:
Image = None
class CaseExpressionTests(TestCase):
@classmethod
def setUpTestData(cls):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string='4')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
cls.group_by_fields = [
f.name for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created) and
(
connection.features.allows_group_by_lob or
not isinstance(f, (BinaryField, TextField))
)
]
def test_annotate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=1),
When(integer=2, then=2),
)).order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer=1, then=F('o2o_rel__integer') + 1),
When(integer=2, then=F('o2o_rel__integer') + 3),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_in_clause(self):
fk_rels = FKCaseTestModel.objects.filter(integer__in=[5])
self.assertQuerysetEqual(
CaseTestModel.objects.only('pk', 'integer').annotate(in_test=Sum(Case(
When(fk_rel__in=fk_rels, then=F('fk_rel__integer')),
default=Value(0),
))).order_by('pk'),
[(1, 0), (2, 0), (3, 0), (2, 0), (3, 0), (3, 0), (4, 5)],
transform=attrgetter('integer', 'in_test')
)
def test_annotate_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer2=F('o2o_rel__integer'), then=Value('equal')),
When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')),
default=Value('other'),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
f_plus_3=F('integer') + 3,
).annotate(
f_test=Case(
When(integer=1, then='f_plus_1'),
When(integer=2, then='f_plus_3'),
default='integer',
),
).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).annotate(
f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('f_plus_1'), then=Value('+1')),
),
).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F('integer') - 2,
).annotate(
test=Case(
When(f_minus_2=-1, then=Value('negative one')),
When(f_minus_2=0, then=Value('zero')),
When(f_minus_2=1, then=Value('one')),
default=Value('other'),
),
).order_by('pk'),
[(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)],
transform=itemgetter('integer', 'test', 'min', 'max')
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer2=F('min'), then=Value('min')),
When(integer2=F('max'), then=Value('max')),
),
).order_by('pk'),
[(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')],
transform=itemgetter('integer', 'integer2', 'test')
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields).annotate(
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(max=3, then=Value('max = 3')),
When(max=4, then=Value('max = 4')),
default=Value(''),
),
).order_by('pk'),
[(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'),
(3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')],
transform=itemgetter('integer', 'max', 'test')
)
def test_annotate_exclude(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
)).exclude(test='other').order_by('pk'),
[(1, 'one'), (2, 'two'), (2, 'two')],
transform=attrgetter('integer', 'test')
)
def test_annotate_values_not_in_order_by(self):
self.assertEqual(
list(CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
When(integer=3, then=Value('three')),
default=Value('other'),
)).order_by('test').values_list('integer', flat=True)),
[1, 4, 3, 3, 3, 2, 2]
)
def test_annotate_with_empty_when(self):
objects = CaseTestModel.objects.annotate(
selected=Case(
When(pk__in=[], then=Value('selected')),
default=Value('not selected'),
)
)
self.assertEqual(len(objects), CaseTestModel.objects.count())
self.assertTrue(all(obj.selected == 'not selected' for obj in objects))
def test_combined_expression(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
) + 1,
).order_by('pk'),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter('integer', 'test')
)
def test_in_subquery(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F('integer2'), then='pk'),
When(integer=4, then='pk'),
),
).values('test')).order_by('pk'),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter('integer', 'integer2')
)
def test_condition_with_lookups(self):
qs = CaseTestModel.objects.annotate(
test=Case(
When(Q(integer2=1), string='2', then=Value(False)),
When(Q(integer2=1), string='1', then=Value(True)),
default=Value(False),
output_field=BooleanField(),
),
)
self.assertIs(qs.get(integer=1).test, True)
def test_case_reuse(self):
SOME_CASE = Case(
When(pk=0, then=Value('0')),
default=Value('1'),
)
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk'),
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk').values_list('pk', 'somecase'),
lambda x: (x.pk, x.somecase)
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=Sum(Case(
When(integer=1, then=1),
)),
two=Sum(Case(
When(integer=2, then=1),
)),
three=Sum(Case(
When(integer=3, then=1),
)),
four=Sum(Case(
When(integer=4, then=1),
)),
),
{'one': 1, 'two': 2, 'three': 3, 'four': 1}
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=Sum(Case(When(integer=1, then='integer'))),
two=Sum(Case(When(integer=2, then=F('integer') - 1))),
three=Sum(Case(When(integer=3, then=F('integer') + 1))),
),
{'one': 1, 'two': 2, 'three': 12}
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=Sum(Case(
When(integer2=F('integer'), then=1),
)),
plus_one=Sum(Case(
When(integer2=F('integer') + 1, then=1),
)),
),
{'equal': 3, 'plus_one': 4}
)
def test_filter(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
default=1,
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
)).order_by('pk'),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('integer') + 1),
When(integer=3, then=F('integer')),
default='integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string=Case(
When(integer2=F('integer'), then=Value('2')),
When(integer2=F('integer') + 1, then=Value('3')),
)).order_by('pk'),
[(3, 4, '3'), (2, 2, '2'), (3, 4, '3')],
transform=attrgetter('integer', 'integer2', 'string')
)
def test_filter_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('o2o_rel__integer') + 1),
When(integer=3, then=F('o2o_rel__integer')),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
)).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(o2o_rel__integer=1, then=1),
When(o2o_rel__integer=2, then=3),
When(o2o_rel__integer=3, then=4),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f=F('integer'),
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(integer=2, then='f_plus_1'),
When(integer=3, then='f'),
),
).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer=Case(
When(integer2=F('integer'), then=2),
When(integer2=F('f_plus_1'), then=3),
),
).order_by('pk'),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(f_plus_1=3, then=3),
When(f_plus_1=4, then=4),
default=1,
),
).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer2=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(integer2=F('min'), then=2),
When(integer2=F('max'), then=3),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.group_by_fields).annotate(
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(max=3, then=2),
When(max=4, then=3),
),
).order_by('pk'),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter('integer', 'integer2', 'max')
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'string')
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'integer2')
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)],
transform=attrgetter('string', 'integer')
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'string')
)
def test_update_with_join_in_condition_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'big_integer')
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
When(integer=1, then=b'one'),
When(integer=2, then=b'two'),
default=b'',
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')],
transform=lambda o: (o.integer, bytes(o.binary))
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=True),
When(integer=2, then=True),
default=False,
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)],
transform=attrgetter('integer', 'boolean')
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=date(2015, 1, 1)),
When(integer=2, then=date(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date')
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=datetime(2015, 1, 1)),
When(integer=2, then=datetime(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date_time')
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Decimal('1.1')),
When(integer=2, then=Value(Decimal('2.2'), output_field=DecimalField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, Decimal('1.1')),
(2, Decimal('2.2')),
(3, None),
(2, Decimal('2.2')),
(3, None),
(3, None),
(4, None)
],
transform=attrgetter('integer', 'decimal')
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
When(integer=1, then=timedelta(1)),
When(integer=2, then=timedelta(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'duration')
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value('1@example.com')),
When(integer=2, then=Value('2@example.com')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1@example.com'), (2, '2@example.com'), (3, ''), (2, '2@example.com'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'email')
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, str(o.file))
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'file_path')
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=1.1),
When(integer=2, then=2.2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'float')
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, str(o.image))
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
When(integer=1, then=Value('1.1.1.1')),
When(integer=2, then=Value('2.2.2.2')),
output_field=GenericIPAddressField(),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'generic_ip_address')
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'null_boolean')
)
def test_update_null_boolean_old(self):
CaseTestModel.objects.update(
null_boolean_old=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'null_boolean_old')
)
def test_update_positive_big_integer(self):
CaseTestModel.objects.update(
positive_big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_big_integer')
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_integer')
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_small_integer')
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'slug')
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'small_integer')
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=['1', '2']).update(
string=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'),
[(1, '1'), (2, '2'), (2, '2')],
transform=attrgetter('integer', 'string')
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'text')
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
When(integer=1, then=time(1)),
When(integer=2, then=time(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'time')
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value('http://1.example.com/')),
When(integer=2, then=Value('http://2.example.com/')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'),
(3, ''), (3, ''), (4, '')
],
transform=attrgetter('integer', 'url')
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
When(integer=1, then=UUID('11111111111111111111111111111111')),
When(integer=2, then=UUID('22222222222222222222222222222222')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, UUID('11111111111111111111111111111111')),
(2, UUID('22222222222222222222222222222222')),
(3, None),
(2, UUID('22222222222222222222222222222222')),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter('integer', 'uuid')
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=obj1.pk),
When(integer=2, then=obj2.pk),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'fk_id')
)
def test_lookup_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value('less than 2')),
When(integer__gt=2, then=Value('greater than 2')),
default=Value('equal to 2'),
),
).order_by('pk'),
[
(1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'),
(3, 'greater than 2'), (4, 'greater than 2')
],
transform=attrgetter('integer', 'test')
)
def test_lookup_different_fields(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value('when')),
default=Value('default'),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'),
(3, 3, 'default'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_combined_q_object(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value('when')),
default=Value('default'),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'),
(3, 3, 'when'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_order_by_conditional_implicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)).order_by('test', 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_order_by_conditional_explicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)).order_by(F('test').asc(), 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_join_promotion(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
),
),
[(o, 3)],
lambda x: (x, x.foo)
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
),
),
[(o, 2)],
lambda x: (x, x.foo)
)
def test_join_promotion_multiple_annotations(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
),
bar=Case(
When(fk_rel__pk=1, then=4),
default=5,
),
),
[(o, 3, 5)],
lambda x: (x, x.foo, x.bar)
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
),
bar=Case(
When(fk_rel__isnull=True, then=4),
default=5,
),
),
[(o, 2, 4)],
lambda x: (x, x.foo, x.bar)
)
def test_m2m_exclude(self):
CaseTestModel.objects.create(integer=10, integer2=1, string='1')
qs = CaseTestModel.objects.values_list('id', 'integer').annotate(
cnt=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
).order_by('integer')
        # The first object has an fk_rel with integer=1, so it hits the
        # default=2 case. The others get 2 as the result because they have two
        # fk_rel objects, except for integer=4 and integer=10 (created above).
        # The integer=4 object has a single fk_rel, so the result is 1, and
        # integer=10 has none but also yields 1 (instead of 0) because ~Q()
        # also matches nulls.
self.assertQuerysetEqual(
qs,
[(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)],
lambda x: x[1:]
)
def test_m2m_reuse(self):
CaseTestModel.objects.create(integer=10, integer2=1, string='1')
# Need to use values before annotate so that Oracle will not group
# by fields it isn't capable of grouping by.
qs = CaseTestModel.objects.values_list('id', 'integer').annotate(
cnt=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
).annotate(
cnt2=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
).order_by('integer')
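        # The two identical Case annotations should share a single join
        # instead of adding one join per annotation.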
self.assertEqual(str(qs.query).count(' JOIN '), 1)
self.assertQuerysetEqual(
qs,
[(1, 2, 2), (2, 2, 2), (2, 2, 2), (3, 2, 2), (3, 2, 2), (3, 2, 2), (4, 1, 1), (10, 1, 1)],
lambda x: x[1:]
)
def test_aggregation_empty_cases(self):
tests = [
# Empty cases and default.
(Case(output_field=IntegerField()), None),
# Empty cases and a constant default.
(Case(default=Value('empty')), 'empty'),
# Empty cases and column in the default.
(Case(default=F('url')), ''),
]
for case, value in tests:
with self.subTest(case=case):
self.assertQuerysetEqual(
CaseTestModel.objects.values('string').annotate(
case=case,
integer_sum=Sum('integer'),
).order_by('string'),
[
('1', value, 1),
('2', value, 4),
('3', value, 9),
('4', value, 4),
],
transform=itemgetter('string', 'case', 'integer_sum'),
)
class CaseDocumentationExamples(TestCase):
@classmethod
def setUpTestData(cls):
Client.objects.create(
name='Jane Doe',
account_type=Client.REGULAR,
registered_on=date.today() - timedelta(days=36),
)
Client.objects.create(
name='James Smith',
account_type=Client.GOLD,
registered_on=date.today() - timedelta(days=5),
)
Client.objects.create(
name='Jack Black',
account_type=Client.PLATINUM,
registered_on=date.today() - timedelta(days=10 * 365),
)
def test_simple_example(self):
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(account_type=Client.GOLD, then=Value('5%')),
When(account_type=Client.PLATINUM, then=Value('10%')),
default=Value('0%'),
),
).order_by('pk'),
[('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_lookup_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(registered_on__lte=a_year_ago, then=Value('10%')),
When(registered_on__lte=a_month_ago, then=Value('5%')),
default=Value('0%'),
),
).order_by('pk'),
[('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_conditional_update_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
Client.objects.update(
account_type=Case(
When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)),
When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)),
default=Value(Client.REGULAR),
),
)
self.assertQuerysetEqual(
Client.objects.all().order_by('pk'),
[('Jane Doe', 'G'), ('James Smith', 'R'), ('Jack Black', 'P')],
transform=attrgetter('name', 'account_type')
)
def test_conditional_aggregation_example(self):
Client.objects.create(
name='Jean Grey',
account_type=Client.REGULAR,
registered_on=date.today(),
)
Client.objects.create(
name='James Bond',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
Client.objects.create(
name='Jane Porter',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
self.assertEqual(
Client.objects.aggregate(
regular=Count('pk', filter=Q(account_type=Client.REGULAR)),
gold=Count('pk', filter=Q(account_type=Client.GOLD)),
platinum=Count('pk', filter=Q(account_type=Client.PLATINUM)),
),
{'regular': 2, 'gold': 1, 'platinum': 3}
)
# This was the example before the filter argument was added.
self.assertEqual(
Client.objects.aggregate(
regular=Sum(Case(
When(account_type=Client.REGULAR, then=1),
)),
gold=Sum(Case(
When(account_type=Client.GOLD, then=1),
)),
platinum=Sum(Case(
When(account_type=Client.PLATINUM, then=1),
)),
),
{'regular': 2, 'gold': 1, 'platinum': 3}
)
def test_filter_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.filter(
registered_on__lte=Case(
When(account_type=Client.GOLD, then=a_month_ago),
When(account_type=Client.PLATINUM, then=a_year_ago),
),
),
[('Jack Black', 'P')],
transform=attrgetter('name', 'account_type')
)
def test_hash(self):
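        # Expressions differing only in list vs. tuple __in values hash the
        # same; adding an explicit output_field or changing the conditions
        # changes the hash.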
expression_1 = Case(
When(account_type__in=[Client.REGULAR, Client.GOLD], then=1),
default=2,
output_field=IntegerField(),
)
expression_2 = Case(
When(account_type__in=(Client.REGULAR, Client.GOLD), then=1),
default=2,
output_field=IntegerField(),
)
expression_3 = Case(When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2)
expression_4 = Case(When(account_type__in=[Client.PLATINUM, Client.GOLD], then=2), default=1)
self.assertEqual(hash(expression_1), hash(expression_2))
self.assertNotEqual(hash(expression_2), hash(expression_3))
self.assertNotEqual(hash(expression_1), hash(expression_4))
self.assertNotEqual(hash(expression_3), hash(expression_4))
class CaseWhenTests(SimpleTestCase):
def test_only_when_arguments(self):
msg = 'Positional arguments must all be When objects.'
with self.assertRaisesMessage(TypeError, msg):
Case(When(Q(pk__in=[])), object())
def test_invalid_when_constructor_args(self):
msg = (
'When() supports a Q object, a boolean expression, or lookups as '
'a condition.'
)
with self.assertRaisesMessage(TypeError, msg):
When(condition=object())
with self.assertRaisesMessage(TypeError, msg):
When(condition=Value(1))
with self.assertRaisesMessage(TypeError, msg):
When(Value(1), string='1')
with self.assertRaisesMessage(TypeError, msg):
When()
def test_empty_q_object(self):
msg = "An empty Q() can't be used as a When() condition."
with self.assertRaisesMessage(ValueError, msg):
When(Q(), then=Value(True))
|
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from swaggyjenkins.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from swaggyjenkins.exceptions import ApiAttributeError
def lazy_import():
from swaggyjenkins.model.pipeline_runartifacts import PipelineRunartifacts
globals()['PipelineRunartifacts'] = PipelineRunartifacts
class PipelineRun(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'_class': (str,), # noqa: E501
'artifacts': ([PipelineRunartifacts],), # noqa: E501
'duration_in_millis': (int,), # noqa: E501
'estimated_duration_in_millis': (int,), # noqa: E501
'en_queue_time': (str,), # noqa: E501
'end_time': (str,), # noqa: E501
'id': (str,), # noqa: E501
'organization': (str,), # noqa: E501
'pipeline': (str,), # noqa: E501
'result': (str,), # noqa: E501
'run_summary': (str,), # noqa: E501
'start_time': (str,), # noqa: E501
'state': (str,), # noqa: E501
'type': (str,), # noqa: E501
'commit_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'_class': '_class', # noqa: E501
'artifacts': 'artifacts', # noqa: E501
'duration_in_millis': 'durationInMillis', # noqa: E501
'estimated_duration_in_millis': 'estimatedDurationInMillis', # noqa: E501
'en_queue_time': 'enQueueTime', # noqa: E501
'end_time': 'endTime', # noqa: E501
'id': 'id', # noqa: E501
'organization': 'organization', # noqa: E501
'pipeline': 'pipeline', # noqa: E501
'result': 'result', # noqa: E501
'run_summary': 'runSummary', # noqa: E501
'start_time': 'startTime', # noqa: E501
'state': 'state', # noqa: E501
'type': 'type', # noqa: E501
'commit_id': 'commitId', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""PipelineRun - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_class (str): [optional] # noqa: E501
artifacts ([PipelineRunartifacts]): [optional] # noqa: E501
duration_in_millis (int): [optional] # noqa: E501
estimated_duration_in_millis (int): [optional] # noqa: E501
en_queue_time (str): [optional] # noqa: E501
end_time (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
organization (str): [optional] # noqa: E501
pipeline (str): [optional] # noqa: E501
result (str): [optional] # noqa: E501
run_summary (str): [optional] # noqa: E501
start_time (str): [optional] # noqa: E501
state (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
commit_id (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PipelineRun - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_class (str): [optional] # noqa: E501
artifacts ([PipelineRunartifacts]): [optional] # noqa: E501
duration_in_millis (int): [optional] # noqa: E501
estimated_duration_in_millis (int): [optional] # noqa: E501
en_queue_time (str): [optional] # noqa: E501
end_time (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
organization (str): [optional] # noqa: E501
pipeline (str): [optional] # noqa: E501
result (str): [optional] # noqa: E501
run_summary (str): [optional] # noqa: E501
start_time (str): [optional] # noqa: E501
state (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
commit_id (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curses-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import curses
from curses import textpad
import signal
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
class CursesUI(object):
"""Curses-based Command-line UI.
In this class, the methods with the prefix "_screen_" are the methods that
interact with the actual terminal using the curses library.
"""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
CLI_TERMINATOR_KEY = 7 # Terminator key for input text box.
CLI_TAB_KEY = ord("\t")
REGEX_SEARCH_PREFIX = "/"
  # Possible Enter keys. 343 is the curses key code for the num-pad Enter key
  # when num lock is off.
CLI_CR_KEYS = [ord("\n"), ord("\r"), 343]
_SCROLL_REFRESH = "refresh"
_SCROLL_UP = "up"
_SCROLL_DOWN = "down"
_SCROLL_HOME = "home"
_SCROLL_END = "end"
def __init__(self):
self._screen_init()
self._screen_refresh_size()
# TODO(cais): Error out if the size of the screen is too small.
# Initialize some UI component size and locations.
self._init_layout()
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
# Create tab completion registry and register the empty-str (top-level)
# tab-completion context with it.
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._command_history_store = debugger_cli_common.CommandHistory()
# Active list of command history, used in history navigation.
    # _command_history_store holds all the history commands the CLI has
    # received, up to a size limit. _active_command_history is the history
    # currently being navigated in, e.g., using the Up/Down keys. The latter
    # can differ from the former during prefixed or regex-based history
    # navigation, e.g., when the user enters the beginning of a command and
    # hits Up.
self._active_command_history = []
# Pointer to the current position in the history sequence.
# 0 means it is a new command being keyed in.
self._command_pointer = 0
self._command_history_limit = 100
self._pending_command = ""
# State related to screen output.
self._output_pad = None
self._curr_unwrapped_output = None
self._curr_wrapped_output = None
# NamedTuple for rectangular locations on screen
self.rectangle = collections.namedtuple("rectangle",
"top left bottom right")
# Register signal handler for SIGINT.
signal.signal(signal.SIGINT, self._interrupt_handler)
def _init_layout(self):
"""Initialize the layout of UI components.
Initialize the location and size of UI components such as command textbox
and output region according to the terminal size.
"""
# Height of command text box
self._command_textbox_height = 2
self._title_row = 0
# Top row index of the output pad.
    # A "pad" is a curses object that holds lines of text and is not limited
    # to the screen size. It can be rendered on the screen partially, with
    # scroll parameters specified.
self._output_top_row = 1
# Number of rows that the output pad has.
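    # (Screen height minus the title row, the command textbox and the scroll
    # information line.)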
self._output_num_rows = (
self._max_y - self._output_top_row - self._command_textbox_height - 1)
# Row index of scroll information line: Taking into account the zero-based
# row indexing and the command textbox area under the scroll information
# row.
self._output_scroll_row = self._max_y - 1 - self._command_textbox_height
    # Rows for displaying tab-completion candidates.
self._candidates_top_row = self._output_scroll_row - 4
self._candidates_bottom_row = self._output_scroll_row - 1
# Maximum number of lines the candidates display can have.
self._candidates_max_lines = int(self._output_num_rows / 2)
# Font attribute for search and highlighting.
self._search_highlight_font_attr = "bw_reversed"
self.max_output_lines = 10000
def _screen_init(self):
"""Screen initialization.
Creates curses stdscr and initialize the color pairs for display.
"""
self._stdscr = curses.initscr()
self._command_window = None
# Prepare color pairs.
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(7, curses.COLOR_BLACK, curses.COLOR_WHITE)
self._color_pairs = {}
self._color_pairs["white"] = curses.color_pair(1)
self._color_pairs["red"] = curses.color_pair(2)
self._color_pairs["green"] = curses.color_pair(3)
self._color_pairs["yellow"] = curses.color_pair(4)
self._color_pairs["blue"] = curses.color_pair(5)
self._color_pairs["magenta"] = curses.color_pair(6)
# Black-white reversed
self._color_pairs["bw_reversed"] = curses.color_pair(7)
# A_BOLD is not really a "color". But place it here for convenience.
self._color_pairs["bold"] = curses.A_BOLD
# Default color pair to use when a specified color pair does not exist.
self._default_color_pair = self._color_pairs["white"]
def _screen_launch(self):
"""Launch the curses screen."""
curses.noecho()
curses.cbreak()
self._stdscr.keypad(1)
self._screen_create_command_window()
def _screen_create_command_window(self):
"""Create command window according to screen size."""
if self._command_window:
del self._command_window
self._command_window = curses.newwin(
self._command_textbox_height, self._max_x - len(self.CLI_PROMPT),
self._max_y - self._command_textbox_height, len(self.CLI_PROMPT))
def _screen_refresh(self):
self._stdscr.refresh()
def _screen_terminate(self):
"""Terminate the curses screen."""
self._stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
# Remove SIGINT handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
def run_ui(self, init_command=None, title=None, title_color=None):
"""Run the Curses CLI.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
Returns:
An exit token of arbitrary type. Can be None.
"""
self._screen_launch()
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
self._screen_terminate()
return exit_token
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
      help_intro: (list of str) Text lines appended to the beginning of the
        output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def get_help(self):
return self._command_handler_registry.get_help()
def _screen_create_command_textbox(self, existing_command):
"""Create command textbox on screen.
Args:
existing_command: (str) A command string to put in the textbox right
after its creation.
"""
# Display the tfdbg prompt.
self._stdscr.addstr(self._max_y - self._command_textbox_height, 0,
self.CLI_PROMPT, curses.A_BOLD)
self._stdscr.refresh()
self._command_window.clear()
# Command text box.
self._command_textbox = textpad.Textbox(
self._command_window, insert_mode=True)
# Enter existing command.
self._auto_key_in(existing_command)
def _ui_loop(self):
"""Command-line UI loop.
Returns:
An exit token of arbitrary type. The token can be None.
"""
while True:
# Enter history command if pointer is in history (> 0):
if self._command_pointer > 0:
existing_command = self._active_command_history[-self._command_pointer]
else:
existing_command = self._pending_command
self._screen_create_command_textbox(existing_command)
command, terminator, pending_command_changed = self._get_user_command()
if terminator in self.CLI_CR_KEYS:
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
elif terminator == self.CLI_TAB_KEY:
tab_completed = self._tab_complete(command)
self._pending_command = tab_completed
        self._command_pointer = 0
elif pending_command_changed:
self._pending_command = command
return
def _get_user_command(self):
"""Get user command from UI.
Returns:
command: (str) The user-entered command.
      terminator: (int) Terminator key code for the command.
        If the command is a normal command entered with the Enter key, the
        value will be the key itself. If this is a tab completion call (using
        the Tab key), the value will reflect that as well.
      pending_command_changed: (bool) Whether the pending command has changed.
        Used during command history navigation.
"""
# First, reset textbox state variables.
self._textbox_curr_terminator = None
self._textbox_pending_command_changed = False
command = self._screen_get_user_command()
command = self._strip_terminator(command)
return (command, self._textbox_curr_terminator,
self._textbox_pending_command_changed)
def _screen_get_user_command(self):
return self._command_textbox.edit(validate=self._on_textbox_keypress)
def _strip_terminator(self, command):
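    # Strip literal return characters from the gathered text. Key codes of
    # 256 and above (e.g., 343 for the num-pad Enter) have no single-character
    # form, so only codes below 256 are removed.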
for v in self.CLI_CR_KEYS:
if v < 256:
command = command.replace(chr(v), "")
return command.strip()
def _screen_refresh_size(self):
self._max_y, self._max_x = self._stdscr.getmaxyx()
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
if command:
self._command_history_store.add_command(command)
if (len(command) > len(self.REGEX_SEARCH_PREFIX) and
command.startswith(self.REGEX_SEARCH_PREFIX) and
self._curr_unwrapped_output):
# Regex search and highlighting in screen output.
regex = command[len(self.REGEX_SEARCH_PREFIX):]
# TODO(cais): Support scrolling to matches.
# TODO(cais): Display warning message on screen if no match.
self._display_output(self._curr_unwrapped_output, highlight_regex=regex)
self._command_pointer = 0
self._pending_command = ""
return
prefix, args = self._parse_command(command)
if not prefix:
# Empty command: take no action. Should not exit.
return
screen_info = {"cols": self._max_x}
exit_token = None
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=screen_info)
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
"ERROR: Invalid command prefix \"%s\"" % prefix
])
# Clear active command history. Until next up/down history navigation
# occurs, it will stay empty.
self._active_command_history = []
if exit_token is not None:
return exit_token
self._display_output(screen_output)
self._command_pointer = 0
self._pending_command = ""
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
"""
command = command.strip()
if not command:
return "", []
command_items = command_parser.parse_command(command)
return command_items[0], command_items[1:]
def _screen_gather_textbox_str(self):
"""Gather the text string in the command text box.
Returns:
(str) the current text string in the command textbox, excluding any
return keys.
"""
txt = self._command_textbox.gather()
return txt.strip()
def _on_textbox_keypress(self, x):
"""Text box key validator: Callback of key strokes.
Handles a user's keypress in the input text box. Translates certain keys to
terminator keys for the textbox to allow its edit() method to return.
Also handles special key-triggered events such as PgUp/PgDown scrolling of
the screen output.
Args:
x: (int) Key code.
Returns:
(int) A translated key code. In most cases, this is identical to the
input x. However, if x is a Return key, the return value will be
CLI_TERMINATOR_KEY, so that the text box's edit() method can return.
Raises:
TypeError: If the input x is not of type int.
"""
if not isinstance(x, int):
raise TypeError("Key validator expected type int, received type %s" %
type(x))
if x in self.CLI_CR_KEYS:
# Make Enter key the terminator
self._textbox_curr_terminator = x
return self.CLI_TERMINATOR_KEY
elif x == self.CLI_TAB_KEY:
self._textbox_curr_terminator = self.CLI_TAB_KEY
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_PPAGE:
self._scroll_output(self._SCROLL_UP)
return x
elif x == curses.KEY_NPAGE:
self._scroll_output(self._SCROLL_DOWN)
return x
elif x == curses.KEY_HOME:
self._scroll_output(self._SCROLL_HOME)
return x
elif x == curses.KEY_END:
self._scroll_output(self._SCROLL_END)
return x
elif x in [curses.KEY_UP, curses.KEY_DOWN]:
# Command history navigation.
if not self._active_command_history:
hist_prefix = self._screen_gather_textbox_str()
self._active_command_history = (
self._command_history_store.lookup_prefix(
hist_prefix, self._command_history_limit))
if self._active_command_history:
if x == curses.KEY_UP:
if self._command_pointer < len(self._active_command_history):
self._command_pointer += 1
elif x == curses.KEY_DOWN:
if self._command_pointer > 0:
self._command_pointer -= 1
else:
self._command_pointer = 0
self._textbox_curr_terminator = x
# Force return from the textbox edit(), so that the textbox can be
# redrawn with a history command entered.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_RESIZE:
# Respond to terminal resize.
self._screen_refresh_size()
self._init_layout()
self._screen_create_command_window()
if self._curr_unwrapped_output is not None:
# Force render screen output again, under new screen size.
self._output_pad = self._display_output(
self._curr_unwrapped_output, is_refresh=True)
# Force return from the textbox edit(), so that the textbox can be
# redrawn.
return self.CLI_TERMINATOR_KEY
else:
# Mark the pending command as modified.
self._textbox_pending_command_changed = True
# Invalidate active command history.
self._command_pointer = 0
self._active_command_history = []
return x
def _title(self, title, title_color=None):
"""Display title.
Args:
title: (str) The title to display.
title_color: (str) Color of the title, e.g., "yellow".
"""
# Pad input title str with "-" and space characters to make it pretty.
self._title_line = "--- %s " % title
if len(self._title_line) < self._max_x:
self._title_line += "-" * (self._max_x - len(self._title_line))
self._screen_draw_text_line(
self._title_row, self._title_line, color=title_color)
def _auto_key_in(self, command):
"""Automatically key in a command to the command Textbox.
Args:
command: The command, as a string.
"""
for c in command:
self._command_textbox.do_command(ord(c))
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
"""Render a line of text on the screen.
Args:
row: (int) Row index.
line: (str) The line content.
attr: curses font attribute.
color: (str) font foreground color name.
Raises:
TypeError: If row is not of type int.
"""
if not isinstance(row, int):
raise TypeError("Invalid type in row")
if len(line) > self._max_x:
line = line[:self._max_x]
if color is None:
self._stdscr.addstr(row, 0, line, attr)
else:
self._stdscr.addstr(row, 0, line, self._color_pairs[color])
self._screen_refresh()
def _screen_new_output_pad(self, rows, cols):
"""Generate a new pad on the screen.
Args:
rows: (int) Number of rows the pad will have: not limited to screen size.
cols: (int) Number of columns the pad will have: not limited to screen
size.
Returns:
A curses textpad object.
"""
return curses.newpad(rows, cols)
def _display_output(self, output, is_refresh=False, highlight_regex=None):
"""Display text output in a scrollable text pad.
Args:
output: A RichTextLines object that is the screen output text.
is_refresh: (bool) Is this a refreshing display with existing output.
highlight_regex: (str) Optional string representing the regex used to
search and highlight in the current screen output.
"""
if highlight_regex:
output = debugger_cli_common.regex_find(
output, highlight_regex, font_attr=self._search_highlight_font_attr)
else:
self._curr_unwrapped_output = output
self._curr_wrapped_output = debugger_cli_common.wrap_rich_text_lines(
output, self._max_x - 1)
# Limit number of lines displayed to avoid curses overflow problems.
if self._curr_wrapped_output.num_lines() > self.max_output_lines:
self._curr_wrapped_output = self._curr_wrapped_output.slice(
0, self.max_output_lines)
self._curr_wrapped_output.lines.append("Output cut off at %d lines!" %
self.max_output_lines)
self._curr_wrapped_output.font_attr_segs[self.max_output_lines] = [
(0, len(output.lines[-1]), "magenta")
]
(self._output_pad, self._output_pad_height,
self._output_pad_width) = self._display_lines(self._curr_wrapped_output,
self._output_num_rows)
# Size of view port on screen, which is always smaller or equal to the
# screen size.
self._output_pad_screen_height = self._output_num_rows - 1
self._output_pad_screen_width = self._max_x - 1
self._output_pad_screen_location = self.rectangle(
top=self._output_top_row,
left=0,
bottom=self._output_top_row + self._output_num_rows,
right=self._output_pad_screen_width)
if is_refresh:
self._scroll_output(self._SCROLL_REFRESH)
else:
self._output_pad_row = 0
self._scroll_output(self._SCROLL_HOME)
def _display_lines(self, output, min_num_rows):
"""Display RichTextLines object on screen.
Args:
output: A RichTextLines object.
min_num_rows: (int) Minimum number of output rows.
Returns:
1) The text pad object used to display the text.
2) (int) number of rows of the text pad, which may exceed screen size.
3) (int) number of columns of the text pad.
Raises:
ValueError: If input argument "output" is invalid.
"""
if not isinstance(output, debugger_cli_common.RichTextLines):
raise ValueError(
"Output is required to be an instance of RichTextLines, but is not.")
self._screen_refresh()
# Number of rows the output area will have.
rows = max(min_num_rows, len(output.lines))
# Size of the output pad, which may exceed screen size and require
# scrolling.
cols = self._max_x - 1
# Create new output pad.
pad = self._screen_new_output_pad(rows, cols)
for i in xrange(len(output.lines)):
if i in output.font_attr_segs:
self._screen_add_line_to_output_pad(
pad, i, output.lines[i], color_segments=output.font_attr_segs[i])
else:
self._screen_add_line_to_output_pad(pad, i, output.lines[i])
return pad, rows, cols
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
"""Render a line in a text pad.
Assumes: segments in color_segments are sorted in ascending order of the
beginning index.
    Note: Gaps between the segments are allowed and will be filled in with a
default color.
Args:
pad: The text pad to render the line in.
row: Row index, as an int.
txt: The text to be displayed on the specified row, as a str.
color_segments: A list of 3-tuples. Each tuple represents the beginning
and the end of a color segment, in the form of a right-open interval:
[start, end). The last element of the tuple is a color string, e.g.,
"red".
    Raises:
TypeError: If color_segments is not of type list.
"""
if not color_segments:
pad.addstr(row, 0, txt, self._default_color_pair)
return
if not isinstance(color_segments, list):
raise TypeError("Input color_segments needs to be a list, but is not.")
all_segments = []
all_color_pairs = []
# Process the beginning.
if color_segments[0][0] == 0:
pass
else:
all_segments.append((0, color_segments[0][0]))
all_color_pairs.append(self._default_color_pair)
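    # Walk the segments pairwise (with a sentinel segment starting at
    # len(txt)) so that any gap before the next segment can be filled with
    # the default color.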
for (curr_start, curr_end, curr_color), (next_start, _, _) in zip(
color_segments, color_segments[1:] + [(len(txt), None, None)]):
all_segments.append((curr_start, curr_end))
all_color_pairs.append(
self._color_pairs.get(curr_color, self._default_color_pair))
if curr_end < next_start:
# Fill in the gap with the default color.
all_segments.append((curr_end, next_start))
all_color_pairs.append(self._default_color_pair)
# Finally, draw all the segments.
for segment, color_pair in zip(all_segments, all_color_pairs):
pad.addstr(row, segment[0], txt[segment[0]:segment[1]], color_pair)
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pad.refresh(viewport_top, viewport_left, screen_location_top,
screen_location_left, screen_location_bottom,
screen_location_right)
def _scroll_output(self, direction):
"""Scroll the output pad.
Args:
direction: _SCROLL_REFRESH, _SCROLL_UP, _SCROLL_DOWN, _SCROLL_HOME or
_SCROLL_END
Raises:
ValueError: On invalid scroll direction.
"""
if not self._output_pad:
# No output pad is present. Do nothing.
return
if direction == self._SCROLL_REFRESH:
pass
elif direction == self._SCROLL_UP:
# Scroll up
if self._output_pad_row - 1 >= 0:
self._output_pad_row -= 1
elif direction == self._SCROLL_DOWN:
# Scroll down
if self._output_pad_row + 1 < (
self._output_pad_height - self._output_pad_screen_height):
self._output_pad_row += 1
elif direction == self._SCROLL_HOME:
# Scroll to top
self._output_pad_row = 0
elif direction == self._SCROLL_END:
# Scroll to bottom
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
else:
raise ValueError("Unsupported scroll mode: %s" % direction)
# Actually scroll the output pad: refresh with new location.
self._screen_scroll_output_pad(self._output_pad, self._output_pad_row, 0,
self._output_pad_screen_location.top,
self._output_pad_screen_location.left,
self._output_pad_screen_location.bottom,
self._output_pad_screen_location.right)
if self._output_pad_height > self._output_pad_screen_height + 1:
# Display information about the scrolling of tall screen output.
self._scroll_info = "--- Scroll: %.2f%% " % (100.0 * (
float(self._output_pad_row) /
(self._output_pad_height - self._output_pad_screen_height - 1)))
if len(self._scroll_info) < self._max_x:
self._scroll_info += "-" * (self._max_x - len(self._scroll_info))
self._screen_draw_text_line(
self._output_scroll_row, self._scroll_info, color="green")
else:
# Screen output is not tall enough to cause scrolling.
self._scroll_info = "-" * self._max_x
self._screen_draw_text_line(
self._output_scroll_row, self._scroll_info, color="green")
def _tab_complete(self, command_str):
"""Perform tab completion.
Obtains tab completion candidates.
If there are no candidates, return command_str and take no other actions.
If there are candidates, display the candidates on screen and return
command_str + (common prefix of the candidates).
Args:
command_str: (str) The str in the command input textbox when Tab key is
hit.
Returns:
(str) Completed string. Could be the same as command_str if no completion
candidate is available. If candidate(s) are available, return command_str
appended by the common prefix of the candidates.
"""
command_str = command_str.lstrip()
if not command_str:
# Empty (top-level) context.
context = ""
prefix = ""
items = []
else:
items = command_str.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
else:
# Multiple words.
context = items[0]
prefix = items[-1]
candidates, common_prefix = self._tab_completion_registry.get_completions(
context, prefix)
if candidates and len(candidates) > 1:
self._display_candidates(candidates)
else:
# In the case of len(candidates) == 1, the single completion will be
# entered to the textbox automatically. So there is no need to show any
# candidates.
self._display_candidates([])
if common_prefix:
# Common prefix is not None and non-empty. The completed string will
# incorporate the common prefix.
return " ".join(items[:-1] + [common_prefix])
else:
return " ".join(items)
def _display_candidates(self, candidates):
"""Show candidates (e.g., tab-completion candidates) on multiple lines.
Args:
candidates: (list of str) candidates.
"""
if self._curr_unwrapped_output:
# Force refresh screen output.
self._scroll_output(self._SCROLL_REFRESH)
if not candidates:
return
candidates_prefix = "Candidates: "
candidates_line = candidates_prefix + " ".join(candidates)
candidates_output = debugger_cli_common.RichTextLines(
candidates_line,
font_attr_segs={
0: [(len(candidates_prefix), len(candidates_line), "yellow")]
})
candidates_output = debugger_cli_common.wrap_rich_text_lines(
candidates_output, self._max_x - 1)
# Calculate how many lines the candidate text should occupy. Limit it to
# a maximum value.
candidates_num_rows = min(
len(candidates_output.lines), self._candidates_max_lines)
self._candidates_top_row = (
self._candidates_bottom_row - candidates_num_rows + 1)
# Render the candidate text on screen.
pad, _, _ = self._display_lines(candidates_output, 0)
self._screen_scroll_output_pad(
pad, 0, 0, self._candidates_top_row, 0,
self._candidates_top_row + candidates_num_rows - 1, self._max_x - 1)
def _interrupt_handler(self, signal_num, frame):
_ = signal_num # Unused.
_ = frame # Unused.
self._screen_terminate()
print("\ntfdbg: caught SIGINT; calling sys.exit(1).", file=sys.stderr)
sys.exit(1)
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from datetime import datetime
import itertools
import netaddr
from neutron_lib.agent import topics
from neutron_lib.api.definitions import portbindings_extended as pb_ext
from neutron_lib.callbacks import events as callback_events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources as callback_resources
from neutron_lib import constants
from neutron_lib.plugins import utils
from neutron_lib import rpc as lib_rpc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from neutron.agent import resource_cache
from neutron.api.rpc.callbacks import resources
from neutron import objects
LOG = logging.getLogger(__name__)
BINDING_DEACTIVATE = 'binding_deactivate'
DeviceInfo = collections.namedtuple('DeviceInfo', 'mac pci_slot')
def create_consumers(endpoints, prefix, topic_details, start_listening=True):
"""Create agent RPC consumers.
:param endpoints: The list of endpoints to process the incoming messages.
:param prefix: Common prefix for the plugin/agent message queues.
:param topic_details: A list of topics. Each topic has a name, an
operation, and an optional host param keying the
subscription to topic.host for plugin calls.
:param start_listening: if True, it starts the processing loop
:returns: A common Connection.
"""
connection = lib_rpc.Connection()
for details in topic_details:
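        # Each entry is either (topic, operation) or (topic, operation, host);
        # pad with None so both forms unpack into three names.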
topic, operation, node_name = itertools.islice(
itertools.chain(details, [None]), 3)
topic_name = topics.get_topic_name(prefix, topic, operation)
connection.create_consumer(topic_name, endpoints, fanout=True)
if node_name:
node_topic_name = '%s.%s' % (topic_name, node_name)
connection.create_consumer(node_topic_name,
endpoints,
fanout=False)
if start_listening:
connection.consume_in_threads()
return connection
class PluginReportStateAPI(object):
"""RPC client used to report state back to plugin.
This class implements the client side of an rpc interface. The server side
can be found in neutron.db.agents_db.AgentExtRpcCallback. For more
information on changing rpc interfaces, see
doc/source/contributor/internals/rpc_api.rst.
"""
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.2',
namespace=constants.RPC_NAMESPACE_STATE)
self.client = lib_rpc.get_client(target)
self.timeout = cfg.CONF.AGENT.report_interval
def has_alive_neutron_server(self, context, **kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'has_alive_neutron_server', **kwargs)
def report_state(self, context, agent_state, use_call=False):
cctxt = self.client.prepare(timeout=self.timeout)
# add unique identifier to a report
# that can be logged on server side.
        # This creates a visible correspondence between events on
# the agent and on the server
agent_state['uuid'] = uuidutils.generate_uuid()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT),
}
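        # 'call' blocks until the server acknowledges the report, while
        # 'cast' is fire-and-forget.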
method = cctxt.call if use_call else cctxt.cast
return method(context, 'report_state', **kwargs)
class PluginApi(object):
'''Agent side of the rpc API.
API version history:
1.0 - Initial version.
1.3 - get_device_details rpc signature upgrade to obtain 'host' and
return value to include fixed_ips and device_owner for
the device port
1.4 - tunnel_sync rpc signature upgrade to obtain 'host'
1.5 - Support update_device_list and
get_devices_details_list_and_failed_devices
1.6 - Support get_network_details
1.7 - Support get_ports_by_vnic_type_and_host
1.8 - Rename agent_restarted to refresh_tunnels in
update_device_list to reflect its expanded purpose
        1.9 - Support for device definition as DeviceInfo(mac, pci_slot) for:
- get_device_details
- get_devices_details_list (indirectly, calls get_device_details)
- update_device_down
- update_device_up
- update_device_list (indirectly, called from update_device_down
and update_device_up)
'''
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.9')
self.client = lib_rpc.get_client(target)
def get_device_details(self, context, device, agent_id, host=None):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(context, 'get_device_details', device=device,
agent_id=agent_id, host=host)
def get_devices_details_list(self, context, devices, agent_id, host=None):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(context, 'get_devices_details_list',
devices=devices, agent_id=agent_id, host=host)
def get_devices_details_list_and_failed_devices(self, context, devices,
agent_id, host=None,
**kwargs):
"""Get devices details and the list of devices that failed.
This method returns the devices details. If an error is thrown when
retrieving the devices details, the device is put in a list of
failed devices.
"""
cctxt = self.client.prepare(version='1.5')
return cctxt.call(
context,
'get_devices_details_list_and_failed_devices',
devices=devices, agent_id=agent_id, host=host)
def get_network_details(self, context, network, agent_id, host=None):
cctxt = self.client.prepare(version='1.6')
return cctxt.call(context, 'get_network_details', network=network,
agent_id=agent_id, host=host)
def update_device_down(self, context, device, agent_id, host=None):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(context, 'update_device_down', device=device,
agent_id=agent_id, host=host)
def update_device_up(self, context, device, agent_id, host=None):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(context, 'update_device_up', device=device,
agent_id=agent_id, host=host)
def update_device_list(self, context, devices_up, devices_down,
agent_id, host, refresh_tunnels=False):
cctxt = self.client.prepare(version='1.9')
ret_devices_up = []
failed_devices_up = []
ret_devices_down = []
failed_devices_down = []
step = cfg.CONF.rpc_resources_processing_step
devices_up = list(devices_up)
devices_down = list(devices_down)
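        # Process the devices in slices of 'rpc_resources_processing_step' to
        # keep each RPC call small enough to finish before the timeout; the
        # partial results are merged below.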
for i in range(0, max(len(devices_up), len(devices_down)), step):
# Divide-and-conquer RPC timeout
ret = cctxt.call(context, 'update_device_list',
devices_up=devices_up[i:i + step],
devices_down=devices_down[i:i + step],
agent_id=agent_id, host=host,
refresh_tunnels=refresh_tunnels)
ret_devices_up.extend(ret.get("devices_up", []))
failed_devices_up.extend(ret.get("failed_devices_up", []))
ret_devices_down.extend(ret.get("devices_down", []))
failed_devices_down.extend(ret.get("failed_devices_down", []))
return {'devices_up': ret_devices_up,
'failed_devices_up': failed_devices_up,
'devices_down': ret_devices_down,
'failed_devices_down': failed_devices_down}
def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None):
cctxt = self.client.prepare(version='1.4')
return cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type, host=host)
def get_ports_by_vnic_type_and_host(self, context, vnic_type, host):
cctxt = self.client.prepare(version='1.7')
return cctxt.call(context, 'get_ports_by_vnic_type_and_host',
vnic_type=vnic_type, host=host)
class CacheBackedPluginApi(PluginApi):
RESOURCE_TYPES = [resources.PORT,
resources.SECURITYGROUP,
resources.SECURITYGROUPRULE,
resources.NETWORK,
resources.SUBNET,
resources.ADDRESSGROUP]
def __init__(self, *args, **kwargs):
super(CacheBackedPluginApi, self).__init__(*args, **kwargs)
self.remote_resource_cache = None
self._create_cache_for_l2_agent()
def register_legacy_notification_callbacks(self, legacy_interface):
"""Emulates the server-side notifications from ml2 AgentNotifierApi.
legacy_interface is an object with 'delete'/'update' methods for
core resources.
"""
self._legacy_interface = legacy_interface
for e in (callback_events.AFTER_UPDATE, callback_events.AFTER_DELETE):
for r in (resources.PORT, resources.NETWORK):
registry.subscribe(self._legacy_notifier, r, e)
def _legacy_notifier(self, rtype, event, trigger, payload):
"""Checks if legacy interface is expecting calls for resource.
        It looks for port_update, network_delete, etc. and calls them with
the payloads the handlers are expecting (an ID).
"""
context = payload.context
resource_id = payload.resource_id
        rtype = rtype.lower()  # legacy handlers do not use camel case
agent_restarted = payload.metadata.pop("agent_restarted", None)
method, host_with_activation, host_with_deactivation = (
self._get_method_host(rtype, event, payload))
if not hasattr(self._legacy_interface, method):
# TODO(kevinbenton): once these notifications are stable, emit
# a deprecation warning for legacy handlers
return
# If there is a binding deactivation, we must also notify the
# corresponding activation
if method == BINDING_DEACTIVATE:
self._legacy_interface.binding_deactivate(
context, port_id=resource_id, host=host_with_deactivation)
self._legacy_interface.binding_activate(
context, port_id=resource_id, host=host_with_activation)
else:
payload = {rtype: {'id': resource_id},
'%s_id' % rtype: resource_id}
if method == "port_update" and agent_restarted is not None:
# Mark ovs-agent restart for local port_update
payload["agent_restarted"] = agent_restarted
getattr(self._legacy_interface, method)(context, **payload)
def _get_method_host(self, rtype, event, payload):
"""Constructs the name of method to be called in the legacy interface.
If the event received is a port update that contains a binding
activation where a previous binding is deactivated, the method name
is 'binding_deactivate' and the host where the binding has to be
deactivated is returned. Otherwise, the method name is constructed from
rtype and the event received and the host is None.
"""
is_delete = event == callback_events.AFTER_DELETE
suffix = 'delete' if is_delete else 'update'
method = "%s_%s" % (rtype, suffix)
host_with_activation = None
host_with_deactivation = None
if is_delete or rtype != callback_resources.PORT:
return method, host_with_activation, host_with_deactivation
# A port update was received. Find out if it is a binding activation
# where a previous binding was deactivated
BINDINGS = pb_ext.COLLECTION_NAME
changed_fields = payload.metadata['changed_fields']
if BINDINGS in changed_fields:
existing_active_binding = (
utils.get_port_binding_by_status_and_host(
getattr(payload.states[0], 'bindings', []),
constants.ACTIVE))
updated_active_binding = (
utils.get_port_binding_by_status_and_host(
getattr(payload.latest_state, 'bindings', []),
constants.ACTIVE))
if (existing_active_binding and updated_active_binding and
existing_active_binding.host !=
updated_active_binding.host):
if (utils.get_port_binding_by_status_and_host(
getattr(payload.latest_state, 'bindings', []),
constants.INACTIVE,
host=existing_active_binding.host)):
method = BINDING_DEACTIVATE
host_with_activation = updated_active_binding.host
host_with_deactivation = existing_active_binding.host
return method, host_with_activation, host_with_deactivation
def get_devices_details_list_and_failed_devices(self, context, devices,
agent_id, host=None,
agent_restarted=False):
result = {'devices': [], 'failed_devices': []}
for device in devices:
try:
result['devices'].append(
self.get_device_details(context, device, agent_id, host,
agent_restarted))
except Exception:
LOG.exception("Failed to get details for device %s", device)
result['failed_devices'].append(device)
return result
def get_device_details(self, context, device, agent_id, host=None,
agent_restarted=False):
port_obj = self.remote_resource_cache.get_resource_by_id(
resources.PORT, device, agent_restarted)
if not port_obj:
LOG.debug("Device %s does not exist in cache.", device)
return {'device': device}
if not port_obj.binding_levels:
LOG.warning("Device %s is not bound.", port_obj)
return {'device': device}
segment = port_obj.binding_levels[-1].segment
if not segment:
LOG.debug("Device %s is not bound to any segment.", port_obj)
return {'device': device}
binding = utils.get_port_binding_by_status_and_host(
port_obj.bindings, constants.ACTIVE, raise_if_not_found=True,
port_id=port_obj.id)
migrating_to = migrating_to_host(port_obj.bindings)
if (not (migrating_to and cfg.CONF.nova.live_migration_events) and
port_obj.device_owner.startswith(
constants.DEVICE_OWNER_COMPUTE_PREFIX) and
binding[pb_ext.HOST] != host):
LOG.debug("Device %s has no active binding in this host",
port_obj)
return {'device': device,
constants.NO_ACTIVE_BINDING: True}
net = self.remote_resource_cache.get_resource_by_id(
resources.NETWORK, port_obj.network_id)
net_qos_policy_id = net.qos_policy_id
# match format of old RPC interface
mac_addr = str(netaddr.EUI(str(port_obj.mac_address),
dialect=netaddr.mac_unix_expanded))
entry = {
'device': device,
'device_id': port_obj.device_id,
'network_id': port_obj.network_id,
'port_id': port_obj.id,
'mac_address': mac_addr,
'admin_state_up': port_obj.admin_state_up,
'status': port_obj.status,
'network_type': segment.network_type,
'segmentation_id': segment.segmentation_id,
'physical_network': segment.physical_network,
'fixed_ips': [{'subnet_id': o.subnet_id,
'ip_address': str(o.ip_address)}
for o in port_obj.fixed_ips],
'device_owner': port_obj.device_owner,
'allowed_address_pairs': [{'mac_address': o.mac_address,
'ip_address': o.ip_address}
for o in port_obj.allowed_address_pairs],
'port_security_enabled': getattr(port_obj.security,
'port_security_enabled', True),
'qos_policy_id': port_obj.qos_policy_id,
'network_qos_policy_id': net_qos_policy_id,
'profile': binding.profile,
'vif_type': binding.vif_type,
'vnic_type': binding.vnic_type,
'security_groups': list(port_obj.security_group_ids),
'migrating_to': migrating_to,
}
LOG.debug("Returning: %s", entry)
return entry
def get_devices_details_list(self, context, devices, agent_id, host=None):
return [self.get_device_details(context, device, agent_id, host)
for device in devices]
def _create_cache_for_l2_agent(self):
"""Create a push-notifications cache for L2 agent related resources."""
objects.register_objects()
rcache = resource_cache.RemoteResourceCache(self.RESOURCE_TYPES)
rcache.start_watcher()
self.remote_resource_cache = rcache
# TODO(ralonsoh): move this method to neutron_lib.plugins.utils
def migrating_to_host(bindings, host=None):
"""Return the host the port is being migrated.
If the host is passed, the port binding profile with the "migrating_to",
that contains the host the port is being migrated, is compared to this
value. If no value is passed, this method will return if the port is
being migrated ("migrating_to" is present in any port binding profile).
The function returns None or the matching host.
"""
for binding in (binding for binding in bindings if
binding[pb_ext.STATUS] == constants.ACTIVE):
profile = binding.get('profile')
if not profile:
continue
profile = (jsonutils.loads(profile) if isinstance(profile, str) else
profile)
migrating_to = profile.get('migrating_to')
if migrating_to:
if not host: # Just know if the port is being migrated.
return migrating_to
if migrating_to == host:
return migrating_to
return None
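# Illustrative sketch (not part of the original module): how migrating_to_host
# reads port bindings. The dicts below are hand-built stand-ins for the binding
# objects the agent normally receives, and "dest-host"/"other-host" are
# hypothetical host names used only for this example.
def _example_migrating_to_host():
    bindings = [
        # Active binding whose profile marks an in-flight live migration.
        {pb_ext.STATUS: constants.ACTIVE,
         'profile': '{"migrating_to": "dest-host"}'},
        # Inactive binding; ignored because only ACTIVE bindings are inspected.
        {pb_ext.STATUS: constants.INACTIVE, 'profile': ''},
    ]
    assert migrating_to_host(bindings) == 'dest-host'
    assert migrating_to_host(bindings, host='dest-host') == 'dest-host'
    assert migrating_to_host(bindings, host='other-host') is None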
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras generic Python utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.frozen_keras import regularizers
from tensorflow.python.frozen_keras.utils import generic_utils
from tensorflow.python.platform import test
class HasArgTest(test.TestCase):
def test_has_arg(self):
def f_x(x):
return x
def f_x_args(x, *args):
_ = args
return x
def f_x_kwargs(x, **kwargs):
_ = kwargs
return x
self.assertTrue(generic_utils.has_arg(
f_x, 'x', accept_all=False))
self.assertFalse(generic_utils.has_arg(
f_x, 'y', accept_all=False))
self.assertTrue(generic_utils.has_arg(
f_x_args, 'x', accept_all=False))
self.assertFalse(generic_utils.has_arg(
f_x_args, 'y', accept_all=False))
self.assertTrue(generic_utils.has_arg(
f_x_kwargs, 'x', accept_all=False))
self.assertFalse(generic_utils.has_arg(
f_x_kwargs, 'y', accept_all=False))
self.assertTrue(generic_utils.has_arg(
f_x_kwargs, 'y', accept_all=True))
class TestCustomObjectScope(test.TestCase):
def test_custom_object_scope(self):
def custom_fn():
pass
class CustomClass(object):
pass
with generic_utils.custom_object_scope(
{'CustomClass': CustomClass, 'custom_fn': custom_fn}):
# Disable activation test since it's not under the frozen_keras package.
# act = keras.activations.get('custom_fn')
# self.assertEqual(act, custom_fn)
cl = regularizers.get('CustomClass')
self.assertEqual(cl.__class__, CustomClass)
class SerializeKerasObjectTest(test.TestCase):
def test_serialize_none(self):
serialized = generic_utils.serialize_keras_object(None)
self.assertEqual(serialized, None)
deserialized = generic_utils.deserialize_keras_object(
serialized)
self.assertEqual(deserialized, None)
def test_serialize_custom_class_with_default_name(self):
@generic_utils.register_keras_serializable()
class TestClass(object):
def __init__(self, value):
self._value = value
def get_config(self):
return {'value': self._value}
serialized_name = 'Custom>TestClass'
inst = TestClass(value=10)
class_name = generic_utils._GLOBAL_CUSTOM_NAMES[TestClass]
self.assertEqual(serialized_name, class_name)
config = generic_utils.serialize_keras_object(inst)
self.assertEqual(class_name, config['class_name'])
new_inst = generic_utils.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, TestClass)
self.assertEqual(10, new_inst._value)
# Make sure registering a new class with the same name will fail.
with self.assertRaisesRegex(ValueError, '.*has already been registered.*'):
@generic_utils.register_keras_serializable() # pylint: disable=function-redefined
class TestClass(object):
def __init__(self, value):
self._value = value
def get_config(self):
return {'value': self._value}
def test_serialize_custom_class_with_custom_name(self):
@generic_utils.register_keras_serializable(
'TestPackage', 'CustomName')
class OtherTestClass(object):
def __init__(self, val):
self._val = val
def get_config(self):
return {'val': self._val}
serialized_name = 'TestPackage>CustomName'
inst = OtherTestClass(val=5)
class_name = generic_utils._GLOBAL_CUSTOM_NAMES[OtherTestClass]
self.assertEqual(serialized_name, class_name)
fn_class_name = generic_utils.get_registered_name(
OtherTestClass)
self.assertEqual(fn_class_name, class_name)
cls = generic_utils.get_registered_object(fn_class_name)
self.assertEqual(OtherTestClass, cls)
config = generic_utils.serialize_keras_object(inst)
self.assertEqual(class_name, config['class_name'])
new_inst = generic_utils.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, OtherTestClass)
self.assertEqual(5, new_inst._val)
def test_serialize_custom_function(self):
@generic_utils.register_keras_serializable()
def my_fn():
return 42
serialized_name = 'Custom>my_fn'
class_name = generic_utils._GLOBAL_CUSTOM_NAMES[my_fn]
self.assertEqual(serialized_name, class_name)
fn_class_name = generic_utils.get_registered_name(my_fn)
self.assertEqual(fn_class_name, class_name)
config = generic_utils.serialize_keras_object(my_fn)
self.assertEqual(class_name, config)
fn = generic_utils.deserialize_keras_object(config)
self.assertEqual(42, fn())
fn_2 = generic_utils.get_registered_object(fn_class_name)
self.assertEqual(42, fn_2())
def test_serialize_custom_class_without_get_config_fails(self):
with self.assertRaisesRegex(
ValueError, 'Cannot register a class that does '
'not have a get_config.*'):
@generic_utils.register_keras_serializable( # pylint: disable=unused-variable
'TestPackage', 'TestClass')
class TestClass(object):
def __init__(self, value):
self._value = value
def test_serializable_object(self):
class SerializableInt(int):
"""A serializable object to pass out of a test layer's config."""
def __new__(cls, value):
return int.__new__(cls, value)
def get_config(self):
return {'value': int(self)}
@classmethod
def from_config(cls, config):
return cls(**config)
layer = keras.layers.Dense(
SerializableInt(3),
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config, custom_objects={'SerializableInt': SerializableInt})
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
self.assertEqual(new_layer.units.__class__, SerializableInt)
self.assertEqual(new_layer.units, 3)
def test_nested_serializable_object(self):
class SerializableInt(int):
"""A serializable object to pass out of a test layer's config."""
def __new__(cls, value):
return int.__new__(cls, value)
def get_config(self):
return {'value': int(self)}
@classmethod
def from_config(cls, config):
return cls(**config)
class SerializableNestedInt(int):
"""A serializable object containing another serializable object."""
def __new__(cls, value, int_obj):
obj = int.__new__(cls, value)
obj.int_obj = int_obj
return obj
def get_config(self):
return {'value': int(self), 'int_obj': self.int_obj}
@classmethod
def from_config(cls, config):
return cls(**config)
nested_int = SerializableInt(4)
layer = keras.layers.Dense(
SerializableNestedInt(3, nested_int),
name='SerializableNestedInt',
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config,
custom_objects={
'SerializableInt': SerializableInt,
'SerializableNestedInt': SerializableNestedInt
})
# Make sure the string field doesn't get converted to a custom object, even
# though they have the same value.
self.assertEqual(new_layer.name, 'SerializableNestedInt')
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
self.assertEqual(new_layer.units.__class__, SerializableNestedInt)
self.assertEqual(new_layer.units, 3)
self.assertEqual(new_layer.units.int_obj.__class__, SerializableInt)
self.assertEqual(new_layer.units.int_obj, 4)
def test_nested_serializable_fn(self):
def serializable_fn(x):
"""A serializable function to pass out of a test layer's config."""
return x
class SerializableNestedInt(int):
"""A serializable object containing a serializable function."""
def __new__(cls, value, fn):
obj = int.__new__(cls, value)
obj.fn = fn
return obj
def get_config(self):
return {'value': int(self), 'fn': self.fn}
@classmethod
def from_config(cls, config):
return cls(**config)
layer = keras.layers.Dense(
SerializableNestedInt(3, serializable_fn),
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config,
custom_objects={
'serializable_fn': serializable_fn,
'SerializableNestedInt': SerializableNestedInt
})
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertIsInstance(new_layer.bias_regularizer, keras.regularizers.L1L2)
self.assertIsInstance(new_layer.units, SerializableNestedInt)
self.assertEqual(new_layer.units, 3)
self.assertIs(new_layer.units.fn, serializable_fn)
class SliceArraysTest(test.TestCase):
def test_slice_arrays(self):
input_a = list([1, 2, 3])
self.assertEqual(
generic_utils.slice_arrays(input_a, start=0),
[None, None, None])
self.assertEqual(
generic_utils.slice_arrays(input_a, stop=3),
[None, None, None])
self.assertEqual(
generic_utils.slice_arrays(input_a, start=0, stop=1),
[None, None, None])
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script is for downloading and maintaining a local copy of all of the XML files of PLOS articles.
By default it doesn't rely on access to PLOS's internal network (but can use it if available).
Diagram of relevant systems here: https://confluence.plos.org/confluence/display/CXP/How+allofPLOS+works
Workflow:
Check whether list of DOI files is complete
* query Solr API for list of new articles (limited by date)
* create list of missing DOIs, by comparing against existing list of DOIs or file names
Update by downloading articles from journal pages if local store is not complete
Check for and download amended articles that have been issued amendments
Check for and download versions of record (VOR) for uncorrected proofs
Zip folder down, appending when able
Create log file for actions that can be referenced
TODO: add start date for beginning of time for article pubdates (2003-08-11), calculation for most recent pub date
"""
import argparse
import datetime
import errno
import gzip
import logging
import os
import shutil
import time
import tarfile
import zipfile
import lxml.etree as et
import requests
from tqdm import tqdm
from .. import get_corpus_dir, newarticledir, uncorrected_proofs_text_list
from ..plos_regex import validate_doi
from ..transformations import (BASE_URL_API, filename_to_doi, doi_to_path, doi_to_url)
from ..article import Article
from .gdrive import (download_file_from_google_drive, get_zip_metadata, unzip_articles,
ZIP_ID, LOCAL_ZIP, LOCAL_TEST_ZIP, TEST_ZIP_ID, min_files_for_valid_corpus)
help_str = "This program downloads a zip file with all PLOS articles and checks for updates"
# Making sure DS.Store not included as file
ignore_func = shutil.ignore_patterns('.DS_Store')
# Some example URLs that may be useful
EXAMPLE_SEARCH_URL = ('https://api.plos.org/search?q=*%3A*&fq=doc_type%3Afull&fl=id,'
'&wt=json&indent=true&fq=article_type:"research+article"+OR+article_type:"correction"+OR+'
'article_type:"meta-research+article"&sort=%20id%20asc&'
'fq=publication_date:%5B2017-03-05T00:00:00Z+TO+2017-03-19T23:59:59Z%5D&start=0&rows=1000')
# Starting out list of needed articles as empty
dois_needed_list = []
def listdir_nohidden(path, extension='.xml', include_dir=True):
"""
Make a list of all files of a given extension in a given directory
Option to include local path in filename
:param path: String with a path where to search files
:param extension: String with the extension that we are looking for, xml is the default value
:param include_dir: By default, include the directory in the filename
:return: A list with all the file names inside this directory, without the DS_Store file
"""
if include_dir:
file_list = [os.path.join(path, f) for f in os.listdir(path)
if f.endswith(extension) and 'DS_Store' not in f]
else:
file_list = [f for f in os.listdir(path) if f.endswith(extension) and
'DS_Store' not in f]
return file_list
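# Illustrative sketch (not part of the original module): list the article XML
# files in the current corpus directory, with and without the directory prefix.
# Assumes get_corpus_dir() points at an existing folder of downloaded articles.
def _example_listdir_nohidden():
    corpus = get_corpus_dir()
    full_paths = listdir_nohidden(corpus)
    names_only = listdir_nohidden(corpus, include_dir=False)
    return full_paths, names_only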
def extract_filenames(directory, extension='.xml'):
"""
Make a list of all files of a given extension in a given directory, without their extension
:param directory: String with the directory where to search files
:param extension: String with the extension that we are looking for, xml is the default value
:return: A list with all the file names inside this directory, excluding extensions
"""
# Note: str.rstrip() strips a character set, not a suffix, so slice off the
# extension length instead to avoid trimming trailing 'x', 'm', 'l' or '.'.
filenames = [os.path.basename(article_file)[:-len(extension)] for article_file in
             listdir_nohidden(directory, extension) if os.path.isfile(article_file)]
return filenames
def search_solr_records(days_ago=14, start=0, rows=1000, start_date=None, end_date=None, item='id'):
"""
Queries the solr database for a list of articles based on the date of publication
function defaults to querying by DOI (i.e., 'id')
TODO (on hold): if Solr XSLT is changed, query by revision_date instead of publication_date.
Then would be used in separate query to figure out updated articles to download
for full list of potential queries, see https://api.plos.org/solr/search-fields/
:param days_ago: An int value with the length of the queried date range, default is two weeks
:param start: An int value indicating the first row of results to return
:param rows: An int value indicating how many rows of results to return (from 0)
:param start_date: datetime object of earliest date in the queried range (defaults to None)
:param end_date: datetime object of latest date in the queried range (defaults to now)
:param item: Items to return/display. 'Id', the default, is the article DOI.
:return: A list of DOIs for articles published in this time period; by default, from the last two weeks
"""
if end_date is None:
end_date = datetime.datetime.now()
solr_search_results = []
if start_date is None:
earlier = datetime.timedelta(days=days_ago)
start_date = end_date - earlier
START_DATE = start_date.strftime("%Y-%m-%d")
END_DATE = end_date.strftime("%Y-%m-%d")
howmanyarticles_url_base = [BASE_URL_API,
'?q=*:*&fq=doc_type:full+-doi:image&fl=id,',
item,
'&wt=json&indent=true&sort=%20id%20asc&fq=publication_date:[',
START_DATE,
'T00:00:00Z+TO+',
END_DATE,
'T23:59:59Z]'
]
howmanyarticles_url = ''.join(howmanyarticles_url_base) + '&rows=1000'
# if include_uncorrected is False:
num_results = requests.get(howmanyarticles_url).json()["response"]["numFound"]
# Create solr_search_results & paginate through results
solr_search_results = []
while start < num_results:
query_url = ''.join(howmanyarticles_url_base) + '&start=' + str(start) + '&rows=' + str(rows)
article_search = requests.get(query_url).json()
solr_partial_results = [x[item] for x in article_search["response"]["docs"]]
solr_search_results.extend(solr_partial_results)
start = start + rows
if start + rows > num_results:
rows = num_results - start
print("URL for solr query:", howmanyarticles_url)
if solr_search_results:
print("{0} results returned from this search."
.format(len(solr_search_results)))
else:
print('No results returned for this search.')
return solr_search_results
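# Illustrative sketch (not part of the original module): query Solr for DOIs in
# an explicit date range instead of the default two-week window. The dates
# mirror EXAMPLE_SEARCH_URL above and require network access to api.plos.org.
def _example_search_solr_records():
    start = datetime.datetime(2017, 3, 5)
    end = datetime.datetime(2017, 3, 19)
    return search_solr_records(start_date=start, end_date=end, rows=500)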
def get_all_solr_dois():
"""
Get every article published by PLOS, up to 500,000, as indexed by Solr on api.plos.org.
URL includes regex to exclude sub-DOIs and image DOIs.
:return: list of DOIs for all PLOS articles
"""
solr_magic_url = ('https://api.plos.org/terms?terms.fl=id&terms.limit=500000&wt=json&indent=true&terms.regex='
'10%5C.1371%5C/(journal%5C.p%5Ba-zA-Z%5D%7B3%7D%5C.%5B%5Cd%5D%7B7%7D$%7Cannotation%5C/'
'%5Ba-zA-Z0-9%5D%7B8%7D-%5Ba-zA-Z0-9%5D%7B4%7D-%5Ba-zA-Z0-9%5D%7B4%7D-%5Ba-zA-Z0-9%5D'
'%7B4%7D-%5Ba-zA-Z0-9%5D%7B12%7D$)')
results = requests.get(solr_magic_url).json()
solr_dois = [id for id in results['terms']['id'] if isinstance(id, str)]
return solr_dois
def get_dois_needed_list(comparison_list=None, directory=None):
"""
Takes result of query from get_all_solr_dois and compares to local article directory.
:param comparison_list: Defaults to creating a full list of local article files.
:param directory: Directory of local article files; defaults to get_corpus_dir()
:return: A list of DOIs for articles that are not in the local article directory.
"""
if comparison_list is None:
comparison_list = get_all_solr_dois()
if directory is None:
directory = get_corpus_dir()
# Transform local files to DOIs
local_article_list = [filename_to_doi(article) for article in listdir_nohidden(directory, '.xml')]
dois_needed_list = list(set(comparison_list) - set(local_article_list))
if dois_needed_list:
print(len(dois_needed_list), "new articles to download.")
else:
print("No new articles found to add to Corpus folder.")
return dois_needed_list
def copytree(source, destination, symlinks=False, ignore=None):
"""
Copies all the files in one directory to another
:param source: Original directory of files
:param destination: Directory where files are copied to
:param symlinks: param from the shutil.copytree function
:param ignore: param from the shutil.copytree function; default is include all files
:return: None
"""
for item in listdir_nohidden(source, include_dir=False):
s = os.path.join(source, item)
d = os.path.join(destination, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def repo_download(dois, tempdir, ignore_existing=True):
"""
Downloads a list of articles by DOI from PLOS's journal pages to a temporary directory
Use in conjunction with get_dois_needed_list
:param dois: Iterable with DOIs for articles to obtain
:param tempdir: Temporary directory where files are copied to
:param ignore_existing: Don't re-download to tempdir if already downloaded
"""
# make temporary directory, if needed
try:
os.mkdir(tempdir)
except FileExistsError:
pass
if ignore_existing:
existing_articles = [filename_to_doi(f) for f in listdir_nohidden(tempdir)]
dois = set(dois) - set(existing_articles)
for doi in tqdm(sorted(dois), disable=None):
url = doi_to_url(doi)
article_path = doi_to_path(doi, directory=tempdir)
# create new local XML files
if not ignore_existing or not os.path.isfile(article_path):
response = requests.get(url, stream=True)
response.raise_for_status()
with open(article_path, 'wb') as f:
for block in response.iter_content(1024):
f.write(block)
print(len(listdir_nohidden(tempdir)), "new articles downloaded.")
logging.info(len(listdir_nohidden(tempdir)))
def move_articles(source, destination):
"""
Move articles from one folder to another
:param source: Temporary directory of new article files
:param destination: Directory where files are copied to
:return: None
"""
oldnum_destination = len(listdir_nohidden(destination))
oldnum_source = len(listdir_nohidden(source))
if oldnum_source > 0:
print('Corpus started with {0} articles.\n'
'Moving new and updated files...'.format(oldnum_destination))
copytree(source, destination, ignore=ignore_func)
newnum_destination = len(listdir_nohidden(destination))
print('{0} files moved. Corpus now has {1} articles.'
.format(oldnum_source, newnum_destination))
logging.info("New article files moved successfully")
else:
print("No files moved.")
logging.info("No article files moved")
# Delete temporary folder in most cases
if source == newarticledir:
shutil.rmtree(source)
def compare_article_pubdate(doi, days=22, directory=None):
"""
Check if an article's publication date was more than 3 weeks ago.
:param doi: doi of the article
:param days: how long ago to compare the publication date (default 22 days)
:param directory: directory the article file is located in (defaults to get_corpus_dir())
:return: boolean for whether the pubdate was older than the days value
"""
if directory is None:
directory = get_corpus_dir()
article = Article(doi, directory=directory)
try:
pubdate = article.pubdate
today = datetime.datetime.now()
three_wks_ago = datetime.timedelta(days)
compare_date = today - three_wks_ago
return pubdate < compare_date
except ValueError:
print("Pubdate error in {}".format(doi))
def download_xml(doi, tempdir=newarticledir):
"""For a given DOI, download its remote XML file to tempdir."""
art = Article(doi, directory=tempdir)
with open(art.filepath, 'w', encoding='utf8') as f:
f.write(art.get_remote_xml())
return art
def download_updated_xml(article_file,
tempdir=newarticledir):
"""
For an article file, compare local XML to remote XML
If they're different, download new version of article
:param article_file: the filename for a single article
:param tempdir: directory where files are downloaded to
:return: boolean for whether update was available & downloaded
"""
article = Article.from_filename(article_file)
try:
os.mkdir(tempdir)
except FileExistsError:
pass
articleXML_remote = article.get_remote_xml()
if not article_file.endswith('.xml'):
article_file += '.xml'
try:
articleXML_local = article.xml
except OSError:
article.directory = newarticledir
articleXML_local = article.xml
if articleXML_remote == articleXML_local:
updated = False
else:
download_xml(article.doi, tempdir=tempdir)
updated = True
return updated
def check_for_amended_articles(directory=newarticledir, article_list=None):
"""
For articles in the temporary download directory, check if article_type is an amendment
If amendment, surface the DOI of the article being amended
Use with `download_amended_articles`
For more information about the amendment type, see `amendment` in the Article class
:param directory: directory where the article files are, default is newarticledir
:param article_list: list of article files to check; defaults to all files in directory
:return: list of filenames to existing local files for articles issued an amendment
"""
amended_doi_list = []
if article_list is None:
article_list = listdir_nohidden(directory)
for article_file in article_list:
article = Article.from_filename(article_file)
article.directory = directory
if article.amendment:
amended_doi_list.extend(article.related_dois)
amended_article_list = [Article(doi).filename if Article(doi).local else
doi_to_path(doi, directory=directory) for doi in list(amended_doi_list)]
print(len(amended_article_list), 'amended articles found.')
return amended_article_list
def download_amended_articles(directory=None, tempdir=newarticledir, amended_article_list=None):
"""For a list of articles that have been amended, check if the xml was also updated.
Use with `check_for_amended_articles`
Many amendments don't result in XML changes
For more information about the amendment type, see `amendment` in the Article class
:param directory: directory where the article files are, default is the corpus directory
:param tempdir: directory where new articles are downloaded to
:param amended_article_list: list of amended article files; defaults to check_for_amended_articles(directory)
:return: list of DOIs for articles downloaded with new XML versions
"""
if directory is None:
directory = get_corpus_dir()
if amended_article_list is None:
amended_article_list = check_for_amended_articles(directory)
amended_updated_article_list = []
print("Checking amended articles...")
for article in tqdm(amended_article_list, disable=None):
updated = download_updated_xml(article)
if updated:
amended_updated_article_list.append(article)
print(len(amended_updated_article_list), 'amended articles downloaded with new xml.')
return amended_updated_article_list
def get_uncorrected_proofs(directory=None, proof_filepath=uncorrected_proofs_text_list):
"""
Loads the uncorrected proofs txt file.
Failing that, creates new txt file from scratch using directory.
:param directory: Directory containing the article files
:param proof_filepath: path to the text file listing uncorrected proof DOIs
:return: set of DOIs of uncorrected proofs from text list
"""
if directory is None:
directory = get_corpus_dir()
try:
with open(proof_filepath) as f:
uncorrected_proofs = set(f.read().splitlines())
except FileNotFoundError:
print("Creating new text list of uncorrected proofs from scratch.")
article_files = listdir_nohidden(directory)
uncorrected_proofs = set()
for article_file in tqdm(article_files, disable=None, miniters=int(len(article_files)/1000)):
article = Article.from_filename(article_file)
article.directory = directory
if article.proof == 'uncorrected_proof':
uncorrected_proofs.add(article.doi)
print("Saving uncorrected proofs.")
with open(proof_filepath, 'w') as f:
for item in tqdm(sorted(uncorrected_proofs), disable=None):
f.write("%s\n" % item)
return uncorrected_proofs
def check_for_uncorrected_proofs(directory=newarticledir, proof_filepath=uncorrected_proofs_text_list):
"""
For a list of articles, check whether they are the 'uncorrected proof' type
One of the checks on newly downloaded articles.
:param proof_filepath: path to the text file listing uncorrected proof DOIs
:param directory: Directory containing the article files
:return: set of all DOIs that are uncorrected proofs, including from main article directory
"""
# Read in uncorrected proofs from uncorrected_proofs_text_list txt file
# If uncorrected_proofs txt file doesn't exist, build that set from scratch from main article directory
uncorrected_proofs = get_uncorrected_proofs(proof_filepath=proof_filepath)
# Check directory for uncorrected proofs
# Append uncorrected proofs to running set
if directory is None:
directory = get_corpus_dir()
articles = listdir_nohidden(directory)
new_proofs = 0
for article_file in articles:
article = Article.from_filename(article_file)
article.directory = directory
if article.proof == 'uncorrected_proof':
uncorrected_proofs.add(article.doi)
new_proofs += 1
# Copy all uncorrected proofs from list to clean text file
with open(proof_filepath, 'w') as f:
for item in sorted(uncorrected_proofs):
f.write("%s\n" % item)
if uncorrected_proofs:
print("{} new uncorrected proofs found. {} total in set.".format(new_proofs, len(uncorrected_proofs)))
else:
print("No uncorrected proofs found in {} or in {}.".format(directory, proof_filepath))
return uncorrected_proofs
def check_for_vor_updates(uncorrected_list=None):
"""
For existing uncorrected proofs list,
check whether a vor is available to download
:param uncorrected_list: DOIs of uncorrected articles, default None
:return: List of articles from uncorrected_list for which Solr says there is a new VOR waiting
"""
# First get/make list of uncorrected proofs
if uncorrected_list is None:
uncorrected_list = list(get_uncorrected_proofs())
# Make it check a single article
if isinstance(uncorrected_list, str):
uncorrected_list = [uncorrected_list]
# Create article list chunks for Solr query no longer than 10 DOIs at a time
list_chunks = [uncorrected_list[x:x+10] for x in range(0, len(uncorrected_list), 10)]
vor_updates_available = []
for chunk in list_chunks:
article_solr_string = ' OR '.join(chunk)
# Get up to 10 article records from Solr
# Filtered for publication_stage = vor-update-to-uncorrected-proof
VOR_check_url_base = [BASE_URL_API,
'?q=id:(',
article_solr_string,
')&fq=publication_stage:vor-update-to-uncorrected-proof&',
'fl=publication_stage,+id&wt=json&indent=true']
VOR_check_url = ''.join(VOR_check_url_base)
vor_check = requests.get(VOR_check_url).json()['response']['docs']
vor_chunk_results = [x['id'] for x in vor_check]
vor_updates_available.extend(vor_chunk_results)
if vor_updates_available:
print(len(vor_updates_available), "new VOR updates indexed in Solr.")
logging.info("VOR updates to download.")
else:
print("No new VOR articles indexed in Solr.")
logging.info("No new VOR articles in Solr")
return vor_updates_available
def download_vor_updates(directory=None, tempdir=newarticledir,
vor_updates_available=None):
"""
For existing uncorrected proofs list, check whether a vor is available to download
Used in conjunction w/check_for_vor_updates
Main method doesn't really work because vor updates aren't always indexed properly in Solr,
so remote_proofs_direct_check is used
:param directory: Directory containing the article files
:param tempdir: Directory where updated VORs to be downloaded to
:param vor_updates_available: list of DOIs with VOR updates available; defaults to querying Solr
:return: List of articles from uncorrected_list for which new version successfully downloaded
"""
if directory is None:
directory = get_corpus_dir()
if vor_updates_available is None:
vor_updates_available = check_for_vor_updates()
vor_updated_article_list = []
for doi in tqdm(vor_updates_available, disable=None):
updated = download_updated_xml(doi_to_path(doi), tempdir=tempdir)
if updated:
vor_updated_article_list.append(doi)
old_uncorrected_proofs = get_uncorrected_proofs()
new_uncorrected_proofs_list = list(old_uncorrected_proofs - set(vor_updated_article_list))
# direct remote XML check; add their totals to totals above
if new_uncorrected_proofs_list:
proofs_download_list = remote_proofs_direct_check(article_list=new_uncorrected_proofs_list)
vor_updated_article_list.extend(proofs_download_list)
new_uncorrected_proofs_list = list(set(new_uncorrected_proofs_list) - set(vor_updated_article_list))
too_old_proofs = [proof for proof in new_uncorrected_proofs_list if compare_article_pubdate(proof)]
if too_old_proofs:
print("Proofs older than 3 weeks: {}".format(too_old_proofs))
# if any VOR articles have been downloaded, update static uncorrected proofs list
if vor_updated_article_list:
with open(uncorrected_proofs_text_list, 'w') as f:
for item in sorted(new_uncorrected_proofs_list):
f.write("%s\n" % item)
print("{} uncorrected proofs updated to version of record.\n".format(len(vor_updated_article_list)) +
"{} uncorrected proofs remaining in uncorrected proof list.".format(len(new_uncorrected_proofs_list)))
else:
print("No uncorrected proofs have a VOR update.")
return vor_updated_article_list
def remote_proofs_direct_check(tempdir=newarticledir, article_list=None):
"""
Takes a list of DOIs of uncorrected proofs and compares each to the raw XML of the article online
If article status is now 'vor-update-to-uncorrected-proof', download new copy
This will not be necessary once Solr is indexing VOR article information correctly.
https://developer.plos.org/jira/browse/DPRO-3418
:param tempdir: temporary directory for downloading articles
:param article_list: list of uncorrected proofs to check for updates.
:return: list of all articles with updated vor
"""
try:
os.mkdir(tempdir)
except FileExistsError:
pass
proofs_download_list = []
if article_list is None:
article_list = list(get_uncorrected_proofs())
print("Checking directly for additional VOR updates...")
for doi in tqdm(article_list, disable=None):
f = doi_to_path(doi)
updated = download_updated_xml(f)
if updated:
proofs_download_list.append(doi)
if proofs_download_list:
print(len(proofs_download_list),
"VOR articles directly downloaded.")
else:
print("No other new VOR articles found.")
return proofs_download_list
def download_check_and_move(article_list, proof_filepath, tempdir, destination):
"""
For a list of new articles to get, first download them from journal pages to the temporary directory
Next, check these articles for uncorrected proofs and article_type amendments
Act on available VOR updates & amended articles
Then, move to corpus directory where the rest of the articles are
:param article_list: List of new articles to download
:param proof_filepath: List of uncorrected proofs to check for vor updates
:param tempdir: Directory where articles to be downloaded to
:param destination: Directory where new articles are to be moved to
"""
repo_download(article_list, tempdir)
amended_articles = check_for_amended_articles(directory=tempdir)
download_amended_articles(amended_article_list=amended_articles)
download_vor_updates()
check_for_uncorrected_proofs(directory=tempdir)
move_articles(tempdir, destination)
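# Illustrative sketch (not part of the original module): the end-to-end update
# pass described in the module docstring, wired together from the helpers
# above. Assumes the corpus directory exists and api.plos.org is reachable.
def _example_update_corpus():
    corpus_dir = get_corpus_dir()
    new_dois = get_dois_needed_list()  # Solr DOI list minus local files
    if new_dois:
        download_check_and_move(new_dois,
                                uncorrected_proofs_text_list,
                                tempdir=newarticledir,
                                destination=corpus_dir)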
def create_local_plos_corpus(directory=None, rm_metadata=True):
"""
Downloads a fresh copy of the PLOS corpus by:
1) creating directory if it doesn't exist
2) downloading metadata about the .zip of all PLOS XML
3) downloading the zip file (defaults to corpus directory)
4) extracting the individual XML files into the corpus directory
:param directory: directory where the corpus is to be downloaded and extracted
:param rm_metadata: whether to delete the downloaded zip metadata file afterwards
:return: None
"""
if directory is None:
directory = get_corpus_dir()
if not os.path.isdir(directory):
print('Creating folder for article xml')
os.makedirs(directory, exist_ok=True)
zip_date, zip_size, metadata_path = get_zip_metadata()
zip_path = download_file_from_google_drive(ZIP_ID, LOCAL_ZIP, file_size=zip_size)
unzip_articles(file_path=zip_path)
if rm_metadata:
os.remove(metadata_path)
def create_test_plos_corpus(directory=None):
"""
Downloads a copy of 10,000 randomly selected PLOS articles by:
1) creating directory if it doesn't exist
2) downloading the zip file (defaults to corpus directory)
3) extracting the individual XML files into the corpus directory
:param directory: directory where the corpus is to be downloaded and extracted
:return: None
"""
if directory is None:
directory = get_corpus_dir()
if not os.path.isdir(directory):
print('Creating folder for article xml')
os.makedirs(directory, exist_ok=True)
zip_path = download_file_from_google_drive(TEST_ZIP_ID, LOCAL_TEST_ZIP)
unzip_articles(file_path=zip_path, extract_directory=directory)
def download_corpus_metadata_files(csv_abstracts=True, csv_no_abstracts=True, sqlitedb=True, destination=None):
"""Downloads up to three files of metadata generated from the PLOS Corpus XML.
Includes two csvs and a sqlite database.
"""
if destination is None:
destination = os.getcwd()
if csv_abstracts:
csv_abstracts_id = '0B_JDnoghFeEKQWlNUUJtY1pIY3c'
csv_abstracts_file = download_file_from_google_drive(csv_abstracts_id,
'allofplos_metadata_test.csv',
destination=destination)
if csv_no_abstracts:
csv_no_abstracts_id = '0B_JDnoghFeEKeEp6S0R2Sm1YcEk'
csv_no_abstracts_file = download_file_from_google_drive(csv_no_abstracts_id,
'allofplos_metadata_no_abstracts_test.csv',
destination=destination)
if sqlitedb:
sqlitedb_id = '1gcQW7cc6Z9gDBu_vHxghNwQaMkyvVuMC'
sqlitedb_file = download_file_from_google_drive(sqlitedb_id,
'ploscorpus_test.db.gz',
destination=destination)
print("Extracting sqlite db...")
with gzip.open(sqlitedb_file, 'rb') as inF, \
        open('ploscorpus_test.db', 'wb') as outF:
    shutil.copyfileobj(inF, outF)
print("Extraction complete.")
|
|
#!/usr/bin/env python
# (C) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
""" Detect running daemons then configure and start the agent.
"""
import argparse
from glob import glob
import json
import logging
import os
import pwd
import socket
import subprocess
import sys
import agent_config
import monasca_setup.utils as utils
from monasca_setup.utils import write_template
from service.detection import detect_init
log = logging.getLogger(__name__)
CUSTOM_PLUGIN_PATH = '/usr/lib/monasca/agent/custom_detect.d'
# dirname is called twice to get the dir 1 above the location of the script
PREFIX_DIR = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))
def main(argv=None):
parser = argparse.ArgumentParser(description='Configure and setup the agent. In a full run it will detect running' +
' daemons then configure and start the agent.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parse_arguments(parser)
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(message)s")
else:
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
if args.dry_run:
log.info("Running in dry run mode, no changes will be made only reported")
# Detect and, if possible, enable the agent service
agent_service = detect_init(PREFIX_DIR, args.config_dir, args.log_dir, args.template_dir, username=args.user)
# Skip base setup if only installing plugins or running specific detection
# plugins
if not args.install_plugins_only and args.detection_plugins is None:
if not args.skip_enable:
agent_service.enable()
# Verify required options
if args.username is None or args.password is None or args.keystone_url is None:
log.error('Username, password and keystone_url are required when running full configuration.')
parser.print_help()
sys.exit(1)
base_configuration(args)
# Collect the set of detection plugins to run
detected_plugins = utils.discover_plugins(CUSTOM_PLUGIN_PATH)
if args.system_only:
from detection.plugins.system import System
plugins = [System]
elif args.detection_plugins is not None:
plugins = utils.select_plugins(args.detection_plugins, detected_plugins)
elif args.skip_detection_plugins is not None:
plugins = utils.select_plugins(args.skip_detection_plugins, detected_plugins, skip=True)
else:
plugins = detected_plugins
plugin_names = [p.__name__ for p in plugins]
if args.remove: # Remove entries for each plugin from the various plugin config files
changes = remove_config(args, plugin_names)
else:
# Run detection for all the plugins, halting on any failures if plugins were specified in the arguments
detected_config = plugin_detection(plugins, args.template_dir, args.detection_args,
args.detection_args_json,
skip_failed=(args.detection_plugins is None))
if detected_config is None:
return 1 # Indicates detection problem, skip remaining steps and give non-zero exit code
changes = modify_config(args, detected_config)
# Don't restart if only doing detection plugins and no changes found
if args.detection_plugins is not None and not changes:
log.info('No changes found for plugins {0}, skipping restart of Monasca Agent'.format(plugin_names))
return 0
elif args.dry_run:
log.info('Running in dry mode, skipping changes and restart of Monasca Agent')
return 0
# Now that the config is built, start the service
if args.install_plugins_only:
log.info('Command line option install_plugins_only set, skipping '
'service (re)start.')
else:
try:
agent_service.start(restart=True)
except subprocess.CalledProcessError:
log.error('The service did not start up correctly; see %s' % args.log_dir)
def base_configuration(args):
"""Write out the primary Agent configuration and setup the service.
:param args: Arguments from the command line
:return: None
"""
stat = pwd.getpwnam(args.user)
uid = stat.pw_uid
gid = stat.pw_gid
# Write the main agent.yaml - Note this is always overwritten
log.info('Configuring base Agent settings.')
dimensions = {}
# Join service in with the dimensions
if args.service:
dimensions.update({'service': args.service})
if args.dimensions:
dimensions.update(dict(item.strip().split(":") for item in args.dimensions.split(",")))
args.dimensions = dict((name, value) for (name, value) in dimensions.iteritems())
write_template(os.path.join(args.template_dir, 'agent.yaml.template'),
os.path.join(args.config_dir, 'agent.yaml'),
{'args': args, 'hostname': socket.getfqdn()},
group=gid,
user=uid,
is_yaml=True)
# Write the supervisor.conf
write_template(os.path.join(args.template_dir, 'supervisor.conf.template'),
os.path.join(args.config_dir, 'supervisor.conf'),
{'prefix': PREFIX_DIR, 'log_dir': args.log_dir, 'monasca_user': args.user},
user=uid,
group=gid)
def modify_config(args, detected_config):
"""Compare existing and detected config for each check plugin and write out
the plugin config if there are changes
"""
modified_config = False
for detection_plugin_name, new_config in detected_config.iteritems():
if args.overwrite:
modified_config = True
if args.dry_run:
continue
else:
agent_config.save_plugin_config(args.config_dir, detection_plugin_name, args.user, new_config)
else:
config = agent_config.read_plugin_config_from_disk(args.config_dir, detection_plugin_name)
# merge old and new config, new has precedence
if config is not None:
# For HttpCheck, if the new input url has the same host and
# port but a different protocol comparing with one of the
# existing instances in http_check.yaml, we want to keep the
# existing http check instance and replace the url with the
# new protocol. If name in this instance is the same as the
# url, we replace name with new url too.
# For more details please see:
# monasca-agent/docs/DeveloperDocs/agent_internals.md
if detection_plugin_name == "http_check":
# Save old http_check urls from config for later comparison
config_urls = [i['url'] for i in config['instances'] if
'url' in i]
# Check endpoint change, use new protocol instead
# Note: config is possibly changed after running
# check_endpoint_changes function.
config = agent_config.check_endpoint_changes(new_config, config)
agent_config.merge_by_name(new_config['instances'], config['instances'])
# Sort before compare, if instances have no name the sort will
# fail making order changes significant
try:
new_config['instances'].sort(key=lambda k: k['name'])
config['instances'].sort(key=lambda k: k['name'])
except Exception:
pass
if detection_plugin_name == "http_check":
new_config_urls = [i['url'] for i in new_config['instances'] if 'url' in i]
# Don't write config if no change
if new_config_urls == config_urls and new_config == config:
continue
else:
if new_config == config:
continue
modified_config = True
if args.dry_run:
log.info("Changes would be made to the config file for the {0}"
" check plugin".format(detection_plugin_name))
else:
agent_config.save_plugin_config(args.config_dir, detection_plugin_name, args.user, new_config)
return modified_config
def validate_positive(value):
int_value = int(value)
if int_value <= 0:
raise argparse.ArgumentTypeError("%s must be greater than zero" % value)
return int_value
def parse_arguments(parser):
parser.add_argument(
'-u', '--username', help="Username used for keystone authentication. Required for basic configuration.")
parser.add_argument(
'-p', '--password', help="Password used for keystone authentication. Required for basic configuration.")
parser.add_argument('--user_domain_id', help="User domain id for keystone authentication", default='')
parser.add_argument('--user_domain_name', help="User domain name for keystone authentication", default='')
parser.add_argument('--keystone_url', help="Keystone url. Required for basic configuration.")
parser.add_argument('--project_name', help="Project name for keystone authentication", default='')
parser.add_argument('--project_domain_id', help="Project domain id for keystone authentication", default='')
parser.add_argument('--project_domain_name', help="Project domain name for keystone authentication", default='')
parser.add_argument('--project_id', help="Keystone project id for keystone authentication", default='')
parser.add_argument('--monasca_url', help="Monasca API url, if not defined the url is pulled from keystone",
default='')
parser.add_argument('--service_type', help="Monasca API url service type in keystone catalog", default='')
parser.add_argument('--endpoint_type', help="Monasca API url endpoint type in keystone catalog", default='')
parser.add_argument('--region_name', help="Monasca API url region name in keystone catalog", default='')
parser.add_argument('--system_only', help="Setup the service but only configure the base config and system " +
"metrics (cpu, disk, load, memory, network).",
action="store_true", default=False)
parser.add_argument('-d', '--detection_plugins', nargs='*',
help="Skip base config and service setup and only configure this space separated list. " +
"This assumes the base config has already run.")
parser.add_argument('--skip_detection_plugins', nargs='*',
help="Skip detection for all plugins in this space separated list.")
detection_args_group = parser.add_mutually_exclusive_group()
detection_args_group.add_argument('-a', '--detection_args', help="A string of arguments that will be passed to detection " +
"plugins. Only certain detection plugins use arguments.")
detection_args_group.add_argument('-json', '--detection_args_json',
help="A JSON string that will be passed to detection plugins that parse JSON.")
parser.add_argument('--check_frequency', help="How often to run metric collection in seconds",
type=validate_positive, default=30)
parser.add_argument('--num_collector_threads', help="Number of Threads to use in Collector " +
"for running checks", type=validate_positive, default=1)
parser.add_argument('--pool_full_max_retries', help="Maximum number of collection cycles where all of the threads " +
"in the pool are still running plugins before the " +
"collector will exit and be restart",
type=validate_positive, default=4)
parser.add_argument('--plugin_collect_time_warn', help="Number of seconds a plugin collection time exceeds " +
"that causes a warning to be logged for that plugin",
type=validate_positive, default=6)
parser.add_argument('--dimensions', help="Additional dimensions to set for all metrics. A comma separated list " +
"of name/value pairs, 'name:value,name2:value2'")
parser.add_argument('--ca_file', help="Sets the path to the ca certs file if using certificates. " +
"Required only if insecure is set to False", default='')
parser.add_argument('--insecure', help="Set whether certificates are used for Keystone authentication",
default=False)
parser.add_argument('--config_dir', help="Configuration directory", default='/etc/monasca/agent')
parser.add_argument('--log_dir', help="monasca-agent log directory", default='/var/log/monasca/agent')
parser.add_argument('--log_level', help="monasca-agent logging level (ERROR, WARNING, INFO, DEBUG)", required=False,
default='WARN')
parser.add_argument('--template_dir', help="Alternative template directory",
default=os.path.join(PREFIX_DIR, 'share/monasca/agent'))
parser.add_argument('--overwrite',
help="Overwrite existing plugin configuration. " +
"The default is to merge. agent.yaml is always overwritten.",
action="store_true")
parser.add_argument('-r', '--remove', help="Rather than add the detected configuration remove it.",
action="store_true", default=False)
parser.add_argument('--skip_enable', help="By default the service is enabled, " +
"which requires the script run as root. Set this to skip that step.",
action="store_true")
parser.add_argument('--install_plugins_only', help="Only update plugin "
"configuration, do not configure services, users, etc."
" or restart services",
action="store_true")
parser.add_argument('--user', help="User name to run monasca-agent as", default='mon-agent')
parser.add_argument('-s', '--service', help="Service this node is associated with, added as a dimension.")
parser.add_argument('--amplifier', help="Integer for the number of additional measurements to create. " +
"Additional measurements contain the 'amplifier' dimension. " +
"Useful for load testing; not for production use.", default=0)
parser.add_argument('-v', '--verbose', help="Verbose Output", action="store_true")
parser.add_argument('--dry_run', help="Make no changes just report on changes", action="store_true")
parser.add_argument('--max_buffer_size',
help="Maximum number of batches of measurements to"
" buffer while unable to communicate with monasca-api",
default=1000)
parser.add_argument('--max_measurement_buffer_size',
help="Maximum number of measurements to buffer when unable to communicate"
" with the monasca-api",
default=-1)
parser.add_argument('--backlog_send_rate',
help="Maximum number of buffered batches of measurements to send at"
" one time when connection to the monasca-api is restored",
default=1000)
parser.add_argument('--monasca_statsd_port',
help="Statsd daemon port number",
default=8125)
return parser.parse_args()
def plugin_detection(plugins, template_dir, detection_args, detection_args_json, skip_failed=True, remove=False):
"""Runs the detection step for each plugin in the list and returns the complete detected agent config.
:param plugins: A list of detection plugin classes
:param template_dir: Location of plugin configuration templates
:param detection_args: Argument string passed to each detection plugin
:param detection_args_json: JSON string passed to detection plugins that parse JSON
:param skip_failed: When False any detection failure causes the run to halt and return None
:param remove: When True detection is run for configuration removal, so per-plugin "Configuring" logging is skipped
:return: An agent_config instance representing the total configuration from all detection plugins run.
"""
plugin_config = agent_config.Plugins()
if detection_args_json:
json_data = json.loads(detection_args_json)
for detect_class in plugins:
# todo add option to install dependencies
if detection_args_json:
detect = detect_class(template_dir, False, **json_data)
else:
detect = detect_class(template_dir, False, detection_args)
if detect.available:
new_config = detect.build_config_with_name()
if not remove:
log.info('Configuring {0}'.format(detect.name))
if new_config is not None:
plugin_config.merge(new_config)
elif not skip_failed:
log.warn('Failed detection of plugin {0}.'.format(detect.name) +
"\n\tPossible causes: Service not found or missing arguments.")
return None
return plugin_config
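# Illustrative sketch (not part of the original script): run detection for the
# bundled System plugin only and return the resulting plugin configuration.
# The template path is an example; a real run needs the agent templates installed.
def _example_detect_system_only(template_dir='/usr/share/monasca/agent'):
    from detection.plugins.system import System  # same import used by --system_only
    return plugin_detection([System], template_dir,
                            detection_args=None, detection_args_json=None)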
def remove_config(args, plugin_names):
"""Parse all configuration removing any configuration built by plugins in plugin_names
Note there is no concept of overwrite for removal.
:param args: specified arguments
:param plugin_names: A list of the plugin names to remove from the config
:return: True if changes, false otherwise
"""
changes = False
existing_config_files = glob(os.path.join(args.config_dir, 'conf.d', '*.yaml'))
detected_plugins = utils.discover_plugins(CUSTOM_PLUGIN_PATH)
plugins = utils.select_plugins(args.detection_plugins, detected_plugins)
if (args.detection_args or args.detection_args_json):
detected_config = plugin_detection(
plugins, args.template_dir, args.detection_args, args.detection_args_json,
skip_failed=(args.detection_plugins is None), remove=True)
for file_path in existing_config_files:
deletes = False
plugin_name = os.path.splitext(os.path.basename(file_path))[0]
config = agent_config.read_plugin_config_from_disk(args.config_dir, plugin_name)
new_instances = []  # To avoid odd issues from iterating over a list you delete from, build a new one instead
if args.detection_args is None:
for inst in config['instances']:
if 'built_by' in inst and inst['built_by'] in plugin_names:
changes = True
deletes = True
continue
new_instances.append(inst)
config['instances'] = new_instances
else:
for detected_key in detected_config.keys():
for inst in detected_config[detected_key]['instances']:
if inst in config['instances']:
changes = True
deletes = True
config['instances'].remove(inst)
if deletes:
agent_config.delete_from_config(args, config, file_path, plugin_name)
return changes
if __name__ == "__main__":
sys.exit(main())
|
|
import enum
from graphql.language import ast
from graphql.language.parser import parse
from ..query import Node, Field, Link, merge
class NodeVisitor:
def visit(self, obj):
visit_method = getattr(self, 'visit_{}'.format(obj.kind), None)
if visit_method is None:
raise NotImplementedError('Not implemented node type: {!r}'
.format(obj))
return visit_method(obj)
def visit_document(self, obj):
for definition in obj.definitions:
self.visit(definition)
def visit_operation_definition(self, obj):
self.visit(obj.selection_set)
def visit_fragment_definition(self, obj):
self.visit(obj.selection_set)
def visit_selection_set(self, obj):
for i in obj.selections:
self.visit(i)
def visit_field(self, obj):
pass
def visit_fragment_spread(self, obj):
pass
def visit_inline_fragment(self, obj):
self.visit(obj.selection_set)
class OperationGetter(NodeVisitor):
def __init__(self, operation_name=None):
self._operations = {}
self._operation_name = operation_name
@classmethod
def get(cls, doc, operation_name=None):
self = cls(operation_name=operation_name)
self.visit(doc)
if not self._operations:
raise TypeError('No operations in the document')
if self._operation_name is None:
if len(self._operations) > 1:
raise TypeError('Document should contain exactly one operation '
'when no operation name was provided')
return next(iter(self._operations.values()))
else:
try:
return self._operations[self._operation_name]
except KeyError:
raise ValueError('Undefined operation name: {!r}'
.format(self._operation_name))
def visit_fragment_definition(self, obj):
pass # skip visit here
def visit_operation_definition(self, obj):
name = obj.name.value if obj.name is not None else None
if name in self._operations:
raise TypeError('Duplicate operation definition: {!r}'
.format(name))
self._operations[name] = obj
class FragmentsCollector(NodeVisitor):
def __init__(self):
self.fragments_map = {}
def visit_operation_definition(self, obj):
pass # not interested in operations here
def visit_fragment_definition(self, obj):
if obj.name.value in self.fragments_map:
raise TypeError('Duplicated fragment name: "{}"'
.format(obj.name.value))
self.fragments_map[obj.name.value] = obj
class SelectionSetVisitMixin:
def transform_fragment(self, name):
raise NotImplementedError(type(self))
@property
def query_variables(self):
raise NotImplementedError(type(self))
@property
def query_name(self):
raise NotImplementedError(type(self))
def lookup_variable(self, name):
try:
return self.query_variables[name]
except KeyError:
raise TypeError('Variable ${} is not defined in query {}'
.format(name, self.query_name or '<unnamed>'))
def visit_selection_set(self, obj):
for i in obj.selections:
for j in self.visit(i):
yield j
def _should_skip(self, obj):
if not obj.directives:
return
skip = next((d for d in obj.directives if d.name.value == 'skip'),
None)
if skip is not None:
if len(skip.arguments) != 1:
raise TypeError('@skip directive accepts exactly one '
'argument, {} provided'
.format(len(skip.arguments)))
skip_arg = skip.arguments[0]
if skip_arg.name.value != 'if':
raise TypeError('@skip directive does not accept "{}" '
'argument'
.format(skip_arg.name.value))
return self.visit(skip_arg.value)
include = next((d for d in obj.directives if d.name.value == 'include'),
None)
if include is not None:
if len(include.arguments) != 1:
raise TypeError('@include directive accepts exactly one '
'argument, {} provided'
.format(len(include.arguments)))
include_arg = include.arguments[0]
if include_arg.name.value != 'if':
raise TypeError('@include directive does not accept "{}" '
'argument'
.format(include_arg.name.value))
return not self.visit(include_arg.value)
def visit_field(self, obj):
if self._should_skip(obj):
return
if obj.arguments:
options = {arg.name.value: self.visit(arg.value)
for arg in obj.arguments}
else:
options = None
if obj.alias is not None:
alias = obj.alias.value
else:
alias = None
if obj.selection_set is None:
yield Field(obj.name.value, options=options, alias=alias)
else:
node = Node(list(self.visit(obj.selection_set)))
yield Link(obj.name.value, node, options=options, alias=alias)
def visit_variable(self, obj):
return self.lookup_variable(obj.name.value)
def visit_null_value(self, obj):
return None
def visit_int_value(self, obj):
return int(obj.value)
def visit_float_value(self, obj):
return float(obj.value)
def visit_string_value(self, obj):
return obj.value
def visit_boolean_value(self, obj):
return obj.value
def visit_enum_value(self, obj):
return obj.value
def visit_list_value(self, obj):
return [self.visit(i) for i in obj.values]
def visit_object_value(self, obj):
return {f.name.value: self.visit(f.value) for f in obj.fields}
def visit_fragment_spread(self, obj):
if self._should_skip(obj):
return
for i in self.transform_fragment(obj.name.value):
yield i
def visit_inline_fragment(self, obj):
if self._should_skip(obj):
return
for i in self.visit(obj.selection_set):
yield i
class FragmentsTransformer(SelectionSetVisitMixin, NodeVisitor):
query_name = None
query_variables = None
def __init__(self, document, query_name, query_variables):
collector = FragmentsCollector()
collector.visit(document)
self.query_name = query_name
self.query_variables = query_variables
self.fragments_map = collector.fragments_map
self.cache = {}
self.pending_fragments = set()
def transform_fragment(self, name):
return self.visit(self.fragments_map[name])
def visit_operation_definition(self, obj):
pass # not interested in operations here
def visit_fragment_definition(self, obj):
if obj.name.value in self.cache:
return self.cache[obj.name.value]
else:
if obj.name.value in self.pending_fragments:
raise TypeError('Cyclic fragment usage: "{}"'
.format(obj.name.value))
self.pending_fragments.add(obj.name.value)
try:
selection_set = list(self.visit(obj.selection_set))
finally:
self.pending_fragments.discard(obj.name.value)
self.cache[obj.name.value] = selection_set
return selection_set
class GraphQLTransformer(SelectionSetVisitMixin, NodeVisitor):
query_name = None
query_variables = None
fragments_transformer = None
def __init__(self, document, variables=None):
self.document = document
self.variables = variables
@classmethod
def transform(cls, document, op, variables=None):
visitor = cls(document, variables)
return visitor.visit(op)
def transform_fragment(self, name):
return self.fragments_transformer.transform_fragment(name)
def visit_operation_definition(self, obj):
variables = self.variables or {}
query_name = obj.name.value if obj.name else '<unnamed>'
query_variables = {}
for var_defn in obj.variable_definitions or ():
name = var_defn.variable.name.value
try:
value = variables[name] # TODO: check variable type
except KeyError:
if var_defn.default_value is not None:
value = self.visit(var_defn.default_value)
elif isinstance(var_defn.type, ast.NonNullTypeNode):
raise TypeError('Variable "{}" is not provided for query {}'
.format(name, query_name))
else:
value = None
query_variables[name] = value
self.query_name = query_name
self.query_variables = query_variables
self.fragments_transformer = FragmentsTransformer(self.document,
self.query_name,
self.query_variables)
ordered = obj.operation is ast.OperationType.MUTATION
try:
node = Node(list(self.visit(obj.selection_set)),
ordered=ordered)
finally:
self.query_name = None
self.query_variables = None
self.fragments_transformer = None
return merge([node])
def read(src, variables=None, operation_name=None):
"""Reads a query from the GraphQL document
Example:
.. code-block:: python
query = read('{ foo bar }')
result = engine.execute(graph, query)
:param str src: GraphQL query
:param dict variables: query variables
:param str operation_name: Name of the operation to execute
:return: :py:class:`hiku.query.Node`, ready to execute query object
"""
doc = parse(src)
op = OperationGetter.get(doc, operation_name=operation_name)
if op.operation is not ast.OperationType.QUERY:
raise TypeError('Only "query" operations are supported, '
'"{}" operation was provided'
.format(op.operation.value))
return GraphQLTransformer.transform(doc, op, variables)
class OperationType(enum.Enum):
"""Enumerates GraphQL operation types"""
#: query operation
QUERY = ast.OperationType.QUERY
#: mutation operation
MUTATION = ast.OperationType.MUTATION
#: subscription operation
SUBSCRIPTION = ast.OperationType.SUBSCRIPTION
class Operation:
"""Represents requested GraphQL operation"""
def __init__(self, type_, query, name=None):
#: type of the operation
self.type = type_
#: operation's query
self.query = query
#: optional name of the operation
self.name = name
def read_operation(src, variables=None, operation_name=None):
"""Reads an operation from the GraphQL document
Example:
.. code-block:: python
op = read_operation('{ foo bar }')
if op.type is OperationType.QUERY:
result = engine.execute(query_graph, op.query)
:param str src: GraphQL document
:param dict variables: query variables
:param str operation_name: Name of the operation to execute
:return: :py:class:`Operation`
"""
doc = parse(src)
op = OperationGetter.get(doc, operation_name=operation_name)
query = GraphQLTransformer.transform(doc, op, variables)
type_ = OperationType._value2member_map_.get(op.operation)
name = op.name.value if op.name else None
if type_ is None:
raise TypeError('Unsupported operation type: {}'.format(op.operation))
else:
return Operation(type_, query, name)
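# --- Illustrative usage sketch (added for documentation; not part of the
# original module). A minimal, hedged example of the public read() and
# read_operation() helpers documented above; the query text, field name and
# variable name are invented for illustration only.
def _example_read_operation():
    op = read_operation(
        'query Hello($name: String) { greeting(name: $name) }',
        variables={'name': 'world'},
    )
    # The operation type is exposed via OperationType and the parsed query
    # is a hiku.query.Node, ready to be passed to an engine.
    assert op.type is OperationType.QUERY
    assert op.name == 'Hello'
    return op.query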
|
|
"""Hierarchical Agglomerative Clustering
These routines perform some hierarchical agglomerative clustering of some
input data.
Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
Gael Varoquaux
License: BSD 3 clause
"""
from heapq import heapify, heappop, heappush, heappushpop
import warnings
import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from ..base import BaseEstimator, ClusterMixin
from ..externals import six
from ..metrics.pairwise import paired_distances, pairwise_distances
from ..utils import check_array
from ..utils.validation import check_memory
from . import _hierarchical
from ._feature_agglomeration import AgglomerationTransform
from ..utils.fast_dict import IntFloatDict
from ..externals.six.moves import xrange
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(X, connectivity, affinity):
"""
Fixes the connectivity matrix
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary
"""
n_samples = X.shape[0]
if (connectivity.shape[0] != n_samples or
connectivity.shape[1] != n_samples):
raise ValueError('Wrong shape for connectivity matrix: %s '
'when X is %s' % (connectivity.shape, X.shape))
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
# Compute the number of nodes
n_components, labels = connected_components(connectivity)
if n_components > 1:
warnings.warn("the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_components,
stacklevel=2)
# XXX: Can we do without completing the matrix?
for i in xrange(n_components):
idx_i = np.where(labels == i)[0]
Xi = X[idx_i]
for j in xrange(i):
idx_j = np.where(labels == j)[0]
Xj = X[idx_j]
D = pairwise_distances(Xi, Xj, metric=affinity)
ii, jj = np.where(D == np.min(D))
ii = ii[0]
jj = jj[0]
connectivity[idx_i[ii], idx_j[jj]] = True
connectivity[idx_j[jj], idx_i[ii]] = True
return connectivity, n_components
###############################################################################
# Hierarchical tree building functions
def ward_tree(X, connectivity=None, n_clusters=None, return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
        Default is None, i.e., the Ward algorithm is unstructured.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
return_distance : bool (optional)
If True, return the distance between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
        is specified, otherwise 'None' is returned.
distances : 1D array, shape (n_nodes-1, )
Only returned if return_distance is set to True (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted euclidean distance between
        the nodes `children[i, 0]` and `children[i, 1]`. If the nodes refer to
        leaves of the tree, then `distances[i]` is their unweighted euclidean
        distance. Distances are updated in the following way
        (from scipy.cluster.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
else:
return children_, 1, n_samples, None
connectivity, n_components = _fix_connectivity(X, connectivity,
affinity='euclidean')
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. '
'%i n_clusters was asked, and there are %i samples.'
% (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
# create inertia matrix
coord_row = []
coord_col = []
A = []
for ind, row in enumerate(connectivity.rows):
A.append(row)
# We keep only the upper triangular for the moments
# Generator expressions are faster than arrays on the following
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind, ])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
# build moments as a list
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col,
inertia)
inertia = list(six.moves.zip(inertia, coord_row, coord_col))
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=np.int8, order='C')
# recursive merge loop
for k in range(n_samples, n_nodes):
# identify the merge
while True:
inert, i, j = heappop(inertia)
if used_node[i] and used_node[j]:
break
parent[i], parent[j] = k, k
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance: # store inertia value
distances[k - n_samples] = inert
# update the moments
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
# update the structure matrix A and the inertia matrix
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
# List comprehension is faster than a for loop
[A[l].append(k) for l in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float64, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2,
coord_row, coord_col, ini)
# List comprehension is faster than a for loop
[heappush(inertia, (ini[idx], k, coord_col[idx]))
for idx in range(n_additions)]
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# sort children to get consistent output with unstructured version
children = [c[::-1] for c in children]
children = np.array(children) # return numpy array for efficient caching
if return_distance:
# 2 is scaling factor to compare w/ unstructured version
distances = np.sqrt(2. * distances)
return children, n_components, n_leaves, parent, distances
else:
return children, n_components, n_leaves, parent
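# --- Illustrative usage sketch (added for documentation; not part of the
# original module). A minimal, hedged example of the unstructured call path
# documented above (connectivity=None); the data is random and only meant to
# show the shapes of the returned values.
def _example_ward_tree():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 2)
    children, n_components, n_leaves, parents, distances = ward_tree(
        X, return_distance=True)
    # n_samples - 1 merges, one distance per merge; parents is None because
    # no connectivity matrix was given.
    assert children.shape == (5, 2)
    assert distances.shape == (5,)
    assert n_leaves == 6 and parents is None
    return children, distances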
# average and complete linkage
def linkage_tree(X, connectivity=None, n_components='deprecated',
n_clusters=None, linkage='complete', affinity="euclidean",
return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
        Default is None, i.e., the Ward algorithm is unstructured.
n_components : int (optional)
The number of connected components in the graph.
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete"}, optional, default: "complete"
Which linkage criteria to use. The linkage criterion determines which
        distance to use between sets of observations.
- average uses the average of the distances of each observation of
the two sets
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
affinity : string or callable, optional, default: "euclidean".
        Which metric to use. Can be "euclidean", "manhattan", or any
        distance known to paired distance (see metrics.pairwise).
return_distance : bool, default False
whether or not to return the distances between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
        is specified, otherwise 'None' is returned.
distances : ndarray, shape (n_nodes-1,)
Returned when return_distance is set to True.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See also
--------
ward_tree : hierarchical clustering with ward linkage
"""
if n_components != 'deprecated':
warnings.warn("n_components was deprecated in 0.19"
"will be removed in 0.21", DeprecationWarning)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {'complete': _hierarchical.max_merge,
'average': _hierarchical.average_merge}
try:
join_func = linkage_choices[linkage]
except KeyError:
raise ValueError(
'Unknown linkage option, linkage should be one '
'of %s, but %s was given' % (linkage_choices.keys(), linkage))
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
if affinity == 'precomputed':
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by pdist: it is a flat array containing the upper triangular of
# the distance matrix.
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
# Translate to something understood by scipy
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(np.int)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
connectivity, n_components = _fix_connectivity(X, connectivity,
affinity=affinity)
connectivity = connectivity.tocoo()
# Put the diagonal to zero
diag_mask = (connectivity.row != connectivity.col)
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == 'precomputed':
distances = X[connectivity.row, connectivity.col]
else:
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(X[connectivity.row],
X[connectivity.col],
metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
# LIL seems to the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data,
connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp),
np.asarray(data, dtype=np.float64))
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(_hierarchical.WeightedEdge(d, ind, r)
for r, d in zip(row, data) if r < ind)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in xrange(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for l, d in coord_col:
A[l].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, l))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# # return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_components, n_leaves, parent, distances
return children, n_components, n_leaves, parent
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
kwargs['linkage'] = 'complete'
return linkage_tree(*args, **kwargs)
def _average_linkage(*args, **kwargs):
kwargs['linkage'] = 'average'
return linkage_tree(*args, **kwargs)
_TREE_BUILDERS = dict(
ward=ward_tree,
complete=_complete_linkage,
average=_average_linkage)
###############################################################################
# Functions for cutting hierarchical clustering tree
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
cluster labels for each point
"""
if n_clusters > n_leaves:
raise ValueError('Cannot extract more clusters than samples: '
                         '%s clusters were given for a tree with %s leaves.'
% (n_clusters, n_leaves))
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for i in xrange(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
###############################################################################
class AgglomerativeClustering(BaseEstimator, ClusterMixin):
"""
Agglomerative Clustering
Recursively merges the pair of clusters that minimally increases
a given linkage distance.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
n_clusters : int, default=2
The number of clusters to find.
affinity : string or callable, default: "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default: "ward"
Which linkage criterion to use. The linkage criterion determines which
        distance to use between sets of observations. The algorithm will merge
the pairs of cluster that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
pooling_func : callable, default='deprecated'
Ignored.
.. deprecated:: 0.20
``pooling_func`` has been deprecated in 0.20 and will be removed
in 0.22.
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_samples-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
"""
def __init__(self, n_clusters=2, affinity="euclidean",
memory=None,
connectivity=None, compute_full_tree='auto',
linkage='ward', pooling_func='deprecated'):
self.n_clusters = n_clusters
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.affinity = affinity
self.pooling_func = pooling_func
def fit(self, X, y=None):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data. Shape [n_samples, n_features], or [n_samples,
n_samples] if affinity=='precomputed'.
y : Ignored
Returns
-------
self
"""
if self.pooling_func != 'deprecated':
warnings.warn('Agglomerative "pooling_func" parameter is not used.'
' It has been deprecated in version 0.20 and will be'
                          ' removed in 0.22', DeprecationWarning)
X = check_array(X, ensure_min_samples=2, estimator=self)
memory = check_memory(self.memory)
if self.n_clusters <= 0:
raise ValueError("n_clusters should be an integer greater than 0."
" %s was provided." % str(self.n_clusters))
if self.linkage == "ward" and self.affinity != "euclidean":
raise ValueError("%s was provided as affinity. Ward can only "
"work with euclidean distances." %
(self.affinity, ))
if self.linkage not in _TREE_BUILDERS:
raise ValueError("Unknown linkage type %s."
"Valid options are %s" % (self.linkage,
_TREE_BUILDERS.keys()))
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(
connectivity, accept_sparse=['csr', 'coo', 'lil'])
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == 'auto':
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, .02 * n_samples)
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
if self.linkage != 'ward':
kwargs['linkage'] = self.linkage
kwargs['affinity'] = self.affinity
self.children_, self.n_components_, self.n_leaves_, parents = \
memory.cache(tree_builder)(X, connectivity,
n_clusters=n_clusters,
**kwargs)
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters, self.children_,
self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
# Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
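# --- Illustrative usage sketch (added for documentation; not part of the
# original module). A minimal, hedged example of the estimator API defined
# above; the data is random and only demonstrates the fit()/labels_ round trip.
def _example_agglomerative_clustering():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 3)
    model = AgglomerativeClustering(n_clusters=3, linkage='average').fit(X)
    # One label per sample; the tree has one leaf per sample.
    assert model.labels_.shape == (10,)
    assert model.n_leaves_ == 10
    return model.labels_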
class FeatureAgglomeration(AgglomerativeClustering, AgglomerationTransform):
"""Agglomerate features.
Similar to AgglomerativeClustering, but recursively merges features
instead of samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
n_clusters : int, default 2
The number of clusters to find.
affinity : string or callable, default "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each feature the neighboring
features following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
        kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
compute_full_tree : bool or 'auto', optional, default "auto"
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of features. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default "ward"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of features. The algorithm will merge
the pairs of cluster that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each feature of
the two sets.
- complete or maximum linkage uses the maximum distances between
all features of the two sets.
pooling_func : callable, default np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument `axis=1`, and reduce it to an array of size [M].
Attributes
----------
labels_ : array-like, (n_features,)
cluster labels for each feature.
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_features`
        correspond to leaves of the tree which are the original features.
A node `i` greater than or equal to `n_features` is a non-leaf
node and has children `children_[i - n_features]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_features + i`
"""
def __init__(self, n_clusters=2, affinity="euclidean",
memory=None,
connectivity=None, compute_full_tree='auto',
linkage='ward', pooling_func=np.mean):
super(FeatureAgglomeration, self).__init__(
n_clusters=n_clusters, memory=memory, connectivity=connectivity,
compute_full_tree=compute_full_tree, linkage=linkage,
affinity=affinity)
self.pooling_func = pooling_func
def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
y : Ignored
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
ensure_min_features=2, estimator=self)
return AgglomerativeClustering.fit(self, X.T, **params)
@property
def fit_predict(self):
raise AttributeError
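# --- Illustrative usage sketch (added for documentation; not part of the
# original module). A minimal, hedged example showing that features (columns)
# are clustered rather than samples, so labels_ has one entry per feature.
def _example_feature_agglomeration():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 6)                    # 20 samples, 6 features
    agglo = FeatureAgglomeration(n_clusters=2).fit(X)
    assert agglo.labels_.shape == (6,)     # one cluster label per feature
    return agglo.labels_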
|
|
"""Dictionary Of Keys based matrix"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dok_matrix', 'isspmatrix_dok']
import functools
import operator
import numpy as np
from scipy._lib.six import zip as izip, xrange
from scipy._lib.six import iteritems
from .base import spmatrix, isspmatrix
from .sputils import (isdense, getdtype, isshape, isintlike, isscalarlike,
upcast, upcast_scalar, IndexMixin, get_index_dtype)
try:
from operator import isSequenceType as _is_sequence
except ImportError:
def _is_sequence(x):
return (hasattr(x, '__len__') or hasattr(x, '__next__')
or hasattr(x, 'next'))
class dok_matrix(spmatrix, IndexMixin, dict):
"""
Dictionary Of Keys based sparse matrix.
This is an efficient structure for constructing sparse
matrices incrementally.
This can be instantiated in several ways:
dok_matrix(D)
with a dense matrix, D
dok_matrix(S)
with a sparse matrix, S
dok_matrix((M,N), [dtype])
create the matrix with initial shape (M,N)
dtype is optional, defaulting to dtype='d'
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Allows for efficient O(1) access of individual elements.
Duplicates are not allowed.
Can be efficiently converted to a coo_matrix once constructed.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dok_matrix
>>> S = dok_matrix((5, 5), dtype=np.float32)
>>> for i in range(5):
... for j in range(5):
... S[i, j] = i + j # Update element
"""
format = 'dok'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
dict.__init__(self)
spmatrix.__init__(self)
self.dtype = getdtype(dtype, default=float)
if isinstance(arg1, tuple) and isshape(arg1): # (M,N)
M, N = arg1
self.shape = (M, N)
elif isspmatrix(arg1): # Sparse ctor
if isspmatrix_dok(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.todok()
if dtype is not None:
arg1 = arg1.astype(dtype)
self.update(arg1)
self.shape = arg1.shape
self.dtype = arg1.dtype
else: # Dense ctor
try:
arg1 = np.asarray(arg1)
except:
raise TypeError('invalid input format')
if len(arg1.shape) != 2:
raise TypeError('expected rank <=2 dense array or matrix')
from .coo import coo_matrix
d = coo_matrix(arg1, dtype=dtype).todok()
self.update(d)
self.shape = arg1.shape
self.dtype = d.dtype
def getnnz(self):
return dict.__len__(self)
nnz = property(fget=getnnz)
def __len__(self):
return dict.__len__(self)
def get(self, key, default=0.):
"""This overrides the dict.get method, providing type checking
but otherwise equivalent functionality.
"""
try:
i, j = key
assert isintlike(i) and isintlike(j)
except (AssertionError, TypeError, ValueError):
raise IndexError('index must be a pair of integers')
if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]):
raise IndexError('index out of bounds')
return dict.get(self, key, default)
def __getitem__(self, index):
"""If key=(i,j) is a pair of integers, return the corresponding
element. If either i or j is a slice or sequence, return a new sparse
matrix with just these elements.
"""
i, j = self._unpack_index(index)
i_intlike = isintlike(i)
j_intlike = isintlike(j)
if i_intlike and j_intlike:
# Scalar index case
i = int(i)
j = int(j)
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('index out of bounds')
if j < 0:
j += self.shape[1]
if j < 0 or j >= self.shape[1]:
raise IndexError('index out of bounds')
return dict.get(self, (i,j), 0.)
elif ((i_intlike or isinstance(i, slice)) and
(j_intlike or isinstance(j, slice))):
# Fast path for slicing very sparse matrices
i_slice = slice(i, i+1) if i_intlike else i
j_slice = slice(j, j+1) if j_intlike else j
i_indices = i_slice.indices(self.shape[0])
j_indices = j_slice.indices(self.shape[1])
i_seq = xrange(*i_indices)
j_seq = xrange(*j_indices)
newshape = (len(i_seq), len(j_seq))
newsize = _prod(newshape)
if len(self) < 2*newsize and newsize != 0:
# Switch to the fast path only when advantageous
# (count the iterations in the loops, adjust for complexity)
#
# We also don't handle newsize == 0 here (if
# i/j_intlike, it can mean index i or j was out of
# bounds)
return self._getitem_ranges(i_indices, j_indices, newshape)
i, j = self._index_to_arrays(i, j)
if i.size == 0:
return dok_matrix(i.shape, dtype=self.dtype)
min_i = i.min()
if min_i < -self.shape[0] or i.max() >= self.shape[0]:
            raise IndexError('index (%d) out of range (-%d to %d)' %
(i.min(), self.shape[0], self.shape[0]-1))
if min_i < 0:
i = i.copy()
i[i < 0] += self.shape[0]
min_j = j.min()
if min_j < -self.shape[1] or j.max() >= self.shape[1]:
            raise IndexError('index (%d) out of range (-%d to %d)' %
(j.min(), self.shape[1], self.shape[1]-1))
if min_j < 0:
j = j.copy()
j[j < 0] += self.shape[1]
newdok = dok_matrix(i.shape, dtype=self.dtype)
for a in xrange(i.shape[0]):
for b in xrange(i.shape[1]):
v = dict.get(self, (i[a,b], j[a,b]), 0.)
if v != 0:
dict.__setitem__(newdok, (a, b), v)
return newdok
def _getitem_ranges(self, i_indices, j_indices, shape):
# performance golf: we don't want Numpy scalars here, they are slow
i_start, i_stop, i_stride = map(int, i_indices)
j_start, j_stop, j_stride = map(int, j_indices)
newdok = dok_matrix(shape, dtype=self.dtype)
for (ii, jj) in self.keys():
# ditto for numpy scalars
ii = int(ii)
jj = int(jj)
a, ra = divmod(ii - i_start, i_stride)
if a < 0 or a >= shape[0] or ra != 0:
continue
b, rb = divmod(jj - j_start, j_stride)
if b < 0 or b >= shape[1] or rb != 0:
continue
dict.__setitem__(newdok, (a, b),
dict.__getitem__(self, (ii, jj)))
return newdok
def __setitem__(self, index, x):
if isinstance(index, tuple) and len(index) == 2:
# Integer index fast path
i, j = index
if (isintlike(i) and isintlike(j) and 0 <= i < self.shape[0]
and 0 <= j < self.shape[1]):
v = np.asarray(x, dtype=self.dtype)
if v.ndim == 0 and v != 0:
dict.__setitem__(self, (int(i), int(j)), v[()])
return
i, j = self._unpack_index(index)
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
x = x.toarray()
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("shape mismatch in assignment")
if np.size(x) == 0:
return
min_i = i.min()
if min_i < -self.shape[0] or i.max() >= self.shape[0]:
            raise IndexError('index (%d) out of range (-%d to %d)' %
(i.min(), self.shape[0], self.shape[0]-1))
if min_i < 0:
i = i.copy()
i[i < 0] += self.shape[0]
min_j = j.min()
if min_j < -self.shape[1] or j.max() >= self.shape[1]:
            raise IndexError('index (%d) out of range (-%d to %d)' %
(j.min(), self.shape[1], self.shape[1]-1))
if min_j < 0:
j = j.copy()
j[j < 0] += self.shape[1]
dict.update(self, izip(izip(i.flat, j.flat), x.flat))
if 0 in x:
zeroes = x == 0
for key in izip(i[zeroes].flat, j[zeroes].flat):
if dict.__getitem__(self, key) == 0:
# may have been superseded by later update
del self[key]
def __add__(self, other):
# First check if argument is a scalar
if isscalarlike(other):
res_dtype = upcast_scalar(self.dtype, other)
new = dok_matrix(self.shape, dtype=res_dtype)
# Add this scalar to every element.
M, N = self.shape
for i in xrange(M):
for j in xrange(N):
aij = self.get((i, j), 0) + other
if aij != 0:
new[i, j] = aij
# new.dtype.char = self.dtype.char
elif isinstance(other, dok_matrix):
if other.shape != self.shape:
raise ValueError("matrix dimensions are not equal")
# We could alternatively set the dimensions to the largest of
# the two matrices to be summed. Would this be a good idea?
res_dtype = upcast(self.dtype, other.dtype)
new = dok_matrix(self.shape, dtype=res_dtype)
new.update(self)
for key in other.keys():
new[key] += other[key]
elif isspmatrix(other):
csc = self.tocsc()
new = csc + other
elif isdense(other):
new = self.todense() + other
else:
return NotImplemented
return new
def __radd__(self, other):
# First check if argument is a scalar
if isscalarlike(other):
new = dok_matrix(self.shape, dtype=self.dtype)
# Add this scalar to every element.
M, N = self.shape
for i in xrange(M):
for j in xrange(N):
aij = self.get((i, j), 0) + other
if aij != 0:
new[i, j] = aij
elif isinstance(other, dok_matrix):
if other.shape != self.shape:
raise ValueError("matrix dimensions are not equal")
new = dok_matrix(self.shape, dtype=self.dtype)
new.update(self)
for key in other:
new[key] += other[key]
elif isspmatrix(other):
csc = self.tocsc()
new = csc + other
elif isdense(other):
new = other + self.todense()
else:
return NotImplemented
return new
def __neg__(self):
new = dok_matrix(self.shape, dtype=self.dtype)
for key in self.keys():
new[key] = -self[key]
return new
def _mul_scalar(self, other):
res_dtype = upcast_scalar(self.dtype, other)
# Multiply this scalar by every element.
new = dok_matrix(self.shape, dtype=res_dtype)
for (key, val) in iteritems(self):
new[key] = val * other
return new
def _mul_vector(self, other):
# matrix * vector
result = np.zeros(self.shape[0], dtype=upcast(self.dtype,other.dtype))
for (i,j),v in iteritems(self):
result[i] += v * other[j]
return result
def _mul_multivector(self, other):
# matrix * multivector
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
for (i,j),v in iteritems(self):
result[i,:] += v * other[j,:]
return result
def __imul__(self, other):
if isscalarlike(other):
# Multiply this scalar by every element.
for (key, val) in iteritems(self):
self[key] = val * other
# new.dtype.char = self.dtype.char
return self
else:
return NotImplemented
def __truediv__(self, other):
if isscalarlike(other):
res_dtype = upcast_scalar(self.dtype, other)
new = dok_matrix(self.shape, dtype=res_dtype)
# Multiply this scalar by every element.
for (key, val) in iteritems(self):
new[key] = val / other
# new.dtype.char = self.dtype.char
return new
else:
return self.tocsr() / other
def __itruediv__(self, other):
if isscalarlike(other):
# Multiply this scalar by every element.
for (key, val) in iteritems(self):
self[key] = val / other
return self
else:
return NotImplemented
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? For now it returns the number
# of non-zeros.
def transpose(self):
""" Return the transpose
"""
M, N = self.shape
new = dok_matrix((N, M), dtype=self.dtype)
for key, value in iteritems(self):
new[key[1], key[0]] = value
return new
def conjtransp(self):
""" Return the conjugate transpose
"""
M, N = self.shape
new = dok_matrix((N, M), dtype=self.dtype)
for key, value in iteritems(self):
new[key[1], key[0]] = np.conj(value)
return new
def copy(self):
new = dok_matrix(self.shape, dtype=self.dtype)
new.update(self)
return new
def getrow(self, i):
"""Returns a copy of row i of the matrix as a (1 x n)
DOK matrix.
"""
out = self.__class__((1, self.shape[1]), dtype=self.dtype)
for j in range(self.shape[1]):
out[0, j] = self[i, j]
return out
def getcol(self, j):
"""Returns a copy of column j of the matrix as a (m x 1)
DOK matrix.
"""
out = self.__class__((self.shape[0], 1), dtype=self.dtype)
for i in range(self.shape[0]):
out[i, 0] = self[i, j]
return out
def tocoo(self):
""" Return a copy of this matrix in COOrdinate format"""
from .coo import coo_matrix
if self.nnz == 0:
return coo_matrix(self.shape, dtype=self.dtype)
else:
idx_dtype = get_index_dtype(maxval=max(self.shape[0], self.shape[1]))
data = np.asarray(_list(self.values()), dtype=self.dtype)
indices = np.asarray(_list(self.keys()), dtype=idx_dtype).T
return coo_matrix((data,indices), shape=self.shape, dtype=self.dtype)
def todok(self,copy=False):
if copy:
return self.copy()
else:
return self
def tocsr(self):
""" Return a copy of this matrix in Compressed Sparse Row format"""
return self.tocoo().tocsr()
def tocsc(self):
""" Return a copy of this matrix in Compressed Sparse Column format"""
return self.tocoo().tocsc()
def resize(self, shape):
""" Resize the matrix in-place to dimensions given by 'shape'.
Any non-zero elements that lie outside the new shape are removed.
"""
if not isshape(shape):
raise TypeError("dimensions must be a 2-tuple of positive"
" integers")
newM, newN = shape
M, N = self.shape
if newM < M or newN < N:
# Remove all elements outside new dimensions
for (i, j) in list(self.keys()):
if i >= newM or j >= newN:
del self[i, j]
self._shape = shape
def _list(x):
"""Force x to a list."""
if not isinstance(x, list):
x = list(x)
return x
def isspmatrix_dok(x):
return isinstance(x, dok_matrix)
def _prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
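# --- Illustrative usage sketch (added for documentation; not part of the
# original module). A minimal, hedged example of incremental construction and
# conversion, relying only on the methods defined above.
def _example_dok_matrix():
    A = dok_matrix((3, 3), dtype=float)
    A[0, 1] = 2.0
    A[2, 2] = 5.0
    assert A.nnz == 2
    assert A[1, 1] == 0.0              # missing entries read back as zero
    assert A.transpose()[1, 0] == 2.0  # transpose returns a new dok_matrix
    # Convert once construction is finished; COO/CSR are better for arithmetic.
    return A.tocoo(), A.tocsr()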
|
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import os
import sys
import tempfile
sys.path.append("..")
import click # noqa: E402
from platformio import fs, util # noqa: E402
from platformio.package.manager.platform import PlatformPackageManager # noqa: E402
from platformio.platform.factory import PlatformFactory # noqa: E402
try:
from urlparse import ParseResult, urlparse, urlunparse
except ImportError:
from urllib.parse import ParseResult, urlparse, urlunparse
RST_COPYRIGHT = """.. Copyright (c) 2014-present PlatformIO <contact@platformio.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
DOCS_ROOT_DIR = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "docs")
)
REGCLIENT = PlatformPackageManager().get_registry_client_instance()
def reg_package_url(type_, owner, name):
if type_ == "library":
type_ = "libraries"
else:
type_ += "s"
return f"https://registry.platformio.org/{type_}/{owner}/{name}"
def campaign_url(url, source="platformio.org", medium="docs"):
data = urlparse(url)
query = data.query
if query:
query += "&"
query += "utm_source=%s&utm_medium=%s" % (source, medium)
return urlunparse(
ParseResult(
data.scheme, data.netloc, data.path, data.params, query, data.fragment
)
)
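# --- Illustrative sketch (added for documentation; not part of the original
# script). A hedged example of the two URL helpers above; the package and page
# names are made up for illustration.
def _example_urls():
    assert reg_package_url("platform", "platformio", "ststm32") == (
        "https://registry.platformio.org/platforms/platformio/ststm32"
    )
    assert campaign_url("https://docs.platformio.org/page.html") == (
        "https://docs.platformio.org/page.html"
        "?utm_source=platformio.org&utm_medium=docs"
    )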
def install_platforms():
print("Installing platforms...")
page = 1
pm = PlatformPackageManager()
while True:
result = REGCLIENT.list_packages(filters=dict(types=["platform"]), page=page)
for item in result["items"]:
spec = "%s/%s" % (item["owner"]["username"], item["name"])
skip_conds = [
item["owner"]["username"] != "platformio",
item["tier"] == "community",
]
if all(skip_conds):
click.secho("Skip community platform: %s" % spec, fg="yellow")
continue
pm.install(spec, skip_default_package=True)
page += 1
if not result["items"] or result["page"] * result["limit"] >= result["total"]:
break
@functools.cache
def get_frameworks():
items = {}
for pkg in PlatformPackageManager().get_installed():
p = PlatformFactory.new(pkg)
for name, options in (p.frameworks or {}).items():
if name in items or not set(options.keys()).issuperset(
set(["title", "description"])
):
continue
items[name] = dict(
name=name, title=options["title"], description=options["description"]
)
return sorted(items.values(), key=lambda item: item["name"])
def is_compat_platform_and_framework(platform, framework):
p = PlatformFactory.new(platform)
return framework in (p.frameworks or {}).keys()
def generate_boards_table(boards, skip_columns=None):
columns = [
("Name", ":ref:`board_{platform}_{id}`"),
("Platform", ":ref:`platform_{platform}`"),
("Debug", "{debug}"),
("MCU", "{mcu}"),
("Frequency", "{f_cpu}MHz"),
("Flash", "{rom}"),
("RAM", "{ram}"),
]
lines = []
lines.append(
"""
.. list-table::
:header-rows: 1
"""
)
# add header
for (name, template) in columns:
if skip_columns and name in skip_columns:
continue
prefix = " * - " if name == "Name" else " - "
lines.append(prefix + name)
for data in sorted(boards, key=lambda item: item["name"]):
has_onboard_debug = data.get("debug") and any(
t.get("onboard") for (_, t) in data["debug"]["tools"].items()
)
debug = "No"
if has_onboard_debug:
debug = "On-board"
elif data.get("debug"):
debug = "External"
variables = dict(
id=data["id"],
name=data["name"],
platform=data["platform"],
debug=debug,
mcu=data["mcu"].upper(),
f_cpu=int(data["fcpu"] / 1000000.0),
ram=fs.humanize_file_size(data["ram"]),
rom=fs.humanize_file_size(data["rom"]),
)
for (name, template) in columns:
if skip_columns and name in skip_columns:
continue
prefix = " * - " if name == "Name" else " - "
lines.append(prefix + template.format(**variables))
if lines:
lines.append("")
return lines
def generate_frameworks_contents(frameworks):
if not frameworks:
return []
lines = []
lines.append(
"""
Frameworks
----------
.. list-table::
:header-rows: 1
* - Name
- Description"""
)
known = set()
for framework in get_frameworks():
known.add(framework["name"])
if framework["name"] not in frameworks:
continue
lines.append(
"""
* - :ref:`framework_{name}`
- {description}""".format(
**framework
)
)
if set(frameworks) - known:
click.secho("Unknown frameworks %s " % (set(frameworks) - known), fg="red")
return lines
def generate_platforms_contents(platforms):
if not platforms:
return []
lines = []
lines.append(
"""
Platforms
---------
.. list-table::
:header-rows: 1
* - Name
- Description"""
)
for name in sorted(platforms):
p = PlatformFactory.new(name)
lines.append(
"""
* - :ref:`platform_{name}`
- {description}""".format(
name=p.name, description=p.description
)
)
return lines
def generate_debug_contents(boards, skip_board_columns=None, extra_rst=None):
if not skip_board_columns:
skip_board_columns = []
skip_board_columns.append("Debug")
lines = []
onboard_debug = [
b
for b in boards
if b.get("debug")
and any(t.get("onboard") for (_, t) in b["debug"]["tools"].items())
]
external_debug = [b for b in boards if b.get("debug") and b not in onboard_debug]
if not onboard_debug and not external_debug:
return lines
lines.append(
"""
Debugging
---------
:ref:`piodebug` - "1-click" solution for debugging with a zero configuration.
.. contents::
:local:
"""
)
if extra_rst:
lines.append(".. include:: %s" % extra_rst)
lines.append(
"""
Tools & Debug Probes
~~~~~~~~~~~~~~~~~~~~
Supported debugging tools are listed in the "Debug" column. For more detailed
information, please scroll the table horizontally.
You can switch between debugging :ref:`debugging_tools` using
:ref:`projectconf_debug_tool` option in :ref:`projectconf`.
.. warning::
    You will need to install debug tool drivers depending on your system.
    Please click on a compatible debug tool below for further instructions.
"""
)
if onboard_debug:
lines.append(
"""
On-Board Debug Tools
^^^^^^^^^^^^^^^^^^^^
Boards listed below have an on-board debug probe and **ARE READY** for debugging!
You do not need to use/buy an external debug probe.
"""
)
lines.extend(
generate_boards_table(onboard_debug, skip_columns=skip_board_columns)
)
if external_debug:
lines.append(
"""
External Debug Tools
^^^^^^^^^^^^^^^^^^^^
Boards listed below are compatible with :ref:`piodebug` but **DEPEND ON**
an external debug probe. They **ARE NOT READY** for debugging.
Please click on a board name for further details.
"""
)
lines.extend(
generate_boards_table(external_debug, skip_columns=skip_board_columns)
)
return lines
def generate_packages(platform, packages, is_embedded):
if not packages:
return
lines = []
lines.append(
"""
Packages
--------
"""
)
lines.append(
""".. list-table::
:header-rows: 1
* - Name
- Description"""
)
for name, options in dict(sorted(packages.items())).items():
package = REGCLIENT.get_package(
"tool", options.get("owner", "platformio"), name
)
lines.append(
"""
* - `{name} <{url}>`__
- {description}""".format(
name=package["name"],
url=reg_package_url(
"tool", package["owner"]["username"], package["name"]
),
description=package["description"],
)
)
if is_embedded:
lines.append(
"""
.. warning::
**Linux Users**:
* Install "udev" rules :ref:`faq_udev_rules`
* Raspberry Pi users, please read this article
`Enable serial port on Raspberry Pi <https://hallard.me/enable-serial-port-on-raspberry-pi/>`__.
"""
)
if platform == "teensy":
lines.append(
"""
**Windows Users:**
Teensy programming uses only Windows built-in HID
drivers. When Teensy is programmed to act as a USB Serial device,
Windows XP, Vista, 7 and 8 require `this serial driver
<http://www.pjrc.com/teensy/serial_install.exe>`_
to access the COM port your program uses. No special driver
installation is necessary on Windows 10.
"""
)
else:
lines.append(
"""
**Windows Users:**
Please check that you have a correctly installed USB driver from the board
manufacturer.
"""
)
return "\n".join(lines)
def generate_platform(pkg, rst_dir):
name = pkg.metadata.name
print("Processing platform: %s" % name)
compatible_boards = [
board
for board in PlatformPackageManager().get_installed_boards()
if name == board["platform"]
]
lines = []
lines.append(RST_COPYRIGHT)
p = PlatformFactory.new(name)
assert p.repository_url.endswith(".git")
github_url = p.repository_url[:-4]
registry_url = reg_package_url("platform", pkg.metadata.spec.owner, name)
lines.append(".. _platform_%s:" % p.name)
lines.append("")
lines.append(p.title)
lines.append("=" * len(p.title))
lines.append("")
lines.append(":Registry:")
lines.append(" `%s <%s>`__" % (registry_url, registry_url))
lines.append(":Configuration:")
lines.append(" :ref:`projectconf_env_platform` = ``%s``" % p.name)
lines.append("")
lines.append(p.description)
lines.append(
"""
For more detailed information please visit `vendor site <%s>`_."""
% campaign_url(p.homepage)
)
lines.append(
"""
.. contents:: Contents
:local:
:depth: 1
"""
)
#
# Extra
#
if os.path.isfile(os.path.join(rst_dir, "%s_extra.rst" % name)):
lines.append(".. include:: %s_extra.rst" % p.name)
#
# Examples
#
lines.append(
"""
Examples
--------
Examples are listed from `%s development platform repository <%s>`_:
"""
% (p.title, campaign_url("%s/tree/master/examples" % github_url))
)
examples_dir = os.path.join(p.get_dir(), "examples")
if os.path.isdir(examples_dir):
for eitem in os.listdir(examples_dir):
example_dir = os.path.join(examples_dir, eitem)
if not os.path.isdir(example_dir) or not os.listdir(example_dir):
continue
url = "%s/tree/master/examples/%s" % (github_url, eitem)
lines.append("* `%s <%s>`_" % (eitem, campaign_url(url)))
#
# Debugging
#
if compatible_boards:
lines.extend(
generate_debug_contents(
compatible_boards,
skip_board_columns=["Platform"],
extra_rst="%s_debug.rst" % name
if os.path.isfile(os.path.join(rst_dir, "%s_debug.rst" % name))
else None,
)
)
#
# Development version of dev/platform
#
lines.append(
"""
Stable and upstream versions
----------------------------
You can switch between `stable releases <{github_url}/releases>`__
of {title} development platform and the latest upstream version using
:ref:`projectconf_env_platform` option in :ref:`projectconf` as described below.
Stable
~~~~~~
.. code-block:: ini
; Latest stable version
[env:latest_stable]
platform = {name}
board = ...
; Custom stable version
[env:custom_stable]
platform = {name}@x.y.z
board = ...
Upstream
~~~~~~~~
.. code-block:: ini
[env:upstream_develop]
platform = {github_url}.git
board = ...
""".format(
name=p.name, title=p.title, github_url=github_url
)
)
#
# Packages
#
_packages_content = generate_packages(name, p.packages, p.is_embedded())
if _packages_content:
lines.append(_packages_content)
#
# Frameworks
#
compatible_frameworks = []
for framework in get_frameworks():
if is_compat_platform_and_framework(name, framework["name"]):
compatible_frameworks.append(framework["name"])
lines.extend(generate_frameworks_contents(compatible_frameworks))
#
# Boards
#
if compatible_boards:
vendors = {}
for board in compatible_boards:
if board["vendor"] not in vendors:
vendors[board["vendor"]] = []
vendors[board["vendor"]].append(board)
lines.append(
"""
Boards
------
.. note::
    * You can list pre-configured boards with the :ref:`cmd_boards` command
    * For more detailed ``board`` information please scroll the tables below
      horizontally.
"""
)
for vendor, boards in sorted(vendors.items()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.extend(generate_boards_table(boards, skip_columns=["Platform"]))
return "\n".join(lines)
def update_platform_docs():
platforms_dir = os.path.join(DOCS_ROOT_DIR, "platforms")
for pkg in PlatformPackageManager().get_installed():
rst_path = os.path.join(platforms_dir, "%s.rst" % pkg.metadata.name)
with open(rst_path, "w") as f:
f.write(generate_platform(pkg, platforms_dir))
def generate_framework(type_, framework, rst_dir=None):
print("Processing framework: %s" % type_)
compatible_platforms = [
pkg
for pkg in PlatformPackageManager().get_installed()
if is_compat_platform_and_framework(pkg.metadata.name, type_)
]
compatible_boards = [
board
for board in PlatformPackageManager().get_installed_boards()
if type_ in board["frameworks"]
]
lines = []
lines.append(RST_COPYRIGHT)
lines.append(".. _framework_%s:" % type_)
lines.append("")
lines.append(framework["title"])
lines.append("=" * len(framework["title"]))
lines.append("")
lines.append(":Configuration:")
lines.append(" :ref:`projectconf_env_framework` = ``%s``" % type_)
lines.append("")
lines.append(framework["description"])
lines.append(
"""
.. contents:: Contents
:local:
:depth: 1"""
)
# Extra
if os.path.isfile(os.path.join(rst_dir, "%s_extra.rst" % type_)):
lines.append(".. include:: %s_extra.rst" % type_)
if compatible_platforms:
# Platforms
lines.extend(
generate_platforms_contents(
[pkg.metadata.name for pkg in compatible_platforms]
)
)
# examples
lines.append(
"""
Examples
--------
"""
)
for pkg in compatible_platforms:
p = PlatformFactory.new(pkg)
lines.append(
"* `%s for %s <%s>`_"
% (
framework["title"],
p.title,
campaign_url("%s/tree/master/examples" % p.repository_url[:-4]),
)
)
#
# Debugging
#
if compatible_boards:
lines.extend(
generate_debug_contents(
compatible_boards,
extra_rst="%s_debug.rst" % type_
if os.path.isfile(os.path.join(rst_dir, "%s_debug.rst" % type_))
else None,
)
)
#
# Boards
#
if compatible_boards:
vendors = {}
for board in compatible_boards:
if board["vendor"] not in vendors:
vendors[board["vendor"]] = []
vendors[board["vendor"]].append(board)
lines.append(
"""
Boards
------
.. note::
    * You can list pre-configured boards with the :ref:`cmd_boards` command
    * For more detailed ``board`` information please scroll the tables below horizontally.
"""
)
for vendor, boards in sorted(vendors.items()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.extend(generate_boards_table(boards))
return "\n".join(lines)
def update_framework_docs():
frameworks_dir = os.path.join(DOCS_ROOT_DIR, "frameworks")
for framework in get_frameworks():
name = framework["name"]
rst_path = os.path.join(frameworks_dir, "%s.rst" % name)
with open(rst_path, "w") as f:
f.write(generate_framework(name, framework, frameworks_dir))
def update_boards():
print("Updating boards...")
lines = []
lines.append(RST_COPYRIGHT)
lines.append(".. _boards:")
lines.append("")
lines.append("Boards")
lines.append("======")
lines.append(
"""
Rapid embedded development, continuous integration, and IDE integration in a few
steps with PlatformIO, thanks to the built-in project generator for the most
popular embedded boards and IDEs.
.. note::
    * You can list pre-configured boards with the :ref:`cmd_boards` command
    * For more detailed ``board`` information please scroll the tables below horizontally.
"""
)
platforms = {}
installed_boards = PlatformPackageManager().get_installed_boards()
for data in installed_boards:
platform = data["platform"]
if platform in platforms:
platforms[platform].append(data)
else:
platforms[platform] = [data]
for platform, boards in sorted(platforms.items()):
p = PlatformFactory.new(platform)
lines.append(p.title)
lines.append("-" * len(p.title))
lines.append(
"""
.. toctree::
:maxdepth: 1
"""
)
for board in sorted(boards, key=lambda item: item["name"]):
lines.append(" %s/%s" % (platform, board["id"]))
lines.append("")
emboards_rst = os.path.join(DOCS_ROOT_DIR, "boards", "index.rst")
with open(emboards_rst, "w") as f:
f.write("\n".join(lines))
# individual board page
for data in installed_boards:
rst_path = os.path.join(
DOCS_ROOT_DIR, "boards", data["platform"], "%s.rst" % data["id"]
)
if not os.path.isdir(os.path.dirname(rst_path)):
os.makedirs(os.path.dirname(rst_path))
update_embedded_board(rst_path, data)
def update_embedded_board(rst_path, board):
platform = PlatformFactory.new(board["platform"])
board_config = platform.board_config(board["id"])
board_manifest_url = platform.repository_url
assert board_manifest_url
if board_manifest_url.endswith(".git"):
board_manifest_url = board_manifest_url[:-4]
board_manifest_url += "/blob/master/boards/%s.json" % board["id"]
variables = dict(
id=board["id"],
name=board["name"],
platform=board["platform"],
platform_description=platform.description,
url=campaign_url(board["url"]),
mcu=board_config.get("build", {}).get("mcu", ""),
mcu_upper=board["mcu"].upper(),
f_cpu=board["fcpu"],
f_cpu_mhz=int(int(board["fcpu"]) / 1000000),
ram=fs.humanize_file_size(board["ram"]),
rom=fs.humanize_file_size(board["rom"]),
vendor=board["vendor"],
board_manifest_url=board_manifest_url,
upload_protocol=board_config.get("upload.protocol", ""),
)
lines = [RST_COPYRIGHT]
lines.append(".. _board_{platform}_{id}:".format(**variables))
lines.append("")
lines.append(board["name"])
lines.append("=" * len(board["name"]))
lines.append(
"""
.. contents::
Hardware
--------
Platform :ref:`platform_{platform}`: {platform_description}
.. list-table::
* - **Microcontroller**
- {mcu_upper}
* - **Frequency**
- {f_cpu_mhz:d}MHz
* - **Flash**
- {rom}
* - **RAM**
- {ram}
* - **Vendor**
- `{vendor} <{url}>`__
""".format(
**variables
)
)
#
# Configuration
#
lines.append(
"""
Configuration
-------------
Please use ``{id}`` ID for :ref:`projectconf_env_board` option in :ref:`projectconf`:
.. code-block:: ini
[env:{id}]
platform = {platform}
board = {id}
You can override default {name} settings per build environment using
``board_***`` option, where ``***`` is a JSON object path from
board manifest `{id}.json <{board_manifest_url}>`_. For example,
``board_build.mcu``, ``board_build.f_cpu``, etc.
.. code-block:: ini
[env:{id}]
platform = {platform}
board = {id}
; change microcontroller
board_build.mcu = {mcu}
; change MCU frequency
board_build.f_cpu = {f_cpu}L
""".format(
**variables
)
)
#
# Uploading
#
upload_protocols = board_config.get("upload.protocols", [])
if len(upload_protocols) > 1:
lines.append(
"""
Uploading
---------
%s supports the following uploading protocols:
"""
% board["name"]
)
for protocol in sorted(upload_protocols):
lines.append("* ``%s``" % protocol)
lines.append(
"""
The default protocol is ``%s``"""
% variables["upload_protocol"]
)
lines.append(
"""
You can change the upload protocol using the :ref:`projectconf_upload_protocol` option:
.. code-block:: ini
[env:{id}]
platform = {platform}
board = {id}
upload_protocol = {upload_protocol}
""".format(
**variables
)
)
#
# Debugging
#
lines.append("Debugging")
lines.append("---------")
if not board.get("debug"):
lines.append(
":ref:`piodebug` currently does not support {name} board.".format(
**variables
)
)
else:
default_debug_tool = board_config.get_debug_tool_name()
has_onboard_debug = any(
t.get("onboard") for (_, t) in board["debug"]["tools"].items()
)
lines.append(
"""
:ref:`piodebug` - "1-click" solution for debugging with zero configuration.
.. warning::
You will need to install debug tool drivers depending on your system.
    Please click on a compatible debug tool below for further
    instructions and configuration information.
You can switch between debugging :ref:`debugging_tools` using
:ref:`projectconf_debug_tool` option in :ref:`projectconf`.
"""
)
if has_onboard_debug:
lines.append(
"{name} has on-board debug probe and **IS READY** for "
"debugging. You don't need to use/buy external debug probe.".format(
**variables
)
)
else:
lines.append(
"{name} does not have on-board debug probe and **IS NOT "
"READY** for debugging. You will need to use/buy one of "
"external probe listed below.".format(**variables)
)
lines.append(
"""
.. list-table::
:header-rows: 1
* - Compatible Tools
- On-board
- Default"""
)
for (tool_name, tool_data) in sorted(board["debug"]["tools"].items()):
lines.append(
""" * - :ref:`debugging_tool_{name}`
- {onboard}
- {default}""".format(
name=tool_name,
onboard="Yes" if tool_data.get("onboard") else "",
default="Yes" if tool_name == default_debug_tool else "",
)
)
if board["frameworks"]:
lines.extend(generate_frameworks_contents(board["frameworks"]))
with open(rst_path, "w") as f:
f.write("\n".join(lines))
def update_debugging():
tool_to_platforms = {}
tool_to_boards = {}
vendors = {}
platforms = []
frameworks = []
for data in PlatformPackageManager().get_installed_boards():
if not data.get("debug"):
continue
for tool in data["debug"]["tools"]:
tool = str(tool)
if tool not in tool_to_platforms:
tool_to_platforms[tool] = []
tool_to_platforms[tool].append(data["platform"])
if tool not in tool_to_boards:
tool_to_boards[tool] = []
tool_to_boards[tool].append(data["id"])
platforms.append(data["platform"])
frameworks.extend(data["frameworks"])
vendor = data["vendor"]
if vendor in vendors:
vendors[vendor].append(data)
else:
vendors[vendor] = [data]
platforms = sorted(set(platforms))
frameworks = sorted(set(frameworks))
lines = [".. _debugging_platforms:"]
lines.extend(generate_platforms_contents(platforms))
lines.extend(generate_frameworks_contents(frameworks))
# Boards
lines.append(
"""
Boards
------
.. note::
    For more detailed ``board`` information please scroll the tables below horizontally.
"""
)
for vendor, boards in sorted(vendors.items()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.extend(generate_boards_table(boards))
# save
with open(
os.path.join(fs.get_source_dir(), "..", "docs", "plus", "debugging.rst"), "r+"
) as fp:
content = fp.read()
fp.seek(0)
fp.truncate()
fp.write(
content[: content.index(".. _debugging_platforms:")] + "\n".join(lines)
)
# Debug tools
for tool, platforms in tool_to_platforms.items():
tool_path = os.path.join(DOCS_ROOT_DIR, "plus", "debug-tools", "%s.rst" % tool)
if not os.path.isfile(tool_path):
click.secho("Unknown debug tool `%s`" % tool, fg="red")
continue
platforms = sorted(set(platforms))
lines = [".. begin_platforms"]
lines.extend(generate_platforms_contents(platforms))
tool_frameworks = []
for platform in platforms:
for framework in frameworks:
if is_compat_platform_and_framework(platform, framework):
tool_frameworks.append(framework)
lines.extend(generate_frameworks_contents(tool_frameworks))
lines.append(
"""
Boards
------
.. note::
    For more detailed ``board`` information please scroll the tables below horizontally.
"""
)
lines.extend(
generate_boards_table(
[
b
for b in PlatformPackageManager().get_installed_boards()
if b["id"] in tool_to_boards[tool]
],
skip_columns=None,
)
)
with open(tool_path, "r+") as fp:
content = fp.read()
fp.seek(0)
fp.truncate()
fp.write(content[: content.index(".. begin_platforms")] + "\n".join(lines))
def update_project_examples():
platform_readme_tpl = """
# {title}: development platform for [PlatformIO](https://platformio.org)
{description}
* [Home](https://platformio.org/platforms/{name}) (home page in PlatformIO Registry)
* [Documentation](https://docs.platformio.org/page/platforms/{name}.html) (advanced usage, packages, boards, frameworks, etc.)
# Examples
{examples}
"""
framework_readme_tpl = """
# {title}: framework for [PlatformIO](https://platformio.org)
{description}
* [Home](https://platformio.org/frameworks/{name}) (home page in PlatformIO Registry)
* [Documentation](https://docs.platformio.org/page/frameworks/{name}.html)
# Examples
{examples}
"""
project_examples_dir = os.path.join(fs.get_source_dir(), "..", "examples")
framework_examples_md_lines = {}
embedded = []
desktop = []
for pkg in PlatformPackageManager().get_installed():
p = PlatformFactory.new(pkg)
github_url = p.repository_url[:-4]
# Platform README
platform_examples_dir = os.path.join(p.get_dir(), "examples")
examples_md_lines = []
if os.path.isdir(platform_examples_dir):
for item in sorted(os.listdir(platform_examples_dir)):
example_dir = os.path.join(platform_examples_dir, item)
if not os.path.isdir(example_dir) or not os.listdir(example_dir):
continue
url = "%s/tree/master/examples/%s" % (github_url, item)
examples_md_lines.append("* [%s](%s)" % (item, url))
readme_dir = os.path.join(project_examples_dir, "platforms", p.name)
if not os.path.isdir(readme_dir):
os.makedirs(readme_dir)
with open(os.path.join(readme_dir, "README.md"), "w") as fp:
fp.write(
platform_readme_tpl.format(
name=p.name,
title=p.title,
description=p.description,
examples="\n".join(examples_md_lines),
)
)
# Framework README
for framework in get_frameworks():
if not is_compat_platform_and_framework(p.name, framework["name"]):
continue
if framework["name"] not in framework_examples_md_lines:
framework_examples_md_lines[framework["name"]] = []
lines = []
lines.append("- [%s](%s)" % (p.title, github_url))
lines.extend(" %s" % line for line in examples_md_lines)
lines.append("")
framework_examples_md_lines[framework["name"]].extend(lines)
# Root README
line = "* [%s](%s)" % (p.title, "%s/tree/master/examples" % github_url)
if p.is_embedded():
embedded.append(line)
else:
desktop.append(line)
# Frameworks
frameworks = []
for framework in get_frameworks():
if framework["name"] not in framework_examples_md_lines:
continue
readme_dir = os.path.join(project_examples_dir, "frameworks", framework["name"])
if not os.path.isdir(readme_dir):
os.makedirs(readme_dir)
with open(os.path.join(readme_dir, "README.md"), "w") as fp:
fp.write(
framework_readme_tpl.format(
name=framework["name"],
title=framework["title"],
description=framework["description"],
examples="\n".join(framework_examples_md_lines[framework["name"]]),
)
)
url = campaign_url(
"https://docs.platformio.org/en/latest/frameworks/%s.html#examples"
% framework["name"],
source="github",
medium="examples",
)
frameworks.append("* [%s](%s)" % (framework["title"], url))
with open(os.path.join(project_examples_dir, "README.md"), "w") as fp:
fp.write(
"""# PlatformIO Project Examples
- [Development platforms](#development-platforms):
- [Embedded](#embedded)
- [Desktop](#desktop)
- [Frameworks](#frameworks)
## Development platforms
### Embedded
%s
### Desktop
%s
## Frameworks
%s
"""
% ("\n".join(embedded), "\n".join(desktop), "\n".join(frameworks))
)
def main():
with tempfile.TemporaryDirectory() as tmp_dir:
print("Core directory: %s" % tmp_dir)
os.environ["PLATFORMIO_CORE_DIR"] = tmp_dir
install_platforms()
update_platform_docs()
update_framework_docs()
update_boards()
update_debugging()
update_project_examples()
if __name__ == "__main__":
sys.exit(main())
|
|
from etk.timeseries.annotation.utility import date_cell, text_cell, empty_cell, blank_cell, number_cell, row_key, column_key, column_orientation
import logging
class rectangular_block:
def __init__(self):
self.upper_row = 0
self.lower_row = 0
self.left_col = 0
self.right_col = 0
def __str__(self):
attrs = vars(self)
return ', '.join("%s: %s" % item for item in attrs.items())
def find_label_block(tags, start_break, row_lim, col_start, col_end, orientation, detected_blocks):
blocks = []
tag_user = tags
if orientation == row_key:
#Transpose tag matrix
tag_user = list(map(list, zip(*tags)))
for rb in find_label_block(tag_user, start_break, row_lim, col_start, col_end, column_key, detected_blocks):
t1, t2 = rb.upper_row, rb.lower_row
rb.upper_row, rb.lower_row = rb.left_col, rb.right_col
rb.left_col, rb.right_col = t1, t2
yield rb
else:
curr_row = row_lim
while curr_row < start_break or (curr_row < len(tags) and is_empty_label_row(tags[curr_row], col_end)):
curr_row += 1
# edit here to first go and look for the first non empty row that matches...
while curr_row < len(tags) and not is_empty_label_row(tags[curr_row], col_end):
curr_row += 1
#Should we check till start of sheet?
for col in range(col_end - 1, col_start - 1, -1): # should n't I only keep the closest?
#Note: Removing this as labels are not necessarily text. Run against dataset and check TODO
#if text_cell not in tags[curr_row-1][col]:
# continue
if empty_cell in tags[curr_row - 1][col]:
continue
if check_cell_in_block(detected_blocks, curr_row-1, col):
continue
if check_cell_in_block(blocks, curr_row-1, col):
continue
blocks.append(BFS_limited_block(curr_row-1, col, tags, row_lim, col_start, col_end))
for block in blocks:
minimum_boundary_rect = find_minimum_boundry_rectangle(block)
logging.info("Label Block: " + str(minimum_boundary_rect))
detected_blocks.append(block)
yield minimum_boundary_rect
def is_empty_label_row(tags, col_lim):
for i in range(col_lim):
if empty_cell not in tags[i]:
return False
else: # or maybe analyze the meaning of the text in order to find out if it is the end or not
continue
return True
def BFS_limited_block(row, col, tags, row_lim, col_start, col_end):
Q = []
Q.append((row, col))
block = []
directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]
while len(Q) > 0:
front = Q[0]
block.append(Q.pop(0))
# check the neighbors
for dir in directions:
if front[1] + dir[1] < len(tags[front[0]]) and front[1] + dir[1] < col_end and front[1] + dir[1] >= col_start and front[0] + dir[0] >= row_lim and front[0] + dir[0] < len(tags) and front[0] + dir[0] >= 0 and front[1] + dir[1] >= 0:
#Removing check for text cell
if empty_cell not in tags[front[0] + dir[0]][front[1] + dir[1]]:
if (front[0] + dir[0], front[1] + dir[1]) not in Q and (
front[0] + dir[0], front[1] + dir[1]) not in block:
Q.append((front[0] + dir[0], front[1] + dir[1]))
return block
def find_data_block(a, tags, time_header_begin_row, time_header_end_row, time_header_begin_col, time_header_end_col, time_series_orientation):
if time_series_orientation == column_orientation:
#Transpose tag matrix
        tag_user = list(map(list, zip(*tags)))  # materialize so rows can be indexed in the recursive call
return find_data_block(a, tag_user, time_header_begin_col, time_header_end_col, time_header_begin_row, time_header_end_row, row_key)
else:
# find the first non-empty row (should I add containing only numbers?)
start_row = time_header_end_row
        for i in range(time_header_end_row, len(tags)):  # continue until reaching the first non-empty row
if is_a_time_series_row(tags[i], time_header_begin_col, time_header_end_col):
start_row = i
break
j = start_row
for j in range(start_row, len(tags)):
if is_a_time_series_row(tags[j], time_header_begin_col, time_header_end_col):
continue
return start_row, j
return start_row, j
def find_minimum_boundry_rectangle(block):
first_cell = block[0]
rb = rectangular_block()
min_row = first_cell[0]
min_col = first_cell[1]
max_row = min_row
max_col = min_col
for cell in block:
if cell[0] < min_row:
min_row = cell[0]
if cell[0] > max_row:
max_row = cell[0]
if cell[1] < min_col:
min_col = cell[1]
if cell[1] > max_col:
max_col = cell[1]
rb.left_col = min_col
rb.right_col = max_col+1
rb.upper_row = min_row
rb.lower_row = max_row + 1
return rb
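# Worked example (hypothetical cell coordinates): the rectangle is half-open on the
# lower/right edges, so a block of cells [(2, 3), (2, 4), (3, 3)] yields
# upper_row=2, lower_row=4, left_col=3, right_col=5.
# rect = find_minimum_boundry_rectangle([(2, 3), (2, 4), (3, 3)])
# print(rect)  # upper_row: 2, lower_row: 4, left_col: 3, right_col: 5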
def BFS_date_block(row, col, tags, desired_tag):
logging.info("Starting BFS from [%d, %d]", row, col)
Q = []
Q.append((row, col))
block = []
directions = [(0,1), (0, -1), (1, 0), (-1, 0)]
while len(Q) > 0:
front = Q[0]
block.append(Q.pop(0))
# check the neighbors
for dir in directions:
if front[1] + dir[1] < len(tags[front[0]]) and front[0]+dir[0] < len(tags) and front[0]+dir[0] >= 0 and front[1] + dir[1] >= 0:
if desired_tag in tags[front[0]+dir[0]][front[1]+dir[1]]:
if (front[0]+dir[0], front[1]+dir[1]) not in Q and (front[0]+dir[0], front[1]+dir[1]) not in block:
Q.append((front[0]+dir[0], front[1]+dir[1]))
logging.info(block)
return block
def check_in_merged_block(blocks, row, col):
for block in blocks:
if row >= block[0] and row < block[1] and col >= block[2] and col < block[3]:
return True
return False
def check_cell_in_block(blocks, row, col):
for block in blocks:
for cell in block:
if cell[0] == row and cell[1] == col:
return True
return False
def is_a_time_series_row(row, left_col, right_col):
seen_tags = row[left_col:right_col]
seen_number = False
# if number cell not available or cells are empty
for x in seen_tags:
if x == {text_cell} or x == {date_cell}:
return False
if number_cell in x:
seen_number = True
continue
if blank_cell in x:
seen_number = True
continue
if empty_cell in x:
continue
if not seen_number:
return False
return True
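# Illustrative example using the tag sets above: a row slice such as
# [{number_cell}, {blank_cell}, {empty_cell}] is accepted as a time-series row,
# while [{text_cell}, {number_cell}] is rejected because of the pure text cell.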
def is_empty_row(row_tags):
for tag in row_tags:
if empty_cell not in tag:
return False
return True
def is_empty_col(tags, start_row, end_row, col):
for row in range(start_row, end_row):
if col >= len(tags[row]):
continue
if empty_cell not in tags[row][col]:
return False
return True
def find_closest_date_cell(tags, row, col, borders):
while row > borders.upper_row:
row -= 1
if date_cell in tags[row][col]:
return row, col
while col > borders.left_col:
col -= 1
if date_cell in tags[row][col]:
return row, col
|
|
#!/usr/bin/env python3
# Copyright (c) 2020 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Compact, self-contained ELF implementation for bitcoin-core security checks.
'''
import struct
import types
from typing import Dict, List, Optional, Union, Tuple
# you can find all these values in elf.h
EI_NIDENT = 16
# Byte indices in e_ident
EI_CLASS = 4 # ELFCLASSxx
EI_DATA = 5 # ELFDATAxxxx
ELFCLASS32 = 1 # 32-bit
ELFCLASS64 = 2 # 64-bit
ELFDATA2LSB = 1 # little endian
ELFDATA2MSB = 2 # big endian
# relevant values for e_machine
EM_386 = 3
EM_PPC64 = 21
EM_ARM = 40
EM_AARCH64 = 183
EM_X86_64 = 62
EM_RISCV = 243
# relevant values for e_type
ET_DYN = 3
# relevant values for sh_type
SHT_PROGBITS = 1
SHT_STRTAB = 3
SHT_DYNAMIC = 6
SHT_DYNSYM = 11
SHT_GNU_verneed = 0x6ffffffe
SHT_GNU_versym = 0x6fffffff
# relevant values for p_type
PT_LOAD = 1
PT_GNU_STACK = 0x6474e551
PT_GNU_RELRO = 0x6474e552
# relevant values for p_flags
PF_X = (1 << 0)
PF_W = (1 << 1)
PF_R = (1 << 2)
# relevant values for d_tag
DT_NEEDED = 1
DT_FLAGS = 30
# relevant values of `d_un.d_val' in the DT_FLAGS entry
DF_BIND_NOW = 0x00000008
# relevant d_tags with string payload
STRING_TAGS = {DT_NEEDED}
# relevant values for ST_BIND subfield of st_info (symbol binding)
STB_LOCAL = 0
class ELFRecord(types.SimpleNamespace):
'''Unified parsing for ELF records.'''
def __init__(self, data: bytes, offset: int, eh: 'ELFHeader', total_size: Optional[int]) -> None:
hdr_struct = self.STRUCT[eh.ei_class][0][eh.ei_data]
if total_size is not None and hdr_struct.size > total_size:
raise ValueError(f'{self.__class__.__name__} header size too small ({total_size} < {hdr_struct.size})')
for field, value in zip(self.STRUCT[eh.ei_class][1], hdr_struct.unpack(data[offset:offset + hdr_struct.size])):
setattr(self, field, value)
def BiStruct(chars: str) -> Dict[int, struct.Struct]:
'''Compile a struct parser for both endians.'''
return {
ELFDATA2LSB: struct.Struct('<' + chars),
ELFDATA2MSB: struct.Struct('>' + chars),
}
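# Usage sketch: BiStruct('I') returns one struct parser per byte order, keyed by the
# EI_DATA value, e.g.
# BiStruct('I')[ELFDATA2LSB].unpack(b'\x01\x00\x00\x00')  # -> (1,)
# BiStruct('I')[ELFDATA2MSB].unpack(b'\x00\x00\x00\x01')  # -> (1,)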
class ELFHeader(ELFRecord):
FIELDS = ['e_type', 'e_machine', 'e_version', 'e_entry', 'e_phoff', 'e_shoff', 'e_flags', 'e_ehsize', 'e_phentsize', 'e_phnum', 'e_shentsize', 'e_shnum', 'e_shstrndx']
STRUCT = {
ELFCLASS32: (BiStruct('HHIIIIIHHHHHH'), FIELDS),
ELFCLASS64: (BiStruct('HHIQQQIHHHHHH'), FIELDS),
}
def __init__(self, data: bytes, offset: int) -> None:
self.e_ident = data[offset:offset + EI_NIDENT]
if self.e_ident[0:4] != b'\x7fELF':
raise ValueError('invalid ELF magic')
self.ei_class = self.e_ident[EI_CLASS]
self.ei_data = self.e_ident[EI_DATA]
super().__init__(data, offset + EI_NIDENT, self, None)
def __repr__(self) -> str:
return f'Header(e_ident={self.e_ident!r}, e_type={self.e_type}, e_machine={self.e_machine}, e_version={self.e_version}, e_entry={self.e_entry}, e_phoff={self.e_phoff}, e_shoff={self.e_shoff}, e_flags={self.e_flags}, e_ehsize={self.e_ehsize}, e_phentsize={self.e_phentsize}, e_phnum={self.e_phnum}, e_shentsize={self.e_shentsize}, e_shnum={self.e_shnum}, e_shstrndx={self.e_shstrndx})'
class Section(ELFRecord):
name: Optional[bytes] = None
FIELDS = ['sh_name', 'sh_type', 'sh_flags', 'sh_addr', 'sh_offset', 'sh_size', 'sh_link', 'sh_info', 'sh_addralign', 'sh_entsize']
STRUCT = {
ELFCLASS32: (BiStruct('IIIIIIIIII'), FIELDS),
ELFCLASS64: (BiStruct('IIQQQQIIQQ'), FIELDS),
}
def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
super().__init__(data, offset, eh, eh.e_shentsize)
self._data = data
def __repr__(self) -> str:
return f'Section(sh_name={self.sh_name}({self.name!r}), sh_type=0x{self.sh_type:x}, sh_flags={self.sh_flags}, sh_addr=0x{self.sh_addr:x}, sh_offset=0x{self.sh_offset:x}, sh_size={self.sh_size}, sh_link={self.sh_link}, sh_info={self.sh_info}, sh_addralign={self.sh_addralign}, sh_entsize={self.sh_entsize})'
def contents(self) -> bytes:
'''Return section contents.'''
return self._data[self.sh_offset:self.sh_offset + self.sh_size]
class ProgramHeader(ELFRecord):
STRUCT = {
# different ELF classes have the same fields, but in a different order to optimize space versus alignment
ELFCLASS32: (BiStruct('IIIIIIII'), ['p_type', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_flags', 'p_align']),
ELFCLASS64: (BiStruct('IIQQQQQQ'), ['p_type', 'p_flags', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_align']),
}
def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
super().__init__(data, offset, eh, eh.e_phentsize)
def __repr__(self) -> str:
return f'ProgramHeader(p_type={self.p_type}, p_offset={self.p_offset}, p_vaddr={self.p_vaddr}, p_paddr={self.p_paddr}, p_filesz={self.p_filesz}, p_memsz={self.p_memsz}, p_flags={self.p_flags}, p_align={self.p_align})'
class Symbol(ELFRecord):
STRUCT = {
# different ELF classes have the same fields, but in a different order to optimize space versus alignment
ELFCLASS32: (BiStruct('IIIBBH'), ['st_name', 'st_value', 'st_size', 'st_info', 'st_other', 'st_shndx']),
ELFCLASS64: (BiStruct('IBBHQQ'), ['st_name', 'st_info', 'st_other', 'st_shndx', 'st_value', 'st_size']),
}
def __init__(self, data: bytes, offset: int, eh: ELFHeader, symtab: Section, strings: bytes, version: Optional[bytes]) -> None:
super().__init__(data, offset, eh, symtab.sh_entsize)
self.name = _lookup_string(strings, self.st_name)
self.version = version
def __repr__(self) -> str:
return f'Symbol(st_name={self.st_name}({self.name!r}), st_value={self.st_value}, st_size={self.st_size}, st_info={self.st_info}, st_other={self.st_other}, st_shndx={self.st_shndx}, version={self.version!r})'
@property
def is_import(self) -> bool:
'''Returns whether the symbol is an imported symbol.'''
return self.st_bind != STB_LOCAL and self.st_shndx == 0
@property
def is_export(self) -> bool:
'''Returns whether the symbol is an exported symbol.'''
return self.st_bind != STB_LOCAL and self.st_shndx != 0
@property
def st_bind(self) -> int:
'''Returns STB_*.'''
return self.st_info >> 4
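# Example (standard ELF encoding): st_info packs the binding in the high nibble and the
# symbol type in the low nibble, so st_info == 0x12 means binding 1 (STB_GLOBAL) and
# type 2 (STT_FUNC); st_bind above returns only the binding part.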
class Verneed(ELFRecord):
DEF = (BiStruct('HHIII'), ['vn_version', 'vn_cnt', 'vn_file', 'vn_aux', 'vn_next'])
STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
super().__init__(data, offset, eh, None)
def __repr__(self) -> str:
return f'Verneed(vn_version={self.vn_version}, vn_cnt={self.vn_cnt}, vn_file={self.vn_file}, vn_aux={self.vn_aux}, vn_next={self.vn_next})'
class Vernaux(ELFRecord):
DEF = (BiStruct('IHHII'), ['vna_hash', 'vna_flags', 'vna_other', 'vna_name', 'vna_next'])
STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
def __init__(self, data: bytes, offset: int, eh: ELFHeader, strings: bytes) -> None:
super().__init__(data, offset, eh, None)
self.name = _lookup_string(strings, self.vna_name)
def __repr__(self) -> str:
return f'Veraux(vna_hash={self.vna_hash}, vna_flags={self.vna_flags}, vna_other={self.vna_other}, vna_name={self.vna_name}({self.name!r}), vna_next={self.vna_next})'
class DynTag(ELFRecord):
STRUCT = {
ELFCLASS32: (BiStruct('II'), ['d_tag', 'd_val']),
ELFCLASS64: (BiStruct('QQ'), ['d_tag', 'd_val']),
}
def __init__(self, data: bytes, offset: int, eh: ELFHeader, section: Section) -> None:
super().__init__(data, offset, eh, section.sh_entsize)
def __repr__(self) -> str:
return f'DynTag(d_tag={self.d_tag}, d_val={self.d_val})'
def _lookup_string(data: bytes, index: int) -> bytes:
'''Look up string by offset in ELF string table.'''
endx = data.find(b'\x00', index)
assert endx != -1
return data[index:endx]
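# Worked example of the string-table lookup above:
# _lookup_string(b'\x00libc.so.6\x00GLIBC_2.17\x00', 1) == b'libc.so.6'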
VERSYM_S = BiStruct('H') # .gnu_version section has a single 16-bit integer per symbol in the linked section
def _parse_symbol_table(section: Section, strings: bytes, eh: ELFHeader, versym: bytes, verneed: Dict[int, bytes]) -> List[Symbol]:
'''Parse symbol table, return a list of symbols.'''
data = section.contents()
symbols = []
versym_iter = (verneed.get(v[0]) for v in VERSYM_S[eh.ei_data].iter_unpack(versym))
for ofs, version in zip(range(0, len(data), section.sh_entsize), versym_iter):
symbols.append(Symbol(data, ofs, eh, section, strings, version))
return symbols
def _parse_verneed(section: Section, strings: bytes, eh: ELFHeader) -> Dict[int, bytes]:
'''Parse .gnu.version_r section, return a dictionary of {versym: 'GLIBC_...'}.'''
data = section.contents()
ofs = 0
result = {}
while True:
verneed = Verneed(data, ofs, eh)
aofs = ofs + verneed.vn_aux
while True:
vernaux = Vernaux(data, aofs, eh, strings)
result[vernaux.vna_other] = vernaux.name
if not vernaux.vna_next:
break
aofs += vernaux.vna_next
if not verneed.vn_next:
break
ofs += verneed.vn_next
return result
def _parse_dyn_tags(section: Section, strings: bytes, eh: ELFHeader) -> List[Tuple[int, Union[bytes, int]]]:
'''Parse dynamic tags. Return array of tuples.'''
data = section.contents()
ofs = 0
result = []
for ofs in range(0, len(data), section.sh_entsize):
tag = DynTag(data, ofs, eh, section)
val = _lookup_string(strings, tag.d_val) if tag.d_tag in STRING_TAGS else tag.d_val
result.append((tag.d_tag, val))
return result
class ELFFile:
sections: List[Section]
program_headers: List[ProgramHeader]
dyn_symbols: List[Symbol]
dyn_tags: List[Tuple[int, Union[bytes, int]]]
def __init__(self, data: bytes) -> None:
self.data = data
self.hdr = ELFHeader(self.data, 0)
self._load_sections()
self._load_program_headers()
self._load_dyn_symbols()
self._load_dyn_tags()
self._section_to_segment_mapping()
def _load_sections(self) -> None:
self.sections = []
for idx in range(self.hdr.e_shnum):
offset = self.hdr.e_shoff + idx * self.hdr.e_shentsize
self.sections.append(Section(self.data, offset, self.hdr))
shstr = self.sections[self.hdr.e_shstrndx].contents()
for section in self.sections:
section.name = _lookup_string(shstr, section.sh_name)
def _load_program_headers(self) -> None:
self.program_headers = []
for idx in range(self.hdr.e_phnum):
offset = self.hdr.e_phoff + idx * self.hdr.e_phentsize
self.program_headers.append(ProgramHeader(self.data, offset, self.hdr))
def _load_dyn_symbols(self) -> None:
# first, load 'verneed' section
verneed = None
for section in self.sections:
if section.sh_type == SHT_GNU_verneed:
strtab = self.sections[section.sh_link].contents() # associated string table
assert verneed is None # only one section of this kind please
verneed = _parse_verneed(section, strtab, self.hdr)
assert verneed is not None
# then, correlate GNU versym sections with dynamic symbol sections
versym = {}
for section in self.sections:
if section.sh_type == SHT_GNU_versym:
versym[section.sh_link] = section
# finally, load dynsym sections
self.dyn_symbols = []
for idx, section in enumerate(self.sections):
if section.sh_type == SHT_DYNSYM: # find dynamic symbol tables
strtab_data = self.sections[section.sh_link].contents() # associated string table
versym_data = versym[idx].contents() # associated symbol version table
self.dyn_symbols += _parse_symbol_table(section, strtab_data, self.hdr, versym_data, verneed)
def _load_dyn_tags(self) -> None:
self.dyn_tags = []
for idx, section in enumerate(self.sections):
if section.sh_type == SHT_DYNAMIC: # find dynamic tag tables
strtab = self.sections[section.sh_link].contents() # associated string table
self.dyn_tags += _parse_dyn_tags(section, strtab, self.hdr)
def _section_to_segment_mapping(self) -> None:
for ph in self.program_headers:
ph.sections = []
for section in self.sections:
if ph.p_vaddr <= section.sh_addr < (ph.p_vaddr + ph.p_memsz):
ph.sections.append(section)
def query_dyn_tags(self, tag_in: int) -> List[Union[int, bytes]]:
'''Return the values of all dyn tags with the specified tag.'''
return [val for (tag, val) in self.dyn_tags if tag == tag_in]
def load(filename: str) -> ELFFile:
with open(filename, 'rb') as f:
data = f.read()
return ELFFile(data)
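# Usage sketch (hypothetical path): load a binary and inspect its dynamic information.
# elf = load('/usr/bin/example')
# needed = elf.query_dyn_tags(DT_NEEDED)                       # e.g. [b'libc.so.6', ...]
# imported = [s.name for s in elf.dyn_symbols if s.is_import]  # undefined dynamic symbols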
|
|
import psycopg2
import psycopg2.extras
#import DB.classification.Analysis_3 as A
import DB.classification.detect_events as D
import DB.classification.RandomForestModel as RMF
import DB.classification.models as BC
# Database user credentials
DATABASE = "seads"
USER = "seadapi"
TABLE = "data_raw"
def query(parsed_url):
"""
Handle parsed URL data and query the database as appropriate
:param parsed_url: Array of url parameters
:return: Generator of result strings
"""
if 'device_id' not in parsed_url.keys():
raise Exception("Received malformed URL data: missing device_id")
device_id = parsed_url['device_id']
header = ['time', 'I', 'W', 'V', 'T']
    start_time = end_time = data_type = subset = limit = device = granularity = None
diff = json = reverse = classify = list_format = events = total_energy = False
if 'type' in parsed_url.keys():
data_type = parsed_url['type']
header = ['time', parsed_url['type']]
if 'start_time' in parsed_url.keys():
start_time = parsed_url['start_time']
if 'end_time' in parsed_url.keys():
end_time = parsed_url['end_time']
if 'subset' in parsed_url.keys():
subset = parsed_url['subset']
if 'limit' in parsed_url.keys():
limit = parsed_url['limit']
if 'json' in parsed_url.keys():
json = parsed_url['json']
if 'reverse' in parsed_url.keys():
reverse = parsed_url['reverse']
if 'classify' in parsed_url.keys():
classify = parsed_url['classify']
if 'device' in parsed_url.keys():
device = parsed_url['device']
if 'diff' in parsed_url.keys():
diff = parsed_url['diff']
if 'granularity' in parsed_url.keys():
granularity = parsed_url['granularity']
if 'list_format' in parsed_url.keys():
list_format = parsed_url['list_format']
if 'events' in parsed_url.keys():
events = parsed_url['events']
if 'total_energy' in parsed_url.keys():
total_energy = parsed_url['total_energy']
if classify:
if device is not None and start_time is not None:
model = RMF.RandomForestModel.get_model()
model.train()
classification = model.classify(time=start_time, serial=device_id, panel=device)
return format_data(['data'], classification, json=True)
raise Exception("Received malformed URL data: missing start_time")
if total_energy:
results = generate_total_energy(device_id, start_time, end_time, device)
return results
results = retrieve_within_filters(
device_id,
start_time,
end_time,
data_type,
subset,
limit,
reverse,
device,
diff,
granularity,
list_format
)
if events and diff:
if device and start_time and end_time and data_type == 'P' and list_format == 'event':
return format_list(D.detect(results, events), list_format)
raise Exception("Event detection requires device, start_time, end_time, data_type=P, and "\
"list_format=event")
if list_format:
return format_list(results, list_format)
return format_data(header, results, json)
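# Usage sketch (illustrative parsed_url contents): stream power readings as JSON.
# for chunk in query({'device_id': 42, 'type': 'W',
#                     'start_time': 1500000000, 'end_time': 1500003600,
#                     'json': True}):
#     print(chunk)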
def generate_total_energy(device_id, start_time, end_time, channel):
"""
Returns total energy for a particular "channel" (device in table data_raw),
over a specified time period
:param device_id: The serial number of the device in question
:param start_time: The start of the time range for which to query for data
:param end_time: The end of the time range for which to query for data
:param channel: channel filter
"""
# Initialize parameter list and WHERE clause
start_params = [device_id]
start_query = "SELECT data FROM " + TABLE + " as raw WHERE serial = %s AND type = 'P'"
end_params = [device_id]
end_query = "SELECT data FROM " + TABLE + " as raw WHERE serial = %s AND type = 'P'"
# Generate WHERE clauses and execute queries
start_query += " AND device = %s AND time >= to_timestamp(%s) ORDER BY time DESC LIMIT 1;"
start_params.append(channel)
start_params.append(start_time)
start_row = perform_query(start_query, tuple(start_params))
end_query += " AND device = %s AND time <= to_timestamp(%s) ORDER BY time ASC LIMIT 1;"
end_params.append(channel)
end_params.append(end_time)
end_row = perform_query(end_query, tuple(end_params))
# Calculate total energy
total_energy = (abs(start_row[0][0]) - abs(end_row[0][0])) / 36e6
return '{ total_energy: ' + str(total_energy) + '}'
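# Worked example of the arithmetic above (illustrative counter values): with raw
# readings 7.2e7 and 3.6e7, (abs(7.2e7) - abs(3.6e7)) / 36e6 == 1.0, so the endpoint
# returns '{ total_energy: 1.0}'.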
def retrieve_within_filters(device_id, start_time, end_time, data_type, subset, limit, reverse,
device, diff, granularity, list_format):
"""
Return sensor data for a device within a specified timeframe
:param device_id: The serial number of the device in question
:param start_time: The start of the time range for which to query for data
:param end_time: The end of the time range for which to query for data
:param data_type: The type of data to query for
:param subset: The size of the subset
:param limit: Truncate result to this many rows
:param reverse: Return results in reverse order
:param device: Device filter
:param diff: Give the differences between rows instead of the actual rows themselves
:param granularity: Used to set the interval of an energy_list query
    :param list_format: controls whether an energy_list query is performed
:return: Generator of database row tuples
"""
# Initialize parameter list and WHERE clause
params = [device_id]
where = "WHERE serial = %s"
# Add subset size parameter if set
if subset:
params.insert(0, float(subset) + 1.0)
# Generate WHERE clause
if start_time:
where += " AND time >= to_timestamp(%s)"
params.append(start_time)
if end_time:
where += " AND time <= to_timestamp(%s)"
params.append(end_time)
if data_type:
where += " AND type = %s"
params.append(data_type)
if device == "seadplug":
where += " AND device IS NULL"
elif device == "egauge":
where += " AND device IS NOT NULL"
elif device:
where += " AND device = %s"
params.append(device)
query = "FROM " + TABLE + " as raw " + where
prefix = "SELECT time, data "
if device and diff:
prefix += " - lag(data) OVER (ORDER BY time"
if reverse:
prefix += " ASC"
else:
prefix += " DESC"
prefix += ") as diff "
# TODO: add this to the diff logic
if device and granularity and data_type == "P" and list_format == "energy":
prefix = "SELECT time, abs(CAST(lag(data) OVER (ORDER BY time DESC) - data AS DECIMAL)"\
" / 36e5) "
query += " AND CAST(extract(epoch from time) as INTEGER) %% %s = 0"
params.append(granularity)
query = prefix + query
else:
# If no data type is set we return all data types
query = write_crosstab(where, TABLE)
if subset:
query = write_subsample(query, data_type is None)
# Required for LIMIT, analysis code assumes sorted data
query += " ORDER BY time"
if reverse:
query += " ASC"
else:
query += " DESC"
if limit:
query += " LIMIT %s"
params.append(limit)
query += ";"
rows = perform_query(query, tuple(params))
return rows
def write_crosstab(where, data=TABLE):
"""
Write a PostgreSQL crosstab() query to create a pivot table and rearrange the data into a more useful form
:param where: WHERE clause for SQL query
:param data: Table or subquery from which to get the data
:return: Complete SQL query
"""
query = "SELECT * FROM crosstab(" + \
"'SELECT time, type, data from " + data + " as raw " + where + "'," + \
" 'SELECT unnest(ARRAY[''I'', ''W'', ''V'', ''T''])') " + \
"AS ct_result(time TIMESTAMP, I BIGINT, W BIGINT, V BIGINT, T BIGINT)"
return query
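# Example (illustrative WHERE clause): write_crosstab("WHERE serial = %s") builds a
# pivot query of the form
#   SELECT * FROM crosstab(
#       'SELECT time, type, data from data_raw as raw WHERE serial = %s',
#       'SELECT unnest(ARRAY[''I'', ''W'', ''V'', ''T''])')
#   AS ct_result(time TIMESTAMP, I BIGINT, W BIGINT, V BIGINT, T BIGINT)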
def perform_query(query, params):
"""
    Initiate a connection to the database, execute the query, and return all result rows
:param query: SQL query string
:param params: List of SQL query parameters
    :return: List of result rows (or None on database error)
"""
con = None
try:
con = psycopg2.connect(database=DATABASE, user=USER)
cursor = con.cursor()
print("Query:", query)
print("Parameters:", params)
cursor.execute(query, params)
return cursor.fetchall()
except psycopg2.DatabaseError as e:
print('Database error: %s' % e)
finally:
if con:
con.close()
def format_list(rows, name):
"""
    Formats a result set as JSON for an energy_list query
    :param rows: List of (time, value) result rows
    :param name: Field name to use for the value in each JSON object
    :return: Generator of JSON string fragments
"""
yield '{ "data": ['
for i, row in enumerate(rows):
if 0 < i < (len(rows) - 1):
yield '{ "time": "' + str(row[0]) + '", "' + name + '": "' + str(row[1]) + '" },'
elif i == (len(rows) - 1):
yield '{ "time": "' + str(row[0]) + '" ,"' + name + '": "' + str(row[1]) + '" }'
yield ']}'
def format_data_row(row):
"""
Formats result row into result row string
:param row: Result row
:return: Result row string
"""
return '[' + ", ".join(map(lambda x: '"' + str(x) + '"', row)) + ']'
def format_data(header, data, json=False):
"""
Process rows of data returned by the db and format them appropriately
:param header: The first row of the result
:param data: Result cursor
:param json: Whether or not to use JSON format.
:return: Generator of result strings
"""
if json:
yield '{"data": '
yield "[" + format_data_row(header) # No comma before header
for row in data:
yield ',' + format_data_row(row)
yield "]"
if json:
yield "}"
def write_subsample(query, crosstab=False):
"""
    Adds subsampling to a query. This should be the absolute last step in query building. This function call should be paired with params.insert(0, subset) so the subset size is the first query parameter.
:param query: The exiting query to subsample
:param crosstab: Whether or not the query is a crosstab
:return: Query with subsampling enabled.
"""
new_query = '''SELECT '''
if crosstab:
new_query += '''time, I, W, V, T''' # SELECT all data type columns
else:
new_query += "time, data" # Single data type query
new_query += ''' FROM ( SELECT *, ((row_number() OVER (ORDER BY "time"))
%% ceil(count(*) OVER () / %s)::int) AS rn
FROM ('''
new_query += query
new_query += ") AS subquery ) sub WHERE sub.rn = 0"
return new_query
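# Usage sketch (illustrative values): wrap an existing query so that only an evenly
# spaced subset of rows is returned; the subset size must end up as the first bound
# parameter, mirroring params.insert(0, float(subset) + 1.0) in retrieve_within_filters.
# q = write_subsample(write_crosstab("WHERE serial = %s"), crosstab=True) + " ORDER BY time DESC;"
# rows = perform_query(q, (101.0, 'some-serial-number'))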
|
|
# -*- coding: utf-8 -*-
import itsdangerous
import mock
import pytest
import unittest
from future.moves.urllib.parse import urlparse, parse_qs
from uuid import UUID
from api.base.settings.defaults import API_BASE
from framework.auth.cas import CasResponse
from osf.models import OSFUser, Session, ApiOAuth2PersonalToken
from osf_tests.factories import (
AuthUserFactory,
UserFactory,
OSFGroupFactory,
ProjectFactory,
ApiOAuth2ScopeFactory,
RegistrationFactory,
Auth,
)
from osf.utils.permissions import CREATOR_PERMISSIONS
from website import settings
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestUsers:
@pytest.fixture()
def user_one(self):
return AuthUserFactory(fullname='Freddie Mercury I')
@pytest.fixture()
def user_two(self):
return AuthUserFactory(fullname='Freddie Mercury II')
def test_returns_200(self, app):
res = app.get('/{}users/'.format(API_BASE))
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
def test_find_user_in_users(self, app, user_one, user_two):
url = '/{}users/'.format(API_BASE)
res = app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert user_two._id in ids
def test_all_users_in_users(self, app, user_one, user_two):
url = '/{}users/'.format(API_BASE)
res = app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert user_one._id in ids
assert user_two._id in ids
def test_merged_user_is_not_in_user_list_after_2point3(
self, app, user_one, user_two):
user_two.merge_user(user_one)
res = app.get('/{}users/?version=2.3'.format(API_BASE))
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert res.status_code == 200
assert user_two._id in ids
assert user_one._id not in ids
def test_merged_user_is_returned_before_2point3(
self, app, user_one, user_two):
user_two.merge_user(user_one)
res = app.get('/{}users/'.format(API_BASE))
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert res.status_code == 200
assert user_two._id in ids
assert user_one._id in ids
def test_find_multiple_in_users(self, app, user_one, user_two):
url = '/{}users/?filter[full_name]=fred'.format(API_BASE)
res = app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert user_one._id in ids
assert user_two._id in ids
def test_find_single_user_in_users(self, app, user_one, user_two):
url = '/{}users/?filter[full_name]=my'.format(API_BASE)
user_one.fullname = 'My Mom'
user_one.save()
res = app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert user_one._id in ids
assert user_two._id not in ids
def test_find_no_user_in_users(self, app, user_one, user_two):
url = '/{}users/?filter[full_name]=NotMyMom'.format(API_BASE)
res = app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert user_one._id not in ids
assert user_two._id not in ids
def test_more_than_one_projects_in_common(self, app, user_one, user_two):
group = OSFGroupFactory(creator=user_one)
group.make_member(user_two)
project1 = ProjectFactory(creator=user_one)
project1.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project1.save()
project2 = ProjectFactory(creator=user_one)
project2.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project2.save()
project3 = ProjectFactory()
project4 = ProjectFactory()
project3.add_osf_group(group)
project4.add_osf_group(group)
project4.is_deleted = True
project3.save()
project4.save()
RegistrationFactory(
project=project1,
creator=user_one,
is_public=True)
url = '/{}users/?show_projects_in_common=true'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
if user['id'] == user_two._id:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 4
def test_users_projects_in_common(self, app, user_one, user_two):
user_one.fullname = 'hello'
user_one.save()
url = '/{}users/?show_projects_in_common=true'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 0
def test_users_projects_in_common_with_embed_and_right_query(
self, app, user_one, user_two):
project = ProjectFactory(creator=user_one)
project.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project.save()
url = '/{}users/{}/nodes/?embed=contributors&show_projects_in_common=true'.format(
API_BASE, user_two._id)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data'][0]['embeds']['contributors']['data']
for user in user_json:
meta = user['embeds']['users']['data']['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 1
def test_users_projects_in_common_exclude_deleted_projects(
self, app, user_one, user_two):
project_list = []
for x in range(1, 10):
project = ProjectFactory(creator=user_one)
project.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project.save()
project_list.append(project)
for x in range(1, 5):
project = project_list[x]
project.reload()
project.remove_node(auth=Auth(user=user_one))
url = '/{}users/{}/nodes/?embed=contributors&show_projects_in_common=true'.format(
API_BASE, user_two._id)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data'][0]['embeds']['contributors']['data']
for user in user_json:
meta = user['embeds']['users']['data']['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 5
def test_users_projects_in_common_with_embed_without_right_query(
self, app, user_one, user_two):
project = ProjectFactory(creator=user_one)
project.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project.save()
url = '/{}users/{}/nodes/?embed=contributors'.format(
API_BASE, user_two._id)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data'][0]['embeds']['contributors']['data']
for user in user_json:
meta = user['embeds']['users']['data']['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' not in meta
def test_users_no_projects_in_common_with_wrong_query(
self, app, user_one, user_two):
user_one.fullname = 'hello'
user_one.save()
url = '/{}users/?filter[full_name]={}'.format(
API_BASE, user_one.fullname)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' not in meta
def test_users_no_projects_in_common_without_filter(
self, app, user_one, user_two):
user_one.fullname = 'hello'
user_one.save()
url = '/{}users/'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' not in meta
def test_users_list_takes_profile_image_size_param(
self, app, user_one, user_two):
size = 42
url = '/{}users/?profile_image_size={}'.format(API_BASE, size)
res = app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = parse_qs(
urlparse(profile_image_url).query)
assert int(query_dict.get('s')[0]) == size
def test_users_list_filter_multiple_field(self, app, user_one, user_two):
john_doe = UserFactory(fullname='John Doe')
john_doe.given_name = 'John'
john_doe.family_name = 'Doe'
john_doe.save()
doe_jane = UserFactory(fullname='Doe Jane')
doe_jane.given_name = 'Doe'
doe_jane.family_name = 'Jane'
doe_jane.save()
url = '/{}users/?filter[given_name,family_name]=Doe'.format(API_BASE)
res = app.get(url)
data = res.json['data']
assert len(data) == 2
def test_users_list_filter_multiple_fields_with_additional_filters(
self, app, user_one, user_two):
john_doe = UserFactory(fullname='John Doe')
john_doe.given_name = 'John'
john_doe.family_name = 'Doe'
john_doe.save()
doe_jane = UserFactory(fullname='Doe Jane')
doe_jane.given_name = 'Doe'
doe_jane.family_name = 'Jane'
doe_jane.save()
url = '/{}users/?filter[given_name,family_name]=Doe&filter[id]={}'.format(
API_BASE, john_doe._id)
res = app.get(url)
data = res.json['data']
assert len(data) == 1
def test_users_list_filter_multiple_fields_with_bad_filter(
self, app, user_one, user_two):
url = '/{}users/?filter[given_name,not_a_filter]=Doe'.format(API_BASE)
res = app.get(url, expect_errors=True)
assert res.status_code == 400
@pytest.mark.django_db
class TestUsersCreate:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def email_unconfirmed(self):
return 'tester@fake.io'
@pytest.fixture()
def url_base(self):
return '/{}users/'.format(API_BASE)
@pytest.fixture()
def data(self, email_unconfirmed):
return {
'data': {
'type': 'users',
'attributes': {
'username': email_unconfirmed,
'full_name': 'Test Account'
}
}
}
def tearDown(self, app):
super(TestUsersCreate, self).tearDown()
app.reset() # clears cookies
OSFUser.remove()
@mock.patch('framework.auth.views.mails.send_mail')
def test_logged_in_user_with_basic_auth_cannot_create_other_user_or_send_mail(
self, mock_mail, app, user, email_unconfirmed, data, url_base):
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
auth=user.auth,
expect_errors=True
)
assert res.status_code == 403
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
assert mock_mail.call_count == 0
@mock.patch('framework.auth.views.mails.send_mail')
def test_logged_out_user_cannot_create_other_user_or_send_mail(
self, mock_mail, app, email_unconfirmed, data, url_base):
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
expect_errors=True
)
assert res.status_code == 401
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
assert mock_mail.call_count == 0
    @pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
def test_cookied_requests_can_create_and_email(
self, mock_mail, app, user, email_unconfirmed, data, url_base):
session = Session(data={'auth_user_id': user._id})
session.save()
cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(session._id)
app.set_cookie(settings.COOKIE_NAME, str(cookie))
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data
)
assert res.status_code == 201
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 1
    @pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.users.create unavailable')
def test_properly_scoped_token_can_create_and_send_email(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Authorized Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.users.create'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
assert res.json['data']['attributes']['username'] == email_unconfirmed
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 1
    @pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.users.create unavailable')
def test_properly_scoped_token_does_not_send_email_without_kwarg(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Authorized Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.users.create'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
url_base,
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
assert res.json['data']['attributes']['username'] == email_unconfirmed
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 0
    @pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.users.create unavailable')
def test_properly_scoped_token_can_create_without_username_but_not_send_email(
self, mock_auth, mock_mail, app, user, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Authorized Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.users.create'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
data['data']['attributes'] = {'full_name': 'No Email'}
assert OSFUser.objects.filter(fullname='No Email').count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
username = res.json['data']['attributes']['username']
try:
UUID(username)
except ValueError:
raise AssertionError('Username is not a valid UUID')
assert OSFUser.objects.filter(fullname='No Email').count() == 1
assert mock_mail.call_count == 0
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
def test_improperly_scoped_token_can_not_create_or_email(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Unauthorized Token',
)
token.save()
scope = ApiOAuth2ScopeFactory()
scope.name = 'unauthorized scope'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)},
expect_errors=True
)
assert res.status_code == 403
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
assert mock_mail.call_count == 0
    @pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.admin unavailable')
def test_admin_scoped_token_can_create_and_send_email(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Admin Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.admin'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
assert res.json['data']['attributes']['username'] == email_unconfirmed
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 1
|
|
from django.contrib.auth.models import User, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models.query import QuerySet
from django.test import TestCase
from guardian.shortcuts import get_perms_for_model
from guardian.core import ObjectPermissionChecker
from guardian.shortcuts import assign
from guardian.shortcuts import remove_perm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_objects_for_user
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import NotUserNorGroup
from guardian.exceptions import WrongAppError
from guardian.tests.core_test import ObjectPermissionTestCase
class ShortcutsTests(ObjectPermissionTestCase):
def test_get_perms_for_model(self):
self.assertEqual(get_perms_for_model(self.user).count(), 3)
self.assertTrue(list(get_perms_for_model(self.user)) ==
list(get_perms_for_model(User)))
self.assertEqual(get_perms_for_model(Permission).count(), 3)
model_str = 'contenttypes.ContentType'
self.assertEqual(
sorted(get_perms_for_model(model_str).values_list()),
sorted(get_perms_for_model(ContentType).values_list()))
obj = ContentType()
self.assertEqual(
sorted(get_perms_for_model(model_str).values_list()),
sorted(get_perms_for_model(obj).values_list()))
class AssignTest(ObjectPermissionTestCase):
"""
Tests permission assigning for user/group and object.
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, assign,
perm="change_object",
user_or_group="Not a Model",
obj=self.ctype)
def test_global_wrong_perm(self):
self.assertRaises(ValueError, assign,
perm="change_site", # for global permissions must provide app_label
user_or_group=self.user)
def test_user_assign(self):
assign("change_contenttype", self.user, self.ctype)
assign("change_contenttype", self.group, self.ctype)
self.assertTrue(self.user.has_perm("change_contenttype", self.ctype))
    def test_group_assign(self):
assign("change_contenttype", self.group, self.ctype)
assign("delete_contenttype", self.group, self.ctype)
check = ObjectPermissionChecker(self.group)
self.assertTrue(check.has_perm("change_contenttype", self.ctype))
self.assertTrue(check.has_perm("delete_contenttype", self.ctype))
def test_user_assign_global(self):
perm = assign("contenttypes.change_contenttype", self.user)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
self.assertTrue(isinstance(perm, Permission))
    def test_group_assign_global(self):
perm = assign("contenttypes.change_contenttype", self.group)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
self.assertTrue(isinstance(perm, Permission))
class RemovePermTest(ObjectPermissionTestCase):
"""
Tests object permissions removal.
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, remove_perm,
perm="change_object",
user_or_group="Not a Model",
obj=self.ctype)
def test_global_wrong_perm(self):
self.assertRaises(ValueError, remove_perm,
perm="change_site", # for global permissions must provide app_label
user_or_group=self.user)
def test_user_remove_perm(self):
# assign perm first
assign("change_contenttype", self.user, self.ctype)
remove_perm("change_contenttype", self.user, self.ctype)
self.assertFalse(self.user.has_perm("change_contenttype", self.ctype))
def test_group_remove_perm(self):
# assign perm first
assign("change_contenttype", self.group, self.ctype)
remove_perm("change_contenttype", self.group, self.ctype)
check = ObjectPermissionChecker(self.group)
self.assertFalse(check.has_perm("change_contenttype", self.ctype))
def test_user_remove_perm_global(self):
# assign perm first
perm = "contenttypes.change_contenttype"
assign(perm, self.user)
remove_perm(perm, self.user)
self.assertFalse(self.user.has_perm(perm))
def test_group_remove_perm_global(self):
# assign perm first
perm = "contenttypes.change_contenttype"
assign(perm, self.group)
remove_perm(perm, self.group)
app_label, codename = perm.split('.')
perm_obj = Permission.objects.get(codename=codename,
content_type__app_label=app_label)
self.assertFalse(perm_obj in self.group.permissions.all())
class GetPermsTest(ObjectPermissionTestCase):
"""
Tests get_perms function (already done at core tests but left here as a
placeholder).
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, get_perms,
user_or_group=None,
obj=self.ctype)
def test_user(self):
perms_to_assign = ("change_contenttype",)
for perm in perms_to_assign:
assign("change_contenttype", self.user, self.ctype)
perms = get_perms(self.user, self.ctype)
for perm in perms_to_assign:
self.assertTrue(perm in perms)
class GetUsersWithPermsTest(TestCase):
"""
Tests get_users_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_empty(self):
result = get_users_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertEqual(list(result), [])
result = get_users_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign("change_contenttype", self.user1, self.obj1)
assign("delete_contenttype", self.user2, self.obj1)
assign("delete_contenttype", self.user3, self.obj2)
result = get_users_with_perms(self.obj1)
result_vals = result.values_list('username', flat=True)
self.assertEqual(
set(result_vals),
set([user.username for user in (self.user1, self.user2)]),
)
def test_users_groups_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.group2, self.obj1)
assign("delete_contenttype", self.group3, self.obj2)
result = get_users_with_perms(self.obj1).values_list('id',
flat=True)
self.assertEqual(
set(result),
set([u.id for u in (self.user1, self.user2)])
)
def test_users_groups_after_removal(self):
self.test_users_groups_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1).values_list('id',
flat=True)
self.assertEqual(
set(result),
set([self.user2.id]),
)
def test_attach_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.group2, self.obj1)
assign("delete_contenttype", self.group3, self.obj2)
assign("delete_contenttype", self.user2, self.obj1)
assign("change_contenttype", self.user3, self.obj2)
# Check contenttype1
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {
self.user1: ["change_contenttype"],
self.user2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.iteritems():
self.assertEqual(set(perms), set(expected[key]))
# Check contenttype2
result = get_users_with_perms(self.obj2, attach_perms=True)
expected = {
self.user3: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.iteritems():
self.assertEqual(set(perms), set(expected[key]))
def test_attach_groups_only_has_perms(self):
self.user1.groups.add(self.group1)
assign("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {self.user1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_mixed(self):
self.user1.groups.add(self.group1)
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.user2, self.obj1)
assign("delete_contenttype", self.user2, self.obj1)
assign("delete_contenttype", self.user2, self.obj2)
assign("change_contenttype", self.user3, self.obj2)
assign("change_user", self.user3, self.user1)
result = get_users_with_perms(self.obj1)
self.assertEqual(
set(result),
set([self.user1, self.user2]),
)
def test_with_superusers(self):
admin = User.objects.create(username='admin', is_superuser=True)
assign("change_contenttype", self.user1, self.obj1)
result = get_users_with_perms(self.obj1, with_superusers=True)
self.assertEqual(
set(result),
set([self.user1, admin]),
)
def test_without_group_users(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.user2, self.obj1)
assign("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False)
expected = set([self.user2])
self.assertEqual(set(result), expected)
def test_without_group_users_but_perms_attached(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.user2, self.obj1)
assign("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False,
attach_perms=True)
expected = {self.user2: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_without_group_users_no_result(self):
self.user1.groups.add(self.group1)
assign("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True,
with_group_users=False)
expected = {}
self.assertEqual(result, expected)
def test_without_group_users_no_result_but_with_superusers(self):
admin = User.objects.create(username='admin', is_superuser=True)
self.user1.groups.add(self.group1)
assign("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False,
with_superusers=True)
expected = [admin]
self.assertEqual(set(result), set(expected))
class GetGroupsWithPerms(TestCase):
"""
Tests get_groups_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_empty(self):
result = get_groups_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertFalse(bool(result))
result = get_groups_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], self.group1)
def test_simple_after_removal(self):
self.test_simple()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(len(result), 0)
def test_simple_attach_perms(self):
assign("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
expected = {self.group1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_simple_attach_perms_after_removal(self):
self.test_simple_attach_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
self.assertEqual(len(result), 0)
def test_mixed(self):
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.group1, self.obj2)
assign("change_user", self.group1, self.user3)
assign("change_contenttype", self.group2, self.obj2)
assign("change_contenttype", self.group2, self.obj1)
assign("delete_contenttype", self.group2, self.obj1)
assign("change_user", self.group3, self.user1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(set(result), set([self.group1, self.group2]))
def test_mixed_attach_perms(self):
assign("change_contenttype", self.group1, self.obj1)
assign("change_contenttype", self.group1, self.obj2)
assign("change_group", self.group1, self.group3)
assign("change_contenttype", self.group2, self.obj2)
assign("change_contenttype", self.group2, self.obj1)
assign("delete_contenttype", self.group2, self.obj1)
assign("change_group", self.group3, self.group1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
expected = {
self.group1: ["change_contenttype"],
self.group2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.iteritems():
self.assertEqual(set(perms), set(expected[key]))
class GetObjectsForUser(TestCase):
def setUp(self):
self.user = User.objects.create(username='joe')
self.group = Group.objects.create(name='group')
self.ctype = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
def test_superuser(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
self.assertEqual(set(ctypes), set(objects))
def test_mixed_perms(self):
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, ['auth.change_user', 'auth.change_permission'])
def test_perms_with_mixed_apps(self):
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, ['auth.change_user', 'contenttypes.change_contenttype'])
def test_mixed_perms_and_klass(self):
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, ['auth.change_group'], User)
def test_no_app_label_nor_klass(self):
self.assertRaises(WrongAppError, get_objects_for_user, self.user,
['change_group'])
def test_empty_perms_sequence(self):
self.assertEqual(
set(get_objects_for_user(self.user, [], Group.objects.all())),
set()
)
def test_perms_single(self):
perm = 'auth.change_group'
assign(perm, self.user, self.group)
self.assertEqual(
set(get_objects_for_user(self.user, perm)),
set(get_objects_for_user(self.user, [perm])))
def test_klass_as_model(self):
assign('contenttypes.change_contenttype', self.user, self.ctype)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ContentType)
self.assertEqual([obj.name for obj in objects], [self.ctype.name])
def test_klass_as_manager(self):
assign('auth.change_group', self.user, self.group)
objects = get_objects_for_user(self.user, ['auth.change_group'],
Group.objects)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_klass_as_queryset(self):
assign('auth.change_group', self.user, self.group)
objects = get_objects_for_user(self.user, ['auth.change_group'],
Group.objects.all())
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_ensure_returns_queryset(self):
objects = get_objects_for_user(self.user, ['auth.change_group'])
self.assertTrue(isinstance(objects, QuerySet))
def test_simple(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign('change_group', self.user, group)
objects = get_objects_for_user(self.user, ['auth.change_group'])
self.assertEqual(len(objects), len(groups))
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set(group_names))
def test_multiple_perms_to_check(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign('auth.change_group', self.user, group)
assign('auth.delete_group', self.user, groups[1])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'])
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[1].name]))
def test_groups_perms(self):
group1 = Group.objects.create(name='group1')
group2 = Group.objects.create(name='group2')
group3 = Group.objects.create(name='group3')
groups = [group1, group2, group3]
for group in groups:
self.user.groups.add(group)
# Objects to operate on
ctypes = dict(((ct.id, ct) for ct in ContentType.objects.all()))
assign('change_contenttype', self.user, ctypes[1])
assign('change_contenttype', self.user, ctypes[2])
assign('delete_contenttype', self.user, ctypes[2])
assign('delete_contenttype', self.user, ctypes[3])
assign('change_contenttype', groups[0], ctypes[4])
assign('change_contenttype', groups[1], ctypes[4])
assign('change_contenttype', groups[2], ctypes[5])
assign('delete_contenttype', groups[0], ctypes[1])
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set([1, 2, 4, 5]))
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype',
'contenttypes.delete_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set([1, 2]))
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set([1, 2, 4, 5]))
|
|
import gen
import os
import sys
import string
from string import Template
from gen import Gen
from nysa.ibuilder.lib import utils
from nysa.ibuilder.lib import verilog_utils as vutils
class GenMemInterconnect(Gen):
def __init__(self):
return
def gen_script (self, tags = {}, buf = "", user_paths = [], debug=False):
buf = generate_wb_mem_interconnect(tags = tags, user_paths = user_paths, debug = debug)
return buf
def get_name (self):
print "wishbone_mem_interconnect.py"
def generate_wb_mem_interconnect(tags = {}, user_paths = [], debug = False):
if "MEMORY" not in tags:
return ""
num_slaves = len(tags["MEMORY"].keys())
if debug: print "Number of slaves: %d" % num_slaves
buf = ""
#Allow errors to pass up to the calling class
directory = utils.get_local_verilog_path("nysa-verilog")
wb_i_loc = os.path.join( directory,
"verilog",
"wishbone",
"interconnect",
"wishbone_mem_interconnect.v")
f = open(wb_i_loc, "r")
buf = f.read()
f.close()
template = Template(buf)
port_buf = ""
port_def_buf = ""
mem_select_buf = ""
assign_buf = ""
data_block_buf = ""
ack_block_buf = ""
int_block_buf = ""
param_buf = ""
    #count the memory slaves
num_mems = 0
if (tags.has_key("MEMORY")):
#got a list of all the slaves to add to make room for
mem_list = tags["MEMORY"]
num_mems = num_mems + len(mem_list)
if num_mems == 0:
return ""
    if debug:
        print "Memory Keys\n\n"
for key in tags["MEMORY"]:
print key + ":" + str(tags["MEMORY"][key])
print "\n\n"
slave_keywords = [
"SDB_ABI_VERSION_MAJOR",
"SDB_ABI_VERSION_MINOR",
"SDB_SIZE"
]
mem_offset = 0
#generate the parameters
for i in range(0, num_mems):
key = tags["MEMORY"].keys()[i]
absfilename = utils.find_rtl_file_location(tags["MEMORY"][key]["filename"], user_paths)
#print "filename: %s" % absfilename
slave_tags = vutils.get_module_tags(filename = absfilename,
bus = "wishbone",
keywords = slave_keywords,
project_tags = tags)
if debug:
print "slave tags: " + str(slave_tags)
mem_size = int(slave_tags["keywords"]["SDB_SIZE"].strip(), 0)
param_buf = param_buf + "parameter MEM_SEL_%d\t=\t%d;\n" % (i, i)
#param_buf = param_buf + "parameter MEM_OFFSET_%d\t=\t %d;\n" % (i, mem_offset)
param_buf = param_buf + "parameter MEM_OFFSET_%d\t=\t 32'h%08X;\n" % (i, mem_offset)
param_buf = param_buf + "parameter MEM_SIZE_%d\t=\t 32'h%02X;\n" % (i, mem_size)
mem_offset += mem_size
#generate the memory select logic
mem_select_buf = "reg [31:0] mem_select;\n"
mem_select_buf += "\n"
mem_select_buf += "always @(rst or i_m_adr or mem_select) begin\n"
mem_select_buf += "\tif (rst) begin\n"
mem_select_buf += "\t\t//nothing selected\n"
mem_select_buf += "\t\tmem_select <= 32'hFFFFFFFF;\n"
mem_select_buf += "\tend\n"
mem_select_buf += "\telse begin\n"
for i in range (num_mems):
if (i == 0):
mem_select_buf += "\t\tif "
else:
mem_select_buf += "\t\telse if "
mem_select_buf += "((i_m_adr >= MEM_OFFSET_%d) && (i_m_adr < (MEM_OFFSET_%d + MEM_SIZE_%d))) begin\n" % (i, i, i)
mem_select_buf += "\t\t\tmem_select <= MEM_SEL_%d;\n" % i
mem_select_buf += "\t\tend\n"
mem_select_buf += "\t\telse begin\n"
mem_select_buf += "\t\t\tmem_select <= 32'hFFFFFFFF;\n"
mem_select_buf += "\t\tend\n"
mem_select_buf += "\tend\n"
mem_select_buf += "end\n"
#Ports
for i in range (0, num_slaves):
port_buf += "\t//Slave %d\n" % i
port_buf += "\toutput\t\t\t\t\t\t\to_s%d_we,\n" % i
port_buf += "\toutput\t\t\t\t\t\t\to_s%d_cyc,\n" % i
port_buf += "\toutput\t\t\t\t\t\t\to_s%d_stb,\n" % i
port_buf += "\toutput\t\t[3:0]\t\t\to_s%d_sel,\n" % i
port_buf += "\tinput\t\t\t\t\t\t\t\ti_s%d_ack,\n" % i
port_buf += "\toutput\t\t[31:0]\t\to_s%d_dat,\n" % i
port_buf += "\tinput\t\t\t[31:0]\t\ti_s%d_dat,\n" % i
port_buf += "\toutput\t\t[31:0]\t\to_s%d_adr,\n" % i
port_buf += "\tinput\t\t\t\t\t\t\t\ti_s%d_int" % i
#if this isn't the last slave add a comma
if (i < num_slaves - 1):
port_buf += ",\n"
port_buf += "\n"
#assign defines
for i in range (0, num_mems):
assign_buf += "assign o_s%d_we =\t(mem_select == MEM_SEL_%d) ? i_m_we: 1'b0;\n" % (i, i)
assign_buf += "assign o_s%d_stb =\t(mem_select == MEM_SEL_%d) ? i_m_stb: 1'b0;\n" % (i, i)
assign_buf += "assign o_s%d_sel =\t(mem_select == MEM_SEL_%d) ? i_m_sel: 4'b0;\n" % (i, i)
assign_buf += "assign o_s%d_cyc =\t(mem_select == MEM_SEL_%d) ? i_m_cyc: 1'b0;\n" % (i, i)
if i == 0:
assign_buf += "assign o_s%d_adr =\t(mem_select == MEM_SEL_%d) ? i_m_adr: 32'h0;\n" % (i, i)
else:
assign_buf += "assign o_s%d_adr =\t(mem_select == MEM_SEL_%d) ? i_m_adr - MEM_OFFSET_%d: 32'h0;\n" %(i, i, i)
assign_buf += "assign o_s%d_dat =\t(mem_select == MEM_SEL_%d) ? i_m_dat: 32'h0;\n" % (i, i)
assign_buf +="\n"
#data in block
data_block_buf = "//data in from slave\n"
data_block_buf += "always @ (mem_select"
for i in range (0, num_mems):
data_block_buf += " or i_s%d_dat" % i
data_block_buf += ") begin\n\tcase (mem_select)\n"
for i in range (0, num_mems):
data_block_buf += "\t\tMEM_SEL_%d: begin\n\t\t\to_m_dat <= i_s%d_dat;\n\t\tend\n" % (i, i)
data_block_buf += "\t\tdefault: begin\n\t\t\to_m_dat <= 32\'h0000;\n\t\tend\n\tendcase\nend\n\n"
#ack in block
ack_block_buf = "//ack in from mem slave\n"
ack_block_buf += "always @ (mem_select"
for i in range (0, num_mems):
ack_block_buf += " or i_s%d_ack" % i
ack_block_buf += ") begin\n\tcase (mem_select)\n"
for i in range (0, num_mems):
ack_block_buf += "\t\tMEM_SEL_%d: begin\n\t\t\to_m_ack <= i_s%d_ack;\n\t\tend\n" % (i, i)
ack_block_buf += "\t\tdefault: begin\n\t\t\to_m_ack <= 1\'h0;\n\t\tend\n\tendcase\nend\n\n"
#int in block
int_block_buf = "//int in from slave\n"
int_block_buf += "always @ (mem_select"
for i in range (0, num_mems):
int_block_buf += " or i_s%d_int" % (i)
int_block_buf += ") begin\n\tcase (mem_select)\n"
for i in range (0, num_mems):
int_block_buf += "\t\tMEM_SEL_%d: begin\n\t\t\to_m_int <= i_s%d_int;\n\t\tend\n" % (i, i)
int_block_buf += "\t\tdefault: begin\n\t\t\to_m_int <= 1\'h0;\n\t\tend\n\tendcase\nend\n\n"
buf = template.substitute( PORTS=port_buf,
MEM_SELECT=mem_select_buf,
ASSIGN=assign_buf,
DATA=data_block_buf,
ACK=ack_block_buf,
INT=int_block_buf,
MEM_PARAMS=param_buf)
buf = string.expandtabs(buf, 2)
return buf
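#Illustrative sketch only (not part of the original generator): for a project with
#two memory slaves the mem_select_buf assembled above expands, after tab expansion,
#to Verilog along these lines (MEM_SEL_n / MEM_OFFSET_n / MEM_SIZE_n are the
#parameters emitted into param_buf):
#
#  always @(rst or i_m_adr or mem_select) begin
#    if (rst) begin
#      //nothing selected
#      mem_select <= 32'hFFFFFFFF;
#    end
#    else begin
#      if ((i_m_adr >= MEM_OFFSET_0) && (i_m_adr < (MEM_OFFSET_0 + MEM_SIZE_0))) begin
#        mem_select <= MEM_SEL_0;
#      end
#      else if ((i_m_adr >= MEM_OFFSET_1) && (i_m_adr < (MEM_OFFSET_1 + MEM_SIZE_1))) begin
#        mem_select <= MEM_SEL_1;
#      end
#      else begin
#        mem_select <= 32'hFFFFFFFF;
#      end
#    end
#  end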
|
|
"""Support for Radarr."""
import logging
import time
from datetime import datetime, timedelta
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_MONITORED_CONDITIONS,
CONF_SSL,
)
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_DAYS = "days"
CONF_INCLUDED = "include_paths"
CONF_UNIT = "unit"
CONF_URLBASE = "urlbase"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 7878
DEFAULT_URLBASE = ""
DEFAULT_DAYS = "1"
DEFAULT_UNIT = "GB"
SCAN_INTERVAL = timedelta(minutes=10)
SENSOR_TYPES = {
"diskspace": ["Disk Space", "GB", "mdi:harddisk"],
"upcoming": ["Upcoming", "Movies", "mdi:television"],
"wanted": ["Wanted", "Movies", "mdi:television"],
"movies": ["Movies", "Movies", "mdi:television"],
"commands": ["Commands", "Commands", "mdi:code-braces"],
"status": ["Status", "Status", "mdi:information"],
}
ENDPOINTS = {
"diskspace": "http{0}://{1}:{2}/{3}api/diskspace",
"upcoming": "http{0}://{1}:{2}/{3}api/calendar?start={4}&end={5}",
"movies": "http{0}://{1}:{2}/{3}api/movie",
"commands": "http{0}://{1}:{2}/{3}api/command",
"status": "http{0}://{1}:{2}/{3}api/system/status",
}
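# Illustrative note: with the defaults defined above (no SSL, empty urlbase) the
# "diskspace" endpoint resolves to http://localhost:7878/api/diskspace; the
# "upcoming" endpoint additionally receives the start/end dates computed in
# RadarrSensor.update().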
# Support up to Yottabytes for the future, why not
BYTE_SIZES = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_INCLUDED, default=[]): cv.ensure_list,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["movies"]): vol.All(
cv.ensure_list, [vol.In(list(SENSOR_TYPES))]
),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): vol.In(BYTE_SIZES),
vol.Optional(CONF_URLBASE, default=DEFAULT_URLBASE): cv.string,
}
)
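# Illustrative configuration sketch (values are placeholders; keys mirror the
# schema above):
#
#   sensor:
#     - platform: radarr
#       api_key: YOUR_API_KEY
#       host: localhost
#       port: 7878
#       monitored_conditions:
#         - movies
#         - diskspace
#       unit: GB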
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Radarr platform."""
conditions = config.get(CONF_MONITORED_CONDITIONS)
add_entities([RadarrSensor(hass, config, sensor) for sensor in conditions], True)
class RadarrSensor(Entity):
"""Implementation of the Radarr sensor."""
def __init__(self, hass, conf, sensor_type):
"""Create Radarr entity."""
from pytz import timezone
self.conf = conf
self.host = conf.get(CONF_HOST)
self.port = conf.get(CONF_PORT)
self.urlbase = conf.get(CONF_URLBASE)
if self.urlbase:
self.urlbase = "{}/".format(self.urlbase.strip("/"))
self.apikey = conf.get(CONF_API_KEY)
self.included = conf.get(CONF_INCLUDED)
self.days = int(conf.get(CONF_DAYS))
self.ssl = "s" if conf.get(CONF_SSL) else ""
self._state = None
self.data = []
self._tz = timezone(str(hass.config.time_zone))
self.type = sensor_type
self._name = SENSOR_TYPES[self.type][0]
if self.type == "diskspace":
self._unit = conf.get(CONF_UNIT)
else:
self._unit = SENSOR_TYPES[self.type][1]
self._icon = SENSOR_TYPES[self.type][2]
self._available = False
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format("Radarr", self._name)
@property
def state(self):
"""Return sensor state."""
return self._state
@property
def available(self):
"""Return sensor availability."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of the sensor."""
return self._unit
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attributes = {}
if self.type == "upcoming":
for movie in self.data:
attributes[to_key(movie)] = get_release_date(movie)
elif self.type == "commands":
for command in self.data:
attributes[command["name"]] = command["state"]
elif self.type == "diskspace":
for data in self.data:
free_space = to_unit(data["freeSpace"], self._unit)
total_space = to_unit(data["totalSpace"], self._unit)
percentage_used = (
0 if total_space == 0 else free_space / total_space * 100
)
attributes[data["path"]] = "{:.2f}/{:.2f}{} ({:.2f}%)".format(
free_space, total_space, self._unit, percentage_used
)
elif self.type == "movies":
for movie in self.data:
attributes[to_key(movie)] = movie["downloaded"]
elif self.type == "status":
attributes = self.data
return attributes
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
def update(self):
"""Update the data for the sensor."""
start = get_date(self._tz)
end = get_date(self._tz, self.days)
try:
res = requests.get(
ENDPOINTS[self.type].format(
self.ssl, self.host, self.port, self.urlbase, start, end
),
headers={"X-Api-Key": self.apikey},
timeout=10,
)
except OSError:
_LOGGER.warning("Host %s is not available", self.host)
self._available = False
self._state = None
return
if res.status_code == 200:
if self.type in ["upcoming", "movies", "commands"]:
self.data = res.json()
self._state = len(self.data)
elif self.type == "diskspace":
# If included paths are not provided, use all data
if self.included == []:
self.data = res.json()
else:
# Filter to only show lists that are included
self.data = list(
filter(lambda x: x["path"] in self.included, res.json())
)
self._state = "{:.2f}".format(
to_unit(sum([data["freeSpace"] for data in self.data]), self._unit)
)
elif self.type == "status":
self.data = res.json()
self._state = self.data["version"]
self._available = True
def get_date(zone, offset=0):
"""Get date based on timezone and offset of days."""
day = 60 * 60 * 24
return datetime.date(datetime.fromtimestamp(time.time() + day * offset, tz=zone))
def get_release_date(data):
"""Get release date."""
date = data.get("physicalRelease")
if not date:
date = data.get("inCinemas")
return date
def to_key(data):
"""Get key."""
return "{} ({})".format(data["title"], data["year"])
def to_unit(value, unit):
    """Convert bytes to the given unit."""
return value / 1024 ** BYTE_SIZES.index(unit)
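# Illustrative check of the conversion above: BYTE_SIZES.index("GB") == 3, so
# to_unit(1024 ** 3, "GB") returns 1.0 and to_unit(5 * 1024 ** 4, "TB") returns 5.0.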
|
|
import logging
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from wagtail.core.log_actions import log
from wagtail.core.signals import page_published
logger = logging.getLogger("wagtail.core")
class PublishPagePermissionError(PermissionDenied):
"""
Raised when the page publish cannot be performed due to insufficient permissions.
"""
pass
class PublishPageRevisionAction:
"""
Publish or schedule revision for publishing.
:param revision: revision to publish
:param user: the publishing user
    :param changed: indicates whether the content has changed
:param log_action:
flag for the logging action. Pass False to skip logging. Cannot pass an action string as the method
performs several actions: "publish", "revert" (and publish the reverted revision),
"schedule publishing with a live revision", "schedule revision reversal publishing, with a live revision",
"schedule publishing", "schedule revision reversal publishing"
:param previous_revision: indicates a revision reversal. Should be set to the previous revision instance
"""
def __init__(self, revision, user=None, changed=True, log_action=True, previous_revision=None):
self.revision = revision
self.page = self.revision.as_page_object()
self.user = user
self.changed = changed
self.log_action = log_action
self.previous_revision = previous_revision
def check(self, skip_permission_checks=False):
if (
self.user
and not skip_permission_checks
and not self.page.permissions_for_user(self.user).can_publish()
):
raise PublishPagePermissionError(
"You do not have permission to publish this page"
)
def log_scheduling_action(self):
log(
instance=self.page,
action="wagtail.publish.schedule",
user=self.user,
data={
"revision": {
"id": self.revision.id,
"created": self.revision.created_at.strftime("%d %b %Y %H:%M"),
"go_live_at": self.page.go_live_at.strftime("%d %b %Y %H:%M"),
"has_live_version": self.page.live,
}
},
revision=self.revision,
content_changed=self.changed,
)
def _publish_page_revision(self, revision, page, user, changed, log_action, previous_revision):
from wagtail.core.models import COMMENTS_RELATION_NAME, PageRevision
if page.go_live_at and page.go_live_at > timezone.now():
page.has_unpublished_changes = True
# Instead set the approved_go_live_at of this revision
revision.approved_go_live_at = page.go_live_at
revision.save()
            # And clear the approved_go_live_at of any other revisions
page.revisions.exclude(id=revision.id).update(approved_go_live_at=None)
# if we are updating a currently live page skip the rest
if page.live_revision:
# Log scheduled publishing
if log_action:
self.log_scheduling_action()
return
# if we have a go_live in the future don't make the page live
page.live = False
else:
page.live = True
# at this point, the page has unpublished changes if and only if there are newer revisions than this one
page.has_unpublished_changes = not revision.is_latest_revision()
# If page goes live clear the approved_go_live_at of all revisions
page.revisions.update(approved_go_live_at=None)
page.expired = False # When a page is published it can't be expired
# Set first_published_at, last_published_at and live_revision
# if the page is being published now
if page.live:
now = timezone.now()
page.last_published_at = now
page.live_revision = revision
if page.first_published_at is None:
page.first_published_at = now
if previous_revision:
previous_revision_page = previous_revision.as_page_object()
old_page_title = (
previous_revision_page.title
if page.title != previous_revision_page.title
else None
)
else:
try:
previous = revision.get_previous()
except PageRevision.DoesNotExist:
previous = None
old_page_title = (
previous.page.title
if previous and page.title != previous.page.title
else None
)
else:
# Unset live_revision if the page is going live in the future
page.live_revision = None
page.save()
for comment in getattr(page, COMMENTS_RELATION_NAME).all().only("position"):
comment.save(update_fields=["position"])
revision.submitted_for_moderation = False
page.revisions.update(submitted_for_moderation=False)
workflow_state = page.current_workflow_state
if workflow_state and getattr(settings, "WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH", True):
workflow_state.cancel(user=user)
if page.live:
page_published.send(sender=page.specific_class, instance=page.specific, revision=revision)
# Update alias pages
page.update_aliases(revision=revision, user=user, _content_json=revision.content_json)
if log_action:
data = None
if previous_revision:
data = {
"revision": {
"id": previous_revision.id,
"created": previous_revision.created_at.strftime(
"%d %b %Y %H:%M"
),
}
}
if old_page_title:
data = data or {}
data["title"] = {
"old": old_page_title,
"new": page.title,
}
log(
instance=page,
action="wagtail.rename",
user=user,
data=data,
revision=revision,
)
log(
instance=page,
action=log_action if isinstance(log_action, str) else "wagtail.publish",
user=user,
data=data,
revision=revision,
content_changed=changed,
)
logger.info(
'Page published: "%s" id=%d revision_id=%d',
page.title,
page.id,
revision.id,
)
elif page.go_live_at:
logger.info(
'Page scheduled for publish: "%s" id=%d revision_id=%d go_live_at=%s',
page.title,
page.id,
revision.id,
page.go_live_at.isoformat(),
)
if log_action:
self.log_scheduling_action()
def execute(self, skip_permission_checks=False):
self.check(skip_permission_checks=skip_permission_checks)
return self._publish_page_revision(
self.revision,
self.page,
user=self.user,
changed=self.changed,
log_action=self.log_action,
previous_revision=self.previous_revision,
)
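# Illustrative usage sketch (not part of the module): given a PageRevision and the
# acting user, publishing follows the check/execute pattern defined above:
#
#   action = PublishPageRevisionAction(revision, user=request.user)
#   action.execute()  # raises PublishPagePermissionError if the user cannot publish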
|
|
#!/usr/bin/env python
#
# test_multibytecodec.py
# Unit test for multibytecodec itself
#
from test import support
from test import test_multibytecodec_support
from test.support import TESTFN
import unittest, io, codecs, sys, os
import _multibytecodec
ALL_CJKENCODINGS = [
# _codecs_cn
'gb2312', 'gbk', 'gb18030', 'hz',
# _codecs_hk
'big5hkscs',
# _codecs_jp
'cp932', 'shift_jis', 'euc_jp', 'euc_jisx0213', 'shift_jisx0213',
'euc_jis_2004', 'shift_jis_2004',
# _codecs_kr
'cp949', 'euc_kr', 'johab',
# _codecs_tw
'big5', 'cp950',
# _codecs_iso2022
'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_2004',
'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr',
]
class Test_MultibyteCodec(unittest.TestCase):
def test_nullcoding(self):
for enc in ALL_CJKENCODINGS:
self.assertEqual(b''.decode(enc), '')
self.assertEqual(str(b'', enc), '')
self.assertEqual(''.encode(enc), b'')
def test_str_decode(self):
for enc in ALL_CJKENCODINGS:
self.assertEqual('abcd'.encode(enc), b'abcd')
def test_errorcallback_longindex(self):
dec = codecs.getdecoder('euc-kr')
myreplace = lambda exc: ('', sys.maxsize+1)
codecs.register_error('test.cjktest', myreplace)
self.assertRaises(IndexError, dec,
b'apple\x92ham\x93spam', 'test.cjktest')
def test_codingspec(self):
try:
for enc in ALL_CJKENCODINGS:
print('# coding:', enc, file=io.open(TESTFN, 'w'))
exec(open(TESTFN).read())
finally:
support.unlink(TESTFN)
def test_init_segfault(self):
# bug #3305: this used to segfault
self.assertRaises(AttributeError,
_multibytecodec.MultibyteStreamReader, None)
self.assertRaises(AttributeError,
_multibytecodec.MultibyteStreamWriter, None)
def test_decode_unicode(self):
        # Trying to decode a unicode string should raise a TypeError
for enc in ALL_CJKENCODINGS:
self.assertRaises(TypeError, codecs.getdecoder(enc), "")
class Test_IncrementalEncoder(unittest.TestCase):
def test_stateless(self):
# cp949 encoder isn't stateful at all.
encoder = codecs.getincrementalencoder('cp949')()
self.assertEqual(encoder.encode('\ud30c\uc774\uc36c \ub9c8\uc744'),
b'\xc6\xc4\xc0\xcc\xbd\xe3 \xb8\xb6\xc0\xbb')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('\u2606\u223c\u2606', True),
b'\xa1\xd9\xa1\xad\xa1\xd9')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('', True), b'')
self.assertEqual(encoder.encode('', False), b'')
self.assertEqual(encoder.reset(), None)
def test_stateful(self):
# jisx0213 encoder is stateful for a few codepoints. eg)
# U+00E6 => A9DC
# U+00E6 U+0300 => ABC4
# U+0300 => ABDC
encoder = codecs.getincrementalencoder('jisx0213')()
self.assertEqual(encoder.encode('\u00e6\u0300'), b'\xab\xc4')
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertEqual(encoder.encode('\u0300'), b'\xab\xc4')
self.assertEqual(encoder.encode('\u00e6', True), b'\xa9\xdc')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('\u0300'), b'\xab\xdc')
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertEqual(encoder.encode('', True), b'\xa9\xdc')
self.assertEqual(encoder.encode('', True), b'')
def test_stateful_keep_buffer(self):
encoder = codecs.getincrementalencoder('jisx0213')()
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
self.assertEqual(encoder.encode('\u0300\u00e6'), b'\xab\xc4')
self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('\u0300'), b'\xab\xdc')
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
self.assertEqual(encoder.encode('', True), b'\xa9\xdc')
def test_issue5640(self):
encoder = codecs.getincrementalencoder('shift-jis')('backslashreplace')
self.assertEqual(encoder.encode('\xff'), b'\\xff')
self.assertEqual(encoder.encode('\n'), b'\n')
class Test_IncrementalDecoder(unittest.TestCase):
def test_dbcs(self):
# cp949 decoder is simple with only 1 or 2 bytes sequences.
decoder = codecs.getincrementaldecoder('cp949')()
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0\xcc\xbd'),
'\ud30c\uc774')
self.assertEqual(decoder.decode(b'\xe3 \xb8\xb6\xc0\xbb'),
'\uc36c \ub9c8\uc744')
self.assertEqual(decoder.decode(b''), '')
def test_dbcs_keep_buffer(self):
decoder = codecs.getincrementaldecoder('cp949')()
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True)
self.assertEqual(decoder.decode(b'\xcc'), '\uc774')
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c')
self.assertRaises(UnicodeDecodeError, decoder.decode,
b'\xcc\xbd', True)
self.assertEqual(decoder.decode(b'\xcc'), '\uc774')
def test_iso2022(self):
decoder = codecs.getincrementaldecoder('iso2022-jp')()
ESC = b'\x1b'
self.assertEqual(decoder.decode(ESC + b'('), '')
self.assertEqual(decoder.decode(b'B', True), '')
self.assertEqual(decoder.decode(ESC + b'$'), '')
self.assertEqual(decoder.decode(b'B@$'), '\u4e16')
self.assertEqual(decoder.decode(b'@$@'), '\u4e16')
self.assertEqual(decoder.decode(b'$', True), '\u4e16')
self.assertEqual(decoder.reset(), None)
self.assertEqual(decoder.decode(b'@$'), '@$')
self.assertEqual(decoder.decode(ESC + b'$'), '')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True)
self.assertEqual(decoder.decode(b'B@$'), '\u4e16')
def test_decode_unicode(self):
        # Trying to decode a unicode string should raise a TypeError
for enc in ALL_CJKENCODINGS:
decoder = codecs.getincrementaldecoder(enc)()
self.assertRaises(TypeError, decoder.decode, "")
class Test_StreamReader(unittest.TestCase):
def test_bug1728403(self):
try:
f = open(TESTFN, 'wb')
try:
f.write(b'\xa1')
finally:
f.close()
f = codecs.open(TESTFN, encoding='cp949')
try:
self.assertRaises(UnicodeDecodeError, f.read, 2)
finally:
f.close()
finally:
support.unlink(TESTFN)
class Test_StreamWriter(unittest.TestCase):
if len('\U00012345') == 2: # UCS2
def test_gb18030(self):
s= io.BytesIO()
c = codecs.getwriter('gb18030')(s)
c.write('123')
self.assertEqual(s.getvalue(), b'123')
c.write('\U00012345')
self.assertEqual(s.getvalue(), b'123\x907\x959')
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(), b'123\x907\x959')
c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
self.assertEqual(s.getvalue(),
b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(),
b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
self.assertRaises(UnicodeError, c.reset)
self.assertEqual(s.getvalue(),
b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
def test_utf_8(self):
s= io.BytesIO()
c = codecs.getwriter('utf-8')(s)
c.write('123')
self.assertEqual(s.getvalue(), b'123')
c.write('\U00012345')
self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85')
# Python utf-8 codec can't buffer surrogate pairs yet.
if 0:
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85')
c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac')
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac')
c.reset()
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac\xed\xa0\x88')
c.write('\U00012345'[1])
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
else: # UCS4
pass
def test_streamwriter_strwrite(self):
s = io.BytesIO()
wr = codecs.getwriter('gb18030')(s)
wr.write('abcd')
self.assertEqual(s.getvalue(), b'abcd')
class Test_ISO2022(unittest.TestCase):
def test_g2(self):
iso2022jp2 = b'\x1b(B:hu4:unit\x1b.A\x1bNi de famille'
uni = ':hu4:unit\xe9 de famille'
self.assertEqual(iso2022jp2.decode('iso2022-jp-2'), uni)
def test_iso2022_jp_g0(self):
self.failIf(b'\x0e' in '\N{SOFT HYPHEN}'.encode('iso-2022-jp-2'))
for encoding in ('iso-2022-jp-2004', 'iso-2022-jp-3'):
e = '\u3406'.encode(encoding)
self.failIf(any(x > 0x80 for x in e))
def test_bug1572832(self):
if sys.maxunicode >= 0x10000:
myunichr = chr
else:
myunichr = lambda x: chr(0xD7C0+(x>>10)) + chr(0xDC00+(x&0x3FF))
for x in range(0x10000, 0x110000):
# Any ISO 2022 codec will cause the segfault
myunichr(x).encode('iso_2022_jp', 'ignore')
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
|
import linalg
import numpy
from itertools import chain
import warnings
from scipy.optimize import leastsq
from math import degrees
def find_plane_of_ring(centered_ring_coords):
'''
Returns the normal to the plane of best fit of a ring system
Accepts:
A numpy array of the coordinates of the atoms in the ring
Returns:
A numpy array of the vector normal to the plane of the ring
'''
p0 = [1.0, 1.0, 1.0, 1.0]
def f_min(X, p):
plane_xyz = p[0:3]
distance = (plane_xyz*X).sum(axis=1) + p[3]
return distance / numpy.linalg.norm(plane_xyz)
def residuals(params, signal, X):
return f_min(X, params)
sol = leastsq(residuals, p0, args=(None,centered_ring_coords))[0]
norm = numpy.array(sol[0:3])
return norm
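# Illustrative usage sketch: the coordinates passed in are expected to already be
# centred on the ring, exactly as is_flippable() below prepares them:
#
#   coords = numpy.array([mol.atoms[i].coords for i in ring_at_ids])
#   norm = find_plane_of_ring(coords - numpy.average(coords, axis=0))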
def is_flippable(mol, ring_at_ids):
'''
Test if a ring is flippable
Rings which are noticeably non-planar are defined as being flippable
Takes:
A molecule object
A list of atom ids of the ring
Returns:
A boolean
'''
# 3 points define a plane, and <4 atoms can't be non-planar
if len(ring_at_ids) <= 3:
return False
# Center the ring
ring_coords = numpy.array([mol.atoms[i].coords for i in ring_at_ids])
center_of_ring = numpy.average(ring_coords, axis=0)
ring_coords -= center_of_ring
# Find the normal to the plane of the ring, and normalise it (set length to 1)
with warnings.catch_warnings(record=True) as warnings_log:
norm = linalg.normalise(find_plane_of_ring(ring_coords))
if len([i for i in warnings_log if 'reached maxfev' in str(i.message)]):
# leastsq in find_plane_of_rings didn't converge.
# Likely to be caused by a flat gradient near the minimum
# so it *shouldn't* affect anything
# Let the user know just in case
ring_name = ', '.join([str(i) for i in ring_at_ids])
msg = 'Ring_lib.is_flippable() can\'t fit a plane to the ring with atoms %s. Continuing. This may result in identical conformers being produced, so please check the output ensemble'
warnings.warn(msg %ring_name, RuntimeWarning)
xy_align = linalg.rotation_from_axes(numpy.array([0.0, 0.0, 1.0]), norm)
aligned_ring_coords = ring_coords.dot(xy_align)
for each_at_coords in aligned_ring_coords:
if each_at_coords[2] > 0.1: #Is this a suitable value? I don't know
return True
return False
def all_ring_conformers(mol, full_flip=2):
'''
Find all combinations of all ring conformers for all the rings in a molecule
Accepts:
A molecule object
        An integer describing the degree of ring conformer generation to undertake:
none (0), partial (1), or full (2)
Returns:
        A list of molecule objects, each with a different combination of ring conformers
'''
all_ring_systems = find_ring_systems(mol)
ring_conformer_combinations = [mol]
for each_ring_system in all_ring_systems:
if is_flippable(mol, each_ring_system):
grouped_conformer_combinations = [find_ring_conformers(i, each_ring_system, full_flip)
for i in ring_conformer_combinations]
ring_conformer_combinations = [i for i in chain(*grouped_conformer_combinations)]
return ring_conformer_combinations
def find_ring_conformers(base_mol, each_ring_system, full_flip=2):
'''
Finds all unique conformers for a given ring system
Accepts:
A molecule object
A list of atom ids of the ring
        An integer describing the degree of ring conformer generation to undertake:
none (0), partial (1), or full (2)
Returns:
A list of molecule objects, each with a different combination of ring conformers
'''
if full_flip > 1: # Full ring conformer generation
ret = []
found_conformers = []
all_dihedrals = [i for i in base_mol.all_torsions()]
idx = 1
for each_base_conformer in base_ring_variations(base_mol, each_ring_system):
torsions = [int(degrees(each_base_conformer.get_torsion(*i)))
for i in all_dihedrals]
if ((torsions not in found_conformers) and
([-1 * i for i in torsions] not in found_conformers)):
ret.append(each_base_conformer)
found_conformers.append(torsions)
mirror_image = flip_ring(each_base_conformer, each_ring_system)
torsions = [int(degrees(mirror_image.get_torsion(*i))) for i in all_dihedrals]
if ((torsions not in found_conformers) and
([-1 * i for i in torsions] not in found_conformers)):
ret.append(mirror_image)
found_conformers.append(torsions)
idx += 1
elif full_flip == 1: # Partial ring conformer generation
ret = [base_mol.copy(), flip_ring(base_mol, each_ring_system)]
else: # No ring conformer generation
ret = [base_mol.copy()]
return ret
def base_ring_variations(base_mol, each_ring_system):
'''
Finds all two-atom flips-of-fragments
The flip of fragments is defined in Mekenyan et al, J. Chem. Inf. Model. 2005, p. 283
Accepts:
A molecule object
A list of atom ids of the ring
Returns:
A list of molecule objects, each with a different combination of ring conformers
'''
aligned_mol = base_mol.copy() # Don't overwrite the base molecule
# Center the molecule on the ring to be flipped
ring_coords = numpy.array([aligned_mol.atoms[i].coords for i in each_ring_system])
center_of_ring = numpy.average(ring_coords, axis=0)
aligned_mol.translate(-1*center_of_ring)
# Align the ring to be flipped on the xy plane
ring_coords = numpy.array([aligned_mol.atoms[i].coords for i in each_ring_system])
norm = find_plane_of_ring(ring_coords)
xy_align = linalg.rotation_from_axes(norm, numpy.array([0.0, 0.0, 1.0]))
aligned_mol.rotate(xy_align)
# Build the list of flips of fragments
ret = [aligned_mol.copy()]
ring_coords = numpy.array([aligned_mol.atoms[i].coords for i in each_ring_system])
z_to_precision = [abs(int(round(100*i[2]))) for i in ring_coords]
if len(set(z_to_precision)) > 1 and len(each_ring_system) > 4:
# Make a list of two-atom groups in the ring
tmp_neighbour_pairs = [[frozenset([i, j]) for j in aligned_mol.atoms[i].get_bond_ids()
if j in each_ring_system]
for i in each_ring_system]
neighbour_pairs = set([i for i in chain(*tmp_neighbour_pairs)])
for each_pair_to_flip in neighbour_pairs:
            # The pair needs a distinct order, which frozensets don't have
each_pair_to_flip = list(each_pair_to_flip)
new_mol = aligned_mol.copy()
            junction = []
for i, j in zip(each_pair_to_flip, each_pair_to_flip[::-1]):
for k in new_mol.atoms[i].search_away_from(j):
if k in each_ring_system:
junction.append(k)
# Don't flip the ring at a ring junction
if len(junction) != 2:
break
# Don't flip atoms with pi-bonding
if (base_mol.atoms[junction[0]].get_hybridisation() != 3 or
base_mol.atoms[junction[1]].get_hybridisation() != 3):
break
substituents = []
for i in each_pair_to_flip + junction:
for j in chain(*new_mol.atoms[i].all_neighbours_away_from(*each_ring_system)):
substituents.append(j)
atoms_reference = []
for i, j in zip(junction, each_pair_to_flip):
for k in new_mol.atoms[i].search_away_from(j):
if k in each_ring_system:
atoms_reference.append(k)
translate_by = sum([new_mol.atoms[i].coords for i in junction]) / -2.0
new_mol.translate(translate_by)
reference_point = sum([new_mol.atoms[i].coords for i in atoms_reference]) / 2.0
reflect_by = linalg.reflection_plane(new_mol.atoms[junction[0]].coords, reference_point)
for each_id in substituents + each_pair_to_flip:
each_atom = new_mol.atoms[each_id]
each_atom.coords = reflect_by.dot(each_atom.coords)
for each_id in each_pair_to_flip + junction:
flip_substituents(new_mol, each_ring_system, each_id)
new_mol.translate(-1*translate_by)
ret.append(new_mol)
return ret
def flip_ring(base_mol, each_ring_system):
'''
Takes the mirror image of a ring skeleton while preserving stereochemistry
at all the individual ring atoms
This includes flipping a chair or half-chair conformation
Accepts:
A molecule object
A list of atom ids of the ring
Returns:
A molecule object
'''
mirror_image = base_mol.copy()
z_mirror = numpy.array([1.0, 1.0, -1.0])
for each_atom in mirror_image.atoms:
each_atom.coords *= z_mirror
# Fix the stereochemistry
for each_id_ring_atom in each_ring_system:
each_ring_atom = mirror_image.atoms[each_id_ring_atom]
in_ring_neighbours = [mirror_image.atoms[i] for i in filter(lambda x: x in each_ring_system,
each_ring_atom.get_bond_ids())]
if len(in_ring_neighbours) <= 2:
flip_substituents(mirror_image, each_ring_system, each_id_ring_atom)
return mirror_image
def flip_substituents(mol, each_ring_system, at_id):
'''
Change the stereochemistry at atom <at_id> in molecule <mol>
This is part of the flip-of-fragments operation of Mekenyan et al
See Mekenyan et al, J. Chem. Inf. Model. 2005, p. 283
Accepts:
A molecule object
A list of atom ids of the ring
The atom id to be corrected
Returns:
Nothing, this is a destructive method
'''
ring_atom = mol.atoms[at_id]
in_ring_neighbours = [mol.atoms[i] for i in ring_atom.get_bond_ids()
if i in each_ring_system]
neighbour_ids = [i.get_id() for i in in_ring_neighbours]
substituents = [mol.atoms[i] for i in ring_atom.all_neighbours_away_from(*neighbour_ids)]
# Centre everything:
translate_by = -1 * ring_atom.coords
for i in in_ring_neighbours + substituents:
i.translate(translate_by)
second_reflection = linalg.reflection_plane(in_ring_neighbours[0].coords,
in_ring_neighbours[1].coords)
for each_atom in substituents:
each_atom.coords = second_reflection.dot(each_atom.coords)
translate_back = -1 * translate_by
for each_atom in in_ring_neighbours + substituents:
each_atom.translate(translate_back)
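# Illustrative sketch (not part of the original module): `linalg.reflection_plane(p1, p2)`
# is used above as a matrix whose dot product reflects coordinates through the plane
# containing the origin and the two given points. The internals of the local `linalg`
# helper are not shown here; a conventional construction is the Householder reflection
# about the plane's normal n = p1 x p2, i.e. R = I - 2 * n * n^T with n normalised.
# The sketch below assumes `numpy` is imported as in the surrounding module.
def _reflection_plane_sketch(p1, p2):
    """Hypothetical stand-in: reflection matrix through the plane spanned by p1 and p2."""
    normal = numpy.cross(p1, p2)
    normal = normal / numpy.linalg.norm(normal)
    return numpy.eye(3) - 2.0 * numpy.outer(normal, normal)
# Example: reflecting through the xy plane negates only the z coordinate, e.g.
# _reflection_plane_sketch(numpy.array([1.0, 0, 0]), numpy.array([0, 1.0, 0])).dot([1.0, 2.0, 3.0])
# -> array([ 1.,  2., -3.])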
def find_ring_systems(mol):
'''
Groups all ring systems in a molecule that share atoms together
Takes: a molecule object
Returns: a list of lists of ids
Each list in the return is all atoms that are connected by an unbroken set of rings
That is, any two atoms are a) in the same ring
b) in fused rings
c) in bridged rings
Spiro rings are not included because their conformations are not coupled
(e.g. a spiro[6,6] system will have the two rings flip independently,
whereas in a cis-decalin both must flip at the same time)
'''
ring_systems = {}
new_key = 0
rings = [i for i in mol.rings]
while len(rings) > 0:
curr_ring = [rings.pop()]
for k, v in list(ring_systems.items()):  # iterate over a copy; the dict may shrink below
count = 0
for i in v:
if i in curr_ring[0]:
if count:
curr_ring.append(v)
del ring_systems[k]
break
else:
count += 1
expanded_system = set([i for i in
chain(*curr_ring)])
ring_systems[new_key] = expanded_system
new_key += 1
return ring_systems.values()
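# Worked example (hypothetical input, not taken from the original source). Assuming
# `mol.rings` yields collections of atom ids, fused or bridged rings (sharing two or
# more atoms) end up in the same returned system, while spiro rings (sharing a single
# atom) stay separate:
#
#   ring A: {0, 1, 2, 3, 4, 5}
#   ring B: {4, 5, 6, 7, 8, 9}        # fused to A at atoms 4 and 5
#   ring C: {9, 10, 11, 12, 13}       # spiro-joined to B at atom 9 only
#
# find_ring_systems(mol) would then return two systems:
#   {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}    # A and B merged
#   {9, 10, 11, 12, 13}               # C on its own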
|
|
import os
import numpy as np
from ..utils.importing import import_module_from_source
class ImageClassifier(object):
"""
ImageClassifier workflow.
This workflow is used to train image classification tasks, typically when
the dataset cannot be stored in memory.
Submissions need to contain two files, which by default are named:
image_preprocessor.py and batch_classifier.py (they can be modified
by changing `workflow_element_names`).
image_preprocessor.py needs a `transform` function, which
is used for preprocessing the images. It takes an image as input
and returns an image as output. Optionally, image_preprocessor.py
can also have a function `transform_test`, which is used only to preprocess
images at test time. Otherwise, if `transform_test` does not exist,
`transform` is used at train and test time.
batch_classifier.py needs a `BatchClassifier` class, which implements
`fit` and `predict_proba`, where `fit` takes as input an instance
of `BatchGeneratorBuilder`.
Parameters
==========
test_batch_size : int
batch size used for testing.
chunk_size : int
size of the chunk used to load data from disk into memory.
(see the top of the file for what a chunk is and how it differs
from the mini-batch size of neural nets).
n_jobs : int
the number of jobs used to load images from disk to memory as `chunks`.
n_classes : int
Total number of classes.
"""
def __init__(self, test_batch_size, chunk_size, n_jobs, n_classes,
workflow_element_names=[
'image_preprocessor', 'batch_classifier']):
self.element_names = workflow_element_names
self.test_batch_size = test_batch_size
self.chunk_size = chunk_size
self.n_jobs = n_jobs
self.n_classes = n_classes
def train_submission(self, module_path, folder_X_array, y_array,
train_is=None):
"""Train a batch image classifier.
module_path : str
module where the submission is. The folder of the module
has to contain batch_classifier.py and image_preprocessor.py.
X_array : ArrayContainer vector of int
vector of image IDs to train on
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
y_array : vector of int
vector of image labels corresponding to X_train
train_is : vector of int
indices from X_array to train on
"""
folder, X_array = folder_X_array
if train_is is None:
train_is = slice(None, None, None)
image_preprocessor = import_module_from_source(
os.path.join(module_path, self.element_names[0] + '.py'),
self.element_names[0],
sanitize=True
)
transform_img = image_preprocessor.transform
transform_test_img = getattr(image_preprocessor,
'transform_test',
transform_img)
batch_classifier = import_module_from_source(
os.path.join(module_path, self.element_names[1] + '.py'),
self.element_names[1],
sanitize=True
)
clf = batch_classifier.BatchClassifier()
gen_builder = BatchGeneratorBuilder(
X_array[train_is], y_array[train_is],
transform_img, transform_test_img,
folder=folder,
chunk_size=self.chunk_size, n_classes=self.n_classes,
n_jobs=self.n_jobs)
clf.fit(gen_builder)
return transform_img, transform_test_img, clf
def test_submission(self, trained_model, folder_X_array):
"""Train a batch image classifier.
trained_model : tuple (function, Classifier)
tuple of a trained model returned by `train_submission`.
X_array : ArrayContainer of int
vector of image IDs to test on.
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
"""
folder, X_array = folder_X_array
transform_img, transform_test_img, clf = trained_model
it = _chunk_iterator(
X_array, folder=folder, chunk_size=self.chunk_size)
y_proba = []
for X in it:
for i in range(0, len(X), self.test_batch_size):
# 1) Preprocessing
X_batch = X[i: i + self.test_batch_size]
# X_batch = Parallel(n_jobs=self.n_jobs, backend='threading')(
# delayed(transform_img)(x) for x in X_batch)
X_batch = [transform_test_img(x) for x in X_batch]
# X is a list of numpy arrays at this point, convert it to a
# single numpy array.
try:
X_batch = [x[np.newaxis, :, :, :] for x in X_batch]
except IndexError:
# single channel
X_batch = [
x[np.newaxis, np.newaxis, :, :] for x in X_batch]
X_batch = np.concatenate(X_batch, axis=0)
# 2) Prediction
y_proba_batch = clf.predict_proba(X_batch)
y_proba.append(y_proba_batch)
y_proba = np.concatenate(y_proba, axis=0)
return y_proba
class BatchGeneratorBuilder(object):
"""A batch generator builder for generating images on the fly.
This class is a way to build training and
validation generators that yield each time a tuple (X, y) of mini-batches.
The generators are built in a way to fit into keras API of `fit_generator`
(see https://keras.io/models/model/).
An instance of this class is exposed to the user's `Classifier` through
the `fit` function: model fitting is called by using
"clf.fit(gen_builder)", where `gen_builder` is an instance
of this class, `BatchGeneratorBuilder`.
The fit function from `Classifier` should then use the instance
to build train and validation generators, using the method
`get_train_valid_generators`
Parameters
==========
X_array : ArrayContainer of int
vector of image IDs to train on
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
y_array : vector of int
vector of image labels corresponding to `X_array`
folder : str
folder where the images are
chunk_size : int
size of the chunk used to load data from disk into memory.
(see the top of the file for what a chunk is and how it differs
from the mini-batch size of neural nets).
n_classes : int
Total number of classes. This is needed because the array
of labels, which is a vector of ints, is transformed into
a onehot representation.
n_jobs : int
the number of jobs used to load images from disk to memory as `chunks`.
"""
def __init__(self, X_array, y_array,
transform_img, transform_test_img,
folder, chunk_size, n_classes, n_jobs):
self.X_array = X_array
self.y_array = y_array
self.transform_img = transform_img
self.transform_test_img = transform_test_img
self.folder = folder
self.chunk_size = chunk_size
self.n_classes = n_classes
self.n_jobs = n_jobs
self.nb_examples = len(X_array)
def get_train_valid_generators(self, batch_size=256, valid_ratio=0.1):
"""Build train and valid generators for keras.
This method is used by the user-defined `Classifier` to build train
and valid generators that will be used in keras `fit_generator`.
Parameters
==========
batch_size : int
size of mini-batches
valid_ratio : float between 0 and 1
ratio of validation data
Returns
=======
a 4-tuple (gen_train, gen_valid, nb_train, nb_valid) where:
- gen_train is a generator function for training data
- gen_valid is a generator function for valid data
- nb_train is the number of training examples
- nb_valid is the number of validation examples
The numbers of training and validation examples are necessary
so that we can use the keras method `fit_generator`.
"""
nb_valid = int(valid_ratio * self.nb_examples)
nb_train = self.nb_examples - nb_valid
indices = np.arange(self.nb_examples)
train_indices = indices[0:nb_train]
valid_indices = indices[nb_train:]
gen_train = self._get_generator(
indices=train_indices, batch_size=batch_size)
gen_valid = self._get_generator(
indices=valid_indices, batch_size=batch_size)
return gen_train, gen_valid, nb_train, nb_valid
def _get_generator(self, indices=None, batch_size=256):
if indices is None:
indices = np.arange(self.nb_examples)
# Infinite loop, as required by keras `fit_generator`.
# However, since we provide the number of examples per epoch
# and the user specifies the total number of epochs, training
# will still terminate.
while True:
it = _chunk_iterator(
X_array=self.X_array[indices], folder=self.folder,
y_array=self.y_array[indices], chunk_size=self.chunk_size,
n_jobs=self.n_jobs)
for X, y in it:
# 1) Preprocessing of X and y
# X = Parallel(
# n_jobs=self.n_jobs, backend='threading')(delayed(
# self.transform_img)(x) for x in X)
X = np.array([self.transform_img(x) for x in X])
# X is a list of numpy arrays at this point, convert it to a
# single numpy array.
try:
X = [x[np.newaxis, :, :, :] for x in X]
except IndexError:
# single channel
X = [x[np.newaxis, np.newaxis, :, :] for x in X]
X = np.concatenate(X, axis=0)
X = np.array(X, dtype='float32')
# Convert y to onehot representation
y = _to_categorical(y, num_classes=self.n_classes)
# 2) Yielding mini-batches
for i in range(0, len(X), batch_size):
yield X[i:i + batch_size], y[i:i + batch_size]
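# Minimal sketch of the `BatchClassifier` contract described in the docstrings above.
# This class is illustrative only (an assumption, not part of the original workflow);
# a real submission would train a model here, e.g. a keras model via `fit_generator`,
# using `get_nb_minibatches(nb_train, batch_size)` as the number of steps per epoch.
class _ExampleBatchClassifier(object):
    """Hypothetical classifier showing how `fit` consumes a BatchGeneratorBuilder."""
    def fit(self, gen_builder):
        gen_train, gen_valid, nb_train, nb_valid = \
            gen_builder.get_train_valid_generators(batch_size=32, valid_ratio=0.1)
        # A real implementation would iterate over gen_train / gen_valid here; this
        # sketch only peeks at one mini-batch to record the number of classes.
        X_batch, y_batch = next(gen_train)
        self.n_classes_ = y_batch.shape[1]
    def predict_proba(self, X):
        # Placeholder prediction: uniform probabilities over the observed classes.
        return np.full((len(X), self.n_classes_), 1.0 / self.n_classes_)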
def _chunk_iterator(X_array, folder, y_array=None, chunk_size=1024, n_jobs=8):
"""Generate chunks of images, optionally with their labels.
Parameters
==========
X_array : ArrayContainer of int
image ids to load
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
y_array : vector of int
labels corresponding to each image from X_array
chunk_size : int
chunk size
folder : str
folder where the images are
n_jobs : int
number of jobs used to load images in parallel
Yields
======
if y_array is provided (not None):
it yields each time a tuple (X, y) where X is a list
of numpy arrays of images and y is a list of ints (labels).
The length of X and y is `chunk_size` at most (it can be smaller).
if y_array is not provided (it is None)
it yields each time X where X is a list of numpy arrays
of images. The length of X is `chunk_size` at most (it can be
smaller).
This is used for testing, where we don't have/need the labels.
The shape of each element of X in both cases
is (height, width, color), where color is 1 or 3 or 4 and height/width
vary according to examples (hence the fact that X is a list instead of
numpy array).
"""
from skimage.io import imread
from joblib import delayed
from joblib import Parallel
for i in range(0, len(X_array), chunk_size):
X_chunk = X_array[i:i + chunk_size]
filenames = [
os.path.join(folder, '{}'.format(x))
for x in X_chunk]
X = Parallel(n_jobs=n_jobs, backend='threading')(delayed(imread)(
filename) for filename in filenames)
if y_array is not None:
y = y_array[i:i + chunk_size]
yield X, y
else:
yield X
def _to_categorical(y, num_classes=None):
"""Convert a class vector (integers) to binary class matrix.
Taken from keras:
https://github.com/fchollet/keras/blob/master/keras/utils/np_utils.py
The reason it was taken from keras is to avoid importing theano which
clashes with pytorch.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input.
"""
y = np.array(y, dtype='int').ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes))
categorical[np.arange(n), y] = 1
return categorical
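# Example of the conversion performed above (values chosen for illustration):
# _to_categorical([0, 2, 1], num_classes=3) returns
#   array([[1., 0., 0.],
#          [0., 0., 1.],
#          [0., 1., 0.]])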
def get_nb_minibatches(nb_samples, batch_size):
"""Compute the number of minibatches for keras.
See [https://keras.io/models/sequential]
"""
return (nb_samples // batch_size) +\
(1 if (nb_samples % batch_size) > 0 else 0)
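# Worked example: with 1050 samples and a batch size of 256,
# get_nb_minibatches(1050, 256) == 5 (four full mini-batches of 256 plus one of 26),
# which is the `steps_per_epoch` value keras expects for `fit_generator`.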
|
|
import itertools
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from wtforms.compat import with_metaclass, iteritems, itervalues
from wtforms.meta import DefaultMeta
__all__ = (
'BaseForm',
'Form',
)
class BaseForm(object):
"""
Base Form Class. Provides core behaviour like field construction,
validation, and data and error proxying.
"""
def __init__(self, fields, prefix='', meta=DefaultMeta()):
"""
:param fields:
A dict or sequence of 2-tuples of partially-constructed fields.
:param prefix:
If provided, all fields will have their name prefixed with the
value.
:param meta:
A meta instance which is used for configuration and customization
of WTForms behaviors.
"""
if prefix and prefix[-1] not in '-_;:/.':
prefix += '-'
self.meta = meta
self._prefix = prefix
self._errors = None
self._fields = OrderedDict()
if hasattr(fields, 'items'):
fields = fields.items()
translations = self._get_translations()
extra_fields = []
if meta.csrf:
self._csrf = meta.build_csrf(self)
extra_fields.extend(self._csrf.setup_form(self))
for name, unbound_field in itertools.chain(fields, extra_fields):
options = dict(name=name, prefix=prefix, translations=translations)
field = meta.bind_field(self, unbound_field, options)
self._fields[name] = field
def __iter__(self):
"""Iterate form fields in creation order."""
return iter(itervalues(self._fields))
def __contains__(self, name):
""" Returns `True` if the named field is a member of this form. """
return (name in self._fields)
def __getitem__(self, name):
""" Dict-style access to this form's fields."""
return self._fields[name]
def __setitem__(self, name, value):
""" Bind a field to this form. """
self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)
def __delitem__(self, name):
""" Remove a field from this form. """
del self._fields[name]
def _get_translations(self):
"""
.. deprecated:: 2.0
`_get_translations` is being removed in WTForms 3.0, use
`Meta.get_translations` instead.
Override in subclasses to provide alternate translations factory.
Must return an object that provides gettext() and ngettext() methods.
"""
return self.meta.get_translations(self)
def populate_obj(self, obj):
"""
Populates the attributes of the passed `obj` with data from the form's
fields.
:note: This is a destructive operation; Any attribute with the same name
as a field will be overridden. Use with caution.
"""
for name, field in iteritems(self._fields):
field.populate_obj(obj, name)
def process(self, formdata=None, obj=None, data=None, **kwargs):
"""
Take form, object data, and keyword arg input and have the fields
process them.
:param formdata:
Used to pass data coming from the enduser, usually `request.POST` or
equivalent.
:param obj:
If `formdata` is empty or not provided, this object is checked for
attributes matching form field names, which will be used for field
values.
:param data:
If provided, must be a dictionary of data. This is only used if
`formdata` is empty or not provided and `obj` does not contain
an attribute named the same as the field.
:param `**kwargs`:
If `formdata` is empty or not provided and `obj` does not contain
an attribute named the same as a field, form will assign the value
of a matching keyword argument to the field, if one exists.
"""
formdata = self.meta.wrap_formdata(self, formdata)
if data is not None:
# XXX we want to eventually process 'data' as a new entity.
# Temporarily, this can simply be merged with kwargs.
kwargs = dict(data, **kwargs)
for name, field, in iteritems(self._fields):
if obj is not None and hasattr(obj, name):
field.process(formdata, getattr(obj, name))
elif name in kwargs:
field.process(formdata, kwargs[name])
else:
field.process(formdata)
def validate(self, extra_validators=None):
"""
Validates the form by calling `validate` on each field.
:param extra_validators:
If provided, is a dict mapping field names to a sequence of
callables which will be passed as extra validators to the field's
`validate` method.
Returns `True` if no errors occur.
"""
self._errors = None
success = True
for name, field in iteritems(self._fields):
if extra_validators is not None and name in extra_validators:
extra = extra_validators[name]
else:
extra = tuple()
if not field.validate(self, extra):
success = False
return success
@property
def data(self):
return dict((name, f.data) for name, f in iteritems(self._fields))
@property
def errors(self):
if self._errors is None:
self._errors = dict((name, f.errors) for name, f in iteritems(self._fields) if f.errors)
return self._errors
class FormMeta(type):
"""
The metaclass for `Form` and any subclasses of `Form`.
`FormMeta`'s responsibility is to create the `_unbound_fields` list, which
is a list of `UnboundField` instances sorted by their order of
instantiation. The list is created at the first instantiation of the form.
If any fields are added/removed from the form, the list is cleared to be
re-generated on the next instantiation.
Any properties which begin with an underscore or are not `UnboundField`
instances are ignored by the metaclass.
"""
def __init__(cls, name, bases, attrs):
type.__init__(cls, name, bases, attrs)
cls._unbound_fields = None
cls._wtforms_meta = None
def __call__(cls, *args, **kwargs):
"""
Construct a new `Form` instance, creating `_unbound_fields` on the
class if it is empty.
"""
if cls._unbound_fields is None:
fields = []
for name in dir(cls):
if not name.startswith('_'):
unbound_field = getattr(cls, name)
if hasattr(unbound_field, '_formfield'):
fields.append((name, unbound_field))
# We keep the name as the second element of the sort
# to ensure a stable sort.
fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
cls._unbound_fields = fields
if cls._wtforms_meta is None:
bases = []
for mro_class in cls.__mro__:
if 'Meta' in mro_class.__dict__:
bases.append(mro_class.Meta)
cls._wtforms_meta = type('Meta', tuple(bases), {})
return type.__call__(cls, *args, **kwargs)
def __setattr__(cls, name, value):
"""
Add an attribute to the class, clearing `_unbound_fields` if needed.
"""
if name == 'Meta':
cls._wtforms_meta = None
elif not name.startswith('_') and hasattr(value, '_formfield'):
cls._unbound_fields = None
type.__setattr__(cls, name, value)
def __delattr__(cls, name):
"""
Remove an attribute from the class, clearing `_unbound_fields` if
needed.
"""
if not name.startswith('_'):
cls._unbound_fields = None
type.__delattr__(cls, name)
class Form(with_metaclass(FormMeta, BaseForm)):
"""
Declarative Form base class. Extends BaseForm's core behaviour allowing
fields to be defined on Form subclasses as class attributes.
In addition, form and instance input data are taken at construction time
and passed to `process()`.
"""
Meta = DefaultMeta
def __init__(self, formdata=None, obj=None, prefix='', data=None, meta=None, **kwargs):
"""
:param formdata:
Used to pass data coming from the enduser, usually `request.POST` or
equivalent. formdata should be some sort of request-data wrapper which
can get multiple parameters from the form input, and values are unicode
strings, e.g. a Werkzeug/Django/WebOb MultiDict
:param obj:
If `formdata` is empty or not provided, this object is checked for
attributes matching form field names, which will be used for field
values.
:param prefix:
If provided, all fields will have their name prefixed with the
value.
:param data:
Accept a dictionary of data. This is only used if `formdata` and
`obj` are not present.
:param meta:
If provided, this is a dictionary of values to override attributes
on this form's meta instance.
:param `**kwargs`:
If `formdata` is empty or not provided and `obj` does not contain
an attribute named the same as a field, form will assign the value
of a matching keyword argument to the field, if one exists.
"""
meta_obj = self._wtforms_meta()
if meta is not None and isinstance(meta, dict):
meta_obj.update_values(meta)
super(Form, self).__init__(self._unbound_fields, meta=meta_obj, prefix=prefix)
for name, field in iteritems(self._fields):
# Set all the fields to attributes so that they obscure the class
# attributes with the same names.
setattr(self, name, field)
self.process(formdata, obj, data=data, **kwargs)
def __setitem__(self, name, value):
raise TypeError('Fields may not be added to Form instances, only classes.')
def __delitem__(self, name):
del self._fields[name]
setattr(self, name, None)
def __delattr__(self, name):
if name in self._fields:
self.__delitem__(name)
else:
# This is done for idempotency: if we have a name which is a field,
# we want to mask it by setting the value to None.
unbound_field = getattr(self.__class__, name, None)
if unbound_field is not None and hasattr(unbound_field, '_formfield'):
setattr(self, name, None)
else:
super(Form, self).__delattr__(name)
def validate(self):
"""
Validates the form by calling `validate` on each field, passing any
extra `Form.validate_<fieldname>` validators to the field validator.
"""
extra = {}
for name in self._fields:
inline = getattr(self.__class__, 'validate_%s' % name, None)
if inline is not None:
extra[name] = [inline]
return super(Form, self).validate(extra)
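# Usage sketch (illustrative names, not part of this module): FormMeta collects the
# class-level UnboundField attributes in declaration order, and Form.validate picks up
# any inline `validate_<fieldname>` method as an extra validator for that field.
#
#     from wtforms.fields import StringField
#     from wtforms.validators import ValidationError
#
#     class RegistrationForm(Form):
#         username = StringField('Username')
#         email = StringField('Email')
#
#         def validate_username(form, field):
#             if field.data and ' ' in field.data:
#                 raise ValidationError('Usernames may not contain spaces.')
#
#     form = RegistrationForm(username='alice smith', email='a@example.org')
#     form.validate()   # False: the inline validator rejects the space
#     form.errors       # {'username': ['Usernames may not contain spaces.']}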
|
|
from nose.tools import eq_, ok_
from nose.plugins.skip import SkipTest
# Skip test on PY3
from flask.ext.admin._compat import PY2, as_unicode
if not PY2:
raise SkipTest('Peewee is not Python 3 compatible')
import peewee
from wtforms import fields
from flask.ext.admin import form
from flask.ext.admin._compat import iteritems
from flask.ext.admin.contrib.peewee import ModelView
from . import setup
from datetime import datetime, time, date
class CustomModelView(ModelView):
def __init__(self, model,
name=None, category=None, endpoint=None, url=None,
**kwargs):
for k, v in iteritems(kwargs):
setattr(self, k, v)
super(CustomModelView, self).__init__(model,
name, category,
endpoint, url)
def create_models(db):
class BaseModel(peewee.Model):
class Meta:
database = db
class Model1(BaseModel):
def __init__(self, test1=None, test2=None, test3=None, test4=None,
date_field=None, timeonly_field=None,
datetime_field=None):
super(Model1, self).__init__()
self.test1 = test1
self.test2 = test2
self.test3 = test3
self.test4 = test4
self.date_field = date_field
self.timeonly_field = timeonly_field
self.datetime_field = datetime_field
test1 = peewee.CharField(max_length=20, null=True)
test2 = peewee.CharField(max_length=20, null=True)
test3 = peewee.TextField(null=True)
test4 = peewee.TextField(null=True)
date_field = peewee.DateField(null=True)
timeonly_field = peewee.TimeField(null=True)
datetime_field = peewee.DateTimeField(null=True)
def __str__(self):
# "or ''" fixes error when loading choices for relation field:
# TypeError: coercing to Unicode: need string or buffer, NoneType found
return self.test1 or ''
class Model2(BaseModel):
def __init__(self, char_field=None, int_field=None, float_field=None,
bool_field=0):
super(Model2, self).__init__()
self.char_field = char_field
self.int_field = int_field
self.float_field = float_field
self.bool_field = bool_field
char_field = peewee.CharField(max_length=20)
int_field = peewee.IntegerField(null=True)
float_field = peewee.FloatField(null=True)
bool_field = peewee.BooleanField()
# Relation
model1 = peewee.ForeignKeyField(Model1, null=True)
Model1.create_table()
Model2.create_table()
return Model1, Model2
def fill_db(Model1, Model2):
Model1('test1_val_1', 'test2_val_1').save()
Model1('test1_val_2', 'test2_val_2').save()
Model1('test1_val_3', 'test2_val_3').save()
Model1('test1_val_4', 'test2_val_4').save()
Model1(None, 'empty_obj').save()
Model2('char_field_val_1', None, None).save()
Model2('char_field_val_2', None, None).save()
Model2('char_field_val_3', 5000, 25.9).save()
Model2('char_field_val_4', 9000, 75.5).save()
Model1('date_obj1', date_field=date(2014,11,17)).save()
Model1('date_obj2', date_field=date(2013,10,16)).save()
Model1('timeonly_obj1', timeonly_field=time(11,10,9)).save()
Model1('timeonly_obj2', timeonly_field=time(10,9,8)).save()
Model1('datetime_obj1', datetime_field=datetime(2014,4,3,1,9,0)).save()
Model1('datetime_obj2', datetime_field=datetime(2013,3,2,0,8,0)).save()
def test_model():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1)
admin.add_view(view)
eq_(view.model, Model1)
eq_(view.name, 'Model1')
eq_(view.endpoint, 'model1')
eq_(view._primary_key, 'id')
ok_('test1' in view._sortable_columns)
ok_('test2' in view._sortable_columns)
ok_('test3' in view._sortable_columns)
ok_('test4' in view._sortable_columns)
ok_(view._create_form_class is not None)
ok_(view._edit_form_class is not None)
eq_(view._search_supported, False)
eq_(view._filters, None)
# Verify form
eq_(view._create_form_class.test1.field_class, fields.StringField)
eq_(view._create_form_class.test2.field_class, fields.StringField)
eq_(view._create_form_class.test3.field_class, fields.TextAreaField)
eq_(view._create_form_class.test4.field_class, fields.TextAreaField)
# Make some test clients
client = app.test_client()
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model1/new/',
data=dict(test1='test1large', test2='test2'))
eq_(rv.status_code, 302)
model = Model1.select().get()
eq_(model.test1, 'test1large')
eq_(model.test2, 'test2')
ok_(model.test3 is None or model.test3 == '')
ok_(model.test4 is None or model.test4 == '')
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
ok_('test1large' in rv.data)
url = '/admin/model1/edit/?id=%s' % model.id
rv = client.get(url)
eq_(rv.status_code, 200)
rv = client.post(url,
data=dict(test1='test1small', test2='test2large'))
eq_(rv.status_code, 302)
model = Model1.select().get()
eq_(model.test1, 'test1small')
eq_(model.test2, 'test2large')
ok_(model.test3 is None or model.test3 == '')
ok_(model.test4 is None or model.test4 == '')
url = '/admin/model1/delete/?id=%s' % model.id
rv = client.post(url)
eq_(rv.status_code, 302)
eq_(Model1.select().count(), 0)
def test_column_editable_list():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1,
column_editable_list=[
'test1', 'enum_field'])
admin.add_view(view)
fill_db(Model1, Model2)
client = app.test_client()
# Test in-line edit field rendering
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('data-role="x-editable"' in data)
# Form - Test basic in-line edit functionality
rv = client.post('/admin/model1/ajax/update/', data={
'test1-1': 'change-success-1',
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# ensure the value has changed
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('change-success-1' in data)
# Test validation error
rv = client.post('/admin/model1/ajax/update/', data={
'enum_field-1': 'problematic-input',
})
eq_(rv.status_code, 500)
# Test invalid primary key
rv = client.post('/admin/model1/ajax/update/', data={
'test1-1000': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test editing column not in column_editable_list
rv = client.post('/admin/model1/ajax/update/', data={
'test2-1': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test in-line editing for relations
view = CustomModelView(Model2,
column_editable_list=[
'model1'])
admin.add_view(view)
rv = client.post('/admin/model2/ajax/update/', data={
'model1-1': '3',
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# confirm the value has changed
rv = client.get('/admin/model2/')
data = rv.data.decode('utf-8')
ok_('test1_val_3' in data)
def test_column_filters():
app, db, admin = setup()
Model1, Model2 = create_models(db)
fill_db(Model1, Model2)
# Test string filter
view = CustomModelView(Model1, column_filters=['test1'])
admin.add_view(view)
eq_(len(view._filters), 7)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'contains'),
(3, 'not contains'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# Make some test clients
client = app.test_client()
# string - equals
rv = client.get('/admin/model1/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not equal
rv = client.get('/admin/model1/?flt0_1=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - contains
rv = client.get('/admin/model1/?flt0_2=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not contains
rv = client.get('/admin/model1/?flt0_3=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - empty
rv = client.get('/admin/model1/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' in data)
ok_('test1_val_1' not in data)
ok_('test1_val_2' not in data)
# string - not empty
rv = client.get('/admin/model1/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' not in data)
ok_('test1_val_1' in data)
ok_('test1_val_2' in data)
# string - in list
rv = client.get('/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test1_val_3' not in data)
ok_('test1_val_4' not in data)
# string - not in list
rv = client.get('/admin/model1/?flt0_6=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test1_val_3' in data)
ok_('test1_val_4' in data)
# Test int filter
view = CustomModelView(Model2, column_filters=['int_field'])
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# integer - equals
rv = client.get('/admin/model2/?flt0_0=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# integer - equals - test validation
rv = client.get('/admin/model2/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not equal
rv = client.get('/admin/model2/?flt0_1=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# integer - greater
rv = client.get('/admin/model2/?flt0_2=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# integer - smaller
rv = client.get('/admin/model2/?flt0_3=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# integer - empty
rv = client.get('/admin/model2/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# integer - not empty
rv = client.get('/admin/model2/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# integer - in list
rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# integer - in list - test validation
rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not in list
rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# Test float filter
view = CustomModelView(Model2, column_filters=['float_field'],
endpoint="_float")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# float - equals
rv = client.get('/admin/_float/?flt0_0=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# float - equals - test validation
rv = client.get('/admin/_float/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not equal
rv = client.get('/admin/_float/?flt0_1=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# float - greater
rv = client.get('/admin/_float/?flt0_2=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' in data)
# float - smaller
rv = client.get('/admin/_float/?flt0_3=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_3' in data)
ok_('char_field_val_4' not in data)
# float - empty
rv = client.get('/admin/_float/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# float - not empty
rv = client.get('/admin/_float/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# float - in list
rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' not in data)
ok_('char_field_val_2' not in data)
ok_('char_field_val_3' in data)
ok_('char_field_val_4' in data)
# float - in list - test validation
rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not in list
rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('char_field_val_1' in data)
ok_('char_field_val_2' in data)
ok_('char_field_val_3' not in data)
ok_('char_field_val_4' not in data)
# Test date, time, and datetime filters
view = CustomModelView(Model1,
column_filters=['date_field', 'datetime_field', 'timeonly_field'],
endpoint="_datetime")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Date Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'between'),
(5, 'not between'),
(6, 'empty'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
[
(7, 'equals'),
(8, 'not equal'),
(9, 'greater than'),
(10, 'smaller than'),
(11, 'between'),
(12, 'not between'),
(13, 'empty'),
])
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Timeonly Field']],
[
(14, 'equals'),
(15, 'not equal'),
(16, 'greater than'),
(17, 'smaller than'),
(18, 'between'),
(19, 'not between'),
(20, 'empty'),
])
# date - equals
rv = client.get('/admin/_datetime/?flt0_0=2014-11-17')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - not equal
rv = client.get('/admin/_datetime/?flt0_1=2014-11-17')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - greater
rv = client.get('/admin/_datetime/?flt0_2=2014-11-16')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - smaller
rv = client.get('/admin/_datetime/?flt0_3=2014-11-16')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - between
rv = client.get('/admin/_datetime/?flt0_4=2014-11-13+to+2014-11-20')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' in data)
ok_('date_obj2' not in data)
# date - not between
rv = client.get('/admin/_datetime/?flt0_5=2014-11-13+to+2014-11-20')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('date_obj1' not in data)
ok_('date_obj2' in data)
# date - empty
rv = client.get('/admin/_datetime/?flt0_6=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('date_obj1' not in data)
ok_('date_obj2' not in data)
# date - empty
rv = client.get('/admin/_datetime/?flt0_6=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('date_obj1' in data)
ok_('date_obj2' in data)
# datetime - equals
rv = client.get('/admin/_datetime/?flt0_7=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not equal
rv = client.get('/admin/_datetime/?flt0_8=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - greater
rv = client.get('/admin/_datetime/?flt0_9=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - smaller
rv = client.get('/admin/_datetime/?flt0_10=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - between
rv = client.get('/admin/_datetime/?flt0_11=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not between
rv = client.get('/admin/_datetime/?flt0_12=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - empty
rv = client.get('/admin/_datetime/?flt0_13=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' not in data)
# datetime - not empty
rv = client.get('/admin/_datetime/?flt0_13=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('datetime_obj1' in data)
ok_('datetime_obj2' in data)
# time - equals
rv = client.get('/admin/_datetime/?flt0_14=11%3A10%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - not equal
rv = client.get('/admin/_datetime/?flt0_15=11%3A10%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - greater
rv = client.get('/admin/_datetime/?flt0_16=11%3A09%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - smaller
rv = client.get('/admin/_datetime/?flt0_17=11%3A09%3A09')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - between
rv = client.get('/admin/_datetime/?flt0_18=10%3A40%3A00+to+11%3A50%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' not in data)
# time - not between
rv = client.get('/admin/_datetime/?flt0_19=10%3A40%3A00+to+11%3A50%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' in data)
# time - empty
rv = client.get('/admin/_datetime/?flt0_20=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('timeonly_obj1' not in data)
ok_('timeonly_obj2' not in data)
# time - not empty
rv = client.get('/admin/_datetime/?flt0_20=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('timeonly_obj1' in data)
ok_('timeonly_obj2' in data)
def test_default_sort():
app, db, admin = setup()
M1, _ = create_models(db)
M1('c', 1).save()
M1('b', 2).save()
M1('a', 3).save()
eq_(M1.select().count(), 3)
view = CustomModelView(M1, column_default_sort='test1')
admin.add_view(view)
_, data = view.get_list(0, None, None, None, None)
eq_(data[0].test1, 'a')
eq_(data[1].test1, 'b')
eq_(data[2].test1, 'c')
def test_extra_fields():
app, db, admin = setup()
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_extra_fields={
'extra_field': fields.StringField('Extra Field')
}
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
# Check presence and order
data = rv.data.decode('utf-8')
ok_('Extra Field' in data)
pos1 = data.find('Extra Field')
pos2 = data.find('Test1')
ok_(pos2 < pos1)
def test_custom_form_base():
app, db, admin = setup()
class TestForm(form.BaseForm):
pass
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_base_class=TestForm
)
admin.add_view(view)
ok_(hasattr(view._create_form_class, 'test1'))
create_form = view.create_form()
ok_(isinstance(create_form, TestForm))
def test_ajax_fk():
app, db, admin = setup()
class BaseModel(peewee.Model):
class Meta:
database = db
class Model1(BaseModel):
test1 = peewee.CharField(max_length=20)
test2 = peewee.CharField(max_length=20)
def __str__(self):
return self.test1
class Model2(BaseModel):
model1 = peewee.ForeignKeyField(Model1)
Model1.create_table()
Model2.create_table()
view = CustomModelView(
Model2,
url='view',
form_ajax_refs={
'model1': {
'fields': ('test1', 'test2')
}
}
)
admin.add_view(view)
ok_(u'model1' in view._form_ajax_refs)
model = Model1(test1=u'first', test2=u'')
model.save()
model2 = Model1(test1=u'foo', test2=u'bar')
model2.save()
# Check loader
loader = view._form_ajax_refs[u'model1']
mdl = loader.get_one(model.id)
eq_(mdl.test1, model.test1)
items = loader.get_list(u'fir')
eq_(len(items), 1)
eq_(items[0].id, model.id)
items = loader.get_list(u'bar')
eq_(len(items), 1)
eq_(items[0].test1, u'foo')
# Check form generation
form = view.create_form()
eq_(form.model1.__class__.__name__, u'AjaxSelectField')
with app.test_request_context('/admin/view/'):
ok_(u'value=""' not in form.model1())
form.model1.data = model
needle = u'data-json="[%s, "first"]"' % as_unicode(model.id)
ok_(needle in form.model1())
ok_(u'value="%s"' % as_unicode(model.id) in form.model1())
# Check querying
client = app.test_client()
req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo')
eq_(req.data, u'[[%s, "foo"]]' % model2.id)
# Check submitting
client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
mdl = Model2.select().first()
ok_(mdl is not None)
ok_(mdl.model1 is not None)
eq_(mdl.model1.id, model.id)
eq_(mdl.model1.test1, u'first')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/server.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import collections
import json
import logging
import os
import re
import shutil
import threading
from king_phisher import errors
from king_phisher import find
from king_phisher import geoip
from king_phisher import ipaddress
from king_phisher import sms
from king_phisher import templates
from king_phisher import utilities
from king_phisher import xor
from king_phisher.server import aaa
from king_phisher.server import pages
from king_phisher.server import rest_api
from king_phisher.server import server_rpc
from king_phisher.server import signals
from king_phisher.server import web_sockets
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
import advancedhttpserver
import jinja2
from smoke_zephyr import job
class KingPhisherRequestHandler(advancedhttpserver.RequestHandler):
logger = logging.getLogger('KingPhisher.Server.RequestHandler')
def __init__(self, *args, **kwargs):
self.logger.debug("request handler running in tid: 0x{0:x}".format(threading.current_thread().ident))
# this is for attribute documentation
self.config = None
"""A reference to the main server instance :py:attr:`.KingPhisherServer.config`."""
self.path = None
"""The resource path of the current HTTP request."""
self.rpc_session = None
super(KingPhisherRequestHandler, self).__init__(*args, **kwargs)
def on_init(self):
self.config = self.server.config
regex_prefix = '^'
if self.config.get('server.vhost_directories'):
regex_prefix += r'[\w\.\-]+\/'
for path, handler in self.handler_map.items():
if path.startswith(rest_api.REST_API_BASE):
del self.handler_map[path]
self.handler_map[regex_prefix + path] = handler
self.handler_map[regex_prefix + 'kpdd$'] = self.handle_deaddrop_visit
self.handler_map[regex_prefix + 'kp\\.js$'] = self.handle_javascript_hook
self.web_socket_handler = self.server.ws_manager.dispatch
tracking_image = self.config.get('server.tracking_image')
tracking_image = tracking_image.replace('.', '\\.')
self.handler_map[regex_prefix + tracking_image + '$'] = self.handle_email_opened
signals.safe_send('request-received', self.logger, self)
def end_headers(self, *args, **kwargs):
if self.command != 'RPC':
for header, value in self.server.headers.items():
self.send_header(header, value)
return super(KingPhisherRequestHandler, self).end_headers(*args, **kwargs)
def issue_alert(self, alert_text, campaign_id):
"""
Send an SMS alert. If no *campaign_id* is specified, all users
with registered SMS information will receive the alert; otherwise
only users subscribed to the specified campaign will.
:param str alert_text: The message to send to subscribers.
:param int campaign_id: The campaign subscribers to send the alert to.
"""
session = db_manager.Session()
campaign = db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)
if '{campaign_name}' in alert_text:
alert_text = alert_text.format(campaign_name=campaign.name)
for subscription in campaign.alert_subscriptions:
user = subscription.user
carrier = user.phone_carrier
number = user.phone_number
if carrier is None or number is None:
self.server.logger.warning("skipping alert because user {0} has missing information".format(user.id))
continue
self.server.logger.debug("sending alert SMS message to {0} ({1})".format(number, carrier))
sms.send_sms(alert_text, number, carrier)
session.close()
def adjust_path(self):
"""Adjust the :py:attr:`~.KingPhisherRequestHandler.path` attribute based on multiple factors."""
self.request_path = self.path.split('?', 1)[0]
if not self.config.get('server.vhost_directories'):
return
if not self.vhost:
raise errors.KingPhisherAbortRequestError()
if self.vhost in ('localhost', '127.0.0.1') and self.client_address[0] != '127.0.0.1':
raise errors.KingPhisherAbortRequestError()
self.path = '/' + self.vhost + self.path
def _do_http_method(self, *args, **kwargs):
if self.command != 'RPC':
self.adjust_path()
http_method_handler = getattr(super(KingPhisherRequestHandler, self), 'do_' + self.command)
self.server.throttle_semaphore.acquire()
try:
http_method_handler(*args, **kwargs)
except errors.KingPhisherAbortRequestError as error:
if not error.response_sent:
self.respond_not_found()
finally:
self.server.throttle_semaphore.release()
do_GET = _do_http_method
do_HEAD = _do_http_method
do_POST = _do_http_method
do_RPC = _do_http_method
def get_query_creds(self, check_query=True):
"""
Get credentials that have been submitted in the request. For credentials
to be returned, at least a username must have been specified. The
returned username will be None or a non-empty string. The returned
password will be None if the parameter was not found, or a string which
may be empty. This function checks the query data for credentials first
if *check_query* is True, and then checks the contents of an
Authorization header.
:param bool check_query: Whether or not to check the query data in addition to an Authorization header.
:return: The submitted credentials.
:rtype: tuple
"""
username = None
password = ''
for pname in ('username', 'user', 'u'):
username = (self.get_query(pname) or self.get_query(pname.title()) or self.get_query(pname.upper()))
if username:
break
if username:
for pname in ('password', 'pass', 'p'):
password = (self.get_query(pname) or self.get_query(pname.title()) or self.get_query(pname.upper()))
if password:
break
return username, (password or '')
basic_auth = self.headers.get('authorization')
if basic_auth is None:
return None, ''
basic_auth = basic_auth.split()
if len(basic_auth) == 2 and basic_auth[0] == 'Basic':
try:
basic_auth = base64.b64decode(basic_auth[1])
except TypeError:
return None, ''
basic_auth = basic_auth.decode('utf-8')
basic_auth = basic_auth.split(':', 1)
if len(basic_auth) == 2 and len(basic_auth[0]):
username, password = basic_auth
return username, password
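# Example (illustrative values only): a request carrying the header
#   Authorization: Basic YWxpY2U6d29uZGVybGFuZA==
# decodes to 'alice:wonderland', so when no username is present in the query string
# this method returns ('alice', 'wonderland').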
def get_template_vars_client(self):
"""
Build a dictionary of variables for a client with an associated
campaign.
:return: The client specific template variables.
:rtype: dict
"""
client_vars = {
'address': self.get_client_ip()
}
if not self.message_id:
return client_vars
credential_count = 0
expired_campaign = True
visit_count = 0
result = None
session = db_manager.Session()
if self.message_id == self.config.get('server.secret_id'):
client_vars['company_name'] = 'Wonderland Inc.'
client_vars['company'] = {'name': 'Wonderland Inc.'}
result = ('aliddle@wonderland.com', 'Alice', 'Liddle', 0)
elif self.message_id:
message = db_manager.get_row_by_id(session, db_models.Message, self.message_id)
if message:
campaign = message.campaign
client_vars['campaign'] = {
'id': str(campaign.id),
'name': campaign.name,
'created': campaign.created,
'expiration': campaign.expiration,
'has_expired': campaign.has_expired,
'message_count': session.query(db_models.Message).filter_by(campaign_id=campaign.id).count(),
'visit_count': session.query(db_models.Visit).filter_by(campaign_id=campaign.id).count(),
'credential_count': session.query(db_models.Credential).filter_by(campaign_id=campaign.id).count(),
}
if message.campaign.company:
client_vars['company_name'] = message.campaign.company.name
client_vars['company'] = {
'name': campaign.company.name,
'url_email': campaign.company.url_email,
'url_main': campaign.company.url_main,
'url_remote_access': campaign.company.url_remote_access
}
result = (message.target_email, message.first_name, message.last_name, message.trained)
query = session.query(db_models.Credential)
query = query.filter_by(message_id=self.message_id)
credential_count = query.count()
expired_campaign = message.campaign.has_expired
if not result:
session.close()
return client_vars
client_vars['email_address'] = result[0]
client_vars['first_name'] = result[1]
client_vars['last_name'] = result[2]
client_vars['is_trained'] = result[3]
client_vars['message_id'] = self.message_id
if self.visit_id:
visit = db_manager.get_row_by_id(session, db_models.Visit, self.visit_id)
client_vars['visit_id'] = visit.id
visit_count = visit.visit_count
# increment some counters preemptively
if not expired_campaign and self.get_query_creds()[0] is not None:
credential_count += 1
client_vars['credential_count'] = credential_count
client_vars['visit_count'] = visit_count + (0 if expired_campaign else 1)
session.close()
return client_vars
def check_authorization(self):
# don't require authentication for non-RPC requests
cmd = self.command
if cmd == 'GET':
# check if the GET request is to open a web socket
if 'upgrade' not in self.headers:
return True
elif cmd != 'RPC':
return True
if not ipaddress.ip_address(self.client_address[0]).is_loopback:
return False
# the only two RPC methods that do not require authentication
if self.path in ('/login', '/version'):
return True
self.rpc_session = self.server.session_manager.get(self.rpc_session_id)
if not isinstance(self.rpc_session, aaa.AuthenticatedSession):
return False
return True
@property
def rpc_session_id(self):
return self.headers.get(server_rpc.RPC_AUTH_HEADER, None)
@property
def campaign_id(self):
"""
The campaign id that is associated with the current request's
visitor. This is retrieved by looking up the
:py:attr:`~.KingPhisherRequestHandler.message_id` value in the
database. If no campaign is associated, this value is None.
"""
if hasattr(self, '_campaign_id'):
return self._campaign_id
self._campaign_id = None
if self.message_id and self.message_id != self.config.get('server.secret_id'):
session = db_manager.Session()
message = db_manager.get_row_by_id(session, db_models.Message, self.message_id)
if message:
self._campaign_id = message.campaign_id
session.close()
return self._campaign_id
@property
def message_id(self):
"""
The message id that is associated with the current request's
visitor. This is retrieved by looking at an 'id' parameter in the
query and then by checking the
:py:attr:`~.KingPhisherRequestHandler.visit_id` value in the
database. If no message id is associated, this value is None. The
resulting value will be either a confirmed valid value, or the value
of the configuration's server.secret_id for testing purposes.
"""
if hasattr(self, '_message_id'):
return self._message_id
self._message_id = None
msg_id = self.get_query('id')
if msg_id == self.config.get('server.secret_id'):
self._message_id = msg_id
return self._message_id
session = db_manager.Session()
if msg_id and db_manager.get_row_by_id(session, db_models.Message, msg_id):
self._message_id = msg_id
elif self.visit_id:
visit = db_manager.get_row_by_id(session, db_models.Visit, self.visit_id)
self._message_id = visit.message_id
session.close()
return self._message_id
@property
def visit_id(self):
"""
The visit id that is associated with the current request's visitor. This
is retrieved by looking for the King Phisher cookie. If no cookie is
set, this value is None.
"""
if hasattr(self, '_visit_id'):
return self._visit_id
self._visit_id = None
kp_cookie_name = self.config.get('server.cookie_name')
if kp_cookie_name in self.cookies:
value = self.cookies[kp_cookie_name].value
session = db_manager.Session()
if db_manager.get_row_by_id(session, db_models.Visit, value):
self._visit_id = value
session.close()
return self._visit_id
@property
def vhost(self):
"""The value of the Host HTTP header."""
return self.headers.get('host', '').split(':')[0]
def get_client_ip(self):
"""
Intelligently get the IP address of the HTTP client, optionally
accounting for proxies that may be in use.
:return: The client's IP address.
:rtype: str
"""
address = self.client_address[0]
header_name = self.config.get_if_exists('server.client_ip_header') # new style
header_name = header_name or self.config.get_if_exists('server.client_ip_cookie') # old style
if not header_name:
return address
header_value = self.headers.get(header_name, '')
if not header_value:
return address
header_value = header_value.split(',')[0]
header_value = header_value.strip()
if header_value.startswith('['):
# header_value looks like an IPv6 address
header_value = header_value.split(']:', 1)[0]
else:
# treat header_value as an IPv4 address
header_value = header_value.split(':', 1)[0]
if ipaddress.is_valid(header_value):
address = header_value
return address
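# A brief illustration (hedged; the header name below is only an example of what an
# operator might configure, not a value shipped with the server): with
# 'server.client_ip_header: X-Forwarded-For' set and a request carrying
# 'X-Forwarded-For: 203.0.113.7, 10.0.0.1', get_client_ip() returns '203.0.113.7'
# after taking the first comma-separated entry and stripping any ':port' suffix.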
def send_response(self, code, message=None):
super(KingPhisherRequestHandler, self).send_response(code, message)
signals.safe_send('response-sent', self.logger, self, code=code, message=message)
def respond_file(self, file_path, attachment=False, query=None):
self._respond_file_check_id()
file_path = os.path.abspath(file_path)
mime_type = self.guess_mime_type(file_path)
if attachment or (mime_type != 'text/html' and mime_type != 'text/plain'):
self._respond_file_raw(file_path, attachment)
return
try:
template = self.server.template_env.get_template(os.path.relpath(file_path, self.config.get('server.web_root')))
except jinja2.exceptions.TemplateSyntaxError as error:
self.server.logger.error("jinja2 syntax error in template {0}:{1} {2}".format(error.filename, error.lineno, error.message))
raise errors.KingPhisherAbortRequestError()
except jinja2.exceptions.TemplateError:
raise errors.KingPhisherAbortRequestError()
except UnicodeDecodeError as error:
self.server.logger.error("unicode error {0} in template file: {1}:{2}-{3}".format(error.reason, file_path, error.start, error.end))
raise errors.KingPhisherAbortRequestError()
template_data = b''
headers = []
template_vars = {
'client': self.get_template_vars_client(),
'request': {
'command': self.command,
'cookies': dict((c[0], c[1].value) for c in self.cookies.items()),
'parameters': dict(zip(self.query_data.keys(), map(self.get_query, self.query_data.keys()))),
'user_agent': self.headers.get('user-agent')
},
'server': {
'hostname': self.vhost,
'address': self.connection.getsockname()[0]
}
}
template_vars.update(self.server.template_env.standard_variables)
try:
template_module = template.make_module(template_vars)
except (TypeError, jinja2.TemplateError) as error:
self.server.logger.error("jinja2 template {0} render failed: {1} {2}".format(template.filename, error.__class__.__name__, error.message))
raise errors.KingPhisherAbortRequestError()
require_basic_auth = getattr(template_module, 'require_basic_auth', False)
require_basic_auth &= not all(self.get_query_creds(check_query=False))
require_basic_auth &= self.message_id != self.config.get('server.secret_id')
if require_basic_auth:
mime_type = 'text/html'
self.send_response(401)
headers.append(('WWW-Authenticate', "Basic realm=\"{0}\"".format(getattr(template_module, 'basic_auth_realm', 'Authentication Required'))))
else:
try:
template_data = template.render(template_vars)
except (TypeError, jinja2.TemplateError) as error:
self.server.logger.error("jinja2 template {0} render failed: {1} {2}".format(template.filename, error.__class__.__name__, error.message))
raise errors.KingPhisherAbortRequestError()
self.send_response(200)
headers.append(('Last-Modified', self.date_time_string(os.stat(template.filename).st_mtime)))
template_data = template_data.encode('utf-8', 'ignore')
if mime_type.startswith('text'):
mime_type += '; charset=utf-8'
self.send_header('Content-Type', mime_type)
self.send_header('Content-Length', len(template_data))
for header in headers:
self.send_header(*header)
try:
self.handle_page_visit()
except Exception as error:
self.server.logger.error('handle_page_visit raised error: {0}.{1}'.format(error.__class__.__module__, error.__class__.__name__), exc_info=True)
self.end_headers()
self.wfile.write(template_data)
return
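# Illustrative note (an assumption about typical usage, not shipped content): a served
# template can reference the variables assembled above, e.g. a page containing
# "Hello {{ client.first_name }}" or "{{ request.user_agent }}" is rendered with the
# per-visitor values built in respond_file().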
def _respond_file_raw(self, file_path, attachment):
try:
file_obj = open(file_path, 'rb')
except IOError:
raise errors.KingPhisherAbortRequestError()
fs = os.fstat(file_obj.fileno())
self.send_response(200)
self.send_header('Content-Type', self.guess_mime_type(file_path))
self.send_header('Content-Length', fs[6])
if attachment:
file_name = os.path.basename(file_path)
self.send_header('Content-Disposition', 'attachment; filename=' + file_name)
self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
self.end_headers()
shutil.copyfileobj(file_obj, self.wfile)
file_obj.close()
return
def _respond_file_check_id(self):
if re.match(r'^/\.well-known/acme-challenge/[a-zA-Z0-9\-_]{40,50}$', self.request_path):
self.server.logger.info('received request for .well-known/acme-challenge')
return
if not self.config.get('server.require_id'):
return
if self.message_id == self.config.get('server.secret_id'):
return
# a valid campaign_id requires a valid message_id
if not self.campaign_id:
self.server.logger.warning('denying request due to lack of a valid id')
raise errors.KingPhisherAbortRequestError()
session = db_manager.Session()
campaign = db_manager.get_row_by_id(session, db_models.Campaign, self.campaign_id)
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=self.campaign_id, hostname=self.vhost)
if query.count() == 0:
self.server.logger.warning('denying request with not found due to invalid hostname')
session.close()
raise errors.KingPhisherAbortRequestError()
if campaign.has_expired:
self.server.logger.warning('denying request because the campaign has expired')
session.close()
raise errors.KingPhisherAbortRequestError()
if campaign.reject_after_credentials and self.visit_id is None:
query = session.query(db_models.Credential)
query = query.filter_by(message_id=self.message_id)
if query.count():
self.server.logger.warning('denying request because credentials were already harvested')
session.close()
raise errors.KingPhisherAbortRequestError()
session.close()
return
def respond_not_found(self):
self.send_response(404, 'Not Found')
self.send_header('Content-Type', 'text/html')
page_404 = find.data_file('error_404.html')
if page_404:
with open(page_404, 'rb') as file_h:
message = file_h.read()
else:
message = b'Resource Not Found\n'
self.send_header('Content-Length', len(message))
self.end_headers()
self.wfile.write(message)
return
def respond_redirect(self, location='/'):
location = location.lstrip('/')
if self.config.get('server.vhost_directories') and location.startswith(self.vhost):
location = location[len(self.vhost):]
if not location.startswith('/'):
location = '/' + location
super(KingPhisherRequestHandler, self).respond_redirect(location)
def handle_deaddrop_visit(self, query):
self.send_response(200)
self.end_headers()
data = self.get_query('token')
if not data:
self.logger.warning('dead drop request received with no \'token\' parameter')
return
try:
data = base64.b64decode(data)
except (binascii.Error, TypeError):
self.logger.error('dead drop request received with invalid \'token\' data')
return
data = xor.xor_decode(data)
try:
data = json.loads(data)
except ValueError:
self.logger.error('dead drop request received with invalid \'token\' data')
return
session = db_manager.Session()
deployment = db_manager.get_row_by_id(session, db_models.DeaddropDeployment, data.get('deaddrop_id'))
if not deployment:
session.close()
self.logger.error('dead drop request received for an unknown campaign')
return
if deployment.campaign.has_expired:
session.close()
self.logger.info('dead drop request received for an expired campaign')
return
local_username = data.get('local_username')
local_hostname = data.get('local_hostname')
if local_username is None or local_hostname is None:
session.close()
self.logger.error('dead drop request received with missing data')
return
local_ip_addresses = data.get('local_ip_addresses')
if isinstance(local_ip_addresses, (list, tuple)):
local_ip_addresses = ' '.join(local_ip_addresses)
query = session.query(db_models.DeaddropConnection)
query = query.filter_by(deployment_id=deployment.id, local_username=local_username, local_hostname=local_hostname)
connection = query.first()
if connection:
connection.visit_count += 1
new_connection = False
else:
connection = db_models.DeaddropConnection(campaign_id=deployment.campaign_id, deployment_id=deployment.id)
connection.visitor_ip = self.get_client_ip()
connection.local_username = local_username
connection.local_hostname = local_hostname
connection.local_ip_addresses = local_ip_addresses
session.add(connection)
new_connection = True
session.commit()
query = session.query(db_models.DeaddropConnection)
query = query.filter_by(campaign_id=deployment.campaign_id)
visit_count = query.count()
session.close()
if new_connection and visit_count > 0 and ((visit_count in [1, 3, 5]) or ((visit_count % 10) == 0)):
alert_text = "{0} deaddrop connections reached for campaign: {{campaign_name}}".format(visit_count)
self.server.job_manager.job_run(self.issue_alert, (alert_text, deployment.campaign_id))
return
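# Sketch of the expected token layout (inferred from the parsing above): the request's
# 'token' parameter is base64 of an XOR-encoded JSON object with keys such as
# 'deaddrop_id', 'local_username', 'local_hostname' and 'local_ip_addresses'.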
def handle_email_opened(self, query):
# image size: 43 Bytes
img_data = '47494638396101000100800100000000ffffff21f90401000001002c00000000'
img_data += '010001000002024c01003b'
img_data = binascii.a2b_hex(img_data)
self.send_response(200)
self.send_header('Content-Type', 'image/gif')
self.send_header('Content-Length', str(len(img_data)))
self.end_headers()
self.wfile.write(img_data)
msg_id = self.get_query('id')
if not msg_id:
return
session = db_manager.Session()
query = session.query(db_models.Message)
query = query.filter_by(id=msg_id, opened=None)
message = query.first()
if message and not message.campaign.has_expired:
message.opened = db_models.current_timestamp()
message.opener_ip = self.get_client_ip()
message.opener_user_agent = self.headers.get('user-agent', None)
session.commit()
session.close()
signals.safe_send('email-opened', self.logger, self)
def handle_javascript_hook(self, query):
kp_hook_js = find.data_file('javascript_hook.js')
if not kp_hook_js:
self.respond_not_found()
return
with open(kp_hook_js, 'r') as kp_hook_js:
javascript = kp_hook_js.read()
if self.config.has_option('beef.hook_url'):
javascript += "\nloadScript('{0}');\n\n".format(self.config.get('beef.hook_url'))
self.send_response(200)
self.send_header('Content-Type', 'text/javascript')
self.send_header('Pragma', 'no-cache')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Expires', '0')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET')
self.send_header('Content-Length', len(javascript))
self.end_headers()
if not isinstance(javascript, bytes):
javascript = javascript.encode('utf-8')
self.wfile.write(javascript)
return
def handle_page_visit(self):
if not self.message_id:
return
if self.message_id == self.config.get('server.secret_id'):
return
if not self.campaign_id:
return
client_ip = self.get_client_ip()
session = db_manager.Session()
campaign = db_manager.get_row_by_id(session, db_models.Campaign, self.campaign_id)
if campaign.has_expired:
self.logger.info("ignoring page visit for expired campaign id: {0} from IP address: {1}".format(self.campaign_id, client_ip))
session.close()
return
self.logger.info("handling a page visit for campaign id: {0} from IP address: {1}".format(self.campaign_id, client_ip))
message = db_manager.get_row_by_id(session, db_models.Message, self.message_id)
if message.opened is None and self.config.get_if_exists('server.set_message_opened_on_visit', True):
message.opened = db_models.current_timestamp()
message.opener_ip = self.get_client_ip()
message.opener_user_agent = self.headers.get('user-agent', None)
set_new_visit = True
visit_id = None
if self.visit_id:
visit_id = self.visit_id
set_new_visit = False
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=self.campaign_id, hostname=self.vhost, page=self.request_path[1:])
if query.count():
visit = db_manager.get_row_by_id(session, db_models.Visit, self.visit_id)
if visit.message_id == self.message_id:
visit.visit_count += 1
visit.last_visit = db_models.current_timestamp()
else:
set_new_visit = True
visit_id = None
if visit_id is None:
visit_id = utilities.make_visit_uid()
if set_new_visit:
kp_cookie_name = self.config.get('server.cookie_name')
cookie = "{0}={1}; Path=/; HttpOnly".format(kp_cookie_name, visit_id)
self.send_header('Set-Cookie', cookie)
visit = db_models.Visit(id=visit_id, campaign_id=self.campaign_id, message_id=self.message_id)
visit.visitor_ip = client_ip
visit.visitor_details = self.headers.get('user-agent', '')
session.add(visit)
visit_count = len(campaign.visits)
if visit_count > 0 and ((visit_count in (1, 10, 25)) or ((visit_count % 50) == 0)):
alert_text = "{0} visits reached for campaign: {{campaign_name}}".format(visit_count)
self.server.job_manager.job_run(self.issue_alert, (alert_text, self.campaign_id))
signals.safe_send('visit-received', self.logger, self)
if visit_id is None:
self.logger.error('the visit id has not been set')
raise RuntimeError('the visit id has not been set')
self._handle_page_visit_creds(session, visit_id)
trained = self.get_query('trained')
if isinstance(trained, str) and trained.lower() in ['1', 'true', 'yes']:
message.trained = True
session.commit()
session.close()
def _handle_page_visit_creds(self, session, visit_id):
username, password = self.get_query_creds()
if username is None:
return
cred_count = 0
query = session.query(db_models.Credential)
query = query.filter_by(message_id=self.message_id, username=username, password=password)
if query.count() == 0:
cred = db_models.Credential(campaign_id=self.campaign_id, message_id=self.message_id, visit_id=visit_id)
cred.username = username
cred.password = password
session.add(cred)
campaign = db_manager.get_row_by_id(session, db_models.Campaign, self.campaign_id)
cred_count = len(campaign.credentials)
if cred_count > 0 and ((cred_count in [1, 5, 10]) or ((cred_count % 25) == 0)):
alert_text = "{0} credentials submitted for campaign: {{campaign_name}}".format(cred_count)
self.server.job_manager.job_run(self.issue_alert, (alert_text, self.campaign_id))
signals.safe_send('credentials-received', self.logger, self, username=username, password=password)
class KingPhisherServer(advancedhttpserver.AdvancedHTTPServer):
"""
The main HTTP and RPC server for King Phisher.
"""
def __init__(self, config, plugin_manager, handler_klass, *args, **kwargs):
"""
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
"""
# additional mime types to be treated as html because they're probably cloned pages
handler_klass.extensions_map.update({
'': 'text/html',
'.asp': 'text/html',
'.aspx': 'text/html',
'.cfm': 'text/html',
'.cgi': 'text/html',
'.do': 'text/html',
'.jsp': 'text/html',
'.nsf': 'text/html',
'.php': 'text/html',
'.srf': 'text/html'
})
super(KingPhisherServer, self).__init__(handler_klass, *args, **kwargs)
self.logger = logging.getLogger('KingPhisher.Server')
self.config = config
"""A :py:class:`~smoke_zephyr.configuration.Configuration` instance used as the main King Phisher server configuration."""
self.headers = collections.OrderedDict()
"""A :py:class:`~collections.OrderedDict` containing additional headers specified from the server configuration to include in responses."""
self.plugin_manager = plugin_manager
self.serve_files = True
self.serve_files_root = config.get('server.web_root')
self.serve_files_list_directories = False
self.serve_robots_txt = True
self.database_engine = db_manager.init_database(config.get('server.database'), extra_init=True)
self.throttle_semaphore = threading.Semaphore()
self.session_manager = aaa.AuthenticatedSessionManager(
timeout=config.get_if_exists('server.authentication.cache_timeout', '30m')
)
self.forked_authenticator = aaa.ForkedAuthenticator(
cache_timeout=config.get_if_exists('server.authentication.cache_timeout', '10m'),
required_group=config.get_if_exists('server.authentication.group'),
pam_service=config.get_if_exists('server.authentication.pam_service', 'sshd')
)
self.job_manager = job.JobManager(logger_name='KingPhisher.Server.JobManager')
"""A :py:class:`~smoke_zephyr.job.JobManager` instance for scheduling tasks."""
self.job_manager.start()
loader = jinja2.FileSystemLoader(config.get('server.web_root'))
global_vars = {}
if config.has_section('server.page_variables'):
global_vars = config.get('server.page_variables')
global_vars['embed_youtube_video'] = pages.embed_youtube_video
global_vars['make_csrf_page'] = pages.make_csrf_page
global_vars['make_redirect_page'] = pages.make_redirect_page
self.template_env = templates.TemplateEnvironmentBase(loader=loader, global_vars=global_vars)
self.ws_manager = web_sockets.WebSocketsManager(config, self.job_manager)
for http_server in self.sub_servers:
http_server.config = config
http_server.plugin_manager = plugin_manager
http_server.throttle_semaphore = self.throttle_semaphore
http_server.session_manager = self.session_manager
http_server.forked_authenticator = self.forked_authenticator
http_server.job_manager = self.job_manager
http_server.template_env = self.template_env
http_server.kp_shutdown = self.shutdown
http_server.ws_manager = self.ws_manager
http_server.headers = self.headers
if not config.has_option('server.secret_id'):
config.set('server.secret_id', rest_api.generate_token())
if not config.get_if_exists('server.rest_api.token'):
config.set('server.rest_api.token', rest_api.generate_token())
if config.get('server.rest_api.enabled'):
self.logger.info('rest api initialized with token: ' + config.get('server.rest_api.token'))
self.__geoip_db = geoip.init_database(config.get('server.geoip.database'))
self.__is_shutdown = threading.Event()
self.__is_shutdown.clear()
self.__shutdown_lock = threading.Lock()
plugin_manager.server = self
headers = self.config.get_if_exists('server.headers', [])
for header in headers:
if ': ' not in header:
self.logger.warning("header '{0}' is invalid and will not be included".format(header))
continue
header, value = header.split(': ', 1)
header = header.strip()
self.headers[header] = value
self.logger.info("including {0} custom http headers".format(len(self.headers)))
def shutdown(self, *args, **kwargs):
"""
Request that the server perform any cleanup necessary and then shut
down. This will wait for the server to stop before it returns.
"""
with self.__shutdown_lock:
if self.__is_shutdown.is_set():
return
self.logger.warning('processing shutdown request')
super(KingPhisherServer, self).shutdown(*args, **kwargs)
self.ws_manager.stop()
self.job_manager.stop()
self.session_manager.stop()
self.forked_authenticator.stop()
self.logger.debug('stopped the forked authenticator process')
self.__geoip_db.close()
self.__is_shutdown.set()
|
|
"""
This is an auxiliary script that is used to compute valid PLL values to set
the CPU frequency to a given value. The algorithm here appears as C code
for the machine.freq() function.
"""
from __future__ import print_function
import re
class MCU:
def __init__(
self, range_sysclk, range_m, range_n, range_p, range_q, range_vco_in, range_vco_out
):
self.range_sysclk = range_sysclk
self.range_m = range_m
self.range_n = range_n
self.range_p = range_p
self.range_q = range_q
self.range_vco_in = range_vco_in
self.range_vco_out = range_vco_out
mcu_default = MCU(
range_sysclk=range(2, 216 + 1, 2),
range_m=range(2, 63 + 1),
range_n=range(192, 432 + 1),
range_p=range(2, 8 + 1, 2),
range_q=range(2, 15 + 1),
range_vco_in=range(1, 2 + 1),
range_vco_out=range(192, 432 + 1),
)
mcu_h7 = MCU(
range_sysclk=range(2, 400 + 1, 2), # above 400MHz currently unsupported
range_m=range(1, 63 + 1),
range_n=range(4, 512 + 1),
range_p=range(2, 128 + 1, 2),
range_q=range(1, 128 + 1),
range_vco_in=range(1, 16 + 1),
range_vco_out=range(150, 960 + 1), # 150-420=medium, 192-960=wide
)
def close_int(x):
return abs(x - round(x)) < 0.01
# original version that requires N/M to be an integer (for simplicity)
def compute_pll(hse, sys):
for P in (2, 4, 6, 8): # allowed values of P
Q = sys * P / 48
NbyM = sys * P / hse
# N/M and Q must be integers
if not (close_int(NbyM) and close_int(Q)):
continue
# VCO_OUT must be between 192MHz and 432MHz
if not (192 <= hse * NbyM <= 432):
continue
# compute M
M = int(192 // NbyM)
while hse > 2 * M or NbyM * M < 192:
M += 1
# VCO_IN must be between 1MHz and 2MHz (2MHz recommended)
if not (M <= hse):
continue
# compute N
N = NbyM * M
# N and Q are restricted
if not (192 <= N <= 432 and 2 <= Q <= 15):
continue
# found valid values
assert NbyM == N // M
return (M, N, P, Q)
# no valid values found
return None
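# Worked example (a quick sketch, assuming an 8MHz HSE and a 168MHz target):
# compute_pll(8, 168) yields (M, N, P, Q) == (5, 210, 2, 7), giving
#   VCO_IN  = 8/5     = 1.6 MHz  (within 1-2MHz)
#   VCO_OUT = 8*210/5 = 336 MHz  (within 192-432MHz)
#   SYSCLK  = 336/2   = 168 MHz
#   PLL48   = 336/7   = 48 MHz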
# improved version that doesn't require N/M to be an integer
def compute_pll2(hse, sys, relax_pll48):
# Loop over the allowed values of P, looking for a valid PLL configuration
# that gives the desired "sys" frequency.
fallback = None
for P in mcu.range_p:
# VCO_OUT must be between 192MHz and 432MHz
if not sys * P in mcu.range_vco_out:
continue
NbyM = float(sys * P) / hse # float for Python 2
# scan M
M_min = mcu.range_n[0] // int(round(NbyM)) # starting value
while mcu.range_vco_in[-1] * M_min < hse:
M_min += 1
# VCO_IN must be >=1MHz, but higher is better for stability so start high (low M)
for M in range(M_min, hse + 1):
# compute N
N = NbyM * M
# N must be an integer
if not close_int(N):
continue
N = round(N)
# N is restricted
if N not in mcu.range_n:
continue
Q = float(sys * P) / 48 # float for Python 2
# Q must be an integer in a set range
if close_int(Q) and round(Q) in mcu.range_q:
# found valid values
return (M, N, P, Q)
# Re-try Q to get at most 48MHz
Q = (sys * P + 47) // 48
if Q not in mcu.range_q:
continue
if fallback is None:
# the values don't give 48MHz on PLL48 but are otherwise OK
fallback = M, N, P, Q
if relax_pll48:
# might have found values which don't give 48MHz on PLL48
return fallback
else:
# no valid values found which give 48MHz on PLL48
return None
def compute_derived(hse, pll):
hse = float(hse) # float for Python 2
M, N, P, Q = pll
vco_in = hse / M
vco_out = hse * N / M
pllck = hse / M * N / P
pll48ck = hse / M * N / Q
return (vco_in, vco_out, pllck, pll48ck)
def verify_pll(hse, pll):
M, N, P, Q = pll
vco_in, vco_out, pllck, pll48ck = compute_derived(hse, pll)
# verify ints
assert close_int(M)
assert close_int(N)
assert close_int(P)
assert close_int(Q)
# verify range
assert M in mcu.range_m
assert N in mcu.range_n
assert P in mcu.range_p
assert Q in mcu.range_q
assert mcu.range_vco_in[0] <= vco_in <= mcu.range_vco_in[-1]
assert mcu.range_vco_out[0] <= vco_out <= mcu.range_vco_out[-1]
def compute_pll_table(source_clk, relax_pll48):
valid_plls = []
for sysclk in mcu.range_sysclk:
pll = compute_pll2(source_clk, sysclk, relax_pll48)
if pll is not None:
verify_pll(source_clk, pll)
valid_plls.append((sysclk, pll))
return valid_plls
def generate_c_table(hse, valid_plls):
valid_plls.sort()
if (
mcu.range_sysclk[-1] <= 0xFF
and mcu.range_m[-1] <= 0x3F
and mcu.range_p[-1] // 2 - 1 <= 0x3
):
typedef = "uint16_t"
sys_mask = 0xFF
m_shift = 10
m_mask = 0x3F
p_shift = 8
p_mask = 0x3
else:
typedef = "uint32_t"
sys_mask = 0xFFFF
m_shift = 24
m_mask = 0xFF
p_shift = 16
p_mask = 0xFF
print("#define PLL_FREQ_TABLE_SYS(pll) ((pll) & %d)" % (sys_mask,))
print("#define PLL_FREQ_TABLE_M(pll) (((pll) >> %d) & %d)" % (m_shift, m_mask))
print("#define PLL_FREQ_TABLE_P(pll) (((((pll) >> %d) & %d) + 1) * 2)" % (p_shift, p_mask))
print("typedef %s pll_freq_table_t;" % (typedef,))
print("// (M, P/2-1, SYS) values for %u MHz source" % hse)
print("static const pll_freq_table_t pll_freq_table[%u] = {" % (len(valid_plls),))
for sys, (M, N, P, Q) in valid_plls:
print(" (%u << %u) | (%u << %u) | %u," % (M, m_shift, P // 2 - 1, p_shift, sys), end="")
if M >= 2:
vco_in, vco_out, pllck, pll48ck = compute_derived(hse, (M, N, P, Q))
print(
" // M=%u N=%u P=%u Q=%u vco_in=%.2f vco_out=%.2f pll48=%.2f"
% (M, N, P, Q, vco_in, vco_out, pll48ck),
end="",
)
print()
print("};")
def print_table(hse, valid_plls):
print("HSE =", hse, "MHz")
print("sys : M N P Q : VCO_IN VCO_OUT PLLCK PLL48CK")
out_format = "%3u : %2u %.1f %.2f %.2f : %5.2f %6.2f %6.2f %6.2f"
for sys, pll in valid_plls:
print(out_format % ((sys,) + pll + compute_derived(hse, pll)))
print("found %u valid configurations" % len(valid_plls))
def search_header_for_hsx_values(filename, vals):
regex_inc = re.compile(r'#include "(boards/[A-Za-z0-9_./]+)"')
regex_def = re.compile(r"#define +(HSE_VALUE|HSI_VALUE) +\((\(uint32_t\))?([0-9]+)\)")
with open(filename) as f:
for line in f:
line = line.strip()
m = regex_inc.match(line)
if m:
# Search included file
search_header_for_hsx_values(m.group(1), vals)
continue
m = regex_def.match(line)
if m:
# Found HSE_VALUE or HSI_VALUE
val = int(m.group(3)) // 1000000
if m.group(1) == "HSE_VALUE":
vals[0] = val
else:
vals[1] = val
return vals
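# Example of a matching line (hedged; an illustrative header snippet, not a bundled file):
#   #define HSE_VALUE ((uint32_t)8000000)
# sets vals[0] to 8 (MHz), while an '#include "boards/..."' line is followed recursively.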
def main():
global mcu
global out_format
# parse input args
import sys
argv = sys.argv[1:]
c_table = False
mcu_series = "f4"
hse = None
hsi = None
while True:
if argv[0] == "-c":
c_table = True
argv.pop(0)
elif argv[0] == "-m":
argv.pop(0)
mcu_series = argv.pop(0).lower()
else:
break
if len(argv) != 1:
print("usage: pllvalues.py [-c] [-m <mcu_series>] <hse in MHz>")
sys.exit(1)
if argv[0].startswith("file:"):
# extract HSE_VALUE, and optionally HSI_VALUE, from header file
hse, hsi = search_header_for_hsx_values(argv[0][5:], [None, None])
if hse is None:
raise ValueError("%s does not contain a definition of HSE_VALUE" % argv[0])
if hsi is not None and hsi > 16:
# Currently, a HSI value greater than 16MHz is not supported
hsi = None
else:
# HSE given directly as an integer
hse = int(argv[0])
# Select MCU parameters
if mcu_series == "h7":
mcu = mcu_h7
else:
mcu = mcu_default
# Relax constraint on PLLQ being 48MHz on F7 and H7 MCUs, which have separate PLLs for 48MHz
relax_pll48 = mcu_series in ("f7", "h7")
hse_valid_plls = compute_pll_table(hse, relax_pll48)
if hsi is not None:
hsi_valid_plls = compute_pll_table(hsi, relax_pll48)
if c_table:
print("#if MICROPY_HW_CLK_USE_HSI")
if hsi is not None:
hsi_valid_plls.append((hsi, (0, 0, 2, 0)))
generate_c_table(hsi, hsi_valid_plls)
print("#else")
if hsi is not None:
hse_valid_plls.append((hsi, (0, 0, 2, 0)))
hse_valid_plls.append((hse, (1, 0, 2, 0)))
generate_c_table(hse, hse_valid_plls)
print("#endif")
else:
print_table(hse, hse_valid_plls)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from datetime import datetime
from uuid import UUID
from sentry.coreapi import (
APIError, APIUnauthorized, Auth, ClientApiHelper, InvalidFingerprint,
InvalidTimestamp, get_interface
)
from sentry.testutils import TestCase
class BaseAPITest(TestCase):
def setUp(self):
self.user = self.create_user('coreapi@example.com')
self.team = self.create_team(name='Foo')
self.project = self.create_project(team=self.team)
self.pm = self.project.team.member_set.get_or_create(user=self.user)[0]
self.pk = self.project.key_set.get_or_create()[0]
self.helper = ClientApiHelper()
class AuthFromRequestTest(BaseAPITest):
def test_valid(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
result = self.helper.auth_from_request(request)
assert result.public_key == 'value'
def test_invalid_header_defers_to_GET(self):
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = self.helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_legacy_header_defers_to_GET(self):
request = mock.Mock()
request.META = {'HTTP_AUTHORIZATION': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = self.helper.auth_from_request(request)
assert result.version == '1'
class ProjectFromAuthTest(BaseAPITest):
def test_invalid_if_missing_key(self):
self.assertRaises(APIUnauthorized, self.helper.project_from_auth, Auth({}))
def test_valid_with_key(self):
auth = Auth({'sentry_key': self.pk.public_key})
result = self.helper.project_from_auth(auth)
self.assertEquals(result, self.project)
def test_invalid_key(self):
auth = Auth({'sentry_key': 'z'})
self.assertRaises(APIUnauthorized, self.helper.project_from_auth, auth)
def test_invalid_secret(self):
auth = Auth({'sentry_key': self.pk.public_key, 'sentry_secret': 'z'})
self.assertRaises(APIUnauthorized, self.helper.project_from_auth, auth)
class ProcessFingerprintTest(BaseAPITest):
def test_invalid_as_string(self):
self.assertRaises(InvalidFingerprint, self.helper._process_fingerprint, {
'fingerprint': '2012-01-01T10:30:45',
})
def test_invalid_component(self):
self.assertRaises(InvalidFingerprint, self.helper._process_fingerprint, {
'fingerprint': ['foo', ['bar']],
})
def test_simple(self):
data = self.helper._process_fingerprint({
'fingerprint': ['{{default}}', 1, 'bar', 4.5],
})
self.assertTrue('fingerprint' in data)
self.assertEquals(data['fingerprint'], ['{{default}}', '1', 'bar', '4.5'])
class ProcessDataTimestampTest(BaseAPITest):
def test_iso_timestamp(self):
d = datetime(2012, 1, 1, 10, 30, 45)
data = self.helper._process_data_timestamp({
'timestamp': '2012-01-01T10:30:45'
}, current_datetime=d)
self.assertTrue('timestamp' in data)
self.assertEquals(data['timestamp'], 1325413845.0)
def test_iso_timestamp_with_ms(self):
d = datetime(2012, 1, 1, 10, 30, 45, 434000)
data = self.helper._process_data_timestamp({
'timestamp': '2012-01-01T10:30:45.434'
}, current_datetime=d)
self.assertTrue('timestamp' in data)
self.assertEquals(data['timestamp'], 1325413845.0)
def test_timestamp_iso_timestamp_with_Z(self):
d = datetime(2012, 1, 1, 10, 30, 45)
data = self.helper._process_data_timestamp({
'timestamp': '2012-01-01T10:30:45Z'
}, current_datetime=d)
self.assertTrue('timestamp' in data)
self.assertEquals(data['timestamp'], 1325413845.0)
def test_invalid_timestamp(self):
self.assertRaises(InvalidTimestamp, self.helper._process_data_timestamp, {
'timestamp': 'foo'
})
def test_invalid_numeric_timestamp(self):
self.assertRaises(InvalidTimestamp, self.helper._process_data_timestamp, {
'timestamp': '100000000000000000000.0'
})
def test_future_timestamp(self):
self.assertRaises(InvalidTimestamp, self.helper._process_data_timestamp, {
'timestamp': '2052-01-01T10:30:45Z'
})
def test_long_microseconds_value(self):
d = datetime(2012, 1, 1, 10, 30, 45)
data = self.helper._process_data_timestamp({
'timestamp': '2012-01-01T10:30:45.341324Z'
}, current_datetime=d)
self.assertTrue('timestamp' in data)
self.assertEquals(data['timestamp'], 1325413845.0)
class ValidateDataTest(BaseAPITest):
def test_missing_project_id(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
})
assert data['project'] == self.project.id
@mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
def test_empty_event_id(self, uuid4):
data = self.helper.validate_data(self.project, {
'event_id': '',
})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
@mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
def test_missing_event_id(self, uuid4):
data = self.helper.validate_data(self.project, {})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
@mock.patch('uuid.uuid4', return_value=UUID('031667ea1758441f92c7995a428d2d14'))
def test_invalid_event_id(self, uuid4):
data = self.helper.validate_data(self.project, {
'event_id': 'a' * 33,
})
assert data['event_id'] == '031667ea1758441f92c7995a428d2d14'
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'event_id'
assert data['errors'][0]['value'] == 'a' * 33
def test_invalid_event_id_raises(self):
self.assertRaises(APIError, self.helper.validate_data, self.project, {
'event_id': 1
})
def test_unknown_attribute(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'foo': 'bar',
})
assert 'foo' not in data
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_attribute'
assert data['errors'][0]['name'] == 'foo'
def test_invalid_interface_name(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'foo.baz': 'bar',
})
assert 'foo.baz' not in data
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_attribute'
assert data['errors'][0]['name'] == 'foo.baz'
def test_invalid_interface_import_path(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'sentry.interfaces.Exception2': 'bar',
})
assert 'sentry.interfaces.Exception2' not in data
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_attribute'
assert data['errors'][0]['name'] == 'sentry.interfaces.Exception2'
def test_does_expand_list(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'exception': [{
'type': 'ValueError',
'value': 'hello world',
'module': 'foo.bar',
}]
})
assert 'sentry.interfaces.Exception' in data
def test_log_level_as_string(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'level': 'error',
})
assert data['level'] == 40
def test_invalid_log_level(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'level': 'foobar',
})
assert data['level'] == 40
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'level'
assert data['errors'][0]['value'] == 'foobar'
def test_tags_as_string(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'tags': 'bar',
})
assert 'tags' not in data
def test_tags_out_of_bounds(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'tags': {'f' * 33: 'value', 'foo': 'v' * 201, 'bar': 'value'},
})
assert data['tags'] == [('bar', 'value')]
assert len(data['errors']) == 2
def test_tags_as_invalid_pair(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'tags': [('foo', 'bar'), ('biz', 'baz', 'boz')],
})
assert data['tags'] == [('foo', 'bar')]
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'invalid_data'
assert data['errors'][0]['name'] == 'tags'
assert data['errors'][0]['value'] == ('biz', 'baz', 'boz')
def test_extra_as_string(self):
data = self.helper.validate_data(self.project, {
'message': 'foo',
'extra': 'bar',
})
assert 'extra' not in data
def test_invalid_culprit_raises(self):
self.assertRaises(APIError, self.helper.validate_data, self.project, {
'culprit': 1
})
def test_release_too_long(self):
data = self.helper.validate_data(self.project, {
'release': 'a' * 65,
})
assert not data.get('release')
assert len(data['errors']) == 1
assert data['errors'][0]['type'] == 'value_too_long'
assert data['errors'][0]['name'] == 'release'
assert data['errors'][0]['value'] == 'a' * 65
def test_release_as_non_string(self):
data = self.helper.validate_data(self.project, {
'release': 42,
})
assert data.get('release') == '42'
class GetInterfaceTest(TestCase):
def test_does_not_let_through_disallowed_name(self):
with self.assertRaises(ValueError):
get_interface('subprocess')
def test_allows_http(self):
from sentry.interfaces.http import Http
result = get_interface('sentry.interfaces.Http')
assert result is Http
result = get_interface('request')
assert result is Http
class EnsureHasIpTest(BaseAPITest):
def test_with_remote_addr(self):
inp = {
'sentry.interfaces.Http': {
'env': {
'REMOTE_ADDR': '192.168.0.1',
},
},
}
out = inp.copy()
self.helper.ensure_has_ip(out, '127.0.0.1')
assert inp == out
def test_with_user_ip(self):
inp = {
'sentry.interfaces.User': {
'ip_address': '192.168.0.1',
},
}
out = inp.copy()
self.helper.ensure_has_ip(out, '127.0.0.1')
assert inp == out
def test_without_ip_values(self):
out = {
'sentry.interfaces.User': {
},
'sentry.interfaces.Http': {
'env': {},
},
}
self.helper.ensure_has_ip(out, '127.0.0.1')
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
def test_without_any_values(self):
out = {}
self.helper.ensure_has_ip(out, '127.0.0.1')
assert out['sentry.interfaces.User']['ip_address'] == '127.0.0.1'
|
|
from __future__ import print_function
import math
import numpy as np
from scipy import linalg, special, stats
from .linalg_decomp_1 import tiny2zero
#univariate standard normal distribution
#following from scipy.stats.distributions with adjustments
sqrt2pi = math.sqrt(2 * np.pi)
logsqrt2pi = math.log(sqrt2pi)
class StandardNormal(object):
'''Distribution of vector x, with independent distribution N(0,1)
this is the same as the univariate normal for pdf and logpdf;
other methods are not checked/adjusted yet
'''
def rvs(self, size):
return np.random.standard_normal(size)
def pdf(self, x):
return np.exp(-x**2 * 0.5) / sqrt2pi
def logpdf(self, x):
return -x**2 * 0.5 - logsqrt2pi
def _cdf(self, x):
return special.ndtr(x)
def _logcdf(self, x):
return np.log(special.ndtr(x))
def _ppf(self, q):
return special.ndtri(q)
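# Quick sanity check (a sketch): StandardNormal().logpdf(0.0) == -0.5*np.log(2*np.pi)
# (about -0.9189), which matches scipy.stats.norm.logpdf(0.0).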
class AffineTransform(object):
'''affine full rank transformation of a multivariate distribution
no dimension checking, assumes everything broadcasts correctly
first version without bound support
provides distribution of y given distribution of x
y = const + tmat * x
'''
def __init__(self, const, tmat, dist):
self.const = const
self.tmat = tmat
self.dist = dist
self.nrv = len(const)
if not np.equal(self.nrv, tmat.shape).all():
raise ValueError('dimension of const and tmat do not agree')
#replace the following with a linalgarray class
self.tmatinv = linalg.inv(tmat)
self.absdet = np.abs(np.linalg.det(self.tmat))
self.logabsdet = np.log(np.abs(np.linalg.det(self.tmat)))
def rvs(self, size):
#size can only be integer not yet tuple
print((size,)+(self.nrv,))
return self.transform(self.dist.rvs(size=(size,)+(self.nrv,)))
def transform(self, x):
#return np.dot(self.tmat, x) + self.const
return np.dot(x, self.tmat) + self.const
def invtransform(self, y):
return np.dot(self.tmatinv, y - self.const)
def pdf(self, x):
return 1. / self.absdet * self.dist.pdf(self.invtransform(x))
def logpdf(self, x):
return - self.logabsdet + self.dist.logpdf(self.invtransform(x))
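# Minimal usage sketch (illustrative values; no bound support, per the docstring):
#   tmat = np.array([[2.0, 0.0], [0.5, 1.0]])
#   at = AffineTransform(const=np.zeros(2), tmat=tmat, dist=StandardNormal())
#   at.logpdf(np.ones(2))  # change of variables: dist.logpdf(invtransform(y)) - log|det tmat|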
from .linalg_decomp_1 import SvdArray, OneTimeProperty
class MultivariateNormal(object):
'''multivariate normal distribution with plain linalg
'''
def __init__(self, mean, sigma):
self.mean = mean
self.sigma = sigma
self.sigmainv = linalg.inv(sigma)
class MultivariateNormalChol(object):
'''multivariate normal distribution with cholesky decomposition of sigma
ignoring mean at the beginning, maybe
needs testing for broadcasting to contemporaneously but not intertemporally
correlated random variable, which axis?,
maybe swapaxis or rollaxis if x.ndim != mean.ndim == (sigma.ndim - 1)
initially 1d is ok, 2d should work with iid in axis 0 and mvn in axis 1
'''
def __init__(self, mean, sigma):
self.mean = mean
self.sigma = sigma
self.sigmainv = linalg.inv(sigma)
self.cholsigma = linalg.cholesky(sigma)
#the following makes it lower triangular with increasing time
self.cholsigmainv = linalg.cholesky(self.sigmainv)[::-1,::-1]
#todo: this might be a trick todo backward instead of forward filtering
def whiten(self, x):
return np.dot(self.cholsigmainv, x)
def logpdf_obs(self, x):
x = x - self.mean
x_whitened = self.whiten(x)
#sigmainv = linalg.cholesky(sigma)
logdetsigma = np.log(np.linalg.det(self.sigma))
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2)
- 2.* np.log(np.diagonal(self.cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
return llike
def logpdf(self, x):
return self.logpdf_obs(x).sum(-1)
def pdf(self, x):
return np.exp(self.logpdf(x))
class MultivariateNormal(object):
def __init__(self, mean, sigma):
self.mean = mean
self.sigma = SvdArray(sigma)
def loglike_ar1(x, rho):
'''loglikelihood of AR(1) process, as a test case
sigma_u partially hard coded
Greene chapter 12 eq. (12-31)
'''
x = np.asarray(x)
u = np.r_[x[0], x[1:] - rho * x[:-1]]
sigma_u2 = 2*(1-rho**2)
loglik = 0.5*(-(u**2).sum(0) / sigma_u2 + np.log(1-rho**2)
- x.shape[0] * (np.log(2*np.pi) + np.log(sigma_u2)))
return loglik
def ar2transform(x, arcoefs):
'''
(Greene eq 12-30)
'''
a1, a2 = arcoefs
y = np.zeros_like(x)
y[0] = np.sqrt((1+a2) * ((1-a2)**2 - a1**2) / (1-a2)) * x[0]
y[1] = np.sqrt(1-a2**2) * x[2] - a1 * np.sqrt(1-a1**2)/(1-a2) * x[1] #TODO:wrong index in x
y[2:] = x[2:] - a1 * x[1:-1] - a2 * x[:-2]
return y
def mvn_loglike(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = linalg.inv(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
llf = - np.dot(x, np.dot(sigmainv, x))
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf
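# Worked check (a sketch): for sigma = np.eye(2) and x = np.zeros(2) the formula reduces to
# llf = -0.5*(0 + 2*np.log(2*np.pi) + 0) = -np.log(2*np.pi), about -1.8379.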
def mvn_nloglike_obs(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
#Still wasteful to calculate pinv first
sigmainv = linalg.inv(sigma)
cholsigmainv = linalg.cholesky(sigmainv)
#2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
# logdet not needed ???
#logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
x_whitened = np.dot(cholsigmainv, x)
#sigmainv = linalg.cholesky(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
return llike, (x_whitened**2)
nobs = 10
x = np.arange(nobs)
autocov = 2*0.8**np.arange(nobs)# +0.01 * np.random.randn(nobs)
sigma = linalg.toeplitz(autocov)
#sigma = np.diag(1+np.random.randn(10)**2)
cholsigma = linalg.cholesky(sigma).T#, lower=True)
sigmainv = linalg.inv(sigma)
cholsigmainv = linalg.cholesky(sigmainv)
#2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
# logdet not needed ???
#logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
x_whitened = np.dot(cholsigmainv, x)
#sigmainv = linalg.cholesky(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
ll, ls = mvn_nloglike_obs(x, sigma)
#the following are all the same for diagonal sigma
print(ll.sum(), 'll.sum()')
print(llike.sum(), 'llike.sum()')
print(np.log(stats.norm._pdf(x_whitened)).sum() - 0.5 * logdetsigma,)
print('stats whitened')
print(np.log(stats.norm.pdf(x,scale=np.sqrt(np.diag(sigma)))).sum(),)
print('stats scaled')
print(0.5*(np.dot(linalg.cho_solve((linalg.cho_factor(sigma, lower=False)[0].T,
False),x.T), x)
+ nobs*np.log(2*np.pi)
- 2.* np.log(np.diagonal(cholsigmainv)).sum()))
print(0.5*(np.dot(linalg.cho_solve((linalg.cho_factor(sigma)[0].T, False),x.T), x) + nobs*np.log(2*np.pi)- 2.* np.log(np.diagonal(cholsigmainv)).sum()))
print(0.5*(np.dot(linalg.cho_solve(linalg.cho_factor(sigma),x.T), x) + nobs*np.log(2*np.pi)- 2.* np.log(np.diagonal(cholsigmainv)).sum()))
print(mvn_loglike(x, sigma))
normtransf = AffineTransform(np.zeros(nobs), cholsigma, StandardNormal())
print(normtransf.logpdf(x_whitened).sum())
#print(normtransf.rvs(5)
print(loglike_ar1(x, 0.8))
mch = MultivariateNormalChol(np.zeros(nobs), sigma)
print(mch.logpdf(x))
#print(tiny2zero(mch.cholsigmainv / mch.cholsigmainv[-1,-1])
xw = mch.whiten(x)
print('xSigmax', np.dot(xw,xw))
print('xSigmax', np.dot(x,linalg.cho_solve(linalg.cho_factor(mch.sigma),x)))
print('xSigmax', np.dot(x,linalg.cho_solve((mch.cholsigma, False),x)))
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import re
from oauthlib.oauth2 import MissingCodeError
try:
from urllib.parse import quote_plus, parse_qsl
except ImportError:
from urllib import quote_plus
from urlparse import parse_qsl
import pytest
import mock
import responses
from urlobject import URLObject
import flask
from werkzeug.contrib.fixers import ProxyFix
from flask_dance.consumer import (
OAuth2ConsumerBlueprint, oauth_authorized, oauth_error
)
from flask_dance.consumer.requests import OAuth2Session
try:
import blinker
except ImportError:
blinker = None
requires_blinker = pytest.mark.skipif(not blinker, reason="requires blinker")
def make_app(login_url=None, debug=False):
blueprint = OAuth2ConsumerBlueprint("test-service", __name__,
client_id="client_id",
client_secret="client_secret",
scope="admin",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_to="index",
login_url=login_url,
)
app = flask.Flask(__name__)
app.secret_key = "secret"
app.register_blueprint(blueprint, url_prefix="/login")
app.debug = debug
@app.route("/")
def index():
return "index"
return app, blueprint
def test_generate_login_url():
app, _ = make_app()
with app.test_request_context("/"):
login_url = flask.url_for("test-service.login")
assert login_url == "/login/test-service"
def test_override_login_url():
app, _ = make_app(login_url="/crazy/custom/url")
with app.test_request_context("/"):
login_url = flask.url_for("test-service.login")
assert login_url == "/login/crazy/custom/url"
@responses.activate
def test_login_url():
app, _ = make_app()
with app.test_client() as client:
resp = client.get(
"/login/test-service",
base_url="https://a.b.c",
follow_redirects=False,
)
# check that we saved the state in the session
assert flask.session["test-service_oauth_state"] == "random-string"
# check that we redirected the client
assert resp.status_code == 302
location = URLObject(resp.headers["Location"])
assert location.without_query() == "https://example.com/oauth/authorize"
assert location.query_dict["client_id"] == "client_id"
assert location.query_dict["redirect_uri"] == "https://a.b.c/login/test-service/authorized"
assert location.query_dict["scope"] == "admin"
assert location.query_dict["state"] == "random-string"
@responses.activate
def test_login_url_forwarded_proto():
app, _ = make_app()
app.wsgi_app = ProxyFix(app.wsgi_app)
with app.test_client() as client:
resp = client.get(
"/login/test-service",
base_url="http://a.b.c",
headers={"X-Forwarded-Proto": "https"},
follow_redirects=False,
)
# check that we redirected the client with a https redirect_uri
assert resp.status_code == 302
location = URLObject(resp.headers["Location"])
assert location.query_dict["redirect_uri"] == "https://a.b.c/login/test-service/authorized"
@responses.activate
def test_authorized_url():
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"foobar","token_type":"bearer","scope":"admin"}',
)
app, _ = make_app()
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/"
# check that we obtained an access token
assert len(responses.calls) == 1
request_data = dict(parse_qsl(responses.calls[0].request.body))
assert request_data["client_id"] == "client_id"
assert request_data["redirect_uri"] == "https://a.b.c/login/test-service/authorized"
# check that we stored the access token and secret in the session
assert (
flask.session["test-service_oauth_token"] ==
{'access_token': 'foobar', 'scope': ['admin'], 'token_type': 'bearer'}
)
def test_authorized_url_invalid_response():
app, _ = make_app(debug=True)
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
with pytest.raises(MissingCodeError) as missingError:
client.get(
"/login/test-service/authorized?error_code=1349048&error_message=IMUSEFUL",
base_url="https://a.b.c",
)
match = re.search(r"{[^}]*}", str(missingError.value))
err_dict = json.loads(match.group(0))
assert err_dict == {"error_message": "IMUSEFUL", "error_code": "1349048"}
@responses.activate
def test_provider_error():
app, _ = make_app()
with app.test_client() as client:
# make the request
resp = client.get(
"/login/test-service/authorized?"
"error=invalid_redirect&"
"error_description=Invalid+redirect_URI&"
"error_uri=https%3a%2f%2fexample.com%2fdocs%2fhelp",
base_url="https://a.b.c",
)
# even though there was an error, we should still redirect the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/"
# shouldn't even try getting an access token, though
assert len(responses.calls) == 0
@responses.activate
def test_redirect_url():
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"foobar","token_type":"bearer","scope":"admin"}',
)
blueprint = OAuth2ConsumerBlueprint("test-service", __name__,
client_id="client_id",
client_secret="client_secret",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_url="http://mysite.cool/whoa?query=basketball",
)
app = flask.Flask(__name__)
app.secret_key = "secret"
app.register_blueprint(blueprint, url_prefix="/login")
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "http://mysite.cool/whoa?query=basketball"
@responses.activate
def test_redirect_to():
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"foobar","token_type":"bearer","scope":"admin"}',
)
blueprint = OAuth2ConsumerBlueprint("test-service", __name__,
client_id="client_id",
client_secret="client_secret",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_to="my_view",
)
app = flask.Flask(__name__)
app.secret_key = "secret"
app.register_blueprint(blueprint, url_prefix="/login")
@app.route("/blargl")
def my_view():
return "check out my url"
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/blargl"
@responses.activate
def test_redirect_fallback():
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"foobar","token_type":"bearer","scope":"admin"}',
)
blueprint = OAuth2ConsumerBlueprint("test-service", __name__,
client_id="client_id",
client_secret="client_secret",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
)
app = flask.Flask(__name__)
app.secret_key = "secret"
app.register_blueprint(blueprint, url_prefix="/login")
@app.route("/blargl")
def my_view():
return "check out my url"
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/"
@requires_blinker
def test_signal_oauth_authorized(request):
app, bp = make_app()
calls = []
def callback(*args, **kwargs):
calls.append((args, kwargs))
oauth_authorized.connect(callback)
request.addfinalizer(lambda: oauth_authorized.disconnect(callback))
with app.test_client() as client:
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
bp.session.fetch_token = mock.Mock(return_value="test-token")
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
)
# check that we stored the token
assert flask.session["test-service_oauth_token"] == "test-token"
assert len(calls) == 1
assert calls[0][0] == (bp,)
assert calls[0][1] == {"token": "test-token"}
@requires_blinker
def test_signal_oauth_authorized_abort(request):
app, bp = make_app()
calls = []
def callback(*args, **kwargs):
calls.append((args, kwargs))
return False
oauth_authorized.connect(callback)
request.addfinalizer(lambda: oauth_authorized.disconnect(callback))
with app.test_client() as client:
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
bp.session.fetch_token = mock.Mock(return_value="test-token")
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
)
# check that we did NOT store the token
assert "test-service_oauth_token" not in flask.session
# callback still should have been called
assert len(calls) == 1
@requires_blinker
def test_signal_sender_oauth_authorized(request):
app, bp = make_app()
bp2 = OAuth2ConsumerBlueprint("test2", __name__,
client_id="client_id",
client_secret="client_secret",
scope="admin",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_to="index",
)
app.register_blueprint(bp2, url_prefix="/login")
calls = []
def callback(*args, **kwargs):
calls.append((args, kwargs))
oauth_authorized.connect(callback, sender=bp)
request.addfinalizer(lambda: oauth_authorized.disconnect(callback, sender=bp))
with app.test_client() as client:
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
bp.session.fetch_token = mock.Mock(return_value="test-token")
bp2.session.fetch_token = mock.Mock(return_value="test2-token")
resp = client.get(
"/login/test2/authorized?code=secret-code&state=random-string",
)
assert len(calls) == 0
with app.test_client() as client:
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
bp.session.fetch_token = mock.Mock(return_value="test-token")
bp2.session.fetch_token = mock.Mock(return_value="test2-token")
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
)
assert len(calls) == 1
assert calls[0][0] == (bp,)
assert calls[0][1] == {"token": "test-token"}
with app.test_client() as client:
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
bp.session.fetch_token = mock.Mock(return_value="test-token")
bp2.session.fetch_token = mock.Mock(return_value="test2-token")
resp = client.get(
"/login/test2/authorized?code=secret-code&state=random-string",
)
assert len(calls) == 1 # unchanged
@requires_blinker
def test_signal_oauth_error(request):
app, bp = make_app()
calls = []
def callback(*args, **kwargs):
calls.append((args, kwargs))
oauth_error.connect(callback)
request.addfinalizer(lambda: oauth_error.disconnect(callback))
with app.test_client() as client:
resp = client.get(
"/login/test-service/authorized?"
"error=unauthorized_client&"
"error_description=Invalid+redirect+URI&"
"error_uri=https%3a%2f%2fexample.com%2fdocs%2fhelp",
base_url="https://a.b.c",
)
assert len(calls) == 1
assert calls[0][0] == (bp,)
assert calls[0][1] == {
"error": "unauthorized_client",
"error_description": "Invalid redirect URI",
"error_uri": "https://example.com/docs/help",
}
assert resp.status_code == 302
class CustomOAuth2Session(OAuth2Session):
my_attr = "foobar"
def test_custom_session_class():
bp = OAuth2ConsumerBlueprint("test", __name__,
client_id="client_id",
client_secret="client_secret",
scope="admin",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_to="index",
session_class=CustomOAuth2Session,
)
assert isinstance(bp.session, CustomOAuth2Session)
assert bp.session.my_attr == "foobar"
|
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
from __future__ import print_function
import os
import subprocess
import time
import scapy.modules.six as six
from threading import Lock, Thread
from scapy.automaton import (
Message,
ObjectPipe,
select_objects,
)
from scapy.consts import WINDOWS
from scapy.error import log_runtime, warning
from scapy.config import conf
from scapy.utils import get_temp_file, do_graph
class PipeEngine(ObjectPipe):
pipes = {}
@classmethod
def list_pipes(cls):
for pn, pc in sorted(cls.pipes.items()):
doc = pc.__doc__ or ""
if doc:
doc = doc.splitlines()[0]
print("%20s: %s" % (pn, doc))
@classmethod
def list_pipes_detailed(cls):
for pn, pc in sorted(cls.pipes.items()):
if pc.__doc__:
print("###### %s\n %s" % (pn, pc.__doc__))
else:
print("###### %s" % pn)
def __init__(self, *pipes):
ObjectPipe.__init__(self)
self.active_pipes = set()
self.active_sources = set()
self.active_drains = set()
self.active_sinks = set()
self._add_pipes(*pipes)
self.thread_lock = Lock()
self.command_lock = Lock()
self.thread = None
def __getattr__(self, attr):
if attr.startswith("spawn_"):
dname = attr[6:]
if dname in self.pipes:
def f(*args, **kargs):
k = self.pipes[dname]
p = k(*args, **kargs)
self.add(p)
return p
return f
raise AttributeError(attr)
def _read_cmd(self):
return self.recv()
def _write_cmd(self, _cmd):
self.send(_cmd)
def add_one_pipe(self, pipe):
self.active_pipes.add(pipe)
if isinstance(pipe, Source):
self.active_sources.add(pipe)
if isinstance(pipe, Drain):
self.active_drains.add(pipe)
if isinstance(pipe, Sink):
self.active_sinks.add(pipe)
def get_pipe_list(self, pipe):
def flatten(p, li):
li.add(p)
for q in p.sources | p.sinks | p.high_sources | p.high_sinks:
if q not in li:
flatten(q, li)
pl = set()
flatten(pipe, pl)
return pl
def _add_pipes(self, *pipes):
pl = set()
for p in pipes:
pl |= self.get_pipe_list(p)
pl -= self.active_pipes
for q in pl:
self.add_one_pipe(q)
return pl
def run(self):
log_runtime.debug("Pipe engine thread started.")
try:
for p in self.active_pipes:
p.start()
sources = self.active_sources
sources.add(self)
exhausted = set([])
RUN = True
STOP_IF_EXHAUSTED = False
while RUN and (not STOP_IF_EXHAUSTED or len(sources) > 1):
fds = select_objects(sources, 0)
for fd in fds:
if fd is self:
cmd = self._read_cmd()
if cmd == "X":
RUN = False
break
elif cmd == "B":
STOP_IF_EXHAUSTED = True
elif cmd == "A":
sources = self.active_sources - exhausted
sources.add(self)
else:
warning("Unknown internal pipe engine command: %r."
" Ignoring.", cmd)
elif fd in sources:
try:
fd.deliver()
except Exception as e:
log_runtime.exception("piping from %s failed: %s",
fd.name, e)
else:
if fd.exhausted():
exhausted.add(fd)
sources.remove(fd)
except KeyboardInterrupt:
pass
finally:
try:
for p in self.active_pipes:
p.stop()
finally:
self.thread_lock.release()
log_runtime.debug("Pipe engine thread stopped.")
def start(self):
if self.thread_lock.acquire(0):
_t = Thread(target=self.run, name="scapy.pipetool.PipeEngine")
_t.setDaemon(True)
_t.start()
self.thread = _t
else:
log_runtime.debug("Pipe engine already running")
def wait_and_stop(self):
self.stop(_cmd="B")
def stop(self, _cmd="X"):
try:
with self.command_lock:
if self.thread is not None:
self._write_cmd(_cmd)
self.thread.join()
try:
self.thread_lock.release()
except Exception:
pass
else:
log_runtime.debug("Pipe engine thread not running")
except KeyboardInterrupt:
print("Interrupted by user.")
def add(self, *pipes):
pipes = self._add_pipes(*pipes)
with self.command_lock:
if self.thread is not None:
for p in pipes:
p.start()
self._write_cmd("A")
def graph(self, **kargs):
g = ['digraph "pipe" {', "\tnode [shape=rectangle];", ]
for p in self.active_pipes:
g.append('\t"%i" [label="%s"];' % (id(p), p.name))
g.append("")
g.append("\tedge [color=blue, arrowhead=vee];")
for p in self.active_pipes:
for q in p.sinks:
g.append('\t"%i" -> "%i";' % (id(p), id(q)))
g.append("")
g.append("\tedge [color=purple, arrowhead=veevee];")
for p in self.active_pipes:
for q in p.high_sinks:
g.append('\t"%i" -> "%i";' % (id(p), id(q)))
g.append("")
g.append("\tedge [color=red, arrowhead=diamond];")
for p in self.active_pipes:
for q in p.trigger_sinks:
g.append('\t"%i" -> "%i";' % (id(p), id(q)))
g.append('}')
graph = "\n".join(g)
do_graph(graph, **kargs)
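# Illustrative sketch (not part of the original module): a typical PipeEngine
# lifecycle, using pipe classes defined further down in this file. The
# graph() kwargs are simply forwarded to scapy's do_graph().
#
#   fd = CLIFeeder()
#   fd > ConsoleSink()              # low exit of the feeder feeds the sink
#   pe = PipeEngine(fd)
#   pe.start()                      # engine runs in a background thread
#   fd.send("hello")                # ConsoleSink prints >'hello'
#   pe.graph(type="png", target="> /tmp/pipeline.png")
#   fd.close()                      # mark the feeder as exhausted
#   pe.wait_and_stop()              # stop once every source is exhausted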
class _ConnectorLogic(object):
def __init__(self):
self.sources = set()
self.sinks = set()
self.high_sources = set()
self.high_sinks = set()
self.trigger_sources = set()
self.trigger_sinks = set()
def __lt__(self, other):
other.sinks.add(self)
self.sources.add(other)
return other
def __gt__(self, other):
self.sinks.add(other)
other.sources.add(self)
return other
def __eq__(self, other):
self > other
other > self
return other
def __lshift__(self, other):
self.high_sources.add(other)
other.high_sinks.add(self)
return other
def __rshift__(self, other):
self.high_sinks.add(other)
other.high_sources.add(self)
return other
def __floordiv__(self, other):
self >> other
other >> self
return other
def __xor__(self, other):
self.trigger_sinks.add(other)
other.trigger_sources.add(self)
return other
def __hash__(self):
return object.__hash__(self)
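# Illustrative note (not part of the original module): the operator overloads
# above are the wiring syntax used when building a pipeline, e.g.
#
#   a > b     # a's low exit feeds b's low entry
#   a >> b    # a's high exit feeds b's high entry
#   a == b    # connect the low channels of a and b in both directions
#   a // b    # connect the high channels of a and b in both directions
#   a ^ b     # a's trigger exit feeds b's trigger entry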
class _PipeMeta(type):
def __new__(cls, name, bases, dct):
c = type.__new__(cls, name, bases, dct)
PipeEngine.pipes[name] = c
return c
class Pipe(six.with_metaclass(_PipeMeta, _ConnectorLogic)):
def __init__(self, name=None):
_ConnectorLogic.__init__(self)
if name is None:
name = "%s" % (self.__class__.__name__)
self.name = name
def _send(self, msg):
for s in self.sinks:
s.push(msg)
def _high_send(self, msg):
for s in self.high_sinks:
s.high_push(msg)
def _trigger(self, msg=None):
for s in self.trigger_sinks:
s.on_trigger(msg)
def __repr__(self):
ct = conf.color_theme
s = "%s%s" % (ct.punct("<"), ct.layer_name(self.name))
if self.sources or self.sinks:
s += " %s" % ct.punct("[")
if self.sources:
s += "%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.sources), # noqa: E501
ct.field_value(">"))
s += ct.layer_name("#")
if self.sinks:
s += "%s%s" % (ct.field_value(">"),
ct.punct(",").join(ct.field_name(s.name) for s in self.sinks)) # noqa: E501
s += ct.punct("]")
if self.high_sources or self.high_sinks:
s += " %s" % ct.punct("[")
if self.high_sources:
s += "%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.high_sources), # noqa: E501
ct.field_value(">>"))
s += ct.layer_name("#")
if self.high_sinks:
s += "%s%s" % (ct.field_value(">>"),
ct.punct(",").join(ct.field_name(s.name) for s in self.high_sinks)) # noqa: E501
s += ct.punct("]")
if self.trigger_sources or self.trigger_sinks:
s += " %s" % ct.punct("[")
if self.trigger_sources:
s += "%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.trigger_sources), # noqa: E501
ct.field_value("^"))
s += ct.layer_name("#")
if self.trigger_sinks:
s += "%s%s" % (ct.field_value("^"),
ct.punct(",").join(ct.field_name(s.name) for s in self.trigger_sinks)) # noqa: E501
s += ct.punct("]")
s += ct.punct(">")
return s
class Source(Pipe, ObjectPipe):
def __init__(self, name=None):
Pipe.__init__(self, name=name)
ObjectPipe.__init__(self)
self.is_exhausted = False
def _read_message(self):
return Message()
def deliver(self):
        msg = self._read_message()
self._send(msg)
def exhausted(self):
return self.is_exhausted
def start(self):
pass
def stop(self):
pass
class Drain(Pipe):
"""Repeat messages from low/high entries to (resp.) low/high exits
.. code::
+-------+
>>-|-------|->>
| |
>-|-------|->
+-------+
"""
def push(self, msg):
self._send(msg)
def high_push(self, msg):
self._high_send(msg)
def start(self):
pass
def stop(self):
pass
class Sink(Pipe):
"""
Does nothing; interface to extend for custom sinks.
All sinks have the following constructor parameters:
:param name: a human-readable name for the element
:type name: str
"""
def push(self, msg):
"""
Called by :py:class:`PipeEngine` when there is a new message for the
low entry.
:param msg: The message data
:returns: None
:rtype: None
"""
pass
def high_push(self, msg):
"""
Called by :py:class:`PipeEngine` when there is a new message for the
high entry.
:param msg: The message data
:returns: None
:rtype: None
"""
pass
def start(self):
pass
def stop(self):
pass
class AutoSource(Source):
def __init__(self, name=None):
Source.__init__(self, name=name)
def _gen_data(self, msg):
ObjectPipe.send(self, (msg, False, False))
def _gen_high_data(self, msg):
ObjectPipe.send(self, (msg, True, False))
def _exhaust(self):
ObjectPipe.send(self, (None, None, True))
def deliver(self):
msg, high, exhaust = self.recv()
if exhaust:
pass
if high:
self._high_send(msg)
else:
self._send(msg)
class ThreadGenSource(AutoSource):
def __init__(self, name=None):
AutoSource.__init__(self, name=name)
self.RUN = False
def generate(self):
pass
def start(self):
self.RUN = True
Thread(target=self.generate,
name="scapy.pipetool.ThreadGenSource").start()
def stop(self):
self.RUN = False
class ConsoleSink(Sink):
"""Print messages on low and high entries to ``stdout``
.. code::
+-------+
>>-|--. |->>
| print |
>-|--' |->
+-------+
"""
def push(self, msg):
print(">" + repr(msg))
def high_push(self, msg):
print(">>" + repr(msg))
class RawConsoleSink(Sink):
"""Print messages on low and high entries, using os.write
.. code::
+-------+
>>-|--. |->>
| write |
>-|--' |->
+-------+
:param newlines: Include a new-line character after printing each packet.
Defaults to True.
:type newlines: bool
"""
def __init__(self, name=None, newlines=True):
Sink.__init__(self, name=name)
self.newlines = newlines
self._write_pipe = 1
def push(self, msg):
if self.newlines:
msg += "\n"
os.write(self._write_pipe, msg.encode("utf8"))
def high_push(self, msg):
if self.newlines:
msg += "\n"
os.write(self._write_pipe, msg.encode("utf8"))
class CLIFeeder(AutoSource):
"""Send messages from python command line:
.. code::
+--------+
>>-| |->>
| send() |
>-| `----|->
+--------+
"""
def send(self, msg):
self._gen_data(msg)
def close(self):
self.is_exhausted = True
class CLIHighFeeder(CLIFeeder):
"""Send messages from python command line to high output:
.. code::
+--------+
>>-| .----|->>
| send() |
>-| |->
+--------+
"""
def send(self, msg):
self._gen_high_data(msg)
class PeriodicSource(ThreadGenSource):
"""Generage messages periodically on low exit:
.. code::
+-------+
>>-| |->>
| msg,T |
>-| `----|->
+-------+
"""
def __init__(self, msg, period, period2=0, name=None):
ThreadGenSource.__init__(self, name=name)
if not isinstance(msg, (list, set, tuple)):
msg = [msg]
self.msg = msg
self.period = period
self.period2 = period2
def generate(self):
while self.RUN:
empty_gen = True
for m in self.msg:
empty_gen = False
self._gen_data(m)
time.sleep(self.period)
if empty_gen:
self.is_exhausted = True
self._exhaust()
time.sleep(self.period2)
class TermSink(Sink):
"""
Prints messages on the low and high entries, on a separate terminal (xterm
or cmd).
.. code::
+-------+
>>-|--. |->>
| print |
>-|--' |->
+-------+
:param keepterm: Leave the terminal window open after :py:meth:`~Pipe.stop`
is called. Defaults to True.
:type keepterm: bool
:param newlines: Include a new-line character after printing each packet.
Defaults to True.
:type newlines: bool
:param openearly: Automatically starts the terminal when the constructor is
called, rather than waiting for :py:meth:`~Pipe.start`.
Defaults to True.
:type openearly: bool
"""
def __init__(self, name=None, keepterm=True, newlines=True,
openearly=True):
Sink.__init__(self, name=name)
self.keepterm = keepterm
self.newlines = newlines
self.openearly = openearly
self.opened = False
if self.openearly:
self.start()
def _start_windows(self):
if not self.opened:
self.opened = True
self.__f = get_temp_file()
open(self.__f, "a").close()
self.name = "Scapy" if self.name is None else self.name
# Start a powershell in a new window and print the PID
cmd = "$app = Start-Process PowerShell -ArgumentList '-command &{$host.ui.RawUI.WindowTitle=\\\"%s\\\";Get-Content \\\"%s\\\" -wait}' -passthru; echo $app.Id" % (self.name, self.__f.replace("\\", "\\\\")) # noqa: E501
proc = subprocess.Popen([conf.prog.powershell, cmd], stdout=subprocess.PIPE) # noqa: E501
output, _ = proc.communicate()
# This is the process PID
self.pid = int(output)
print("PID: %d" % self.pid)
def _start_unix(self):
if not self.opened:
self.opened = True
rdesc, self.wdesc = os.pipe()
cmd = ["xterm"]
if self.name is not None:
cmd.extend(["-title", self.name])
if self.keepterm:
cmd.append("-hold")
cmd.extend(["-e", "cat <&%d" % rdesc])
self.proc = subprocess.Popen(cmd, close_fds=False)
os.close(rdesc)
def start(self):
if WINDOWS:
return self._start_windows()
else:
return self._start_unix()
def _stop_windows(self):
if not self.keepterm:
self.opened = False
# Recipe to kill process with PID
# http://code.activestate.com/recipes/347462-terminating-a-subprocess-on-windows/
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, self.pid) # noqa: E501
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
def _stop_unix(self):
if not self.keepterm:
self.opened = False
self.proc.kill()
self.proc.wait()
def stop(self):
if WINDOWS:
return self._stop_windows()
else:
return self._stop_unix()
def _print(self, s):
if self.newlines:
s += "\n"
if WINDOWS:
wdesc = open(self.__f, "a")
wdesc.write(s)
wdesc.close()
else:
os.write(self.wdesc, s.encode())
def push(self, msg):
self._print(str(msg))
def high_push(self, msg):
self._print(str(msg))
class QueueSink(Sink):
"""
Collects messages on the low and high entries into a :py:class:`Queue`.
Messages are dequeued with :py:meth:`recv`.
Both high and low entries share the same :py:class:`Queue`.
.. code::
+-------+
>>-|--. |->>
| queue |
>-|--' |->
+-------+
"""
def __init__(self, name=None):
Sink.__init__(self, name=name)
self.q = six.moves.queue.Queue()
def push(self, msg):
self.q.put(msg)
def high_push(self, msg):
self.q.put(msg)
def recv(self, block=True, timeout=None):
"""
Reads the next message from the queue.
If no message is available in the queue, returns None.
:param block: Blocks execution until a packet is available in the
queue. Defaults to True.
:type block: bool
:param timeout: Controls how long to wait if ``block=True``. If None
(the default), this method will wait forever. If a
non-negative number, this is a number of seconds to
wait before giving up (and returning None).
:type timeout: None, int or float
"""
try:
return self.q.get(block=block, timeout=timeout)
except six.moves.queue.Empty:
pass
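# Illustrative sketch (not part of the original module): draining a QueueSink
# from user code while an engine is running ('source' is any Source instance).
#
#   qs = QueueSink()
#   source > qs
#   ...
#   msg = qs.recv(timeout=1)        # returns None if nothing arrived within 1s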
class TransformDrain(Drain):
"""Apply a function to messages on low and high entry:
.. code::
+-------+
>>-|--[f]--|->>
| |
>-|--[f]--|->
+-------+
"""
def __init__(self, f, name=None):
Drain.__init__(self, name=name)
self.f = f
def push(self, msg):
self._send(self.f(msg))
def high_push(self, msg):
self._high_send(self.f(msg))
class UpDrain(Drain):
"""Repeat messages from low entry to high exit:
.. code::
+-------+
>>-| ,--|->>
| / |
>-|--' |->
+-------+
"""
def push(self, msg):
self._high_send(msg)
def high_push(self, msg):
pass
class DownDrain(Drain):
r"""Repeat messages from high entry to low exit:
.. code::
+-------+
>>-|--. |->>
| \ |
>-| `--|->
+-------+
"""
def push(self, msg):
pass
def high_push(self, msg):
self._send(msg)
|
|
# -*- coding: utf8 -*-
import re
import os
import socket
import time
try:
import ujson as json
except ImportError:
import json
import datetime
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import shutil
import subprocess
import tempfile
import codecs
import signal
import sys
from pipes import quote
from select import select
from burpui.misc.utils import human_readable as _hr
from burpui.misc.backend.interface import BUIserverException
from burpui.misc.backend.burp1 import Burp as Burp1
from burpui.misc.parser.burp1 import Parser
if sys.version_info < (3, 3):
TimeoutError = OSError
BURP_MINIMAL_VERSION = 'burp-2.0.18'
g_burpbin = u'/usr/sbin/burp'
g_stripbin = u'/usr/sbin/vss_strip'
g_burpconfcli = u'/etc/burp/burp.conf'
g_burpconfsrv = u'/etc/burp/burp-server.conf'
g_tmpdir = u'/tmp/bui'
# Some functions are the same as in Burp1 backend
class Burp(Burp1):
def __init__(self, server=None, conf=None):
global g_burpbin, g_stripbin, g_burpconfcli, g_burpconfsrv, g_tmpdir, BURP_MINIMAL_VERSION
self.proc = None
self.app = None
self.logger = None
self.acl_handler = False
if server:
if hasattr(server, 'app'):
self.app = server.app
self.set_logger(self.app.logger)
self.acl_handler = server.acl_handler
self.burpbin = g_burpbin
self.stripbin = g_stripbin
self.burpconfcli = g_burpconfcli
self.burpconfsrv = g_burpconfsrv
self.defaults = {'burpbin': g_burpbin, 'stripbin': g_stripbin, 'bconfcli': g_burpconfcli, 'bconfsrv': g_burpconfsrv, 'tmpdir': g_tmpdir}
self.running = []
if conf:
config = ConfigParser.ConfigParser(self.defaults)
version = ''
with codecs.open(conf, 'r', 'utf-8') as fp:
config.readfp(fp)
try:
bbin = self._safe_config_get(config.get, 'burpbin', sect='Burp2')
strip = self._safe_config_get(config.get, 'stripbin', sect='Burp2')
confcli = self._safe_config_get(config.get, 'bconfcli', sect='Burp2')
confsrv = self._safe_config_get(config.get, 'bconfsrv', sect='Burp2')
tmpdir = self._safe_config_get(config.get, 'tmpdir')
if tmpdir and os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
self._logger('warning', "'%s' is not a directory", tmpdir)
tmpdir = g_tmpdir
if confcli and not os.path.isfile(confcli):
self._logger('warning', "The file '%s' does not exist", confcli)
confcli = g_burpconfcli
if confsrv and not os.path.isfile(confsrv):
self._logger('warning', "The file '%s' does not exist", confsrv)
confsrv = g_burpconfsrv
if strip and not strip.startswith('/'):
self._logger('warning', "Please provide an absolute path for the 'stripbin' option. Fallback to '%s'", g_stripbin)
strip = g_stripbin
elif strip and not re.match('^\S+$', strip):
self._logger('warning', "Incorrect value for the 'stripbin' option. Fallback to '%s'", g_stripbin)
strip = g_stripbin
elif strip and (not os.path.isfile(strip) or not os.access(strip, os.X_OK)):
self._logger('warning', "'%s' does not exist or is not executable. Fallback to '%s'", strip, g_stripbin)
strip = g_stripbin
if strip and (not os.path.isfile(strip) or not os.access(strip, os.X_OK)):
self._logger('error', "Ooops, '%s' not found or is not executable", strip)
strip = None
if bbin and not bbin.startswith('/'):
self._logger('warning', "Please provide an absolute path for the 'burpbin' option. Fallback to '%s'", g_burpbin)
bbin = g_burpbin
elif bbin and not re.match('^\S+$', bbin):
self._logger('warning', "Incorrect value for the 'burpbin' option. Fallback to '%s'", g_burpbin)
bbin = g_burpbin
elif bbin and (not os.path.isfile(bbin) or not os.access(bbin, os.X_OK)):
self._logger('warning', "'%s' does not exist or is not executable. Fallback to '%s'", bbin, g_burpbin)
bbin = g_burpbin
if bbin and (not os.path.isfile(bbin) or not os.access(bbin, os.X_OK)):
self._logger('error', "Ooops, '%s' not found or is not executable", bbin)
# The burp binary is mandatory for this backend
raise Exception('This backend *CAN NOT* work without a burp binary')
self.tmpdir = tmpdir
self.burpbin = bbin
self.stripbin = strip
self.burpconfcli = confcli
self.burpconfsrv = confsrv
except ConfigParser.NoOptionError as e:
self._logger('error', str(e))
except ConfigParser.NoSectionError as e:
self._logger('warning', str(e))
# check the burp version because this backend only supports clients newer than BURP_MINIMAL_VERSION
try:
cmd = [self.burpbin, '-v']
version = subprocess.check_output(cmd, universal_newlines=True).rstrip('\n')
if version < BURP_MINIMAL_VERSION:
raise Exception('Your burp version ({}) does not fit the minimal requirements: {}'.format(version, BURP_MINIMAL_VERSION))
except subprocess.CalledProcessError as e:
raise Exception('Unable to determine your burp version: {}'.format(str(e)))
self.parser = Parser(self.app, self.burpconfsrv)
signal.signal(signal.SIGALRM, self._sighandler)
self._logger('info', 'burp binary: %s', self.burpbin)
self._logger('info', 'strip binary: %s', self.stripbin)
self._logger('info', 'burp conf cli: %s', self.burpconfcli)
self._logger('info', 'burp conf srv: %s', self.burpconfsrv)
self._logger('info', 'burp version: %s', version)
def _sighandler(self, signum, frame):
raise TimeoutError('Operation timed out')
    # try not to leave child processes behind on the server side
def __exit__(self, type, value, traceback):
if self._proc_is_alive():
self.proc.stdin.close()
self.proc.communicate()
self.proc.wait()
def _spawn_burp(self):
cmd = [self.burpbin, '-c', self.burpconfcli, '-a', 'm']
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, universal_newlines=True)
# wait a little bit in case the process dies on a network timeout
time.sleep(0.5)
if not self._proc_is_alive():
raise Exception('Unable to spawn burp process')
self.proc.stdin.write('j:pretty-print-off\n')
js = self._read_proc_stdout()
if self._is_warning(js):
self._logger('info', js['warning'])
def _proc_is_alive(self):
if self.proc:
            return self.proc.poll() is None
return False
def _is_ignored(self, js):
"""
We ignore the 'logline' lines
"""
if not js:
return True
return 'logline' in js
def _is_warning(self, js):
"""
Returns True if the document is a warning
"""
if not js:
return False
return 'warning' in js
def _is_valid_json(self, doc):
"""
Determine if the retrieved string is a valid json document or not
"""
try:
js = json.loads(doc)
return js
except ValueError:
return None
def _human_st_mode(self, mode):
"""
Convert the st_mode returned by stat in human readable (ls-like) format
"""
hr = ''
if os.path.stat.S_ISREG(mode):
hr = '-'
elif os.path.stat.S_ISLNK(mode):
hr = 'l'
elif os.path.stat.S_ISSOCK(mode):
hr = 's'
elif os.path.stat.S_ISDIR(mode):
hr = 'd'
elif os.path.stat.S_ISBLK(mode):
hr = 'b'
elif os.path.stat.S_ISFIFO(mode):
hr = 'p'
elif os.path.stat.S_ISCHR(mode):
hr = 'c'
else:
hr = '-'
for who in 'USR', 'GRP', 'OTH':
for perm in 'R', 'W', 'X':
if mode & getattr(os.path.stat, 'S_I' + perm + who):
hr += perm.lower()
else:
hr += '-'
return hr
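    # Illustrative note (not part of the original code): for a regular file
    # with mode 0o100644 the logic above yields '-rw-r--r--', i.e. the same
    # string that 'ls -l' would show for that mode.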
def _read_proc_stdout(self):
"""
        reads the burp process stdout and returns a parsed JSON document or None
"""
doc = u''
js = None
while True:
try:
signal.alarm(5)
if not self._proc_is_alive():
raise Exception('process died while reading its output')
doc += self.proc.stdout.readline().rstrip('\n')
js = self._is_valid_json(doc)
# if the string is a valid json and looks like a logline, we
# simply ignore it
if js and self._is_ignored(js):
doc = ''
continue
elif js:
break
except (TimeoutError, IOError, Exception) as e:
                # the OS raises an exception if there is no data or the read timed out
self._logger('warning', str(e))
break
finally:
signal.alarm(0)
return js
def status(self, query='c:\n', agent=None):
"""
        status spawns a burp process in monitor mode, asks the given 'question'
        and parses the JSON output it returns
"""
try:
if not query.endswith('\n'):
q = '{0}\n'.format(query)
else:
q = query
if not self._proc_is_alive():
self._spawn_burp()
self.proc.stdin.write(q)
js = self._read_proc_stdout()
if self._is_warning(js):
self._logger('warning', js['warning'])
return None
return js
except (OSError, Exception) as e:
msg = 'Cannot launch burp process: {}'.format(str(e))
self._logger('error', msg)
raise BUIserverException(msg)
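    # Illustrative note (not part of the original code): the queries passed to
    # status() are plain newline-terminated strings, as used elsewhere in this
    # class ('myclient' is a placeholder client name):
    #
    #   'c:\n'                              # list all clients
    #   'c:myclient\n'                      # live status of one client
    #   'c:myclient:b:5:l:backup_stats\n'   # stats log of backup number 5
    #   'c:myclient:b:5:p:/etc\n'           # browse /etc inside backup number 5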
def get_backup_logs(self, number, client, forward=False, agent=None):
if not client or not number:
return {}
query = self.status('c:{0}:b:{1}\n'.format(client, number))
if not query:
return {}
clients = query['clients']
if not clients:
return {}
if 'backups' not in clients[0]:
return {}
backups = clients[0]['backups']
if not backups:
return {}
        if 'logs' not in backups[0] or 'list' not in backups[0]['logs']:
return {}
logs = backups[0]['logs']['list']
        ret = {}
        if 'backup_stats' in logs:
            ret = self._parse_backup_stats(number, client, forward)
# else:
# cl = None
# if forward:
# cl = client
# f = self.status('c:{0}:b:{1}:f:log.gz\n'.format(client, number))
# ret = self._parse_backup_log(f, number, cl)
ret['encrypted'] = False
if 'files_enc' in ret and ret['files_enc']['total'] > 0:
ret['encrypted'] = True
return ret
def _parse_backup_stats(self, number, client, forward=False, agent=None):
backup = {'windows': 'unknown', 'number': int(number)}
if forward:
backup['name'] = client
translate = {
'time_start': 'start',
'time_end': 'end',
'time_taken': 'duration',
'bytes': 'totsize',
'bytes_received': 'received',
'bytes_estimated': 'estimated_bytes',
'files': 'files',
'files_encrypted': 'files_enc',
'directories': 'dir',
'soft_links': 'softlink',
'hard_links': 'hardlink',
'meta_data': 'meta',
'meta_data_encrypted': 'meta_enc',
'special_files': 'special',
'efs_files': 'efs',
'vss_headers': 'vssheader',
'vss_headers_encrypted': 'vssheader_enc',
'vss_footers': 'vssfooter',
'vss_footers_encrypted': 'vssfooter_enc',
'total': 'total',
'grand_total': 'total',
}
counts = {
'new': 'count',
'changed': 'changed',
'unchanged': 'same',
'deleted': 'deleted',
'total': 'scanned',
'scanned': 'scanned',
}
single = ['time_start', 'time_end', 'time_taken', 'bytes_received', 'bytes_estimated', 'bytes']
query = self.status('c:{0}:b:{1}:l:backup_stats\n'.format(client, number), agent=agent)
if not query:
return {}
clients = query['clients']
if not clients:
return {}
client = clients[0]
backups = client['backups']
if not backups:
return {}
back = backups[0]
if 'backup_stats' not in back['logs']:
return {}
try:
stats = json.loads(''.join(back['logs']['backup_stats']))
        except:
            stats = None
if not stats:
return {}
counters = stats['counters']
for counter in counters:
name = counter['name']
if name in translate:
name = translate[name]
if counter['name'] in single:
backup[name] = counter['count']
else:
backup[name] = {}
for k, v in counts.iteritems():
if v in counter:
backup[name][k] = counter[v]
else:
backup[name][k] = 0
if 'start' in backup and 'end' in backup:
backup['duration'] = backup['end'] - backup['start']
return backup
def _parse_backup_log(self, fh, number, client=None, agent=None):
"""
parse_backup_log parses the log.gz of a given backup and returns a dict
containing different stats used to render the charts in the reporting view
"""
return {}
# def get_clients_report(self, clients, agent=None):
def get_counters(self, name=None, agent=None):
"""
get_counters parses the stats of the live status for a given client and
returns a dict
"""
r = {}
if agent:
if not name or name not in self.running[agent]:
return r
else:
if not name or name not in self.running:
return r
clients = self.status('c:{0}\n'.format(name))
# check the status returned something
if not clients:
return r
clients = clients['clients']
        # check there is at least one client
if not clients:
return r
client = clients[0]
# check the client is currently backing-up
if client['run_status'] != 'running':
return r
backup = None
for b in client['backups']:
if 'flags' in b and 'working' in b['flags']:
backup = b
break
# check we found a working backup
if not backup:
return r
# list of single counters (type CNTR_SINGLE_FIELD in cntr.c)
single = [
'bytes_estimated',
'bytes',
'bytes_received',
'bytes_sent',
'time_start',
'time_end',
'warnings',
'errors'
]
# translation table to be compatible with burp1
translate = {'bytes_estimated': 'estimated_bytes'}
for counter in backup['counters']:
name = counter['name']
if name in translate:
name = translate[name]
if counter['name'] not in single:
r[name] = [counter['count'], counter['changed'], counter['same'], counter['deleted'], counter['scanned']]
else:
r[name] = counter['count']
if 'bytes' not in r:
r['bytes'] = 0
if r.viewkeys() & {'time_start', 'estimated_bytes', 'bytes'}:
diff = time.time() - int(r['time_start'])
byteswant = int(r['estimated_bytes'])
bytesgot = int(r['bytes'])
bytespersec = bytesgot / diff
bytesleft = byteswant - bytesgot
r['speed'] = bytespersec
if (bytespersec > 0):
timeleft = int(bytesleft / bytespersec)
r['timeleft'] = timeleft
else:
r['timeleft'] = -1
return r
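    # Illustrative note (not part of the original code): with, say,
    # estimated_bytes=1000, bytes=250 and a backup started 50 seconds ago,
    # the block above reports speed = 250 / 50 = 5 bytes/s and
    # timeleft = (1000 - 250) / 5 = 150 seconds.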
def is_backup_running(self, name=None, agent=None):
"""
is_backup_running returns True if the given client is currently running a
backup
"""
if not name:
return False
try:
query = self.status('c:{0}\n'.format(name))
except BUIserverException:
return False
if not query:
return False
clients = query['clients']
if not clients:
return False
client = clients[0]
if client['run_status'] in ['running']:
return True
return False
def is_one_backup_running(self, agent=None):
"""
        is_one_backup_running returns a list of client names that are currently
running a backup
"""
r = []
try:
cls = self.get_all_clients()
except BUIserverException:
return r
for c in cls:
if c['state'] in ['running']:
r.append(c['name'])
self.running = r
return r
def _status_human_readable(self, status):
if not status:
return None
if status == 'c crashed':
return 'client crashed'
if status == 's crashed':
return 'server crashed'
return status
def get_all_clients(self, agent=None):
"""
        get_all_clients returns a list of dicts, one per client, with its name,
        state and last backup date
"""
j = []
query = self.status()
if not query or 'clients' not in query:
return j
clients = query['clients']
for cl in clients:
c = {}
c['name'] = cl['name']
c['state'] = self._status_human_readable(cl['run_status'])
infos = cl['backups']
if c['state'] in ['running']:
c['last'] = 'now'
elif not infos:
c['last'] = 'never'
else:
infos = infos[0]
c['last'] = datetime.datetime.fromtimestamp(infos['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
j.append(c)
return j
def get_client(self, name=None, agent=None):
"""
        get_client returns a list of dicts representing the backups (with their
        number and date) of a given client
"""
r = []
if not name:
return r
c = name
query = self.status('c:{0}\n'.format(c))
if not query:
return r
clients = query['clients']
if not clients:
return r
client = clients[0]
backups = client['backups']
for backup in backups:
ba = {}
if 'flags' in backup and 'working' in backup['flags']:
continue
ba['number'] = backup['number']
if 'flags' in backup and 'deletable' in backup['flags']:
ba['deletable'] = True
else:
ba['deletable'] = False
ba['date'] = datetime.datetime.fromtimestamp(backup['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
log = self.get_backup_logs(backup['number'], name)
ba['encrypted'] = log['encrypted']
r.append(ba)
# Here we need to reverse the array so the backups are sorted by date ASC
r.reverse()
return r
def get_tree(self, name=None, backup=None, root=None, agent=None):
"""
        get_tree returns a list of dicts representing the files and directories
        (with their attributes) within a given path
"""
r = []
if not name or not backup:
return r
if not root:
top = ''
else:
try:
top = root.decode('utf-8', 'replace')
except UnicodeDecodeError:
top = root
result = self.status('c:{0}:b:{1}:p:{2}\n'.format(name, backup, top))
if not result:
return r
clients = result['clients']
if not clients:
return r
client = clients[0]
if 'backups' not in client:
return r
backups = client['backups']
if not backups:
return r
backup = backups[0]
for entry in backup['browse']['entries']:
t = {}
if entry['name'] == '.':
continue
else:
t['name'] = entry['name']
t['mode'] = self._human_st_mode(entry['mode'])
if re.match('^(d|l)', t['mode']):
t['type'] = 'd'
else:
t['type'] = 'f'
t['inodes'] = entry['nlink']
t['uid'] = entry['uid']
t['gid'] = entry['gid']
t['parent'] = top
t['size'] = '{0:.1eM}'.format(_hr(entry['size']))
t['date'] = datetime.datetime.fromtimestamp(entry['mtime']).strftime('%Y-%m-%d %H:%M:%S')
r.append(t)
return r
# Same as in Burp1 backend
# def restore_files(self, name=None, backup=None, files=None, strip=None, archive='zip', password=None, agent=None):
# def read_conf_cli(self, agent=None):
# def read_conf_srv(self, agent=None):
# def store_conf_cli(self, data, agent=None):
# def store_conf_srv(self, data, agent=None):
# def get_parser_attr(self, attr=None, agent=None):
|
|
from itertools import groupby
from functools import cmp_to_key
from operator import itemgetter
import time
import hashlib
import uuid
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django_extras.db.models import PercentField
from django_languages import LanguageField
from six.moves import map
from imagekit.models import ProcessedImageField
from uuidfield import UUIDField
from . import exceptions, managers, utils
from .processors import RotateAndScale
ONE_DIGIT_CODE_SMS = "P"
ONE_DIGIT_CODE_EMAIL = "E"
SMS_OR_EMAIL = [
(ONE_DIGIT_CODE_SMS, _("SMS")),
(ONE_DIGIT_CODE_EMAIL, _("Email")),
]
def manual_uuid_generation():
return uuid.uuid4().get_hex()
class RegistrationNumber(models.Model):
""" A registration number, linked to a QR code, that is printed on a card.
It is not necessarily assigned to a person.
"""
id = UUIDField(auto=True, primary_key=True, verbose_name=_("ID"),
default=manual_uuid_generation)
number = models.PositiveSmallIntegerField(verbose_name=_("number"))
active = models.BooleanField(default=False, verbose_name=_("active"))
short_id_missing = models.BooleanField(default=False, verbose_name=_("short ID missing"))
class Meta:
verbose_name = _("Registration Number")
verbose_name_plural = _("Registration Numbers")
ordering = ("number", )
def short_id(self):
"""The first few digits of the ID.
This is shortened so as to make QR codes easier to scan and IDs easier
to manually input."""
if self.short_id_missing:
return "0" * settings.ID_LENGTH
return str(self.id)[0:settings.ID_LENGTH]
short_id.short_description = _("ID")
def get_absolute_url(self):
return reverse("scan_card", args=[self.number, self.short_id()])
@property
def qr_code_url(self):
relative_url = self.get_absolute_url()
try:
site = get_current_site(None)
except AttributeError:
raise exceptions.SitesNotInstalledError()
else:
absolute_url = "http://{}/{}".format(site.domain, relative_url)
return utils.qr_code_from_url(absolute_url, size=130)
def __unicode__(self):
return unicode(self.number)
class RegistrationCardBatch(models.Model):
""" A batch of registration numbers, linked to the generation of a PDF.
This can be auto-generated alongside registration numbers, or linked to
existing registration numbers.
"""
registration_numbers = models.ManyToManyField(
RegistrationNumber,
verbose_name=_("Registration Numbers"))
data_file = models.FileField(blank=True, null=True, upload_to="card_data",
verbose_name=_("Data File"))
def __unicode__(self):
return unicode(self.registration_number_format())
def registration_number_format(self):
numbers = self.registration_numbers.order_by("number")
return utils.format_range(numbers)
registration_number_format.short_description = _("Number Range")
class Meta:
ordering = ("id", )
verbose_name = _("Registration Card Batch")
verbose_name_plural = _("Registration Card Batches")
class Language(models.Model):
iso_code = LanguageField(verbose_name=_("Base Language"))
description = models.CharField(max_length=255,
verbose_name=_("Description"))
example_text = models.TextField(max_length=255,
verbose_name=_("Example Text"))
def __unicode__(self):
return u"{}: {}".format(self.iso_code, self.description)
class Meta:
ordering = ("iso_code", )
verbose_name = _("Language")
verbose_name_plural = _("Languages")
USER_IMAGE_PREFIX = "user_images/%m%d%H%M%S/"
class Person(models.Model):
name = models.CharField(max_length=255, verbose_name=_("Name"))
preferred_lang = models.ForeignKey(Language,
verbose_name=_("Preferred language"))
needs = models.TextField(blank=True, null=True, verbose_name=_("Needs"))
email = models.EmailField(blank=True, null=True, verbose_name=_("Email"))
# TODO incredibly basic phone number validation
phone = models.CharField(max_length=20, blank=True, null=True,
verbose_name=_("Phone Number"))
preferred_contact = models.CharField(
max_length=1, choices=SMS_OR_EMAIL, default=ONE_DIGIT_CODE_SMS,
verbose_name=_("Preferred Contact"))
story = models.TextField(blank=True, null=True, verbose_name=_("Story"))
number_of_dependents = models.PositiveSmallIntegerField(
default=0, verbose_name=_("Number of Dependents"))
active = models.BooleanField(default=True, verbose_name=_("Active"))
attendance_percent = PercentField(
blank=True, null=True, verbose_name=_("Distribution attendance"))
# Populated by mobile (although fallback is available)
registration_card = models.OneToOneField(
RegistrationNumber,
related_name="person",
limit_choices_to={"person": None, "active": False},
verbose_name=_("Registration Card"),
)
photo = ProcessedImageField(
blank=True, null=True, upload_to=USER_IMAGE_PREFIX,
verbose_name=_("Photo"),
processors=[RotateAndScale(max_width=600, max_height=800)])
def get_absolute_url(self):
return reverse("reg:stage_2_complete", args=[self.id])
def __unicode__(self):
return u"{}: {}".format(self.registration_card.number, self.name)
class Meta:
verbose_name = _("Person")
verbose_name_plural = _("People")
ordering = ("registration_card__number", )
class DistributionTime(models.Model):
"""The time of the distribution."""
start_time = models.TimeField()
end_time = models.TimeField()
class Meta:
ordering = ("start_time", )
def __unicode__(self):
return u"{} - {}".format(self.start_time, self.end_time)
class MissingContext(Exception):
pass
class Template(models.Model):
"""An Email or SMS template to be sent to attendees."""
type = models.CharField(max_length=1, choices=SMS_OR_EMAIL,
verbose_name=_("Template Type"))
language = models.ForeignKey(Language, verbose_name=_("Language"))
text = models.TextField(verbose_name=_("Template Text"))
def __unicode__(self):
return u"{}: {}".format(self.get_type_display(), self.text)
class Meta:
verbose_name = _("Template")
verbose_name_plural = _("Templates")
def get_invitees(self, distribution=None):
if distribution:
cards = distribution.invitees.all()
else:
cards = RegistrationNumber.objects.filter(active=True)
person_ids = \
cards.exclude(person=None).values_list("person", flat=True)
preferred_contact = ("phone" if self.type == ONE_DIGIT_CODE_SMS
else "email")
people = Person.objects.filter(
id__in=person_ids, preferred_lang=self.language).exclude(**{
preferred_contact: "",
})
return people.values_list(preferred_contact, flat=True)
def get_rendered_text(self, context):
"""Render the text using pyratemp."""
missing = set()
for required in utils.get_variable_names_from_template(self):
if required not in context:
missing.add(required)
if missing:
raise MissingContext(missing)
tmpl = utils.PyratempTemplate(self.text)
context = context.copy()
context["locale"] = self.language.iso_code
return tmpl.render(context)
def _sort_by_previous_finish(finish):
def _sort(x, y):
if x[0] > finish and y[0] <= finish:
# x < y
return -1
elif y[0] > finish and x[0] <= finish:
# x > y
return 1
# x == y
return 0
return _sort
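# Illustrative note (not part of the original code): with finish=10, the
# comparator above puts ranges starting after the previous distribution's last
# number first, so [(3, 5), (12, 14)] sorts to [(12, 14), (3, 5)].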
class Distribution(models.Model):
"""A distribution day.
This model holds data about distribution date and expected/actual
attendees.
"""
date = models.DateField(verbose_name=_("Distribution Date"), unique=True)
supplies_quantity = models.SmallIntegerField(
verbose_name=_("Supplies Quantity"))
supplies_description = models.TextField(
blank=True, null=True, verbose_name=_("Supplies Description"))
times = models.ManyToManyField(
DistributionTime, verbose_name=_("Distribution Times"), blank=True)
invitees = models.ManyToManyField(RegistrationNumber,
related_name="distributions_invited_to")
attendees = models.ManyToManyField(
RegistrationNumber, related_name="distributions_attended", blank=True)
templates = models.ManyToManyField(Template, verbose_name=_("Templates"))
finish_number = models.PositiveSmallIntegerField(blank=True, null=True)
objects = managers.DistributionManager()
@property
def hash(self):
timestamp = time.mktime(self.date.timetuple())
secret = "".join(c.encode("hex") for c in settings.SECRET_KEY)
secret_int = int(secret[:8], 16)
return hashlib.sha1(str(timestamp + secret_int)).hexdigest()[:4]
def check_hash(self, password):
return password == self.hash
def __unicode__(self):
return unicode(formats.date_format(self.date))
@property
def numbers(self):
if not hasattr(self, "_numbers"):
numbers = self.invitees.values_list("number", flat=True)
# Taken from http://stackoverflow.com/questions/2154249/
max_padding = 5
groups = []
for key, group in groupby(enumerate(numbers),
lambda (index, item): index - item):
group = list(map(itemgetter(1), group))
if len(groups) and groups[-1][1] + max_padding >= group[0]:
# There is a small gap between the groups. If the cards in
# this gap are all deactivated, pretend it's not there.
extras = range(groups[-1][1] + 1, group[0])
extras = RegistrationNumber.objects.filter(
number__in=extras)
inactive_extras = extras.filter(active=False)
if inactive_extras.count() == extras.count():
group_begin, unused = groups.pop()
if len(group) == 1:
group.insert(0, group_begin)
else:
group[0] = group_begin
groups.append((group[0], group[-1]))
try:
previous = Distribution.objects.get(id=self.id - 1)
except Distribution.DoesNotExist:
pass
else:
groups.sort(key=cmp_to_key(
_sort_by_previous_finish(previous.finish_number)))
self._numbers = groups
return self._numbers
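    # Illustrative note (not part of the original code): the 'index - item' key
    # above groups consecutive invitee numbers, so [1, 2, 3, 7, 8] becomes the
    # ranges [(1, 3), (7, 8)]; adjacent ranges separated only by deactivated
    # cards (within max_padding) are then merged back into a single range.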
def show_numbers(self):
return "; ".join((u"#{}".format(begin) if begin == end else
u"#{} \u2013 #{}".format(begin, end)
for begin, end in self.numbers))
def get_absolute_url(self):
return reverse("dist:info", args=[self.id])
def get_template_render_context(self):
"""Return the 'free' context used to render email/SMS templates."""
return {
"distribution": self,
"distribution_numbers": self.numbers,
"distribution_times": self.times.all(),
}
class Meta:
verbose_name = _("Distribution")
verbose_name_plural = _("Distributions")
ordering = ("date", )
@receiver(models.signals.post_save, sender=Distribution)
def set_invitees_and_finish_number(instance, created, **kwargs):
if created:
dists = Distribution.objects.exclude(id=instance.id).order_by("id")
last_created_dist = dists.reverse().first()
if last_created_dist:
starting_number = last_created_dist.finish_number + 1
else:
starting_number = 1
active_cards = RegistrationNumber.objects.filter(active=True)
cards = active_cards.filter(number__gte=starting_number)
cards = list(cards[:instance.supplies_quantity])
difference = instance.supplies_quantity - len(cards)
if difference:
cards += list(active_cards[:difference])
instance.invitees = cards
instance.finish_number = cards[-1].number
instance.save()
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import constants
import getpass
import json
import os
import shutil
import sys
import tempfile
import time
if __name__ == '__main__':
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.buildbot import repository
from chromite.buildbot import manifest_version
from chromite.lib import cros_build_lib
from chromite.lib import git
class ChromiteUpgradeNeeded(Exception):
"""Exception thrown when it's detected that we need to upgrade chromite."""
def __init__(self, version=None):
Exception.__init__(self)
self.version = version
self.args = (version,)
def __str__(self):
version_str = ''
if self.version:
version_str = " Need format version %r support." % (self.version,)
return (
"Your version of cbuildbot is too old; please resync it, "
"and then retry your submission.%s" % (version_str,))
class ValidationError(Exception):
"""Thrown when tryjob validation fails."""
class RemoteTryJob(object):
"""Remote Tryjob that is submitted through a Git repo."""
EXT_SSH_URL = os.path.join(constants.GERRIT_SSH_URL,
'chromiumos/tryjobs')
INT_SSH_URL = os.path.join(constants.GERRIT_INT_SSH_URL,
'chromeos/tryjobs')
# In version 3, remote patches have an extra field.
# In version 4, cherry-picking is the norm, thus multiple patches are
# generated.
TRYJOB_FORMAT_VERSION = 4
TRYJOB_FORMAT_FILE = '.tryjob_minimal_format_version'
NAME_LENGTH_LIMIT = 256
PROPERTY_LENGTH_LIMIT = 1024
def __init__(self, options, bots, local_patches):
"""Construct the object.
Args:
options: The parsed options passed into cbuildbot.
bots: A list of configs to run tryjobs for.
local_patches: A list of LocalPatch objects.
"""
self.options = options
self.user = getpass.getuser()
cwd = os.path.dirname(os.path.realpath(__file__))
self.user_email = git.GetProjectUserEmail(cwd)
cros_build_lib.Info('Using email:%s', self.user_email)
# Name of the job that appears on the waterfall.
patch_list = options.gerrit_patches + options.local_patches
self.name = options.remote_description
if self.name is None:
self.name = ''
if options.branch != 'master':
self.name = '[%s] ' % options.branch
self.name += ','.join(patch_list)
self.bots = bots[:]
self.slaves_request = options.slaves
self.description = ('name: %s\n patches: %s\nbots: %s' %
(self.name, patch_list, self.bots))
self.extra_args = options.pass_through_args
if '--buildbot' not in self.extra_args:
self.extra_args.append('--remote-trybot')
self.extra_args.append('--remote-version=%s'
% (self.TRYJOB_FORMAT_VERSION,))
self.tryjob_repo = None
self.local_patches = local_patches
self.ssh_url = self.EXT_SSH_URL
self.manifest = None
if repository.IsARepoRoot(options.sourceroot):
self.manifest = git.ManifestCheckout.Cached(options.sourceroot)
if repository.IsInternalRepoCheckout(options.sourceroot):
self.ssh_url = self.INT_SSH_URL
@property
def values(self):
return {
'bot' : self.bots,
'email' : [self.user_email],
'extra_args' : self.extra_args,
'name' : self.name,
'slaves_request' : self.slaves_request,
'user' : self.user,
'version' : self.TRYJOB_FORMAT_VERSION,
}
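  # Illustrative note (not part of the original code): this dict is what
  # _Submit() json.dump()s into the tryjob repo, e.g. (all values hypothetical):
  #
  #   {"bot": ["x86-generic-paladin"], "email": ["someone@example.com"],
  #    "extra_args": ["--remote-trybot", "--remote-version=4"],
  #    "name": "my tryjob", "slaves_request": [], "user": "someone",
  #    "version": 4}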
def _VerifyForBuildbot(self):
"""Early validation, to ensure the job can be processed by buildbot."""
val = self.values
# Validate the name of the buildset that buildbot will try to queue.
full_name = '%s:%s' % (val['user'], val['name'])
if len(full_name) > self.NAME_LENGTH_LIMIT:
raise ValidationError(
'The tryjob description is longer than %s characters. '
'Use --remote-description to specify a custom description.'
% self.NAME_LENGTH_LIMIT)
# Buildbot will set extra_args as a buildset 'property'. It will store
# the property in its database in JSON form. The limit of the database
# field is 1023 characters.
if len(json.dumps(val['extra_args'])) > self.PROPERTY_LENGTH_LIMIT:
raise ValidationError(
'The number of extra arguments passed to cbuildbot has exceeded the '
'limit. If you have a lot of local patches, upload them and use the '
'-g flag instead.')
def _Submit(self, testjob, dryrun):
"""Internal submission function. See Submit() for arg description."""
# TODO(rcui): convert to shallow clone when that's available.
current_time = str(int(time.time()))
ref_base = os.path.join('refs/tryjobs', self.user, current_time)
for patch in self.local_patches:
# Isolate the name; if it's a tag or a remote, let through.
# Else if it's a branch, get the full branch name minus refs/heads.
local_branch = git.StripRefsHeads(patch.ref, False)
ref_final = os.path.join(ref_base, local_branch, patch.sha1)
self.manifest.AssertProjectIsPushable(patch.project)
data = self.manifest.projects[patch.project]
print 'Uploading patch %s' % patch
patch.Upload(data['push_url'], ref_final, dryrun=dryrun)
# TODO(rcui): Pass in the remote instead of tag. http://crosbug.com/33937.
tag = constants.EXTERNAL_PATCH_TAG
if data['remote'] == constants.INTERNAL_REMOTE:
tag = constants.INTERNAL_PATCH_TAG
self.extra_args.append('--remote-patches=%s:%s:%s:%s:%s'
% (patch.project, local_branch, ref_final,
patch.tracking_branch, tag))
self._VerifyForBuildbot()
repository.CloneGitRepo(self.tryjob_repo, self.ssh_url)
version_path = os.path.join(self.tryjob_repo,
self.TRYJOB_FORMAT_FILE)
with open(version_path, 'r') as f:
try:
val = int(f.read().strip())
except ValueError:
raise ChromiteUpgradeNeeded()
if val > self.TRYJOB_FORMAT_VERSION:
raise ChromiteUpgradeNeeded(val)
push_branch = manifest_version.PUSH_BRANCH
remote_branch = ('origin', 'refs/remotes/origin/test') if testjob else None
git.CreatePushBranch(push_branch, self.tryjob_repo, sync=False,
remote_push_branch=remote_branch)
file_name = '%s.%s' % (self.user,
current_time)
user_dir = os.path.join(self.tryjob_repo, self.user)
if not os.path.isdir(user_dir):
os.mkdir(user_dir)
fullpath = os.path.join(user_dir, file_name)
with open(fullpath, 'w+') as job_desc_file:
json.dump(self.values, job_desc_file)
cros_build_lib.RunCommand(['git', 'add', fullpath], cwd=self.tryjob_repo)
extra_env = {
# The committer field makes sure the creds match what the remote
# gerrit instance expects while the author field allows lookup
# on the console to work. http://crosbug.com/27939
'GIT_COMMITTER_EMAIL' : self.user_email,
'GIT_AUTHOR_EMAIL' : self.user_email,
}
cros_build_lib.RunCommand(['git', 'commit', '-m', self.description],
cwd=self.tryjob_repo, extra_env=extra_env)
try:
git.PushWithRetry(
push_branch, self.tryjob_repo, retries=3, dryrun=dryrun)
except cros_build_lib.RunCommandError:
cros_build_lib.Error(
'Failed to submit tryjob. This could be due to too many '
'submission requests by users. Please try again.')
raise
def Submit(self, workdir=None, testjob=False, dryrun=False):
"""Submit the tryjob through Git.
Args:
workdir: The directory to clone tryjob repo into. If you pass this
in, you are responsible for deleting the directory. Used for
testing.
testjob: Submit job to the test branch of the tryjob repo. The tryjob
will be ignored by production master.
dryrun: Setting to true will run everything except the final submit step.
"""
self.tryjob_repo = workdir
if self.tryjob_repo is None:
self.tryjob_repo = tempfile.mkdtemp()
try:
self._Submit(testjob, dryrun)
finally:
if workdir is None:
shutil.rmtree(self.tryjob_repo)
def GetTrybotConsoleLink(self):
"""Get link to the console for the user."""
return ('%s/console?name=%s' % (constants.TRYBOT_DASHBOARD,
self.user_email))
def GetTrybotWaterfallLink(self):
"""Get link to the waterfall for the user."""
# Note that this will only show the jobs submitted by the user in the last
# 24 hours.
return ('%s/waterfall?committer=%s' % (constants.TRYBOT_DASHBOARD,
self.user_email))
|
|
"""ClusterMsgs module: Message types for Cluster rendering"""
# This module is intended to supply routines and dataformats common to
# both ClusterClient and ClusterServer.
from pandac.PandaModules import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
import time
# These are the types of messages that are currently supported.
CLUSTER_NONE = 0
CLUSTER_CAM_OFFSET = 1
CLUSTER_CAM_FRUSTUM = 2
CLUSTER_CAM_MOVEMENT = 3
CLUSTER_SWAP_READY = 4
CLUSTER_SWAP_NOW = 5
CLUSTER_COMMAND_STRING = 6
CLUSTER_SELECTED_MOVEMENT = 7
CLUSTER_TIME_DATA = 8
CLUSTER_NAMED_OBJECT_MOVEMENT = 9
CLUSTER_NAMED_MOVEMENT_DONE = 10
CLUSTER_EXIT = 100
#Port number for cluster rendering
# DAEMON PORT IS PORT USED FOR STARTUP MESSAGE EXCHANGE
# CAN BE OVERRIDDEN WITH cluster-daemon-client-port for client
# and cluster-daemon-server-port for server
CLUSTER_DAEMON_PORT = 8001
# THIS IS THE TCP PORT USED FOR EXCHANGE OF DATA ONCE STARTUP IS COMPLETE
CLUSTER_SERVER_PORT = 1970
# Precede command string with ! to tell server to execute command string
# NOTE: We had to stick with the 'import __builtin__' scheme because, at
# startup, __builtins__ is a module rather than a dictionary (as it is inside a module)
# Note, this startup string obviates the need to set any cluster related
# config variables in the client Configrc files
SERVER_STARTUP_STRING = (
'!bash ppython -c ' +
'"import __builtin__; ' +
'__builtin__.clusterMode = \'server\';' +
'__builtin__.clusterServerPort = %s;' +
'__builtin__.clusterSyncFlag = %d;' +
'__builtin__.clusterDaemonClient = \'%s\';' +
'__builtin__.clusterDaemonPort = %d;'
'from direct.directbase.DirectStart import *; run()"')
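# Illustrative note (not part of the original module): the startup string is
# meant to be filled in with the server port, sync flag, daemon client host and
# daemon port before being sent, e.g. (values hypothetical):
#
#   cmd = SERVER_STARTUP_STRING % (CLUSTER_SERVER_PORT, 1,
#                                  'localhost', CLUSTER_DAEMON_PORT)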
class ClusterMsgHandler:
"""ClusterMsgHandler: wrapper for PC clusters/multi-piping networking"""
def __init__(self, packetStart, notify):
# packetStart can be used to distinguish which ClusterMsgHandler
# sends a given packet.
self.packetNumber = packetStart
self.notify = notify
def nonBlockingRead(self, qcr):
"""
Return a datagram iterator and type if data is available on the
queued connection reader
"""
if qcr.dataAvailable():
datagram = NetDatagram()
if qcr.getData(datagram):
(dgi, type) = self.readHeader(datagram)
else:
dgi = None
type = CLUSTER_NONE
self.notify.warning("getData returned false")
else:
datagram = None
dgi = None
type = CLUSTER_NONE
# Note, return datagram to keep a handle on the data
return (datagram, dgi, type)
def blockingRead(self, qcr):
"""
Block until data is available on the queued connection reader.
Returns a datagram iterator and type
"""
while not qcr.dataAvailable():
# The following may not be necessary.
# I just wanted some
# time given to the operating system while
# busy waiting.
time.sleep(0.002)
# Data is available, create a datagram iterator
datagram = NetDatagram()
if qcr.getData(datagram):
(dgi, type) = self.readHeader(datagram)
else:
(dgi, type) = (None, CLUSTER_NONE)
self.notify.warning("getData returned false")
# Note, return datagram to keep a handle on the data
return (datagram, dgi, type)
def readHeader(self, datagram):
dgi = PyDatagramIterator(datagram)
number = dgi.getUint32()
type = dgi.getUint8()
self.notify.debug("Packet %d type %d received" % (number, type))
return (dgi, type)
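    # Illustrative note (not part of the original module): every datagram built
    # below shares the header layout that readHeader() unpacks:
    #
    #   uint32  packet number (incremented per datagram by this handler)
    #   uint8   message type (one of the CLUSTER_* constants)
    #   ...     type-specific payload, e.g. six float32 values (x, y, z, h, p, r)
    #           for CLUSTER_CAM_OFFSET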
def makeCamOffsetDatagram(self, xyz, hpr):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_CAM_OFFSET)
datagram.addFloat32(xyz[0])
datagram.addFloat32(xyz[1])
datagram.addFloat32(xyz[2])
datagram.addFloat32(hpr[0])
datagram.addFloat32(hpr[1])
datagram.addFloat32(hpr[2])
return datagram
def parseCamOffsetDatagram(self, dgi):
x=dgi.getFloat32()
y=dgi.getFloat32()
z=dgi.getFloat32()
h=dgi.getFloat32()
p=dgi.getFloat32()
r=dgi.getFloat32()
self.notify.debug('new offset=%f %f %f %f %f %f' % (x, y, z, h, p, r))
return (x, y, z, h, p, r)
def makeCamFrustumDatagram(self, focalLength, filmSize, filmOffset):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_CAM_FRUSTUM)
datagram.addFloat32(focalLength)
datagram.addFloat32(filmSize[0])
datagram.addFloat32(filmSize[1])
datagram.addFloat32(filmOffset[0])
datagram.addFloat32(filmOffset[1])
return datagram
def parseCamFrustumDatagram(self, dgi):
focalLength = dgi.getFloat32()
filmSize = (dgi.getFloat32(), dgi.getFloat32())
filmOffset = (dgi.getFloat32(), dgi.getFloat32())
self.notify.debug('fl, fs, fo=%f, (%f, %f), (%f, %f)' %
(focalLength, filmSize[0], filmSize[1],
filmOffset[0], filmOffset[1]))
return (focalLength, filmSize, filmOffset)
def makeCamMovementDatagram(self, xyz, hpr):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_CAM_MOVEMENT)
datagram.addFloat32(xyz[0])
datagram.addFloat32(xyz[1])
datagram.addFloat32(xyz[2])
datagram.addFloat32(hpr[0])
datagram.addFloat32(hpr[1])
datagram.addFloat32(hpr[2])
return datagram
def makeNamedMovementDone(self):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_NAMED_MOVEMENT_DONE)
return datagram
def makeNamedObjectMovementDatagram(self, xyz, hpr, scale, color, hidden, name):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_NAMED_OBJECT_MOVEMENT)
datagram.addString(name)
datagram.addFloat32(xyz[0])
datagram.addFloat32(xyz[1])
datagram.addFloat32(xyz[2])
datagram.addFloat32(hpr[0])
datagram.addFloat32(hpr[1])
datagram.addFloat32(hpr[2])
datagram.addFloat32(scale[0])
datagram.addFloat32(scale[1])
datagram.addFloat32(scale[2])
datagram.addFloat32(color[0])
datagram.addFloat32(color[1])
datagram.addFloat32(color[2])
datagram.addFloat32(color[3])
datagram.addBool(hidden)
return datagram
def parseCamMovementDatagram(self, dgi):
        x = dgi.getFloat32()
        y = dgi.getFloat32()
        z = dgi.getFloat32()
        h = dgi.getFloat32()
        p = dgi.getFloat32()
        r = dgi.getFloat32()
        self.notify.debug('new position=%f %f %f %f %f %f' %
                          (x, y, z, h, p, r))
return (x, y, z, h, p, r)
def parseNamedMovementDatagram(self, dgi):
name = dgi.getString()
        x = dgi.getFloat32()
        y = dgi.getFloat32()
        z = dgi.getFloat32()
        h = dgi.getFloat32()
        p = dgi.getFloat32()
        r = dgi.getFloat32()
        sx = dgi.getFloat32()
        sy = dgi.getFloat32()
        sz = dgi.getFloat32()
        red = dgi.getFloat32()
        g = dgi.getFloat32()
        b = dgi.getFloat32()
        a = dgi.getFloat32()
        hidden = dgi.getBool()
        return (name, x, y, z, h, p, r, sx, sy, sz, red, g, b, a, hidden)
def makeSelectedMovementDatagram(self, xyz, hpr, scale):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_SELECTED_MOVEMENT)
datagram.addFloat32(xyz[0])
datagram.addFloat32(xyz[1])
datagram.addFloat32(xyz[2])
datagram.addFloat32(hpr[0])
datagram.addFloat32(hpr[1])
datagram.addFloat32(hpr[2])
datagram.addFloat32(scale[0])
datagram.addFloat32(scale[1])
datagram.addFloat32(scale[2])
#datagram.addBool(hidden)
return datagram
def parseSelectedMovementDatagram(self, dgi):
        x = dgi.getFloat32()
        y = dgi.getFloat32()
        z = dgi.getFloat32()
        h = dgi.getFloat32()
        p = dgi.getFloat32()
        r = dgi.getFloat32()
        sx = dgi.getFloat32()
        sy = dgi.getFloat32()
        sz = dgi.getFloat32()
        self.notify.debug('new position=%f %f %f %f %f %f %f %f %f' %
                          (x, y, z, h, p, r, sx, sy, sz))
return (x, y, z, h, p, r, sx, sy, sz)
def makeCommandStringDatagram(self, commandString):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_COMMAND_STRING)
datagram.addString(commandString)
return datagram
def parseCommandStringDatagram(self, dgi):
command = dgi.getString()
return command
def makeSwapNowDatagram(self):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_SWAP_NOW)
return datagram
def makeSwapReadyDatagram(self):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_SWAP_READY)
return datagram
def makeExitDatagram(self):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_EXIT)
return datagram
def makeTimeDataDatagram(self, frameCount, frameTime, dt):
datagram = PyDatagram()
datagram.addUint32(self.packetNumber)
self.packetNumber = self.packetNumber + 1
datagram.addUint8(CLUSTER_TIME_DATA)
datagram.addUint32(frameCount)
datagram.addFloat32(frameTime)
datagram.addFloat32(dt)
return datagram
def parseTimeDataDatagram(self, dgi):
        frameCount = dgi.getUint32()
        frameTime = dgi.getFloat32()
        dt = dgi.getFloat32()
self.notify.debug('time data=%f %f' % (frameTime, dt))
return (frameCount, frameTime, dt)
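# Illustrative usage sketch for ClusterMsgHandler, assuming a Panda3D notify
# category is available and that the CLUSTER_* constants from this module are
# in scope. The wire format produced by the make* helpers is
# [uint32 packet number][uint8 message type][payload].
#
#     from direct.directnotify import DirectNotifyGlobal
#
#     notify = DirectNotifyGlobal.directNotify.newCategory("cluster-demo")
#     handler = ClusterMsgHandler(packetStart=0, notify=notify)
#
#     # Sender: pack a camera movement (xyz followed by hpr, six float32s).
#     dg = handler.makeCamMovementDatagram((1.0, 2.0, 3.0), (0.0, 90.0, 0.0))
#
#     # Receiver: readHeader() consumes the header and returns the iterator
#     # positioned at the payload, plus the message type.
#     dgi, msg_type = handler.readHeader(dg)
#     if msg_type == CLUSTER_CAM_MOVEMENT:
#         x, y, z, h, p, r = handler.parseCamMovementDatagram(dgi)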
|
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MyPy test runner script."""
from __future__ import annotations
import argparse
import os
import site
import subprocess
import sys
from scripts import common
from scripts import install_third_party_libs
# Directories whose files will never be type-annotated.
EXCLUDED_DIRECTORIES = [
'proto_files/',
'scripts/linters/test_files/',
'third_party/',
'venv/'
]
# List of files and directories that should be type-annotated but are not yet.
NOT_FULLY_COVERED_FILES = [
'core/controllers/',
'core/domain/auth_services.py',
'core/domain/auth_services_test.py',
'core/domain/blog_services.py',
'core/domain/blog_services_test.py',
'core/domain/change_domain.py',
'core/domain/classifier_services.py',
'core/domain/classifier_services_test.py',
'core/domain/classroom_services.py',
'core/domain/classroom_services_test.py',
'core/domain/collection_domain.py',
'core/domain/collection_domain_test.py',
'core/domain/collection_services.py',
'core/domain/collection_services_test.py',
'core/domain/cron_services.py',
'core/domain/customization_args_util.py',
'core/domain/customization_args_util_test.py',
'core/domain/draft_upgrade_services.py',
'core/domain/draft_upgrade_services_test.py',
'core/domain/email_manager.py',
'core/domain/email_manager_test.py',
'core/domain/email_services.py',
'core/domain/email_services_test.py',
'core/domain/email_subscription_services.py',
'core/domain/email_subscription_services_test.py',
'core/domain/event_services.py',
'core/domain/event_services_test.py',
'core/domain/exp_domain.py',
'core/domain/exp_domain_test.py',
'core/domain/exp_fetchers.py',
'core/domain/exp_fetchers_test.py',
'core/domain/exp_services.py',
'core/domain/exp_services_test.py',
'core/domain/expression_parser.py',
'core/domain/expression_parser_test.py',
'core/domain/feedback_services.py',
'core/domain/feedback_services_test.py',
'core/domain/fs_domain.py',
'core/domain/fs_domain_test.py',
'core/domain/fs_services.py',
'core/domain/fs_services_test.py',
'core/domain/html_cleaner.py',
'core/domain/html_cleaner_test.py',
'core/domain/html_validation_service.py',
'core/domain/html_validation_service_test.py',
'core/domain/image_validation_services.py',
'core/domain/image_validation_services_test.py',
'core/domain/improvements_services.py',
'core/domain/improvements_services_test.py',
'core/domain/interaction_registry.py',
'core/domain/interaction_registry_test.py',
'core/domain/learner_goals_services.py',
'core/domain/learner_goals_services_test.py',
'core/domain/learner_playlist_services.py',
'core/domain/learner_playlist_services_test.py',
'core/domain/learner_progress_services.py',
'core/domain/learner_progress_services_test.py',
'core/domain/moderator_services.py',
'core/domain/moderator_services_test.py',
'core/domain/object_registry.py',
'core/domain/object_registry_test.py',
'core/domain/opportunity_services.py',
'core/domain/opportunity_services_test.py',
'core/domain/param_domain.py',
'core/domain/param_domain_test.py',
'core/domain/platform_feature_services.py',
'core/domain/platform_feature_services_test.py',
'core/domain/platform_parameter_domain.py',
'core/domain/platform_parameter_domain_test.py',
'core/domain/platform_parameter_list.py',
'core/domain/platform_parameter_list_test.py',
'core/domain/platform_parameter_registry.py',
'core/domain/platform_parameter_registry_test.py',
'core/domain/playthrough_issue_registry.py',
'core/domain/playthrough_issue_registry_test.py',
'core/domain/question_domain.py',
'core/domain/question_domain_test.py',
'core/domain/question_fetchers.py',
'core/domain/question_fetchers_test.py',
'core/domain/question_services.py',
'core/domain/question_services_test.py',
'core/domain/rating_services.py',
'core/domain/rating_services_test.py',
'core/domain/recommendations_services.py',
'core/domain/recommendations_services_test.py',
'core/domain/rights_manager.py',
'core/domain/rights_manager_test.py',
'core/domain/role_services.py',
'core/domain/role_services_test.py',
'core/domain/rte_component_registry.py',
'core/domain/rte_component_registry_test.py',
'core/domain/rules_registry.py',
'core/domain/rules_registry_test.py',
'core/domain/search_services.py',
'core/domain/search_services_test.py',
'core/domain/skill_domain.py',
'core/domain/skill_domain_test.py',
'core/domain/skill_fetchers.py',
'core/domain/skill_fetchers_test.py',
'core/domain/skill_services.py',
'core/domain/skill_services_test.py',
'core/domain/state_domain.py',
'core/domain/state_domain_test.py',
'core/domain/stats_domain.py',
'core/domain/stats_domain_test.py',
'core/domain/stats_services.py',
'core/domain/stats_services_test.py',
'core/domain/story_domain.py',
'core/domain/story_domain_test.py',
'core/domain/story_fetchers.py',
'core/domain/story_fetchers_test.py',
'core/domain/story_services.py',
'core/domain/story_services_test.py',
'core/domain/subscription_services.py',
'core/domain/subscription_services_test.py',
'core/domain/subtopic_page_domain.py',
'core/domain/subtopic_page_domain_test.py',
'core/domain/subtopic_page_services.py',
'core/domain/subtopic_page_services_test.py',
'core/domain/suggestion_registry.py',
'core/domain/suggestion_registry_test.py',
'core/domain/suggestion_services.py',
'core/domain/suggestion_services_test.py',
'core/domain/summary_services.py',
'core/domain/summary_services_test.py',
'core/domain/takeout_service.py',
'core/domain/takeout_service_test.py',
'core/domain/taskqueue_services.py',
'core/domain/taskqueue_services_test.py',
'core/domain/topic_fetchers.py',
'core/domain/topic_fetchers_test.py',
'core/domain/topic_services.py',
'core/domain/topic_services_test.py',
'core/domain/translatable_object_registry.py',
'core/domain/translatable_object_registry_test.py',
'core/domain/translation_fetchers.py',
'core/domain/translation_fetchers_test.py',
'core/domain/translation_services.py',
'core/domain/translation_services_test.py',
'core/domain/user_domain.py',
'core/domain/user_domain_test.py',
'core/domain/user_query_services.py',
'core/domain/user_query_services_test.py',
'core/domain/user_services.py',
'core/domain/user_services_test.py',
'core/domain/visualization_registry.py',
'core/domain/visualization_registry_test.py',
'core/domain/voiceover_services.py',
'core/domain/voiceover_services_test.py',
'core/domain/wipeout_service.py',
'core/domain/wipeout_service_test.py',
'core/platform/storage/cloud_storage_emulator.py',
'core/platform/storage/cloud_storage_emulator_test.py',
'core/platform_feature_list.py',
'core/platform_feature_list_test.py',
'core/storage/beam_job/gae_models.py',
'core/storage/beam_job/gae_models_test.py',
'core/storage/blog/gae_models.py',
'core/storage/blog/gae_models_test.py',
'core/storage/storage_models_test.py',
'core/tests/build_sources/extensions/CodeRepl.py',
'core/tests/build_sources/extensions/DragAndDropSortInput.py',
'core/tests/build_sources/extensions/base.py',
'core/tests/build_sources/extensions/base_test.py',
'core/tests/build_sources/extensions/models_test.py',
'core/tests/data/failing_tests.py',
'core/tests/data/image_constants.py',
'core/tests/data/unicode_and_str_handler.py',
'core/tests/gae_suite.py',
'core/tests/gae_suite_test.py',
'core/tests/load_tests/feedback_thread_summaries_test.py',
'core/tests/test_utils.py',
'core/tests/test_utils_test.py',
'core/jobs',
'core/python_utils.py',
'core/python_utils_test.py',
'extensions/',
'scripts/build.py',
'scripts/build_test.py',
'scripts/check_e2e_tests_are_captured_in_ci.py',
'scripts/check_e2e_tests_are_captured_in_ci_test.py',
'scripts/check_frontend_test_coverage.py',
'scripts/check_frontend_test_coverage_test.py',
'scripts/check_if_pr_is_low_risk.py',
'scripts/check_if_pr_is_low_risk_test.py',
'scripts/clean.py',
'scripts/clean_test.py',
'scripts/common.py',
'scripts/common_test.py',
'scripts/concurrent_task_utils.py',
'scripts/concurrent_task_utils_test.py',
'scripts/create_expression_parser.py',
'scripts/create_topological_sort_of_all_services.py',
'scripts/create_topological_sort_of_all_services_test.py',
'scripts/docstrings_checker.py',
'scripts/docstrings_checker_test.py',
'scripts/extend_index_yaml.py',
'scripts/extend_index_yaml_test.py',
'scripts/flake_checker.py',
'scripts/flake_checker_test.py',
'scripts/install_backend_python_libs.py',
'scripts/install_backend_python_libs_test.py',
'scripts/install_chrome_for_ci.py',
'scripts/install_chrome_for_ci_test.py',
'scripts/install_third_party_libs.py',
'scripts/install_third_party_libs_test.py',
'scripts/install_third_party.py',
'scripts/install_third_party_test.py',
'scripts/pre_commit_hook.py',
'scripts/pre_commit_hook_test.py',
'scripts/pre_push_hook.py',
'scripts/pre_push_hook_test.py',
'scripts/regenerate_requirements.py',
'scripts/regenerate_requirements_test.py',
'scripts/rtl_css.py',
'scripts/rtl_css_test.py',
'scripts/run_backend_tests.py',
'scripts/run_custom_eslint_tests.py',
'scripts/run_e2e_tests.py',
'scripts/run_e2e_tests_test.py',
'scripts/run_frontend_tests.py',
'scripts/run_lighthouse_tests.py',
'scripts/run_mypy_checks.py',
'scripts/run_mypy_checks_test.py',
'scripts/run_portserver.py',
'scripts/run_presubmit_checks.py',
'scripts/run_tests.py',
'scripts/script_import_test.py',
'scripts/scripts_test_utils.py',
'scripts/scripts_test_utils_test.py',
'scripts/servers.py',
'scripts/servers_test.py',
'scripts/setup.py',
'scripts/setup_test.py',
'scripts/typescript_checks.py',
'scripts/typescript_checks_test.py',
'scripts/linters/',
'scripts/release_scripts/'
]
CONFIG_FILE_PATH = os.path.join('.', 'mypy.ini')
MYPY_REQUIREMENTS_FILE_PATH = os.path.join('.', 'mypy_requirements.txt')
MYPY_TOOLS_DIR = os.path.join(os.getcwd(), 'third_party', 'python3_libs')
PYTHON3_CMD = 'python3'
_PATHS_TO_INSERT = [MYPY_TOOLS_DIR, ]
_PARSER = argparse.ArgumentParser(
    description='Script for running Python type checks using mypy.'
)
_PARSER.add_argument(
'--skip-install',
help='If passed, skips installing dependencies.'
' By default, they are installed.',
action='store_true')
_PARSER.add_argument(
'--install-globally',
help='optional; if specified, installs mypy and its requirements globally.'
' By default, they are installed to %s' % MYPY_TOOLS_DIR,
action='store_true')
_PARSER.add_argument(
'--files',
help='Files to type-check',
action='store',
nargs='+'
)
def install_third_party_libraries(skip_install: bool) -> None:
"""Run the installation script.
Args:
skip_install: bool. Whether to skip running the installation script.
"""
if not skip_install:
install_third_party_libs.main()
def get_mypy_cmd(files, mypy_exec_path, using_global_mypy):
"""Return the appropriate command to be run.
Args:
        files: list(str)|None. Specific files to type-check, or None to check
            the whole codebase minus the excluded paths.
        mypy_exec_path: str. Path of the mypy executable.
        using_global_mypy: bool. Whether the generated command should use the
            globally installed mypy.
Returns:
list(str). List of command line arguments.
"""
if using_global_mypy:
mypy_cmd = 'mypy'
else:
mypy_cmd = mypy_exec_path
if files:
cmd = [mypy_cmd, '--config-file', CONFIG_FILE_PATH] + files
else:
excluded_files_regex = (
'|'.join(NOT_FULLY_COVERED_FILES + EXCLUDED_DIRECTORIES))
cmd = [
mypy_cmd, '--exclude', excluded_files_regex,
'--config-file', CONFIG_FILE_PATH, '.'
]
return cmd
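# Example (illustrative): with files=['core/domain/exp_domain.py'] the
# generated command is
#     [mypy_exec_path, '--config-file', CONFIG_FILE_PATH,
#      'core/domain/exp_domain.py']
# and with files=None it falls back to a whole-repo check whose --exclude
# regex is the '|'-joined union of NOT_FULLY_COVERED_FILES and
# EXCLUDED_DIRECTORIES.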
def install_mypy_prerequisites(install_globally):
"""Install mypy and type stubs from mypy_requirements.txt.
Args:
install_globally: bool. Whether mypy and its requirements are to be
installed globally.
Returns:
tuple(int, str). The return code from installing prerequisites and the
path of the mypy executable.
"""
    # TODO(#13398): Change the MyPy installation after the Python 3 migration.
    # For now, packages are installed globally for CI, because pip does not
    # install them in the expected location there.
if install_globally:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH
]
else:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH, '--target', MYPY_TOOLS_DIR,
'--upgrade'
]
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.communicate()
if b'can\'t combine user with prefix' in output[1]:
        extra_pip_args = ['--user', '--prefix=', '--system']
        new_process = subprocess.Popen(
            cmd + extra_pip_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
new_process.communicate()
_PATHS_TO_INSERT.append(os.path.join(site.USER_BASE, 'bin'))
mypy_exec_path = os.path.join(site.USER_BASE, 'bin', 'mypy')
return (new_process.returncode, mypy_exec_path)
else:
_PATHS_TO_INSERT.append(os.path.join(MYPY_TOOLS_DIR, 'bin'))
mypy_exec_path = os.path.join(MYPY_TOOLS_DIR, 'bin', 'mypy')
return (process.returncode, mypy_exec_path)
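# Example (illustrative): with install_globally=False the pip invocation
# resolves to
#     [PYTHON3_CMD, '-m', 'pip', 'install', '-r', MYPY_REQUIREMENTS_FILE_PATH,
#      '--target', MYPY_TOOLS_DIR, '--upgrade']
# and the returned executable path is os.path.join(MYPY_TOOLS_DIR, 'bin',
# 'mypy').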
def main(args=None):
"""Runs the MyPy type checks."""
parsed_args = _PARSER.parse_args(args=args)
for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
# The directories should only be inserted starting at index 1. See
# https://stackoverflow.com/a/10095099 and
# https://stackoverflow.com/q/10095037 for more details.
sys.path.insert(1, directory)
install_third_party_libraries(parsed_args.skip_install)
common.fix_third_party_imports()
print('Installing Mypy and stubs for third party libraries.')
return_code, mypy_exec_path = install_mypy_prerequisites(
parsed_args.install_globally)
if return_code != 0:
print('Cannot install Mypy and stubs for third party libraries.')
sys.exit(1)
print('Installed Mypy and stubs for third party libraries.')
print('Starting Mypy type checks.')
cmd = get_mypy_cmd(
parsed_args.files, mypy_exec_path, parsed_args.install_globally)
env = os.environ.copy()
for path in _PATHS_TO_INSERT:
env['PATH'] = '%s%s' % (path, os.pathsep) + env['PATH']
env['PYTHONPATH'] = MYPY_TOOLS_DIR
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
    # The standard output and error streams are bytes; decode them before
    # printing.
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
if process.returncode == 0:
print('Mypy type checks successful.')
else:
print(
'Mypy type checks unsuccessful. Please fix the errors. '
'For more information, visit: '
'https://github.com/oppia/oppia/wiki/Backend-Type-Annotations')
sys.exit(2)
return process.returncode
if __name__ == '__main__': # pragma: no cover
main()
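# Example invocations (illustrative; module path assumed from the Oppia
# scripts/ layout):
#     python -m scripts.run_mypy_checks                   # full check
#     python -m scripts.run_mypy_checks --skip-install    # reuse installed deps
#     python -m scripts.run_mypy_checks --files core/domain/exp_domain.py
# The script exits with status 1 if installing mypy/stubs fails and status 2
# if mypy reports type errors.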
|
|
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from ....external.qt.QtGui import QMainWindow
from ....external.qt.QtTest import QTest
from ....external.qt.QtCore import Qt
from ....external.qt.QtGui import QItemSelectionModel
from mock import MagicMock, patch
import pytest
from ..layer_tree_widget import (LayerTreeWidget, Clipboard,
save_subset)
from ....tests import example_data
from .... import core
class TestLayerTree(object):
""" Unit tests for the layer_tree_widget class """
def setup_method(self, method):
self.data = example_data.test_data()
self.collect = core.data_collection.DataCollection(list(self.data))
self.hub = self.collect.hub
self.widget = LayerTreeWidget()
self.win = QMainWindow()
self.win.setCentralWidget(self.widget)
self.widget.setup(self.collect)
for key, value in self.widget._actions.items():
            setattr(self, "%s_action" % key, value)
def teardown_method(self, method):
self.win.close()
def select_layers(self, *layers):
self.widget.layerTree.set_selected_layers(layers)
def remove_layer(self, layer):
""" Remove a layer via the widget remove button """
self.select_layers(layer)
QTest.mousePress(self.widget.layerRemoveButton, Qt.LeftButton)
QTest.mouseRelease(self.widget.layerRemoveButton, Qt.LeftButton)
def add_layer(self, layer=None):
""" Add a layer through a hub message """
layer = layer or core.Data()
self.widget.data_collection.append(layer)
return layer
def layer_present(self, layer):
""" Test that a layer exists in the data collection """
return layer in self.collect or \
getattr(layer, 'data', None) in self.collect
def test_current_layer_method_correct(self):
layer = self.add_layer()
self.select_layers(layer)
assert self.widget.current_layer() is layer
def test_add(self):
""" Test that a layer exists in widget once added """
data = core.Data()
assert not self.layer_present(data)
self.add_layer(data)
assert self.layer_present(data)
def test_remove_layer(self):
""" Test that widget remove button works properly """
layer = self.add_layer()
self.remove_layer(layer)
assert not self.layer_present(layer)
def test_remove_subset_triggers_selection_changed(self):
layer = self.add_layer()
grp = self.collect.new_subset_group()
mock = MagicMock()
self.select_layers(grp)
self.widget.layerTree.selection_changed.connect(mock)
QTest.mousePress(self.widget.layerRemoveButton, Qt.LeftButton)
QTest.mouseRelease(self.widget.layerRemoveButton, Qt.LeftButton)
assert mock.call_count > 0
def test_remove_subset_layer(self):
""" Test that widget remove button works properly on subset groups"""
layer = self.add_layer()
grp = self.collect.new_subset_group()
assert self.layer_present(grp)
self.remove_layer(grp)
assert not self.layer_present(grp)
def test_empty_removal_does_nothing(self):
""" Make sure widgets are only removed when selected """
layer = self.add_layer()
self.widget.layerTree.clearSelection()
QTest.mousePress(self.widget.layerRemoveButton, Qt.LeftButton)
assert self.layer_present(layer)
@patch('glue.qt.widgets.layer_tree_widget.LinkEditor')
def test_link_data(self, le):
layer = self.add_layer()
self.select_layers(layer)
self.link_action.trigger()
assert le.update_links.call_count == 1
def test_new_subset_action(self):
""" new action creates a new subset group """
layer = self.add_layer()
self.new_action.trigger()
assert len(self.collect.subset_groups) == 1
def test_copy_paste_subset_action(self):
layer = self.add_layer()
grp = self.collect.new_subset_group()
self.select_layers(grp)
self.copy_action.trigger()
grp2 = self.collect.new_subset_group()
self.select_layers(grp2)
state0 = grp2.subset_state
self.paste_action.trigger()
assert grp2.subset_state is not state0
def setup_two_subset_selection(self):
layer = self.add_layer()
g1 = self.collect.new_subset_group()
g2 = self.collect.new_subset_group()
self.select_layers(g1, g2)
return layer
def test_invert(self):
layer = self.add_layer()
sub = self.collect.new_subset_group()
self.select_layers(sub)
self.invert_action.trigger()
assert isinstance(sub.subset_state, core.subset.InvertState)
def test_actions_enabled_single_subset_group_selection(self):
Clipboard().contents = None
layer = self.add_layer()
grp = self.collect.new_subset_group()
self.select_layers(grp)
assert self.new_action.isEnabled()
assert self.copy_action.isEnabled()
assert not self.paste_action.isEnabled()
assert self.invert_action.isEnabled()
assert self.clear_action.isEnabled()
def test_actions_enabled_single_data_selection(self):
layer = self.add_layer()
self.select_layers(layer)
assert self.new_action.isEnabled()
assert not self.copy_action.isEnabled()
assert not self.paste_action.isEnabled()
assert not self.invert_action.isEnabled()
assert not self.clear_action.isEnabled()
def test_actions_enabled_multi_subset_group_selection(self):
layer = self.setup_two_subset_selection()
assert self.new_action.isEnabled()
assert not self.copy_action.isEnabled()
assert not self.paste_action.isEnabled()
assert not self.invert_action.isEnabled()
assert not self.clear_action.isEnabled()
def test_checkable_toggle(self):
self.widget.set_checkable(True)
assert self.widget.is_checkable()
self.widget.set_checkable(False)
assert not self.widget.is_checkable()
def test_load_data(self):
pth = 'glue.qt.widgets.layer_tree_widget.qtutil.data_wizard'
with patch(pth) as wizard:
wizard.return_value = [self.data[0]]
self.widget._load_data()
assert self.layer_present(self.data[0])
def test_clear_subset_group(self):
layer = self.add_layer()
sub = self.collect.new_subset_group()
self.select_layers(sub)
dummy_state = MagicMock()
sub.subset_state = dummy_state
self.clear_action.trigger()
assert sub.subset_state is not dummy_state
def test_single_selection_updates_editable(self):
self.widget.bind_selection_to_edit_subset()
layer = self.add_layer()
grp1 = self.collect.new_subset_group()
grp2 = self.collect.new_subset_group()
assert layer.edit_subset[0].group is not grp1
self.select_layers(grp1)
assert layer.edit_subset[0].group is grp1
def test_multi_selection_updates_editable(self):
"""Selection disables edit_subset for all other data"""
self.widget.bind_selection_to_edit_subset()
layer = self.add_layer()
layer2 = self.add_layer()
grps = [self.collect.new_subset_group() for _ in range(3)]
self.select_layers(*grps[:2])
selected = [s.group for s in layer.edit_subset + layer2.edit_subset]
assert grps[0] in selected
assert grps[1] in selected
assert grps[2] not in selected
def test_selection_updates_on_data_add(self):
layer = self.add_layer()
assert self.widget.selected_layers() == [layer]
def test_selection_updates_on_subset_group_add(self):
layer = self.add_layer()
grp = self.collect.new_subset_group()
assert self.widget.selected_layers() == [grp]
def test_save_subset(self):
subset = MagicMock(core.Subset)
with patch('glue.qt.widgets.layer_tree_widget.QFileDialog') as d:
d.getSaveFileName.return_value = ('test.fits', None)
save_subset(subset)
subset.write_mask.assert_called_once_with('test.fits')
def test_save_subset_cancel(self):
subset = MagicMock(core.Subset)
with patch('glue.qt.widgets.layer_tree_widget.QFileDialog') as d:
d.getSaveFileName.return_value = ('', '')
save_subset(subset)
assert subset.write_mask.call_count == 0
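# Illustrative note: these tests construct real Qt widgets (QMainWindow,
# LayerTreeWidget), so a QApplication must exist before setup_method runs;
# the surrounding test suite is expected to provide one. A stand-alone
# sketch, assuming the same Qt shim used by the imports above:
#
#     from glue.external.qt.QtGui import QApplication
#     app = QApplication.instance() or QApplication([])
#     # then run, e.g.: py.test glue/qt/widgets/tests/test_layer_tree_widget.py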
|