# Lint as: python3
# pylint: disable=bad-indentation,line-too-long
"""DFIV Learner implementation."""
import datetime
from typing import Dict, List
import acme
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
from src.utils.tf_linear_reg_utils import fit_linear, linear_reg_loss, linear_reg_pred, add_const_col
# Acme's default checkpoint TTL is 5 days; extend it to 30 days here.
_CHECKPOINT_TTL = int(datetime.timedelta(days=30).total_seconds())
class DFIVLearner(acme.Learner, tf2_savers.TFSaveable):
"""DFIVLearner.
This is the learning component of a DFIV agent, i.e. it takes a dataset as
input and implements update functionality to learn from this dataset.
Optionally it takes a replay client as well to allow for updating of
priorities.
"""
def __init__(self,
value_func: snt.Module,
instrumental_feature: snt.Module,
policy_net: snt.Module,
discount: float,
value_learning_rate: float,
instrumental_learning_rate: float,
value_reg: float,
instrumental_reg: float,
stage1_reg: float,
stage2_reg: float,
instrumental_iter: int,
value_iter: int,
dataset: tf.data.Dataset,
d_tm1_weight: float = 1.0,
counter: counting.Counter = None,
logger: loggers.Logger = None,
checkpoint: bool = True,
checkpoint_interval_minutes: float = 10.0):
"""Initializes the learner.
Args:
value_func: value function network
instrumental_feature: dual function network.
policy_net: policy network.
discount: global discount.
value_learning_rate: learning rate for the value function update.
instrumental_learning_rate: learning rate for the instrumental feature update.
value_reg: L2 regularizer for value net.
instrumental_reg: L2 regularizer for instrumental net.
stage1_reg: ridge regularizer for the stage 1 regression.
stage2_reg: ridge regularizer for the stage 2 regression.
instrumental_iter: number of iterations for the instrumental net.
value_iter: number of iterations for the value function.
dataset: dataset to learn from.
d_tm1_weight: weights for terminal state transitions.
counter: Counter object for (potentially distributed) counting.
logger: Logger object for writing logs to.
checkpoint: boolean indicating whether to checkpoint the learner.
checkpoint_interval_minutes: checkpoint interval in minutes.
"""
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
self.stage1_reg = stage1_reg
self.stage2_reg = stage2_reg
self.instrumental_iter = instrumental_iter
self.value_iter = value_iter
self.discount = discount
self.value_reg = value_reg
self.instrumental_reg = instrumental_reg
self.d_tm1_weight = d_tm1_weight
# Get an iterator over the dataset.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
self.value_func = value_func
self.value_feature = value_func._feature
self.instrumental_feature = instrumental_feature
self.policy = policy_net
self._value_func_optimizer = snt.optimizers.Adam(
value_learning_rate, beta1=0.5, beta2=0.9)
self._instrumental_func_optimizer = snt.optimizers.Adam(
instrumental_learning_rate, beta1=0.5, beta2=0.9)
# Define additional variables.
self.stage1_weight = tf.Variable(
tf.zeros((instrumental_feature.feature_dim(),
value_func.feature_dim()), dtype=tf.float32))
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._variables = [
self.value_func.trainable_variables,
self.instrumental_feature.trainable_variables,
self.stage1_weight,
]
# Create a checkpointer object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
self._checkpointer = tf2_savers.Checkpointer(
objects_to_save=self.state,
time_delta_minutes=checkpoint_interval_minutes,
checkpoint_ttl_seconds=_CHECKPOINT_TTL)
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={'value_func': self.value_func,
'instrumental_feature': self.instrumental_feature,
}, time_delta_minutes=60.)
def update_batch(self):
stage1_input = next(self._iterator)
stage2_input = next(self._iterator)
return stage1_input.data, stage2_input.data
def _get_d_tm1(self, data):
if len(data) > 6:
return data[6]
else:
return tf.ones(data[1].shape[0], dtype=tf.float32)
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
stage1_loss = None
stage2_loss = None
for _ in range(self.value_iter):
# Pull out the data needed for updates/priorities.
stage1_input, stage2_input = self.update_batch()
for _ in range(self.instrumental_iter // self.value_iter):
o_tm1, a_tm1, r_t, d_t, o_t = stage1_input[:5]
d_tm1 = self._get_d_tm1(stage1_input)
stage1_loss = self.update_instrumental(o_tm1, a_tm1, r_t, d_t, o_t, d_tm1)
stage2_loss = self.update_value(stage1_input, stage2_input)
self.update_final_weight(stage1_input, stage2_input)
self._num_steps.assign_add(1)
# Collect the stage 1 and stage 2 losses for logging.
fetches = {'stage1_loss': stage1_loss, 'stage2_loss': stage2_loss}
return fetches
def cal_validation_err(self, valid_input):
"""Return prediction MSE on the validation dataset."""
stage1_weight = self.stage1_weight
stage2_weight = self.value_func.weight
se_sum = 0.
se2_sum = 0.
weight_sum = 0.
for sample in valid_input:
data = sample.data
current_obs, action, reward = data[:3]
d_tm1 = self._get_d_tm1(data)
d_tm1 = tf.expand_dims(d_tm1, axis=1)
instrumental_feature = self.instrumental_feature(obs=current_obs, action=action,
training=False)
predicted_feature = linear_reg_pred(instrumental_feature, stage1_weight)
current_feature = add_const_col(self.value_feature(obs=current_obs, action=action,
training=True))
predicted_feature = current_feature - d_tm1 * self.discount * predicted_feature
predict = linear_reg_pred(predicted_feature, stage2_weight)
weight = d_tm1 + (1.0 - d_tm1) * tf.convert_to_tensor(self.d_tm1_weight, dtype=tf.float32)
weight = tf.square(weight)
sq_err = tf.square(tf.expand_dims(reward, -1) - predict)
se_sum += tf.reduce_sum(weight * sq_err)
se2_sum += tf.reduce_sum(weight * tf.square(sq_err))
weight_sum += tf.reduce_sum(weight)
mse = se_sum / weight_sum
mse_err_std = tf.sqrt((se2_sum / weight_sum - mse ** 2) / weight_sum)
return mse, mse_err_std
def update_instrumental(self, current_obs, action, reward, discount, next_obs, d_tm1):
next_action = self.policy(next_obs)
discount = tf.expand_dims(discount, axis=1)
d_tm1 = tf.expand_dims(d_tm1, axis=1)
# target = discount * self.value_feature(next_obs, next_action, training=False)
target = d_tm1 * discount * add_const_col(self.value_feature(next_obs, next_action, training=False))
l2 = snt.regularizers.L2(self.instrumental_reg)
with tf.GradientTape() as tape:
feature = self.instrumental_feature(obs=current_obs, action=action, training=True)
feature = d_tm1 * feature
loss = linear_reg_loss(target, feature, self.stage1_reg)
loss = loss + l2(self.instrumental_feature.trainable_variables)
loss /= action.shape[0]
gradient = tape.gradient(loss, self.instrumental_feature.trainable_variables)
self._instrumental_func_optimizer.apply(gradient, self.instrumental_feature.trainable_variables)
return loss
def update_value(self, stage1_input, stage2_input):
current_obs_1st, action_1st, _, discount_1st, next_obs_1st = stage1_input[:5]
d_tm1_1st = self._get_d_tm1(stage1_input)
current_obs_2nd, action_2nd, reward_2nd = stage2_input[:3]
d_tm1_2nd = self._get_d_tm1(stage2_input)
next_action_1st = self.policy(next_obs_1st)
discount_1st = tf.expand_dims(discount_1st, axis=1)
d_tm1_1st = tf.expand_dims(d_tm1_1st, axis=1)
d_tm1_2nd = tf.expand_dims(d_tm1_2nd, axis=1)
instrumental_feature_1st = self.instrumental_feature(obs=current_obs_1st, action=action_1st,
training=False)
instrumental_feature_1st = d_tm1_1st * instrumental_feature_1st
instrumental_feature_2nd = self.instrumental_feature(obs=current_obs_2nd, action=action_2nd,
training=False)
l2 = snt.regularizers.L2(self.value_reg)
with tf.GradientTape() as tape:
# target_1st = discount_1st * self.value_feature(obs=next_obs_1st, action=next_action_1st, training=True)
target_1st = d_tm1_1st * discount_1st * add_const_col(self.value_feature(obs=next_obs_1st, action=next_action_1st, training=True))
stage1_weight = fit_linear(target_1st, instrumental_feature_1st, self.stage1_reg)
predicted_feature = linear_reg_pred(instrumental_feature_2nd, stage1_weight)
# current_feature = self.value_feature(obs=current_obs_2nd, action=action_2nd, training=True)
current_feature = add_const_col(self.value_feature(obs=current_obs_2nd, action=action_2nd, training=True))
predicted_feature = current_feature - d_tm1_2nd * self.discount * predicted_feature
# loss = linear_reg_loss(tf.expand_dims(reward_2nd, -1), predicted_feature, self.stage2_reg)
weight = d_tm1_2nd + (1.0 - d_tm1_2nd) * tf.convert_to_tensor(self.d_tm1_weight, dtype=tf.float32)
loss = linear_reg_loss(weight * tf.expand_dims(reward_2nd, -1), weight * predicted_feature, self.stage2_reg)
loss = loss + l2(self.value_feature.trainable_variables)
loss /= action_2nd.shape[0]
gradient = tape.gradient(loss, self.value_feature.trainable_variables)
self._value_func_optimizer.apply(gradient, self.value_feature.trainable_variables)
return loss
def update_final_weight(self, stage1_input, stage2_input):
current_obs_1st, action_1st, _, discount_1st, next_obs_1st = stage1_input[:5]
d_tm1_1st = self._get_d_tm1(stage1_input)
current_obs_2nd, action_2nd, reward_2nd = stage2_input[:3]
d_tm1_2nd = self._get_d_tm1(stage2_input)
next_action_1st = self.policy(next_obs_1st)
discount_1st = tf.expand_dims(discount_1st, axis=1)
d_tm1_1st = tf.expand_dims(d_tm1_1st, axis=1)
d_tm1_2nd = tf.expand_dims(d_tm1_2nd, axis=1)
instrumental_feature_1st = self.instrumental_feature(obs=current_obs_1st, action=action_1st,
training=False)
instrumental_feature_1st = d_tm1_1st * instrumental_feature_1st
instrumental_feature_2nd = self.instrumental_feature(obs=current_obs_2nd, action=action_2nd,
training=False)
# target_1st = discount_1st * self.value_feature(obs=next_obs_1st, action=next_action_1st, training=False)
target_1st = d_tm1_1st * discount_1st * add_const_col(self.value_feature(obs=next_obs_1st, action=next_action_1st, training=False))
stage1_weight = fit_linear(target_1st, instrumental_feature_1st, self.stage1_reg)
self.stage1_weight.assign(stage1_weight)
predicted_feature = linear_reg_pred(instrumental_feature_2nd, stage1_weight)
# current_feature = self.value_feature(obs=current_obs_2nd, action=action_2nd, training=False)
current_feature = add_const_col(self.value_feature(obs=current_obs_2nd, action=action_2nd, training=False))
# predicted_feature = add_const_col(current_feature) - self.discount * add_const_col(predicted_feature)
predicted_feature = current_feature - d_tm1_2nd * self.discount * predicted_feature
# self.value_func._weight.assign(
# fit_linear(tf.expand_dims(reward_2nd, -1), predicted_feature, self.stage2_reg))
weight = d_tm1_2nd + (1.0 - d_tm1_2nd) * tf.convert_to_tensor(self.d_tm1_weight, dtype=tf.float32)
stage2_weight = fit_linear(weight * tf.expand_dims(reward_2nd, -1), weight * predicted_feature, self.stage2_reg)
self.value_func.weight.assign(stage2_weight)
return stage1_weight, stage2_weight
def step(self):
# Do a batch of SGD.
result = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
result.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(result)
def get_variables(self, names: List[str]) -> List[np.ndarray]:
return tf2_utils.to_numpy(self._variables)
@property
def state(self):
"""Returns the stateful parts of the learner for checkpointing."""
return {
'value_func': self.value_func,
'instrumental_feature': self.instrumental_feature,
'stage1_weight': self.stage1_weight,
'value_func_optimizer': self._value_func_optimizer,
'instrumental_func_optimizer': self._instrumental_func_optimizer,
'num_steps': self._num_steps,
'counter': self._counter,
}
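# --- Hedged example, not part of the original module -----------------------
# A minimal NumPy sketch of the two-stage ridge regression performed in
# update_final_weight above, assuming fit_linear solves an ordinary ridge
# problem of the form W = (X^T X + reg * I)^{-1} X^T Y. The feature arrays
# here are stand-ins for the outputs of the instrumental / value networks.
import numpy as np


def ridge_fit(features, targets, reg):
  """Solves (X^T X + reg * I) W = X^T Y for W."""
  dim = features.shape[1]
  a = features.T @ features + reg * np.eye(dim)
  return np.linalg.solve(a, features.T @ targets)


def dfiv_two_stage_sketch(inst_feat_1st, value_feat_next_1st,
                          inst_feat_2nd, value_feat_cur_2nd, reward_2nd,
                          discount, stage1_reg, stage2_reg):
  """Sketch of the DFIV two-stage fit on two mini-batches."""
  # Stage 1: regress discounted next-state value features on instruments.
  stage1_weight = ridge_fit(inst_feat_1st,
                            discount * value_feat_next_1st, stage1_reg)
  # Stage 2: regress rewards on the TD-style feature difference.
  predicted_next = inst_feat_2nd @ stage1_weight
  td_features = value_feat_cur_2nd - discount * predicted_next
  stage2_weight = ridge_fit(td_features, reward_2nd[:, None], stage2_reg)
  return stage1_weight, stage2_weight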
|
|
"""
This module implements all the functions to read a video or a picture
using ffmpeg. It is quite ugly, as there are many pitfalls to avoid
"""
from __future__ import division
import subprocess as sp
import re
import warnings
import logging
logging.captureWarnings(True)
import numpy as np
from moviepy.config import get_setting # ffmpeg, ffmpeg.exe, etc...
from moviepy.tools import cvsecs
import os
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
class FFMPEG_VideoReader:
def __init__(self, filename, print_infos=False, bufsize = None,
pix_fmt="rgb24", check_duration=True):
self.filename = filename
infos = ffmpeg_parse_infos(filename, print_infos, check_duration)
self.fps = infos['video_fps']
self.size = infos['video_size']
self.duration = infos['video_duration']
self.ffmpeg_duration = infos['duration']
self.nframes = infos['video_nframes']
self.infos = infos
self.pix_fmt = pix_fmt
if pix_fmt == 'rgba':
self.depth = 4
else:
self.depth = 3
if bufsize is None:
w, h = self.size
bufsize = self.depth * w * h + 100
self.bufsize= bufsize
self.initialize()
self.pos = 1
self.lastread = self.read_frame()
def initialize(self, starttime=0):
"""Opens the file, creates the pipe. """
self.close() # if any
if starttime != 0 :
offset = min(1, starttime)
i_arg = ['-ss', "%.06f" % (starttime - offset),
'-i', self.filename,
'-ss', "%.06f" % offset]
else:
i_arg = [ '-i', self.filename]
cmd = ([get_setting("FFMPEG_BINARY")]+ i_arg +
['-loglevel', 'error',
'-f', 'image2pipe',
"-pix_fmt", self.pix_fmt,
'-vcodec', 'rawvideo', '-'])
popen_params = {"bufsize": self.bufsize,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
self.proc = sp.Popen(cmd, **popen_params)
def skip_frames(self, n=1):
"""Reads and throws away n frames """
w, h = self.size
for i in range(n):
self.proc.stdout.read(self.depth*w*h)
#self.proc.stdout.flush()
self.pos += n
def read_frame(self):
w, h = self.size
nbytes= self.depth*w*h
s = self.proc.stdout.read(nbytes)
if len(s) != nbytes:
warnings.warn("Warning: in file %s, "%(self.filename)+
"%d bytes wanted but %d bytes read,"%(nbytes, len(s))+
"at frame %d/%d, at time %.02f/%.02f sec. "%(
self.pos,self.nframes,
1.0*self.pos/self.fps,
self.duration)+
"Using the last valid frame instead.",
UserWarning)
if not hasattr(self, 'lastread'):
raise IOError(("MoviePy error: failed to read the first frame of "
"video file %s. That might mean that the file is "
"corrupted. That may also mean that you are using "
"a deprecated version of FFMPEG. On Ubuntu/Debian "
"for instance the version in the repos is deprecated. "
"Please update to a recent version from the website.")%(
self.filename))
result = self.lastread
else:
result = np.frombuffer(s, dtype='uint8')  # avoids the deprecated np.fromstring; the frame is read-only
result = result.reshape((h, w, len(s)//(w*h)))
self.lastread = result
return result
def get_frame(self, t):
""" Read a file video frame at time t.
Note for coders: getting an arbitrary frame in the video with
ffmpeg can be painfully slow if some decoding has to be done.
This function tries to avoid fetching arbitrary frames
whenever possible, by moving between adjacent frames.
"""
# these definitely need to be rechecked sometime. Seems to work.
# I use that horrible '+0.00001' hack because sometimes due to numerical
# imprecisions a 3.0 can become a 2.99999999... which makes the int()
# go to the previous integer. This makes the fetching more robust in the
# case where you get the nth frame by writing get_frame(n/fps).
pos = int(self.fps*t + 0.00001)+1
if pos == self.pos:
return self.lastread
else:
if(pos < self.pos) or (pos > self.pos+100):
self.initialize(t)
self.pos = pos
else:
self.skip_frames(pos-self.pos-1)
result = self.read_frame()
self.pos = pos
return result
def close(self):
if hasattr(self,'proc'):
self.proc.terminate()
self.proc.stdout.close()
self.proc.stderr.close()
del self.proc
def __del__(self):
self.close()
if hasattr(self,'lastread'):
del self.lastread
def ffmpeg_read_image(filename, with_mask=True):
""" Read an image file (PNG, BMP, JPEG...).
Wraps FFMPEG_Videoreader to read just one image.
Returns an ImageClip.
This function is not meant to be used directly in MoviePy,
use ImageClip instead to make clips out of image files.
Parameters
-----------
filename
Name of the image file. Can be of any format supported by ffmpeg.
with_mask
If the image has a transparency layer, ``with_mask=true`` will save
this layer as the mask of the returned ImageClip
"""
if with_mask:
pix_fmt = 'rgba'
else:
pix_fmt = "rgb24"
reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt, check_duration=False)
im = reader.lastread
del reader
return im
def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True):
"""Get file infos using ffmpeg.
Returns a dictionary with the fields:
"video_found", "video_fps", "duration", "video_nframes",
"video_duration", "audio_found", "audio_fps"
"video_duration" is slightly smaller than "duration" to avoid
fetching the incomplete frames at the end, which raises an error.
"""
# open the file in a pipe, provoke an error, read output
is_GIF = filename.endswith('.gif')
cmd = [get_setting("FFMPEG_BINARY"), "-i", filename]
if is_GIF:
cmd += ["-f", "null", "/dev/null"]
popen_params = {"bufsize": 10**5,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
proc = sp.Popen(cmd, **popen_params)
proc.stdout.readline()
proc.terminate()
infos = proc.stderr.read().decode('utf8')
del proc
if print_infos:
# print the whole info text returned by FFMPEG
print( infos )
lines = infos.splitlines()
if "No such file or directory" in lines[-1]:
raise IOError(("MoviePy error: the file %s could not be found !\n"
"Please check that you entered the correct "
"path.")%filename)
result = dict()
# get duration (in seconds)
result['duration'] = None
if check_duration:
try:
keyword = ('frame=' if is_GIF else 'Duration: ')
line = [l for l in lines if keyword in l][0]
match = re.findall("([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])", line)[0]
result['duration'] = cvsecs(match)
except:
raise IOError(("MoviePy error: failed to read the duration of file %s.\n"
"Here are the file infos returned by ffmpeg:\n\n%s")%(
filename, infos))
# get the output line that speaks about video
lines_video = [l for l in lines if ' Video: ' in l]
result['video_found'] = ( lines_video != [] )
if result['video_found']:
try:
line = lines_video[0]
# get the size, of the form 460x320 (w x h)
match = re.search(" [0-9]*x[0-9]*(,| )", line)
s = list(map(int, line[match.start():match.end()-1].split('x')))
result['video_size'] = s
except:
raise (("MoviePy error: failed to read video dimensions in file %s.\n"
"Here are the file infos returned by ffmpeg:\n\n%s")%(
filename, infos))
# get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
# tbc, and sometimes tbc/2...
# Current policy: Trust tbr first, then fps. If result is near from x*1000/1001
# where x is 23,24,25,50, replace by x*1000/1001 (very common case for the fps).
try:
match = re.search("( [0-9]*.| )[0-9]* tbr", line)
tbr = float(line[match.start():match.end()].split(' ')[1])
result['video_fps'] = tbr
except:
match = re.search("( [0-9]*.| )[0-9]* fps", line)
result['video_fps'] = float(line[match.start():match.end()].split(' ')[1])
# It is known that a fps of 24 is often written as 24000/1001
# but then ffmpeg nicely rounds it to 23.98, which we hate.
coef = 1000.0/1001.0
fps = result['video_fps']
for x in [23,24,25,30,50]:
if (fps!=x) and abs(fps - x*coef) < .01:
result['video_fps'] = x*coef
if check_duration:
result['video_nframes'] = int(result['duration']*result['video_fps'])+1
result['video_duration'] = result['duration']
else:
result['video_nframes'] = 1
result['video_duration'] = None
# We could have also recomputed the duration from the number
# of frames, as follows:
# >>> result['video_duration'] = result['video_nframes'] / result['video_fps']
lines_audio = [l for l in lines if ' Audio: ' in l]
result['audio_found'] = lines_audio != []
if result['audio_found']:
line = lines_audio[0]
try:
match = re.search(" [0-9]* Hz", line)
result['audio_fps'] = int(line[match.start()+1:match.end()])
except:
result['audio_fps'] = 'unknown'
return result
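# --- Hedged example, not part of the original module -----------------------
# A minimal sketch of how this reader is typically driven. "example.mp4" is a
# hypothetical file name, and FFMPEG_BINARY must point at a working ffmpeg
# build (see moviepy.config).
if __name__ == "__main__":
    reader = FFMPEG_VideoReader("example.mp4")
    print("fps:", reader.fps, "size:", reader.size, "duration:", reader.duration)
    frame = reader.get_frame(1.0)  # uint8 array of shape (h, w, 3) for rgb24
    print("frame shape:", frame.shape)
    reader.close()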
|
|
import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango20Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
_current_app_undefined = object()
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
return ContextDict(self, *args, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
Compares two contexts by comparing their 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of Context is deprecated. Use "
"RequestContext and set the current_app attribute of its "
"request instead.", RemovedInDjango20Warning, stacklevel=2)
self.autoescape = autoescape
self._current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.template_name = "unknown"
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@property
def current_app(self):
return None if self._current_app is _current_app_undefined else self._current_app
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
self.dicts.append(other_dict)
return other_dict
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RequestContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
# current_app isn't passed here to avoid triggering the deprecation
# warning in Context.__init__.
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of RequestContext is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango20Warning, stacklevel=2)
self._current_app = current_app
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
self.update({}) # placeholder for context processors output
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request)
if original_context:
context.push(original_context)
return context
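# --- Hedged example, not part of the original module -----------------------
# A minimal sketch of the stacked-dict behaviour implemented above: push()
# opens a new scope, lookups walk the stack from the innermost dict outward,
# and leaving the "with" block pops the scope again.
if __name__ == "__main__":
    c = Context({'greeting': 'hello', 'name': 'world'})
    with c.push(name='django'):
        assert c['name'] == 'django'      # innermost dict wins
        assert c['greeting'] == 'hello'   # outer values remain visible
    assert c['name'] == 'world'           # pushed scope removed on exit
    print(c.flatten())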
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, OneLogin, Inc.
# All rights reserved.
import json
from os.path import dirname, join, exists
import unittest
from xml.dom.minidom import parseString
from onelogin.saml2 import compat
from onelogin.saml2.logout_request import OneLogin_Saml2_Logout_Request
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.utils import OneLogin_Saml2_Utils
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
class OneLogin_Saml2_Logout_Request_Test(unittest.TestCase):
data_path = join(dirname(__file__), '..', '..', '..', 'data')
def loadSettingsJSON(self):
filename = join(dirname(__file__), '..', '..', '..', 'settings', 'settings1.json')
if exists(filename):
stream = open(filename, 'r')
settings = json.load(stream)
stream.close()
return settings
else:
raise Exception('Settings json file does not exist')
def file_contents(self, filename):
f = open(filename, 'r')
content = f.read()
f.close()
return content
def testConstructor(self):
"""
Tests the OneLogin_Saml2_LogoutRequest Constructor.
"""
settings_info = self.loadSettingsJSON()
settings_info['security']['nameIdEncrypted'] = True
settings = OneLogin_Saml2_Settings(settings_info)
logout_request = OneLogin_Saml2_Logout_Request(settings)
parameters = {'SAMLRequest': logout_request.get_request()}
logout_url = OneLogin_Saml2_Utils.redirect('http://idp.example.com/SingleLogoutService.php', parameters, True)
self.assertRegexpMatches(logout_url, '^http://idp\.example\.com\/SingleLogoutService\.php\?SAMLRequest=')
url_parts = urlparse(logout_url)
exploded = parse_qs(url_parts.query)
payload = exploded['SAMLRequest'][0]
inflated = compat.to_string(OneLogin_Saml2_Utils.decode_base64_and_inflate(payload))
self.assertRegexpMatches(inflated, '^<samlp:LogoutRequest')
def testCreateDeflatedSAMLLogoutRequestURLParameter(self):
"""
Tests the OneLogin_Saml2_LogoutRequest Constructor.
The creation of a deflated SAML Logout Request
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
logout_request = OneLogin_Saml2_Logout_Request(settings)
parameters = {'SAMLRequest': logout_request.get_request()}
logout_url = OneLogin_Saml2_Utils.redirect('http://idp.example.com/SingleLogoutService.php', parameters, True)
self.assertRegexpMatches(logout_url, '^http://idp\.example\.com\/SingleLogoutService\.php\?SAMLRequest=')
url_parts = urlparse(logout_url)
exploded = parse_qs(url_parts.query)
payload = exploded['SAMLRequest'][0]
inflated = compat.to_string(OneLogin_Saml2_Utils.decode_base64_and_inflate(payload))
self.assertRegexpMatches(inflated, '^<samlp:LogoutRequest')
def testGetIDFromSAMLLogoutRequest(self):
"""
Tests the get_id method of the OneLogin_Saml2_LogoutRequest
"""
logout_request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
id1 = OneLogin_Saml2_Logout_Request.get_id(logout_request)
self.assertEqual('ONELOGIN_21584ccdfaca36a145ae990442dcd96bfe60151e', id1)
dom = parseString(logout_request)
id2 = OneLogin_Saml2_Logout_Request.get_id(dom.toxml())
self.assertEqual('ONELOGIN_21584ccdfaca36a145ae990442dcd96bfe60151e', id2)
def testGetIDFromDeflatedSAMLLogoutRequest(self):
"""
Tests the get_id method of the OneLogin_Saml2_LogoutRequest
"""
deflated_logout_request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request_deflated.xml.base64'))
logout_request = OneLogin_Saml2_Utils.decode_base64_and_inflate(deflated_logout_request)
id1 = OneLogin_Saml2_Logout_Request.get_id(logout_request)
self.assertEqual('ONELOGIN_21584ccdfaca36a145ae990442dcd96bfe60151e', id1)
def testGetNameIdData(self):
"""
Tests the get_nameid_data method of the OneLogin_Saml2_LogoutRequest
"""
expected_name_id_data = {
'Value': 'ONELOGIN_1e442c129e1f822c8096086a1103c5ee2c7cae1c',
'Format': 'urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified',
'SPNameQualifier': 'http://idp.example.com/'
}
request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
name_id_data = OneLogin_Saml2_Logout_Request.get_nameid_data(request)
self.assertEqual(expected_name_id_data, name_id_data)
dom = parseString(request)
name_id_data_2 = OneLogin_Saml2_Logout_Request.get_nameid_data(dom.toxml())
self.assertEqual(expected_name_id_data, name_id_data_2)
request_2 = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request_encrypted_nameid.xml'))
self.assertRaisesRegexp(Exception, 'Key is required in order to decrypt the NameID',
OneLogin_Saml2_Logout_Request.get_nameid_data, request_2)
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
key = settings.get_sp_key()
name_id_data_4 = OneLogin_Saml2_Logout_Request.get_nameid_data(request_2, key)
expected_name_id_data = {
'Value': 'ONELOGIN_9c86c4542ab9d6fce07f2f7fd335287b9b3cdf69',
'Format': 'urn:oasis:names:tc:SAML:2.0:nameid-format:emailAddress',
'SPNameQualifier': 'https://pitbulk.no-ip.org/newonelogin/demo1/metadata.php'
}
self.assertEqual(expected_name_id_data, name_id_data_4)
dom_2 = parseString(request_2)
encrypted_id_nodes = dom_2.getElementsByTagName('saml:EncryptedID')
encrypted_data = encrypted_id_nodes[0].firstChild.nextSibling
encrypted_id_nodes[0].removeChild(encrypted_data)
self.assertRaisesRegexp(Exception, 'Not NameID found in the Logout Request',
OneLogin_Saml2_Logout_Request.get_nameid_data, dom_2.toxml(), key)
inv_request = self.file_contents(join(self.data_path, 'logout_requests', 'invalids', 'no_nameId.xml'))
self.assertRaisesRegexp(Exception, 'Not NameID found in the Logout Request',
OneLogin_Saml2_Logout_Request.get_nameid_data, inv_request)
def testGetNameId(self):
"""
Tests the get_nameid of the OneLogin_Saml2_LogoutRequest
"""
request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
name_id = OneLogin_Saml2_Logout_Request.get_nameid(request)
self.assertEqual(name_id, 'ONELOGIN_1e442c129e1f822c8096086a1103c5ee2c7cae1c')
request_2 = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request_encrypted_nameid.xml'))
self.assertRaisesRegexp(Exception, 'Key is required in order to decrypt the NameID',
OneLogin_Saml2_Logout_Request.get_nameid, request_2)
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
key = settings.get_sp_key()
name_id_3 = OneLogin_Saml2_Logout_Request.get_nameid(request_2, key)
self.assertEqual('ONELOGIN_9c86c4542ab9d6fce07f2f7fd335287b9b3cdf69', name_id_3)
def testGetIssuer(self):
"""
Tests the get_issuer of the OneLogin_Saml2_LogoutRequest
"""
request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
issuer = OneLogin_Saml2_Logout_Request.get_issuer(request)
self.assertEqual('http://idp.example.com/', issuer)
dom = parseString(request)
issuer_2 = OneLogin_Saml2_Logout_Request.get_issuer(dom.toxml())
self.assertEqual('http://idp.example.com/', issuer_2)
issuer_node = dom.getElementsByTagName('saml:Issuer')[0]
issuer_node.parentNode.removeChild(issuer_node)
issuer_3 = OneLogin_Saml2_Logout_Request.get_issuer(dom.toxml())
self.assertIsNone(issuer_3)
def testGetSessionIndexes(self):
"""
Tests the get_session_indexes of the OneLogin_Saml2_LogoutRequest
"""
request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
session_indexes = OneLogin_Saml2_Logout_Request.get_session_indexes(request)
self.assertEqual(len(session_indexes), 0)
dom = parseString(request)
session_indexes_2 = OneLogin_Saml2_Logout_Request.get_session_indexes(dom.toxml())
self.assertEqual(len(session_indexes_2), 0)
request_2 = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request_with_sessionindex.xml'))
session_indexes_3 = OneLogin_Saml2_Logout_Request.get_session_indexes(request_2)
self.assertEqual(['_ac72a76526cb6ca19f8438e73879a0e6c8ae5131'], session_indexes_3)
def testIsInvalidXML(self):
"""
Tests the is_valid method of the OneLogin_Saml2_LogoutRequest
Case Invalid XML
"""
request = OneLogin_Saml2_Utils.b64encode('<xml>invalid</xml>')
request_data = {
'http_host': 'example.com',
'script_name': 'index.html',
}
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
logout_request = OneLogin_Saml2_Logout_Request(settings, request)
self.assertTrue(logout_request.is_valid(request_data))
settings.set_strict(True)
logout_request2 = OneLogin_Saml2_Logout_Request(settings, request)
self.assertFalse(logout_request2.is_valid(request_data))
def testIsInvalidIssuer(self):
"""
Tests the is_valid method of the OneLogin_Saml2_LogoutRequest
Case Invalid Issuer
"""
request = self.file_contents(join(self.data_path, 'logout_requests', 'invalids', 'invalid_issuer.xml'))
request_data = {
'http_host': 'example.com',
'script_name': 'index.html'
}
current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
request = request.replace('http://stuff.com/endpoints/endpoints/sls.php', current_url)
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
logout_request = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
self.assertTrue(logout_request.is_valid(request_data))
settings.set_strict(True)
try:
logout_request2 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
valid = logout_request2.is_valid(request_data)
self.assertFalse(valid)
except Exception as e:
self.assertIn('Invalid issuer in the Logout Request', str(e))
def testIsInvalidDestination(self):
"""
Tests the is_valid method of the OneLogin_Saml2_LogoutRequest
Case Invalid Destination
"""
request_data = {
'http_host': 'example.com',
'script_name': 'index.html'
}
request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
logout_request = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
self.assertTrue(logout_request.is_valid(request_data))
settings.set_strict(True)
try:
logout_request2 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
valid = logout_request2.is_valid(request_data)
self.assertFalse(valid)
except Exception as e:
self.assertIn('The LogoutRequest was received at', str(e))
dom = parseString(request)
dom.documentElement.setAttribute('Destination', None)
logout_request3 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(dom.toxml()))
self.assertTrue(logout_request3.is_valid(request_data))
dom.documentElement.removeAttribute('Destination')
logout_request4 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(dom.toxml()))
self.assertTrue(logout_request4.is_valid(request_data))
def testIsInvalidNotOnOrAfter(self):
"""
Tests the is_valid method of the OneLogin_Saml2_LogoutRequest
Case Invalid NotOnOrAfter
"""
request_data = {
'http_host': 'example.com',
'script_name': 'index.html'
}
request = self.file_contents(join(self.data_path, 'logout_requests', 'invalids', 'not_after_failed.xml'))
current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
request = request.replace('http://stuff.com/endpoints/endpoints/sls.php', current_url)
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
logout_request = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
self.assertTrue(logout_request.is_valid(request_data))
settings.set_strict(True)
try:
logout_request2 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
valid = logout_request2.is_valid(request_data)
self.assertFalse(valid)
except Exception as e:
self.assertIn('Timing issues (please check your clock settings)', str(e))
def testIsValid(self):
"""
Tests the is_valid method of the OneLogin_Saml2_LogoutRequest
"""
request_data = {
'http_host': 'example.com',
'script_name': 'index.html'
}
request = self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml'))
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
logout_request = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
self.assertTrue(logout_request.is_valid(request_data))
settings.set_strict(True)
logout_request2 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
self.assertFalse(logout_request2.is_valid(request_data))
settings.set_strict(False)
dom = parseString(request)
logout_request3 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(dom.toxml()))
self.assertTrue(logout_request3.is_valid(request_data))
settings.set_strict(True)
logout_request4 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(dom.toxml()))
self.assertFalse(logout_request4.is_valid(request_data))
current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
request = request.replace('http://stuff.com/endpoints/endpoints/sls.php', current_url)
logout_request5 = OneLogin_Saml2_Logout_Request(settings, OneLogin_Saml2_Utils.b64encode(request))
self.assertTrue(logout_request5.is_valid(request_data))
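# --- Hedged example, not part of the original test module ------------------
# A minimal sketch of the encode/decode round trip the tests above rely on:
# for the HTTP-Redirect binding a request is DEFLATE-compressed and
# base64-encoded, and decode_base64_and_inflate reverses that. It assumes
# deflate_and_base64_encode is available on OneLogin_Saml2_Utils, as in the
# utils module imported above.
if __name__ == '__main__':
    xml = '<samlp:LogoutRequest ID="_example"/>'
    encoded = OneLogin_Saml2_Utils.deflate_and_base64_encode(xml)
    decoded = compat.to_string(
        OneLogin_Saml2_Utils.decode_base64_and_inflate(encoded))
    assert decoded == xml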
|
|
"""Support for Queensland Bushfire Alert Feeds."""
from datetime import timedelta
import logging
from typing import Optional
from georss_qld_bushfire_alert_client import QldBushfireAlertFeedManager
import voluptuous as vol
from homeassistant.components.geo_location import (
PLATFORM_SCHEMA, GeolocationEvent)
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS,
CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_START)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, dispatcher_send)
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = 'category'
ATTR_EXTERNAL_ID = 'external_id'
ATTR_PUBLICATION_DATE = 'publication_date'
ATTR_STATUS = 'status'
ATTR_UPDATED_DATE = 'updated_date'
CONF_CATEGORIES = 'categories'
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = 'km'
SCAN_INTERVAL = timedelta(minutes=5)
SIGNAL_DELETE_ENTITY = 'qld_bushfire_delete_{}'
SIGNAL_UPDATE_ENTITY = 'qld_bushfire_update_{}'
SOURCE = 'qld_bushfire'
VALID_CATEGORIES = [
'Emergency Warning',
'Watch and Act',
'Advice',
'Notification',
'Information',
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [vol.In(VALID_CATEGORIES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Queensland Bushfire Alert Feed platform."""
scan_interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
coordinates = (config.get(CONF_LATITUDE, hass.config.latitude),
config.get(CONF_LONGITUDE, hass.config.longitude))
radius_in_km = config[CONF_RADIUS]
categories = config[CONF_CATEGORIES]
# Initialize the entity manager.
feed = QldBushfireFeedEntityManager(
hass, add_entities, scan_interval, coordinates, radius_in_km,
categories)
def start_feed_manager(event):
"""Start feed manager."""
feed.startup()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_feed_manager)
class QldBushfireFeedEntityManager:
"""Feed Entity Manager for Qld Bushfire Alert GeoRSS feed."""
def __init__(self, hass, add_entities, scan_interval, coordinates,
radius_in_km, categories):
"""Initialize the Feed Entity Manager."""
self._hass = hass
self._feed_manager = QldBushfireAlertFeedManager(
self._generate_entity, self._update_entity, self._remove_entity,
coordinates, filter_radius=radius_in_km,
filter_categories=categories)
self._add_entities = add_entities
self._scan_interval = scan_interval
def startup(self):
"""Start up this manager."""
self._feed_manager.update()
self._init_regular_updates()
def _init_regular_updates(self):
"""Schedule regular updates at the specified interval."""
track_time_interval(
self._hass, lambda now: self._feed_manager.update(),
self._scan_interval)
def get_entry(self, external_id):
"""Get feed entry by external id."""
return self._feed_manager.feed_entries.get(external_id)
def _generate_entity(self, external_id):
"""Generate new entity."""
new_entity = QldBushfireLocationEvent(self, external_id)
# Add new entities to HA.
self._add_entities([new_entity], True)
def _update_entity(self, external_id):
"""Update entity."""
dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY.format(external_id))
def _remove_entity(self, external_id):
"""Remove entity."""
dispatcher_send(self._hass, SIGNAL_DELETE_ENTITY.format(external_id))
class QldBushfireLocationEvent(GeolocationEvent):
"""This represents an external event with Qld Bushfire feed data."""
def __init__(self, feed_manager, external_id):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._external_id = external_id
self._name = None
self._distance = None
self._latitude = None
self._longitude = None
self._attribution = None
self._category = None
self._publication_date = None
self._updated_date = None
self._status = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass, SIGNAL_DELETE_ENTITY.format(self._external_id),
self._delete_callback)
self._remove_signal_update = async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ENTITY.format(self._external_id),
self._update_callback)
@callback
def _delete_callback(self):
"""Remove this entity."""
self._remove_signal_delete()
self._remove_signal_update()
self.hass.async_create_task(self.async_remove())
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for Qld Bushfire Alert feed location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
self._name = feed_entry.title
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
self._attribution = feed_entry.attribution
self._category = feed_entry.category
self._publication_date = feed_entry.published
self._updated_date = feed_entry.updated
self._status = feed_entry.status
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return self._name
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return DEFAULT_UNIT_OF_MEASUREMENT
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_EXTERNAL_ID, self._external_id),
(ATTR_CATEGORY, self._category),
(ATTR_ATTRIBUTION, self._attribution),
(ATTR_PUBLICATION_DATE, self._publication_date),
(ATTR_UPDATED_DATE, self._updated_date),
(ATTR_STATUS, self._status)
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
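# --- Hedged example, not part of the original platform module --------------
# A minimal sketch of a configuration accepted by the PLATFORM_SCHEMA above;
# the coordinates, radius and categories are hypothetical. Omitted keys such
# as radius and categories fall back to their declared defaults.
if __name__ == "__main__":
    example_config = PLATFORM_SCHEMA({
        'platform': 'qld_bushfire',
        'latitude': -27.47,
        'longitude': 153.03,
        'radius': 50,
        'categories': ['Emergency Warning', 'Watch and Act'],
    })
    print(example_config)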
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnConnectionsOperations(object):
"""VpnConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnConnection"
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.VpnConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
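# --- Hedged usage sketch (comments only, not part of the generated code) ---
# Assuming a NetworkManagementClient for this API version, built elsewhere
# with credentials and a subscription id (names below are hypothetical):
#
#   connection = network_client.vpn_connections.get(
#       resource_group_name="example-rg",
#       gateway_name="example-vpn-gateway",
#       connection_name="example-connection",
#   )
#   print(connection.name)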
def _create_or_update_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
vpn_connection_parameters, # type: "_models.VpnConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
vpn_connection_parameters, # type: "_models.VpnConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnConnection"]
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist else updates the
existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or Update a VPN Connection.
:type vpn_connection_parameters: ~azure.mgmt.network.v2019_07_01.models.VpnConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VpnConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def list_by_vpn_gateway(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnConnectionsResult"]
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.ListVpnConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'} # type: ignore
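# Hedged usage sketch (not part of the generated operations class): how the
# LROPoller and ItemPaged objects returned above are typically consumed. The
# client, credential and model names are assumptions about the surrounding
# azure-identity / azure-mgmt-network packages, and every resource name is a
# placeholder.
def _example_vpn_connection_roundtrip():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    from azure.mgmt.network.models import SubResource, VpnConnection
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    params = VpnConnection(remote_vpn_site=SubResource(id="<vpn-site-resource-id>"))
    # begin_create_or_update returns an LROPoller; result() blocks until the
    # service reaches a terminal state and yields the deserialized VpnConnection.
    poller = client.vpn_connections.begin_create_or_update(
        "<resource-group>", "<gateway-name>", "<connection-name>", params)
    connection = poller.result()
    # list_by_vpn_gateway returns an ItemPaged iterator; iteration follows
    # next_link transparently until the collection is exhausted.
    for item in client.vpn_connections.list_by_vpn_gateway(
            "<resource-group>", "<gateway-name>"):
        print(item.name)
    # begin_delete also returns an LROPoller; wait() blocks without a payload.
    client.vpn_connections.begin_delete(
        "<resource-group>", "<gateway-name>", connection.name).wait()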
|
|
# Copyright (c) 2013, Thomas P. Robitaille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (unicode_literals, division, print_function,
absolute_import)
import time
import argparse
children = []
def get_percent(process):
return process.cpu_percent()
def get_memory(process):
return process.memory_info()
def all_children(pr):
global children
try:
children_of_pr = pr.children(recursive=True)
except Exception: # pragma: no cover
return children
for child in children_of_pr:
if child not in children:
children.append(child)
return children
def main():
parser = argparse.ArgumentParser(
description='Record CPU and memory usage for a process')
parser.add_argument('process_id_or_command', type=str,
help='the process id or command')
parser.add_argument('--log', type=str,
help='output the statistics to a file')
parser.add_argument('--plot', type=str,
help='output the statistics to a plot')
parser.add_argument('--duration', type=float,
help='how long to record for (in seconds). If not '
'specified, the recording is continuous until '
'the job exits.')
parser.add_argument('--interval', type=float,
help='how long to wait between each sample (in '
'seconds). By default the process is sampled '
'as often as possible.')
parser.add_argument('--include-children',
help='include sub-processes in statistics (results '
'in a slower maximum sampling rate).',
action='store_true')
args = parser.parse_args()
# Attach to process
try:
pid = int(args.process_id_or_command)
print("Attaching to process {0}".format(pid))
sprocess = None
except Exception:
import subprocess
command = args.process_id_or_command
print("Starting up command '{0}' and attaching to process"
.format(command))
sprocess = subprocess.Popen(command, shell=True)
pid = sprocess.pid
monitor(pid, logfile=args.log, plot=args.plot, duration=args.duration,
interval=args.interval, include_children=args.include_children)
if sprocess is not None:
sprocess.kill()
def monitor(pid, logfile=None, plot=None, duration=None, interval=None,
include_children=False):
# We import psutil here so that the module can be imported even if psutil
# is not present (for example if accessing the version)
import psutil
pr = psutil.Process(pid)
# Record start time
start_time = time.time()
if logfile:
f = open(logfile, 'w')
f.write("# {0:12s} {1:12s} {2:12s} {3:12s}\n".format(
'Elapsed time'.center(12),
'CPU (%)'.center(12),
'Real (MB)'.center(12),
'Virtual (MB)'.center(12))
)
log = {}
log['times'] = []
log['cpu'] = []
log['mem_real'] = []
log['mem_virtual'] = []
try:
# Start main event loop
while True:
# Find current time
current_time = time.time()
try:
pr_status = pr.status()
except TypeError: # psutil < 2.0
pr_status = pr.status
except psutil.NoSuchProcess: # pragma: no cover
break
# Check if process status indicates we should exit
if pr_status in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]:
print("Process finished ({0:.2f} seconds)"
.format(current_time - start_time))
break
# Check if we have reached the maximum time
if duration is not None and current_time - start_time > duration:
break
# Get current CPU and memory
try:
current_cpu = get_percent(pr)
current_mem = get_memory(pr)
except Exception:
break
current_mem_real = current_mem.rss / 1024. ** 2
current_mem_virtual = current_mem.vms / 1024. ** 2
# Get information for children
if include_children:
for child in all_children(pr):
try:
current_cpu += get_percent(child)
current_mem = get_memory(child)
except Exception:
continue
current_mem_real += current_mem.rss / 1024. ** 2
current_mem_virtual += current_mem.vms / 1024. ** 2
if logfile:
f.write("{0:12.3f} {1:12.3f} {2:12.3f} {3:12.3f}\n".format(
current_time - start_time,
current_cpu,
current_mem_real,
current_mem_virtual))
f.flush()
if interval is not None:
time.sleep(interval)
# If plotting, record the values
if plot:
log['times'].append(current_time - start_time)
log['cpu'].append(current_cpu)
log['mem_real'].append(current_mem_real)
log['mem_virtual'].append(current_mem_virtual)
except KeyboardInterrupt: # pragma: no cover
pass
if logfile:
f.close()
if plot:
# Use a non-interactive backend to enable operation on headless machines
import matplotlib.pyplot as plt
with plt.rc_context({'backend': 'Agg'}):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(log['times'], log['cpu'], '-', lw=1, color='r')
ax.set_ylabel('CPU (%)', color='r')
ax.set_xlabel('time (s)')
ax.set_ylim(0., max(log['cpu']) * 1.2)
ax2 = ax.twinx()
ax2.plot(log['times'], log['mem_real'], '-', lw=1, color='b')
ax2.set_ylim(0., max(log['mem_real']) * 1.2)
ax2.set_ylabel('Real Memory (MB)', color='b')
ax.grid()
fig.savefig(plot)
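# Hedged usage sketch (not part of the original script): call monitor() directly
# instead of going through the command line (the CLI form is typically something
# like ``psrecord <pid> --log activity.log --plot plot.png``). Assumes psutil is
# installed; "activity.log" is a placeholder file name.
def _example_monitor_self():
    import os
    # Sample the current Python process twice per second for two seconds and
    # write the CPU / memory readings to a log file.
    monitor(os.getpid(), logfile="activity.log", duration=2.0, interval=0.5,
            include_children=False)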
|
|
import weakref
from fontTools.ttLib import TTFont
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
from defcon.objects.base import BaseObject
try:
import compositor
from defcon.objects.uniData import UnicodeData
from defcon.objects.features import Features
except ImportError:
pass
# ---------
# Factories
# ---------
def _makeCMAP(unicodeData):
mapping = {}
for name, values in unicodeData.items():
mapping[name] = values[0]
return mapping
def _layoutEngineOTLTablesRepresentationFactory(layoutEngine):
font = layoutEngine.font
gdef = gsub = gpos = None
if font.features.text:
otf = TTFont()
otf.setGlyphOrder(sorted(font.keys()))
# compile with fontTools
try:
addOpenTypeFeaturesFromString(otf, font.features.text)
except Exception:
import traceback
print(traceback.format_exc(5))
if "GDEF" in otf:
gdef = otf["GDEF"]
if "GSUB" in otf:
gsub = otf["GSUB"]
if "GPOS" in otf:
gpos = otf["GPOS"]
return gdef, gsub, gpos
# -----------
# Main Object
# -----------
class LayoutEngine(BaseObject):
"""
This object provides a GDEF, GSUB and GPOS OpenType Layout Engine for
the default layer of the given font. The engine uses the ``compositor``
module so you must have that installed to use this object.
**This object posts the following notifications:**
- LayoutEngine.Changed
This object monitors the font's feature text and character mapping. When
those change, the compiled tables will be flagged for recompilation and
the next time the engine is queried the tables will be recompiled. Any
data that you have retrieved, such as the list of feature tags, may no
longer be correct. Thus, the data will need to be retrieved again. To be
notified when this is necessary, subscribe to the ``LayoutEngine.Changed``
notification.
"""
changeNotificationName = "LayoutEngine.Changed"
representationFactories = {
"defcon.layoutEngine.tables" : dict(
factory=_layoutEngineOTLTablesRepresentationFactory,
destructiveNotifications=("LayoutEngine._DestroyCachedTables")
)
}
def __init__(self, font):
self._needsInternalUpdate = True
self._font = weakref.ref(font)
self._layoutEngine = compositor.LayoutEngine()
super(LayoutEngine, self).__init__()
self.beginSelfNotificationObservation()
def _get_font(self):
if self._font is not None:
return self._font()
return None
font = property(_get_font)
def _get_engine(self):
if self._needsInternalUpdate:
self._updateEngine()
return self._layoutEngine
engine = property(_get_engine, doc="The compositor layout engine. This object must always be retrieved from the LayoutEngine for the automatic updating to occur.")
# --------------
# Engine Updates
# --------------
def _updateEngine(self):
font = self.font
cmap = _makeCMAP(font.unicodeData)
self._layoutEngine.setCMAP(cmap)
gdef, gsub, gpos = self.getRepresentation("defcon.layoutEngine.tables")
self._layoutEngine.setFeatureTables(gdef, gsub, gpos)
self._needsInternalUpdate = False
# -------------
# Notifications
# -------------
def beginSelfNotificationObservation(self):
super(LayoutEngine, self).beginSelfNotificationObservation()
self.beginSelfLayersObservation()
self.beginSelfLayerObservation()
self.beginSelfFeaturesObservation()
def endSelfNotificationObservation(self):
self.endSelfLayersObservation()
self.endSelfLayerObservation()
self.endSelfFeaturesObservation()
super(LayoutEngine, self).endSelfNotificationObservation()
self._font = None
# default layer changed (changes cmap)
def beginSelfLayersObservation(self):
layers = self.font.layers
layers.addObserver(observer=self, methodName="_layerSetDefaultLayerWillChange", notification="LayerSet.DefaultLayerWillChange")
layers.addObserver(observer=self, methodName="_layerSetDefaultLayerChanged", notification="LayerSet.DefaultLayerChanged")
def endSelfLayersObservation(self):
layers = self.font.layers
layers.removeObserver(observer=self, notification="LayerSet.DefaultLayerWillChange")
layers.removeObserver(observer=self, notification="LayerSet.DefaultLayerChanged")
def _layerSetDefaultLayerWillChange(self, notification):
self.endSelfLayerObservation()
def _layerSetDefaultLayerChanged(self, notification):
self.beginSelfLayerObservation()
self._postNeedsUpdateNotification()
# cmap change
def beginSelfLayerObservation(self):
layer = self.font.layers.defaultLayer
layer.addObserver(observer=self, methodName="_layerGlyphUnicodesChanged", notification="Layer.GlyphUnicodesChanged")
def endSelfLayerObservation(self):
layer = self.font.layers.defaultLayer
layer.removeObserver(observer=self, notification="Layer.GlyphUnicodesChanged")
def _layerGlyphUnicodesChanged(self, notification):
self._postNeedsUpdateNotification()
# feature text change
def beginSelfFeaturesObservation(self):
features = self.font.features
features.addObserver(observer=self, methodName="_featuresTextChanged", notification="Features.TextChanged")
def endSelfFeaturesObservation(self):
features = self.font.features
features.removeObserver(observer=self, notification="Features.TextChanged")
def _featuresTextChanged(self, notification):
self._destroyCachedTables()
self._postNeedsUpdateNotification()
# posting
def _destroyCachedTables(self):
self.postNotification("LayoutEngine._DestroyCachedTables")
def _postNeedsUpdateNotification(self):
self._needsInternalUpdate = True
self.postNotification(self.changeNotificationName)
# ----------
# Engine API
# ----------
def process(self, stringOrGlyphList, script="latn", langSys=None, rightToLeft=False, case="unchanged"):
"""
Process a string (or list of glyph names) with the current
feature states for the given **script** and **langSys**.
The writing will be left to right unless **rightToLeft**
is set to True.
The case may be changed following the Unicode case conversion
rules by setting **case** to one of the following:
+-----------+
| unchanged |
+-----------+
| upper |
+-----------+
| lower |
+-----------+
"""
self._updateEngine()
glyphRecords = self.engine.process(
stringOrGlyphList,
script=script, langSys=langSys,
rightToLeft=rightToLeft, case=case
)
layer = self.font.layers.defaultLayer
finalGlyphRecords = []
for glyphRecord in glyphRecords:
if glyphRecord.glyphName not in layer:
continue
layerGlyph = layer[glyphRecord.glyphName]
glyphRecord.advanceWidth += layerGlyph.width
glyphRecord.advanceHeight += layerGlyph.height
finalGlyphRecords.append(glyphRecord)
return finalGlyphRecords
def getScriptList(self):
"""
Get a list of defined scripts.
"""
return self.engine.getScriptList()
def getLanguageList(self):
"""
Get a list of defined languages.
"""
return self.engine.getLanguageList()
def getFeatureList(self):
"""
Get a list of defined features.
"""
return self.engine.getFeatureList()
def getFeatureState(self, name):
"""
Get the state for the feature with **name**.
"""
return self.engine.getFeatureState(name)
def setFeatureState(self, name, state):
"""
Set the state for the feature with **name**.
"""
self.engine.setFeatureState(name, state)
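# Hedged usage sketch (not part of the module): shape a string with a font's
# own feature file. Assumes the optional ``compositor`` dependency is installed
# and that "path/to/font.ufo" is a placeholder for an existing UFO containing
# the required glyphs.
def _example_process_string(ufo_path="path/to/font.ufo"):
    from defcon import Font
    font = Font(ufo_path)
    engine = LayoutEngine(font)
    # Feature states can be toggled before shaping; the GDEF/GSUB/GPOS tables
    # are (re)compiled the first time the engine is queried.
    engine.setFeatureState("liga", True)
    records = engine.process("Hello", script="latn", rightToLeft=False)
    return [(record.glyphName, record.advanceWidth) for record in records]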
|
|
test_sub_dir = "test_data/1019436/session_1"
def test_fd_jenkinson():
import os
import pickle
import pkg_resources as p
from qap.temporal_qc import fd_jenkinson
coord_xfm = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"coordinate_transformation", \
"rest_calc_tshift_resample.aff12.1D"))
ref_out = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"fd_jenkinson", \
"FD_J.1D"))
meanfd = fd_jenkinson(coord_xfm)
# do da check
with open(meanfd,"r") as f:
test_fd_lines = f.readlines()
with open(ref_out,"r") as f:
ref_fd_lines = f.readlines()
os.system("rm FD_J.1D")
assert test_fd_lines == ref_fd_lines
def test_summarize_fd():
import os
import pickle
import pkg_resources as p
from qap.temporal_qc import summarize_fd
coord_xfm = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"coordinate_transformation", \
"rest_calc_tshift_resample.aff12.1D"))
out_tuple = summarize_fd(coord_xfm)
assert out_tuple == (0.050015007171052638, 0.0, 0.0)
def test_summarize_fd_threshold_01():
import os
import pickle
import pkg_resources as p
from qap.temporal_qc import summarize_fd
coord_xfm = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"coordinate_transformation", \
"rest_calc_tshift_resample.aff12.1D"))
out_tuple = summarize_fd(coord_xfm, threshold=0.1)
assert out_tuple == (0.050015007171052638, 14.0, 9.15032679738562)
def test_outlier_timepoints():
import os
import pickle
import pkg_resources as p
from qap.temporal_qc import outlier_timepoints
func_motion = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"func_motion_correct", \
"rest_calc_tshift_resample_" \
"volreg.nii.gz"))
func_mask = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"functional_brain_mask", \
"rest_calc_tshift_resample_volreg" \
"_mask.nii.gz"))
ref_out = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"outlier_timepoints", \
"outlier_timepoints_ref_out.p"))
out_list = outlier_timepoints(func_motion, func_mask)
with open(ref_out, "r") as f:
ref_list = pickle.load(f)
assert out_list == ref_list
def test_quality_timepoints():
import os
import pickle
import pkg_resources as p
from qap.temporal_qc import quality_timepoints
func_motion = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"func_motion_correct", \
"rest_calc_tshift_resample_" \
"volreg.nii.gz"))
ref_out = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"quality_timepoints", \
"quality_timepoints_output.p"))
out_list = quality_timepoints(func_motion)
with open(ref_out, "r") as f:
ref_list = pickle.load(f)
assert out_list == ref_list
def test_quality_timepoints_no_automask():
import os
import pickle
import pkg_resources as p
from qap.temporal_qc import quality_timepoints
func_motion = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"func_motion_correct", \
"rest_calc_tshift_resample_" \
"volreg.nii.gz"))
ref_out = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"quality_timepoints", \
"quality_timepoints_nomask_output.p"))
out_list = quality_timepoints(func_motion, False)
with open(ref_out, "r") as f:
ref_list = pickle.load(f)
assert out_list == ref_list
def test_global_correlation():
import os
import pkg_resources as p
from qap.temporal_qc import global_correlation
func_motion = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"func_motion_correct", \
"rest_calc_tshift_resample_" \
"volreg.nii.gz"))
func_mask = p.resource_filename("qap", os.path.join(test_sub_dir, \
"rest_1", \
"functional_brain_mask", \
"rest_calc_tshift_resample_volreg" \
"_mask.nii.gz"))
gcor = global_correlation(func_motion, func_mask)
assert gcor == 0.0090767564485253263
def run_all_tests_temporal_qc():
test_fd_jenkinson()
test_summarize_fd()
test_summarize_fd_threshold_01()
test_outlier_timepoints()
test_quality_timepoints()
test_quality_timepoints_no_automask()
test_global_correlation()
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: Specifications handling utility methods
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Pelix
from pelix.utilities import is_string
import pelix.constants
import pelix.ldapfilter
import pelix.remote
import pelix.utilities
# Standard library
try:
# Python 3
# pylint: disable=F0401,E0611
from urllib.parse import urlparse
except ImportError:
# Python 2
# pylint: disable=F0401
from urlparse import urlparse
# ------------------------------------------------------------------------------
PYTHON_LANGUAGE = "python"
""" Prefix to use for the Python specifications """
# ------------------------------------------------------------------------------
class ExportEndpoint(object):
"""
Represents an export end point (one per group of configuration types)
"""
def __init__(self, uid, fw_uid, configurations, name,
svc_ref, service, properties):
"""
Sets up the members
:param uid: Unique identifier of the end point
:param fw_uid: The framework UID
:param configurations: Kinds of end point (xmlrpc, ...)
:param name: Name of the end point
:param svc_ref: ServiceReference of the exported service
:param service: Instance of the exported service
:param properties: Extra properties
:raise ValueError: Invalid UID or the end point exports nothing
(all specifications have been filtered)
"""
if not uid:
raise ValueError("Invalid UID")
# Given information
self.__uid = uid
self.__fw_uid = fw_uid
self.__instance = service
self.__reference = svc_ref
self.__configurations = configurations
self.__name = name
# Normalize extra properties
if not isinstance(properties, dict):
self.__properties = {}
else:
self.__properties = properties
# Normalize the list of configurations
if is_string(configurations):
self.__configurations = (configurations,)
else:
self.__configurations = tuple(configurations)
# Exported specifications
self.__exported_specs = []
exported_specs = compute_exported_specifications(svc_ref)
if exported_specs:
# Transform the specifications for export (add the language prefix)
self.__exported_specs = format_specifications(exported_specs)
else:
raise ValueError("Endpoint {0}, {1}, exports nothing"
.format(self.__uid, self.__name))
def __hash__(self):
"""
Custom hash, as we override equality tests
"""
return hash(self.__uid)
def __eq__(self, other):
"""
Equality checked by UID
"""
return self.__uid == other.uid
def __ne__(self, other):
"""
Inequality checked by UID
"""
return self.__uid != other.uid
def __str__(self):
"""
String representation
"""
return "ExportEndpoint(uid={0}, types={1}, specs={2})" \
.format(self.__uid, self.__configurations,
self.__exported_specs)
def get_properties(self):
"""
Returns merged properties
:return: Endpoint merged properties
"""
# Get service properties
properties = self.__reference.get_properties()
# Merge with local properties
properties.update(self.__properties)
# Some properties can't be merged
for key in (pelix.constants.OBJECTCLASS, pelix.constants.SERVICE_ID):
properties[key] = self.__reference.get_property(key)
# Force the exported configurations
properties[pelix.remote.PROP_EXPORTED_CONFIGS] = self.configurations
return properties
def make_import_properties(self):
"""
Returns the properties of this endpoint where export properties have
been replaced by import ones
:return: A dictionary with import properties
"""
# Convert merged properties
props = to_import_properties(self.get_properties())
# Add the framework UID
props[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = self.__fw_uid
return props
def rename(self, new_name):
"""
Updates the endpoint name
:param new_name: The new name of the endpoint
"""
if new_name:
# Update the name only if the new one is valid
self.__name = new_name
# Access to the service
@property
def instance(self):
"""
Service instance
"""
return self.__instance
@property
def reference(self):
"""
Service reference
"""
return self.__reference
# End point properties
@property
def uid(self):
"""
End point unique identifier
"""
return self.__uid
@property
def framework(self):
"""
Framework UID
"""
return self.__fw_uid
@property
def configurations(self):
"""
Configurations of this end point
"""
return self.__configurations
@property
def name(self):
"""
Name of the end point
"""
return self.__name
@property
def specifications(self):
"""
Returns the exported specifications
"""
return self.__exported_specs
# ------------------------------------------------------------------------------
class ImportEndpoint(object):
"""
Represents an end point to access an imported service
"""
def __init__(self, uid, framework, configurations, name, specifications,
properties):
"""
Sets up the members
:param uid: Unique identifier of the end point
:param framework: UID of the framework exporting the end point
(can be None)
:param configurations: Kinds of end point (xmlrpc, ...)
:param name: Name of the end point
:param specifications: Specifications of the exported service
:param properties: Properties of the service
"""
self.__uid = uid
self.__fw_uid = framework or None
self.__name = name
self.__properties = properties.copy() if properties else {}
# Normalize list of configurations
if is_string(configurations):
self.__configurations = (configurations,)
else:
self.__configurations = tuple(configurations)
# Extract the language prefix in specifications
self.__specifications = extract_specifications(specifications,
self.__properties)
# Public variable: the source server,
# set up by a Pelix discovery service
self.server = None
def __str__(self):
"""
String representation of the end point
"""
return "ImportEndpoint(uid={0}, framework={1}, configurations={2}, " \
"specs={3})".format(self.__uid, self.__fw_uid,
self.__configurations, self.__specifications)
# Access to the service information
@property
def specifications(self):
"""
Specifications of the service
"""
return self.__specifications
@property
def properties(self):
"""
Properties of the imported service
"""
return self.__properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of the imported service
"""
# Keep a copy of the new properties
self.__properties = properties.copy() if properties else {}
# End point properties
@property
def uid(self):
"""
End point unique identifier
"""
return self.__uid
@property
def framework(self):
"""
UID of the framework exporting this end point
"""
return self.__fw_uid
@property
def configurations(self):
"""
Kind of end point
"""
return self.__configurations
@property
def name(self):
"""
Name of the end point
"""
return self.__name
# ------------------------------------------------------------------------------
class EndpointDescription(object):
"""
Endpoint description bean, according to OSGi specifications:
http://www.osgi.org/javadoc/r4v42/org/osgi/service/remoteserviceadmin/
EndpointDescription.html
This is an importer-side description
"""
def __init__(self, svc_ref, properties):
"""
Sets up the description with the given properties
:raise ValueError: Invalid properties
"""
# Set up properties
all_properties = {}
if svc_ref is not None:
all_properties.update(svc_ref.get_properties())
if properties:
all_properties.update(properties)
# Add some properties if the service reference is given
if svc_ref is not None:
# Service ID
all_properties[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = \
svc_ref.get_property(pelix.constants.SERVICE_ID)
# Convert properties
self.__properties = to_import_properties(all_properties)
# Check their validity
self.__check_properties(self.__properties)
# Keep a copy of the endpoint ID
self.__endpoint_id = self.get_id()
def __hash__(self):
"""
Custom hash, as we override equality tests
"""
return hash(self.__endpoint_id)
def __eq__(self, other):
"""
Equality checked by UID
"""
return self.__endpoint_id == other.__endpoint_id
def __ne__(self, other):
"""
Inequality checked by UID
"""
return self.__endpoint_id != other.__endpoint_id
def __str__(self):
"""
String representation
"""
return "EndpointDescription(id={0}; endpoint.service.id={1}; " \
"framework.uuid={2})".format(self.get_id(),
self.get_service_id(),
self.get_framework_uuid())
def __check_properties(self, props):
"""
Checks that the given dictionary doesn't have export keys and has
import keys
:param props: Properties to validate
:raise ValueError: Invalid properties
"""
# Mandatory properties
mandatory = (pelix.remote.PROP_ENDPOINT_ID,
pelix.remote.PROP_IMPORTED_CONFIGS,
pelix.constants.OBJECTCLASS)
for key in mandatory:
if key not in props:
raise ValueError("Missing property: {0}".format(key))
# Export/Import properties
props_export = (pelix.remote.PROP_EXPORTED_CONFIGS,
pelix.remote.PROP_EXPORTED_INTERFACES)
for key in props_export:
if key in props:
raise ValueError("Export property found: {0}".format(key))
def get_configuration_types(self):
"""
Returns the configuration types.
A distribution provider exports a service with an endpoint.
This endpoint uses some kind of communications protocol with a set of
configuration parameters.
There are many different types but each endpoint is configured by only
one configuration type.
However, a distribution provider can be aware of different
configuration types and provide synonyms to increase the chance that a
receiving distribution provider can create a connection to this
endpoint.
This value of the configuration types is stored in the
pelix.remote.PROP_IMPORTED_CONFIGS service property.
:return: The configuration types (list of str)
"""
# Return a copy of the list
return self.__properties[pelix.remote.PROP_IMPORTED_CONFIGS][:]
def get_framework_uuid(self):
"""
Returns the UUID of the framework exporting this endpoint, or None
:return: A framework UUID (str) or None
"""
return self.__properties.get(pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID)
def get_id(self):
"""
Returns the endpoint's id.
"""
return self.__properties[pelix.remote.PROP_ENDPOINT_ID]
def get_intents(self):
"""
Returns the list of intents implemented by this endpoint.
The intents are based on the service.intents on an imported service,
except for any intents that are additionally provided by the importing
distribution provider.
All qualified intents must have been expanded.
This value of the intents is stored in the
pelix.remote.PROP_INTENTS service property.
:return: A list of intents (list of str)
"""
# Return a copy of the list
try:
return self.__properties[pelix.remote.PROP_INTENTS][:]
except KeyError:
return []
def get_interfaces(self):
"""
Provides the list of interfaces implemented by the exported service.
:return: A list of specifications (list of str)
"""
return self.__properties[pelix.constants.OBJECTCLASS][:]
def get_package_version(self, package):
"""
Provides the version of the given package name.
:param package: The name of the package
:return: The version of the specified package as a tuple or (0,0,0)
"""
name = "{0}{1}".format(pelix.remote.PROP_ENDPOINT_PACKAGE_VERSION_,
package)
try:
# Get the version string
version = self.__properties[name]
# Split dots ('.')
return tuple(version.split('.'))
except KeyError:
# No version
return 0, 0, 0
def get_properties(self):
"""
Returns all endpoint properties.
:return: A copy of the endpoint properties
"""
return self.__properties.copy()
def get_service_id(self):
"""
Returns the service id for the service exported through this endpoint.
:return: The ID of service on the exporter side, or 0
"""
try:
return self.__properties[pelix.remote.PROP_ENDPOINT_SERVICE_ID]
except KeyError:
# Not found
return 0
def is_same_service(self, endpoint):
"""
Tests if this endpoint and the given one have the same framework UUID
and service ID
:param endpoint: Another endpoint
:return: True if both endpoints represent the same remote service
"""
return self.get_framework_uuid() == endpoint.get_framework_uuid() \
and self.get_service_id() == endpoint.get_service_id()
def matches(self, ldap_filter):
"""
Tests the properties of this EndpointDescription against the given
filter
:param ldap_filter: A filter
:return: True if properties matches the filter
"""
return pelix.ldapfilter.get_ldap_filter(ldap_filter) \
.matches(self.__properties)
def to_import(self):
"""
Converts an EndpointDescription bean to an ImportEndpoint
:return: An ImportEndpoint bean
"""
# Properties
properties = self.get_properties()
# Framework UUID
fw_uid = self.get_framework_uuid()
# Endpoint name
try:
# From Pelix UID
name = properties[pelix.remote.PROP_ENDPOINT_NAME]
except KeyError:
# Generated
name = '{0}.{1}'.format(fw_uid, self.get_service_id())
# Configuration / kind
configurations = self.get_configuration_types()
# Interfaces
specifications = self.get_interfaces()
return ImportEndpoint(self.get_id(), fw_uid, configurations, name,
specifications, properties)
@classmethod
def from_export(cls, endpoint):
"""
Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean
"""
assert isinstance(endpoint, ExportEndpoint)
# Service properties
properties = endpoint.get_properties()
# Set import keys
properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
properties[pelix.remote.PROP_IMPORTED_CONFIGS] = \
endpoint.configurations
properties[pelix.remote.PROP_EXPORTED_INTERFACES] = \
endpoint.specifications
# Remove export keys
for key in (pelix.remote.PROP_EXPORTED_CONFIGS,
pelix.remote.PROP_EXPORTED_INTERFACES,
pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA):
try:
del properties[key]
except KeyError:
pass
# Other information
properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
properties[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = \
endpoint.framework
return EndpointDescription(None, properties)
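# Hedged example (not part of the module): build a minimal importer-side
# description by hand and convert it into an ImportEndpoint. Only the mandatory
# keys checked by the description's property validation are supplied; every
# value is a placeholder.
def _example_endpoint_description():
    props = {
        pelix.remote.PROP_ENDPOINT_ID: "endpoint-uid",
        pelix.remote.PROP_IMPORTED_CONFIGS: ["xmlrpc"],
        pelix.constants.OBJECTCLASS: ["python:/sample.ISpecification"],
        pelix.remote.PROP_ENDPOINT_NAME: "sample.endpoint",
    }
    description = EndpointDescription(None, props)
    # to_import() extracts the framework UID, configurations and (de-prefixed)
    # specifications into an ImportEndpoint bean for the import chain.
    return description.to_import()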
# ------------------------------------------------------------------------------
def to_import_properties(properties):
"""
Returns a dictionary where export properties have been replaced by import
ones
:param properties: A dictionary of service properties (with export keys)
:return: A dictionary with import properties
"""
# Copy the given dictionary
props = properties.copy()
# Add the "imported" property
props[pelix.remote.PROP_IMPORTED] = True
# Remote service ID
try:
props[pelix.remote.PROP_ENDPOINT_SERVICE_ID] = \
props.pop(pelix.constants.SERVICE_ID)
except KeyError:
# No service ID
pass
# Replace the "export configs"
configs = props.pop(pelix.remote.PROP_EXPORTED_CONFIGS, None)
if configs:
props[pelix.remote.PROP_IMPORTED_CONFIGS] = configs
# Clear other export properties
for key in (pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
pelix.remote.PROP_EXPORTED_INTERFACES):
try:
del props[key]
except KeyError:
# Key wasn't there
pass
return props
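# Hedged example (not part of the module): to_import_properties() rewrites the
# exporter-side keys into their importer-side equivalents. The property names
# come from pelix.constants / pelix.remote; the values are placeholders.
def _example_to_import_properties():
    exported = {
        pelix.constants.OBJECTCLASS: ["sample.ISpecification"],
        pelix.constants.SERVICE_ID: 42,
        pelix.remote.PROP_EXPORTED_CONFIGS: ["xmlrpc"],
    }
    imported = to_import_properties(exported)
    # 'imported' now has PROP_IMPORTED set to True, the service ID moved to
    # PROP_ENDPOINT_SERVICE_ID and the configurations moved to
    # PROP_IMPORTED_CONFIGS; the export-only keys are gone.
    return imported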
# ------------------------------------------------------------------------------
def compute_exported_specifications(svc_ref):
"""
Computes the list of specifications exported by the given service
:param svc_ref: A ServiceReference
:return: The list of exported specifications (or an empty list)
"""
specs = svc_ref.get_property(pelix.constants.OBJECTCLASS)
exported_specs = svc_ref.get_property(
pelix.remote.PROP_EXPORTED_INTERFACES)
rejected_specs = pelix.utilities.to_iterable(
svc_ref.get_property(pelix.remote.PROP_EXPORT_REJECT), False)
if exported_specs and exported_specs != "*":
# A set of specifications is exported, replace "objectClass"
iterable_exports = pelix.utilities.to_iterable(exported_specs, False)
all_exported_specs = [spec for spec in specs
if spec in iterable_exports]
else:
# Export everything
all_exported_specs = pelix.utilities.to_iterable(specs)
# Filter specifications
return [spec for spec in all_exported_specs if spec not in rejected_specs]
def extract_specifications(specifications, properties):
"""
Converts "python:/name" specifications to "name". Keeps the other
specifications as is.
:param specifications: The specifications found in a remote registration
:param properties: Service properties
:return: The filtered specifications (as a list)
"""
all_specs = set(pelix.utilities.to_iterable(specifications))
try:
synonyms = \
pelix.utilities.to_iterable(properties[pelix.remote.PROP_SYNONYMS],
False)
all_specs.update(synonyms)
except KeyError:
# No synonyms property
pass
filtered_specs = set()
for original in all_specs:
try:
# Extract information
lang, spec = _extract_specification_parts(original)
if lang == PYTHON_LANGUAGE:
# Language match: keep the name only
filtered_specs.add(spec)
else:
# Keep the name as is
filtered_specs.add(original)
except ValueError:
# Ignore invalid specifications
pass
return list(filtered_specs)
def format_specifications(specifications):
"""
Transforms the interfaces names into URI strings, with the interface
implementation language as a scheme.
:param specifications: Specifications to transform
:return: The transformed names
"""
transformed = set()
for original in specifications:
try:
lang, spec = _extract_specification_parts(original)
transformed.add(_format_specification(lang, spec))
except ValueError:
# Ignore invalid specifications
pass
return list(transformed)
def _extract_specification_parts(specification):
"""
Extract the language and the interface from a "language:/interface"
interface name
:param specification: The formatted interface name
:return: A (language, interface name) tuple
:raise ValueError: Invalid specification content
"""
try:
# Parse the URI-like string
parsed = urlparse(specification)
except Exception:
# Invalid URL
raise ValueError("Invalid specification URL: {0}"
.format(specification))
# Extract the interface name
interface = parsed.path
# Extract the language, if given
language = parsed.scheme
if not language:
# Simple name, without scheme
language = PYTHON_LANGUAGE
else:
# Formatted name: un-escape it, without the starting '/'
interface = _unescape_specification(interface[1:])
return language, interface
def _format_specification(language, specification):
"""
Formats a "language://interface" string
:param language: Specification language
:param specification: Specification name
:return: A formatted string
"""
return "{0}:/{1}".format(language, _escape_specification(specification))
def _escape_specification(specification):
"""
Escapes the interface string: replaces slashes '/' by '%2F'
:param specification: Specification name
:return: The escaped name
"""
return specification.replace('/', '%2F')
def _unescape_specification(specification):
"""
Unescapes the interface string: replaces '%2F' by slashes '/'
:param specification: Specification name
:return: The unescaped name
"""
return specification.replace('%2F', '/')
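# Hedged example (not part of the module): round-trip a Python specification
# through the helpers above. On export the name gains the "python:/" scheme,
# and extract_specifications() strips it again on the importer side.
def _example_specification_roundtrip():
    formatted = format_specifications(["sample.ISpecification"])
    # formatted == ["python:/sample.ISpecification"]
    restored = extract_specifications(formatted, {})
    # restored == ["sample.ISpecification"]
    return formatted, restored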
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import six
from cinder import backup
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqa_api
from cinder.db.sqlalchemy import models as sqa_models
from cinder import exception
from cinder import objects
from cinder import quota
from cinder import test
import cinder.tests.unit.image.fake
from cinder import volume
CONF = cfg.CONF
class QuotaIntegrationTestCase(test.TestCase):
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.volume_type_name = CONF.default_volume_type
self.volume_type = db.volume_type_create(
context.get_admin_context(),
dict(name=self.volume_type_name))
self.addCleanup(db.volume_type_destroy, context.get_admin_context(),
self.volume_type['id'])
self.flags(quota_volumes=2,
quota_snapshots=2,
quota_gigabytes=20,
quota_backups=2,
quota_backup_gigabytes=20)
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
# Destroy the 'default' quota_class in the database to avoid
# conflicts with the test cases here that are setting up their own
# defaults.
db.quota_class_destroy_all_by_name(self.context, 'default')
self.addCleanup(cinder.tests.unit.image.fake.FakeImageService_reset)
def _create_volume(self, size=1):
"""Create a test volume."""
vol = {}
vol['user_id'] = self.user_id
vol['project_id'] = self.project_id
vol['size'] = size
vol['status'] = 'available'
vol['volume_type_id'] = self.volume_type['id']
vol['host'] = 'fake_host'
vol['availability_zone'] = 'fake_zone'
vol['attach_status'] = 'detached'
volume = objects.Volume(context=self.context, **vol)
volume.create()
return volume
def _create_snapshot(self, volume):
snapshot = {}
snapshot['user_id'] = self.user_id
snapshot['project_id'] = self.project_id
snapshot['volume_id'] = volume['id']
snapshot['volume_size'] = volume['size']
snapshot['host'] = volume['host']
snapshot['status'] = 'available'
snapshot = objects.Snapshot(context=self.context, **snapshot)
snapshot.create()
return snapshot
def _create_backup(self, volume):
backup = {}
backup['user_id'] = self.user_id
backup['project_id'] = self.project_id
backup['volume_id'] = volume['id']
backup['volume_size'] = volume['size']
backup['status'] = 'available'
return db.backup_create(self.context, backup)
def test_volume_size_limit_exceeds(self):
resource = 'volumes_%s' % self.volume_type_name
db.quota_class_create(self.context, 'default', resource, 1)
flag_args = {
'quota_volumes': 10,
'quota_gigabytes': 1000,
'per_volume_size_limit': 5
}
self.flags(**flag_args)
self.assertRaises(exception.VolumeSizeExceedsLimit,
volume.API().create,
self.context, 10, '', '',)
def test_too_many_volumes(self):
volume_ids = []
for _i in range(CONF.quota_volumes):
vol_ref = self._create_volume()
volume_ids.append(vol_ref['id'])
ex = self.assertRaises(exception.VolumeLimitExceeded,
volume.API().create,
self.context, 1, '', '',
volume_type=self.volume_type)
msg = ("Maximum number of volumes allowed (%d) exceeded for"
" quota 'volumes'." % CONF.quota_volumes)
self.assertEqual(msg, six.text_type(ex))
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_volumes_of_type(self):
resource = 'volumes_%s' % self.volume_type_name
db.quota_class_create(self.context, 'default', resource, 1)
flag_args = {
'quota_volumes': 2000,
'quota_gigabytes': 2000
}
self.flags(**flag_args)
vol_ref = self._create_volume()
ex = self.assertRaises(exception.VolumeLimitExceeded,
volume.API().create,
self.context, 1, '', '',
volume_type=self.volume_type)
msg = ("Maximum number of volumes allowed (1) exceeded for"
" quota '%s'." % resource)
self.assertEqual(msg, six.text_type(ex))
vol_ref.destroy()
def test_too_many_snapshots_of_type(self):
resource = 'snapshots_%s' % self.volume_type_name
db.quota_class_create(self.context, 'default', resource, 1)
flag_args = {
'quota_volumes': 2000,
'quota_gigabytes': 2000,
}
self.flags(**flag_args)
vol_ref = self._create_volume()
snap_ref = self._create_snapshot(vol_ref)
self.assertRaises(exception.SnapshotLimitExceeded,
volume.API().create_snapshot,
self.context, vol_ref, '', '')
snap_ref.destroy()
vol_ref.destroy()
def test_too_many_backups(self):
resource = 'backups'
db.quota_class_create(self.context, 'default', resource, 1)
flag_args = {
'quota_backups': 2000,
'quota_backup_gigabytes': 2000
}
self.flags(**flag_args)
vol_ref = self._create_volume()
backup_ref = self._create_backup(vol_ref)
with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
mock__is_backup_service_enabled:
mock__is_backup_service_enabled.return_value = True
self.assertRaises(exception.BackupLimitExceeded,
backup.API().create,
self.context,
'name',
'description',
vol_ref['id'],
'container',
False,
None)
db.backup_destroy(self.context, backup_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
def test_too_many_gigabytes(self):
volume_ids = []
vol_ref = self._create_volume(size=20)
volume_ids.append(vol_ref['id'])
raised_exc = self.assertRaises(
exception.VolumeSizeExceedsAvailableQuota, volume.API().create,
self.context, 1, '', '', volume_type=self.volume_type)
expected = exception.VolumeSizeExceedsAvailableQuota(
requested=1, quota=20, consumed=20)
self.assertEqual(str(expected), str(raised_exc))
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_combined_gigabytes(self):
vol_ref = self._create_volume(size=10)
snap_ref = self._create_snapshot(vol_ref)
self.assertRaises(exception.QuotaError,
volume.API().create_snapshot,
self.context, vol_ref, '', '')
usages = db.quota_usage_get_all_by_project(self.context,
self.project_id)
self.assertEqual(20, usages['gigabytes']['in_use'])
snap_ref.destroy()
vol_ref.destroy()
def test_too_many_combined_backup_gigabytes(self):
vol_ref = self._create_volume(size=10000)
backup_ref = self._create_backup(vol_ref)
with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
mock__is_backup_service_enabled:
mock__is_backup_service_enabled.return_value = True
self.assertRaises(
exception.VolumeBackupSizeExceedsAvailableQuota,
backup.API().create,
context=self.context,
name='name',
description='description',
volume_id=vol_ref['id'],
container='container',
incremental=False)
db.backup_destroy(self.context, backup_ref['id'])
vol_ref.destroy()
def test_no_snapshot_gb_quota_flag(self):
self.flags(quota_volumes=2,
quota_snapshots=2,
quota_gigabytes=20,
no_snapshot_gb_quota=True)
vol_ref = self._create_volume(size=10)
snap_ref = self._create_snapshot(vol_ref)
snap_ref2 = volume.API().create_snapshot(self.context,
vol_ref, '', '')
# Make sure the snapshot volume_size isn't included in usage.
vol_ref2 = volume.API().create(self.context, 10, '', '')
usages = db.quota_usage_get_all_by_project(self.context,
self.project_id)
self.assertEqual(20, usages['gigabytes']['in_use'])
self.assertEqual(0, usages['gigabytes']['reserved'])
snap_ref.destroy()
snap_ref2.destroy()
vol_ref.destroy()
vol_ref2.destroy()
def test_backup_gb_quota_flag(self):
self.flags(quota_volumes=2,
quota_snapshots=2,
quota_backups=2,
quota_gigabytes=20
)
vol_ref = self._create_volume(size=10)
backup_ref = self._create_backup(vol_ref)
with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
mock__is_backup_service_enabled:
mock__is_backup_service_enabled.return_value = True
backup_ref2 = backup.API().create(self.context,
'name',
'description',
vol_ref['id'],
'container',
False,
None)
# Make sure the backup volume_size isn't included in usage.
vol_ref2 = volume.API().create(self.context, 10, '', '')
usages = db.quota_usage_get_all_by_project(self.context,
self.project_id)
self.assertEqual(20, usages['gigabytes']['in_use'])
self.assertEqual(0, usages['gigabytes']['reserved'])
db.backup_destroy(self.context, backup_ref['id'])
db.backup_destroy(self.context, backup_ref2['id'])
vol_ref.destroy()
vol_ref2.destroy()
def test_too_many_gigabytes_of_type(self):
resource = 'gigabytes_%s' % self.volume_type_name
db.quota_class_create(self.context, 'default', resource, 10)
flag_args = {
'quota_volumes': 2000,
'quota_gigabytes': 2000,
}
self.flags(**flag_args)
vol_ref = self._create_volume(size=10)
raised_exc = self.assertRaises(
exception.VolumeSizeExceedsAvailableQuota, volume.API().create,
self.context, 1, '', '', volume_type=self.volume_type)
expected = exception.VolumeSizeExceedsAvailableQuota(
requested=1, quota=10, consumed=10, name=resource)
self.assertEqual(str(expected), str(raised_exc))
vol_ref.destroy()
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
self.user_id = 'fake_user'
self.project_id = project_id
self.quota_class = quota_class
def elevated(self):
elevated = self.__class__(self.project_id, self.quota_class)
elevated.is_admin = True
return elevated
class FakeDriver(object):
def __init__(self, by_project=None, by_class=None, reservations=None):
self.called = []
self.by_project = by_project or {}
self.by_class = by_class or {}
self.reservations = reservations or []
def get_by_project(self, context, project_id, resource):
self.called.append(('get_by_project', context, project_id, resource))
try:
return self.by_project[project_id][resource]
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
return self.by_class[quota_class][resource]
except KeyError:
raise exception.QuotaClassNotFound(class_name=quota_class)
def get_default(self, context, resource, parent_project_id=None):
self.called.append(('get_default', context, resource,
parent_project_id))
return resource.default
def get_defaults(self, context, resources, parent_project_id=None):
self.called.append(('get_defaults', context, resources,
parent_project_id))
return resources
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
self.called.append(('get_class_quotas', context, resources,
quota_class, defaults))
return resources
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, usages=True,
parent_project_id=None):
self.called.append(('get_project_quotas', context, resources,
project_id, quota_class, defaults, usages,
parent_project_id))
return resources
def limit_check(self, context, resources, values, project_id=None):
self.called.append(('limit_check', context, resources,
values, project_id))
def reserve(self, context, resources, deltas, expire=None,
project_id=None):
self.called.append(('reserve', context, resources, deltas,
expire, project_id))
return self.reservations
def commit(self, context, reservations, project_id=None):
self.called.append(('commit', context, reservations, project_id))
def rollback(self, context, reservations, project_id=None):
self.called.append(('rollback', context, reservations, project_id))
def destroy_by_project(self, context, project_id):
self.called.append(('destroy_by_project', context, project_id))
def expire(self, context):
self.called.append(('expire', context))
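# Typical assertion pattern against FakeDriver (used throughout the
# QuotaEngine tests below): perform an engine call, then compare the recorded
# tuples, e.g.
#   quota_obj.expire(context)
#   self.assertEqual([('expire', context), ], driver.called)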
class BaseResourceTestCase(test.TestCase):
def test_no_flag(self):
resource = quota.BaseResource('test_resource')
self.assertEqual('test_resource', resource.name)
self.assertIsNone(resource.flag)
self.assertEqual(-1, resource.default)
def test_with_flag(self):
# We know this flag exists, so use it...
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
self.assertEqual('test_resource', resource.name)
self.assertEqual('quota_volumes', resource.flag)
self.assertEqual(10, resource.default)
def test_with_flag_no_quota(self):
self.flags(quota_volumes=-1)
resource = quota.BaseResource('test_resource', 'quota_volumes')
self.assertEqual('test_resource', resource.name)
self.assertEqual('quota_volumes', resource.flag)
self.assertEqual(-1, resource.default)
def test_quota_no_project_no_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver()
context = FakeContext(None, None)
quota_value = resource.quota(driver, context)
self.assertEqual(10, quota_value)
def test_quota_with_project_no_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(
by_project=dict(
test_project=dict(test_resource=15), ))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(15, quota_value)
def test_quota_no_project_with_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(
by_class=dict(
test_class=dict(test_resource=20), ))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(20, quota_value)
def test_quota_with_project_with_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15), ),
by_class=dict(test_class=dict(test_resource=20), ))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(15, quota_value)
def test_quota_override_project_with_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
override_project=dict(test_resource=20), ))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.assertEqual(20, quota_value)
def test_quota_override_subproject_no_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes',
parent_project_id='test_parent_project')
driver = FakeDriver()
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(0, quota_value)
def test_quota_with_project_override_class(self):
self.flags(quota_volumes=10)
resource = quota.BaseResource('test_resource', 'quota_volumes')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=15),
override_class=dict(test_resource=20), ))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
self.assertEqual(20, quota_value)
class VolumeTypeResourceTestCase(test.TestCase):
def test_name_and_flag(self):
volume_type_name = 'foo'
volume = {'name': volume_type_name, 'id': 'myid'}
resource = quota.VolumeTypeResource('volumes', volume)
self.assertEqual('volumes_%s' % volume_type_name, resource.name)
self.assertIsNone(resource.flag)
self.assertEqual(-1, resource.default)
class QuotaEngineTestCase(test.TestCase):
def test_init(self):
quota_obj = quota.QuotaEngine()
self.assertEqual({}, quota_obj.resources)
self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
def test_init_override_string(self):
quota_obj = quota.QuotaEngine(
quota_driver_class='cinder.tests.unit.test_quota.FakeDriver')
self.assertEqual({}, quota_obj.resources)
self.assertIsInstance(quota_obj._driver, FakeDriver)
def test_init_override_obj(self):
quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
self.assertEqual({}, quota_obj.resources)
self.assertEqual(FakeDriver, quota_obj._driver)
def test_register_resource(self):
quota_obj = quota.QuotaEngine()
resource = quota.AbsoluteResource('test_resource')
quota_obj.register_resource(resource)
self.assertEqual(dict(test_resource=resource), quota_obj.resources)
def test_register_resources(self):
quota_obj = quota.QuotaEngine()
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource3'), ]
quota_obj.register_resources(resources)
self.assertEqual(dict(test_resource1=resources[0],
test_resource2=resources[1],
test_resource3=resources[2], ),
quota_obj.resources)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(
by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
self.assertEqual([('get_by_project',
context,
'test_project',
'test_resource'), ], driver.called)
self.assertEqual(42, result)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(
by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
self.assertEqual([('get_by_class',
context,
'test_class',
'test_resource'), ], driver.called)
self.assertEqual(42, result)
def _make_quota_obj(self, driver):
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
resources = [
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource1'), ]
quota_obj.register_resources(resources)
return quota_obj
def test_get_defaults(self):
context = FakeContext(None, None)
parent_project_id = None
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
self.assertEqual([('get_defaults',
context,
quota_obj.resources,
parent_project_id), ], driver.called)
self.assertEqual(quota_obj.resources, result)
def test_get_class_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_class_quotas(context, 'test_class')
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual([
('get_class_quotas',
context,
quota_obj.resources,
'test_class', True),
('get_class_quotas',
context, quota_obj.resources,
'test_class', False), ], driver.called)
self.assertEqual(quota_obj.resources, result1)
self.assertEqual(quota_obj.resources, result2)
def test_get_project_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
parent_project_id = None
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project')
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual([
('get_project_quotas',
context,
quota_obj.resources,
'test_project',
None,
True,
True,
parent_project_id),
('get_project_quotas',
context,
quota_obj.resources,
'test_project',
'test_class',
False,
False,
parent_project_id), ], driver.called)
self.assertEqual(quota_obj.resources, result1)
self.assertEqual(quota_obj.resources, result2)
def test_get_subproject_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
parent_project_id = 'test_parent_project_id'
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project',
parent_project_id=
parent_project_id)
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False,
parent_project_id=
parent_project_id)
self.assertEqual([
('get_project_quotas',
context,
quota_obj.resources,
'test_project',
None,
True,
True,
parent_project_id),
('get_project_quotas',
context,
quota_obj.resources,
'test_project',
'test_class',
False,
False,
parent_project_id), ], driver.called)
self.assertEqual(quota_obj.resources, result1)
self.assertEqual(quota_obj.resources, result2)
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource5',
True, foo='bar')
def test_count_wrong_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource1',
True, foo='bar')
def test_count(self):
def fake_count(context, *args, **kwargs):
self.assertEqual((True,), args)
self.assertEqual(dict(foo='bar'), kwargs)
return 5
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.register_resource(quota.CountableResource('test_resource5',
fake_count))
result = quota_obj.count(context, 'test_resource5', True, foo='bar')
self.assertEqual(5, result)
def test_limit_check(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
test_resource3=2, test_resource4=1)
self.assertEqual([
('limit_check',
context,
quota_obj.resources,
dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,),
None), ],
driver.called)
def test_reserve(self):
context = FakeContext(None, None)
driver = FakeDriver(reservations=['resv-01',
'resv-02',
'resv-03',
'resv-04', ])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource4=1)
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
result3 = quota_obj.reserve(context, project_id='fake_project',
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
self.assertEqual([
('reserve',
context,
quota_obj.resources,
dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1, ),
None,
None),
('reserve',
context,
quota_obj.resources,
dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4, ),
3600,
None),
('reserve',
context,
quota_obj.resources,
dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4, ),
None,
'fake_project'), ],
driver.called)
self.assertEqual(['resv-01',
'resv-02',
'resv-03',
'resv-04', ], result1)
self.assertEqual(['resv-01',
'resv-02',
'resv-03',
'resv-04', ], result2)
self.assertEqual(['resv-01',
'resv-02',
'resv-03',
'resv-04', ], result3)
def test_commit(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual([('commit',
context,
['resv-01',
'resv-02',
'resv-03'],
None), ],
driver.called)
def test_rollback(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual([('rollback',
context,
['resv-01',
'resv-02',
'resv-03'],
None), ],
driver.called)
def test_destroy_by_project(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_by_project(context, 'test_project')
self.assertEqual([('destroy_by_project',
context,
'test_project'), ],
driver.called)
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
self.assertEqual([('expire', context), ], driver.called)
def test_resource_names(self):
quota_obj = self._make_quota_obj(None)
self.assertEqual(['test_resource1', 'test_resource2',
'test_resource3', 'test_resource4'],
quota_obj.resource_names)
class VolumeTypeQuotaEngineTestCase(test.TestCase):
def test_default_resources(self):
def fake_vtga(context, inactive=False, filters=None):
return {}
self.stubs.Set(db, 'volume_type_get_all', fake_vtga)
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(['backup_gigabytes', 'backups',
'gigabytes', 'per_volume_gigabytes',
'snapshots', 'volumes'],
engine.resource_names)
def test_volume_type_resources(self):
ctx = context.RequestContext('admin', 'admin', is_admin=True)
vtype = db.volume_type_create(ctx, {'name': 'type1'})
vtype2 = db.volume_type_create(ctx, {'name': 'type_2'})
def fake_vtga(context, inactive=False, filters=None):
return {
'type1': {
'id': vtype['id'],
'name': 'type1',
'extra_specs': {},
},
'type_2': {
                    'id': vtype2['id'],
'name': 'type_2',
'extra_specs': {},
},
}
self.stubs.Set(db, 'volume_type_get_all', fake_vtga)
engine = quota.VolumeTypeQuotaEngine()
self.assertEqual(['backup_gigabytes', 'backups',
'gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
'per_volume_gigabytes', 'snapshots',
'snapshots_type1', 'snapshots_type_2', 'volumes',
'volumes_type1', 'volumes_type_2',
], engine.resource_names)
db.volume_type_destroy(ctx, vtype['id'])
db.volume_type_destroy(ctx, vtype2['id'])
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.flags(quota_volumes=10,
quota_snapshots=10,
quota_gigabytes=1000,
quota_backups=10,
quota_backup_gigabytes=1000,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.driver = quota.DbQuotaDriver()
self.calls = []
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime.utcnow()
def test_get_defaults(self):
# Use our pre-defined resources
self._stub_quota_class_get_default()
self._stub_volume_type_get_all()
result = self.driver.get_defaults(None, quota.QUOTAS.resources)
self.assertEqual(
dict(
volumes=10,
snapshots=10,
gigabytes=1000,
backups=10,
backup_gigabytes=1000,
per_volume_gigabytes=-1), result)
def test_subproject_get_defaults(self):
# Test subproject default values.
self._stub_volume_type_get_all()
parent_project_id = 'test_parent_project_id'
result = self.driver.get_defaults(None,
quota.QUOTAS.resources,
parent_project_id)
self.assertEqual(
dict(
volumes=0,
snapshots=0,
gigabytes=0,
backups=0,
backup_gigabytes=0,
per_volume_gigabytes=0), result)
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
def fake_qcgd(context):
self.calls.append('quota_class_get_default')
return dict(volumes=10,
snapshots=10,
gigabytes=1000,
backups=10,
backup_gigabytes=1000
)
self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
def _stub_volume_type_get_all(self):
def fake_vtga(context, inactive=False, filters=None):
return {}
self.stubs.Set(db, 'volume_type_get_all', fake_vtga)
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual('test_class', quota_class)
return dict(gigabytes=500, volumes=10, snapshots=10, backups=10,
backup_gigabytes=500)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
self._stub_volume_type_get_all()
result = self.driver.get_class_quotas(None, quota.QUOTAS.resources,
'test_class')
self.assertEqual(['quota_class_get_all_by_name'], self.calls)
self.assertEqual(dict(volumes=10,
gigabytes=500,
snapshots=10,
backups=10,
backup_gigabytes=500,
per_volume_gigabytes=-1), result)
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS.resources,
'test_class', False)
self.assertEqual(['quota_class_get_all_by_name'], self.calls)
self.assertEqual(dict(volumes=10,
gigabytes=500,
snapshots=10,
backups=10,
backup_gigabytes=500), result)
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual('test_project', project_id)
return dict(volumes=10, gigabytes=50, reserved=0,
snapshots=10, backups=10,
backup_gigabytes=50)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual('test_project', project_id)
return dict(volumes=dict(in_use=2, reserved=0),
snapshots=dict(in_use=2, reserved=0),
gigabytes=dict(in_use=10, reserved=0),
backups=dict(in_use=2, reserved=0),
backup_gigabytes=dict(in_use=10, reserved=0)
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
self._stub_quota_class_get_default()
def _stub_get_by_subproject(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual('test_project', project_id)
return dict(volumes=10, gigabytes=50, reserved=0)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual('test_project', project_id)
return dict(volumes=dict(in_use=2, reserved=0),
gigabytes=dict(in_use=10, reserved=0))
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
def test_get_project_quotas(self):
self._stub_get_by_project()
self._stub_volume_type_get_all()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources, 'test_project')
self.assertEqual(['quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default', ], self.calls)
self.assertEqual(dict(volumes=dict(limit=10,
in_use=2,
reserved=0, ),
snapshots=dict(limit=10,
in_use=2,
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
per_volume_gigabytes=dict(in_use=0,
limit=-1,
                                                        reserved=0)
), result)
def test_get_subproject_quotas(self):
self._stub_get_by_subproject()
self._stub_volume_type_get_all()
parent_project_id = 'test_parent_project_id'
result = self.driver.get_project_quotas(
FakeContext('test_project', None),
quota.QUOTAS.resources, 'test_project',
parent_project_id=parent_project_id)
self.assertEqual(['quota_get_all_by_project',
'quota_usage_get_all_by_project', ], self.calls)
self.assertEqual(dict(volumes=dict(limit=10,
in_use=2,
reserved=0, ),
snapshots=dict(limit=0,
in_use=0,
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
backups=dict(limit=0,
in_use=0,
reserved=0, ),
backup_gigabytes=dict(limit=0,
in_use=0,
reserved=0, ),
per_volume_gigabytes=dict(in_use=0,
limit=0,
                                                        reserved=0)
), result)
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
self._stub_volume_type_get_all()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS.resources, 'test_project')
self.assertEqual(['quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_default', ], self.calls)
self.assertEqual(dict(volumes=dict(limit=10,
in_use=2,
reserved=0, ),
snapshots=dict(limit=10,
in_use=2,
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
per_volume_gigabytes=dict(in_use=0,
limit=-1,
reserved=0)
), result)
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
self._stub_volume_type_get_all()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS.resources, 'test_project', quota_class='test_class')
self.assertEqual(['quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default', ], self.calls)
self.assertEqual(dict(volumes=dict(limit=10,
in_use=2,
reserved=0, ),
snapshots=dict(limit=10,
in_use=2,
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
per_volume_gigabytes=dict(in_use=0,
limit=-1,
                                                        reserved=0)),
result)
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
self._stub_volume_type_get_all()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources, 'test_project', defaults=False)
self.assertEqual(['quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default', ], self.calls)
self.assertEqual(dict(backups=dict(limit=10,
in_use=2,
reserved=0, ),
backup_gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
gigabytes=dict(limit=50,
in_use=10,
reserved=0, ),
snapshots=dict(limit=10,
in_use=2,
reserved=0, ),
volumes=dict(limit=10,
in_use=2,
reserved=0, ),
), result)
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
self._stub_volume_type_get_all()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources, 'test_project', usages=False)
self.assertEqual(['quota_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default', ], self.calls)
self.assertEqual(dict(volumes=dict(limit=10, ),
snapshots=dict(limit=10, ),
backups=dict(limit=10, ),
gigabytes=dict(limit=50, ),
backup_gigabytes=dict(limit=50, ),
per_volume_gigabytes=dict(limit=-1, )), result)
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True, parent_project_id=None):
self.calls.append('get_project_quotas')
return {k: dict(limit=v.default) for k, v in resources.items()}
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS.resources,
['unknown'], True)
self.assertEqual([], self.calls)
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS.resources,
['unknown'], False)
self.assertEqual([], self.calls)
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS.resources,
['metadata_items'], True)
self.assertEqual([], self.calls)
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS.resources,
['volumes'], False)
self.assertEqual([], self.calls)
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS.resources,
['volumes', 'gigabytes'],
True)
self.assertEqual(['get_project_quotas'], self.calls)
self.assertEqual(dict(volumes=10, gigabytes=1000, ), result)
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2), expire='invalid')
self.assertEqual([], self.calls)
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(['get_project_quotas',
('quota_reserve', expire, 0, 0), ], self.calls)
self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result)
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(['get_project_quotas',
('quota_reserve', expire, 0, 0), ], self.calls)
self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result)
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(['get_project_quotas',
('quota_reserve', expire, 0, 0), ], self.calls)
self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result)
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2), expire=expire)
self.assertEqual(['get_project_quotas',
('quota_reserve', expire, 0, 0), ], self.calls)
self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result)
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2), expire=expire)
self.assertEqual(['get_project_quotas',
('quota_reserve', expire, 500, 0), ], self.calls)
self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result)
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS.resources,
dict(volumes=2), expire=expire)
self.assertEqual(['get_project_quotas',
('quota_reserve', expire, 0, 86400), ], self.calls)
self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result)
def _stub_quota_destroy_by_project(self):
def fake_quota_destroy_by_project(context, project_id):
self.calls.append(('quota_destroy_by_project', project_id))
return None
self.stubs.Set(sqa_api, 'quota_destroy_by_project',
fake_quota_destroy_by_project)
def test_destroy_quota_by_project(self):
self._stub_quota_destroy_by_project()
self.driver.destroy_by_project(FakeContext('test_project',
'test_class'),
'test_project')
        self.assertEqual([('quota_destroy_by_project', 'test_project'), ],
self.calls)
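# FakeSession and FakeUsage are minimal stand-ins used when stubbing out the
# SQLAlchemy layer: FakeSession is a no-op context manager returned by
# get_session()/begin(), and FakeUsage is a QuotaUsage model whose save() does
# nothing, so quota_reserve can be exercised without a real database.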
class FakeSession(object):
def begin(self):
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# cinder.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
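    # The call exercised throughout these tests is
    #   sqa_api.quota_reserve(context, resources, quotas, deltas, expire,
    #                         until_refresh, max_age, project_id=None)
    # with the db helpers (_get_quota_usages, _quota_usage_create,
    # _reservation_create) and the per-resource sync functions stubbed out in
    # setUp() below.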
def setUp(self):
super(QuotaReserveSqlAlchemyTestCase, self).setUp()
self.sync_called = set()
def make_sync(res_name):
def fake_sync(context, project_id, volume_type_id=None,
volume_type_name=None, session=None):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0}
return fake_sync
self.resources = {}
QUOTA_SYNC_FUNCTIONS = {}
for res_name in ('volumes', 'gigabytes'):
res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
QUOTA_SYNC_FUNCTIONS['_sync_%s' % res_name] = make_sync(res_name)
self.resources[res_name] = res
self.stubs.Set(sqa_api, 'QUOTA_SYNC_FUNCTIONS', QUOTA_SYNC_FUNCTIONS)
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
self.reservations_created = {}
def fake_get_session():
return FakeSession()
def fake_get_quota_usages(context, session, project_id):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
reserved, until_refresh, session=None,
save=True):
quota_usage_ref = self._make_quota_usage(
project_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
def fake_reservation_create(context, uuid, usage_id, project_id,
resource, delta, expire, session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime.utcnow()
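    # Helper builders: _make_quota_usage/_make_reservation construct model rows
    # in memory, init_usage seeds self.usages, and compare_usage /
    # compare_reservation verify the rows that quota_reserve created or
    # updated.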
def _make_quota_usage(self, project_id, resource, in_use, reserved,
until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, resource, in_use, reserved,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
quota_usage_ref = self._make_quota_usage(project_id, resource, in_use,
reserved, until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(value, actual,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(value, actual,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(0, len(reservations))
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5,
gigabytes=10 * 1024, )
deltas = dict(volumes=2,
gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages_created,
[dict(resource='volumes',
project_id='test_project',
in_use=0,
reserved=2,
until_refresh=None),
dict(resource='gigabytes',
project_id='test_project',
in_use=0,
reserved=2 * 1024,
until_refresh=None), ])
self.compare_reservation(
result,
[dict(resource='volumes',
usage_id=self.usages_created['volumes'],
project_id='test_project',
delta=2),
dict(resource='gigabytes',
usage_id=self.usages_created['gigabytes'],
delta=2 * 1024), ])
def test_quota_reserve_negative_in_use(self):
self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1)
self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5,
gigabytes=10 * 1024, )
deltas = dict(volumes=2,
gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='gigabytes',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5), ])
self.assertEqual({}, self.usages_created)
self.compare_reservation(result,
[dict(resource='volumes',
usage_id=self.usages['volumes'],
project_id='test_project',
delta=2),
dict(resource='gigabytes',
usage_id=self.usages['gigabytes'],
delta=2 * 1024), ])
def test_quota_reserve_until_refresh(self):
self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1)
self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5, gigabytes=10 * 1024, )
deltas = dict(volumes=2, gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='gigabytes',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5), ])
self.assertEqual({}, self.usages_created)
self.compare_reservation(result,
[dict(resource='volumes',
usage_id=self.usages['volumes'],
project_id='test_project',
delta=2),
dict(resource='gigabytes',
usage_id=self.usages['gigabytes'],
delta=2 * 1024), ])
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
self.init_usage('test_project', 'volumes', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'gigabytes', 3, 0,
created_at=record_created, updated_at=record_created)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5, gigabytes=10 * 1024, )
deltas = dict(volumes=2, gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, max_age)
self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='gigabytes',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=None), ])
self.assertEqual({}, self.usages_created)
self.compare_reservation(result,
[dict(resource='volumes',
usage_id=self.usages['volumes'],
project_id='test_project',
delta=2),
dict(resource='gigabytes',
usage_id=self.usages['gigabytes'],
delta=2 * 1024), ])
def test_quota_reserve_no_refresh(self):
self.init_usage('test_project', 'volumes', 3, 0)
self.init_usage('test_project', 'gigabytes', 3, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5, gigabytes=10 * 1024, )
deltas = dict(volumes=2, gigabytes=2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(set([]), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=3,
reserved=2,
until_refresh=None),
dict(resource='gigabytes',
project_id='test_project',
in_use=3,
reserved=2 * 1024,
until_refresh=None), ])
self.assertEqual({}, self.usages_created)
self.compare_reservation(result,
[dict(resource='volumes',
usage_id=self.usages['volumes'],
project_id='test_project',
delta=2),
dict(resource='gigabytes',
usage_id=self.usages['gigabytes'],
delta=2 * 1024), ])
def test_quota_reserve_unders(self):
self.init_usage('test_project', 'volumes', 1, 0)
self.init_usage('test_project', 'gigabytes', 1 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5, gigabytes=10 * 1024, )
deltas = dict(volumes=-2, gigabytes=-2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(set([]), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=1,
reserved=0,
until_refresh=None),
dict(resource='gigabytes',
project_id='test_project',
in_use=1 * 1024,
reserved=0,
until_refresh=None), ])
self.assertEqual({}, self.usages_created)
self.compare_reservation(result,
[dict(resource='volumes',
usage_id=self.usages['volumes'],
project_id='test_project',
delta=-2),
dict(resource='gigabytes',
usage_id=self.usages['gigabytes'],
delta=-2 * 1024), ])
def test_quota_reserve_overs(self):
self.init_usage('test_project', 'volumes', 4, 0)
self.init_usage('test_project', 'gigabytes', 10 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5, gigabytes=10 * 1024, )
deltas = dict(volumes=2, gigabytes=2 * 1024, )
self.assertRaises(exception.OverQuota,
sqa_api.quota_reserve,
context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(set([]), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=4,
reserved=0,
until_refresh=None),
dict(resource='gigabytes',
project_id='test_project',
in_use=10 * 1024,
reserved=0,
until_refresh=None), ])
self.assertEqual({}, self.usages_created)
self.assertEqual({}, self.reservations_created)
def test_quota_reserve_reduction(self):
self.init_usage('test_project', 'volumes', 10, 0)
self.init_usage('test_project', 'gigabytes', 20 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(volumes=5, gigabytes=10 * 1024, )
deltas = dict(volumes=-2, gigabytes=-2 * 1024, )
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(set([]), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
in_use=10,
reserved=0,
until_refresh=None),
dict(resource='gigabytes',
project_id='test_project',
in_use=20 * 1024,
reserved=0,
until_refresh=None), ])
self.assertEqual({}, self.usages_created)
self.compare_reservation(result,
[dict(resource='volumes',
usage_id=self.usages['volumes'],
project_id='test_project',
delta=-2),
dict(resource='gigabytes',
usage_id=self.usages['gigabytes'],
project_id='test_project',
delta=-2 * 1024), ])
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from eventlet import timeout as etimeout
import mock
from nova.compute import vm_states
from nova import exception
from nova.objects import flavor as flavor_obj
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_virtual_interface
from nova.virt import hardware
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import units
from hyperv.nova import constants
from hyperv.nova import vmops
from hyperv.nova import vmutils
from hyperv.nova import volumeops
from hyperv.tests import fake_instance
from hyperv.tests.unit import test_base
CONF = cfg.CONF
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
_FAKE_TIMEOUT = 2
FAKE_SIZE = 10
FAKE_DIR = 'fake_dir'
FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'
FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
FAKE_LOG = 'fake_log'
_WIN_VERSION_6_3 = '6.3.0'
_WIN_VERSION_6_4 = '6.4.0'
ISO9660 = 'iso9660'
_FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
def setUp(self):
super(VMOpsTestCase, self).setUp()
self.context = 'fake-context'
self._vmops = vmops.VMOps()
self._vmops._vmutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()
self._vmops._pathutils = mock.MagicMock()
self._vmops._hostutils = mock.MagicMock()
self._vmops._serial_console_ops = mock.MagicMock()
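    # Every *utils helper on the VMOps instance is replaced with a MagicMock in
    # setUp(), so the tests below assert on the calls made to those mocks
    # rather than touching a real Hyper-V host.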
def test_get_vif_driver_cached(self):
self._vmops._vif_driver_cache = mock.MagicMock()
self._vmops._vif_driver_cache.get.return_value = mock.sentinel.VIF_DRV
self._vmops._get_vif_driver(mock.sentinel.VIF_TYPE)
self._vmops._vif_driver_cache.get.assert_called_with(
mock.sentinel.VIF_TYPE)
@mock.patch('hyperv.nova.vif.get_vif_driver')
def test_get_vif_driver_not_cached(self, mock_get_vif_driver):
mock_get_vif_driver.return_value = mock.sentinel.VIF_DRV
self._vmops._get_vif_driver(mock.sentinel.VIF_TYPE)
mock_get_vif_driver.assert_called_once_with(mock.sentinel.VIF_TYPE)
self.assertEqual(mock.sentinel.VIF_DRV,
self._vmops._vif_driver_cache[mock.sentinel.VIF_TYPE])
def test_list_instances(self):
mock_instance = mock.MagicMock()
self._vmops._vmutils.list_instances.return_value = [mock_instance]
response = self._vmops.list_instances()
self._vmops._vmutils.list_instances.assert_called_once_with()
self.assertEqual(response, [mock_instance])
def _test_get_info(self, vm_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_info = mock.MagicMock(spec_set=dict)
fake_info = {'EnabledState': 2,
'MemoryUsage': mock.sentinel.FAKE_MEM_KB,
'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU,
'UpTime': mock.sentinel.FAKE_CPU_NS}
def getitem(key):
return fake_info[key]
mock_info.__getitem__.side_effect = getitem
expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2],
max_mem_kb=mock.sentinel.FAKE_MEM_KB,
mem_kb=mock.sentinel.FAKE_MEM_KB,
num_cpu=mock.sentinel.FAKE_NUM_CPU,
cpu_time_ns=mock.sentinel.FAKE_CPU_NS)
self._vmops._vmutils.vm_exists.return_value = vm_exists
self._vmops._vmutils.get_vm_summary_info.return_value = mock_info
if not vm_exists:
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info, mock_instance)
else:
response = self._vmops.get_info(mock_instance)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
mock_instance.name)
self.assertEqual(response, expected)
def test_get_info(self):
self._test_get_info(vm_exists=True)
def test_get_info_exception(self):
self._test_get_info(vm_exists=False)
def _prepare_create_root_vhd_mocks(self, use_cow_images, vhd_format,
vhd_size):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.root_gb = self.FAKE_SIZE
self.flags(use_cow_images=use_cow_images)
self._vmops._vhdutils.get_vhd_info.return_value = {'MaxInternalSize':
vhd_size * units.Gi}
self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
get_size.return_value = root_vhd_internal_size
self._vmops._pathutils.exists.return_value = True
return mock_instance
@mock.patch('hyperv.nova.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE + 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
self.assertRaises(vmutils.VHDResizeException,
self._vmops._create_root_vhd, self.context,
mock_instance)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
self._vmops._pathutils.exists.assert_called_once_with(
fake_root_path)
self._vmops._pathutils.remove.assert_called_once_with(
fake_root_path)
@mock.patch('hyperv.nova.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=True, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(context=self.context,
instance=mock_instance)
self.assertEqual(fake_root_path, response)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format, False)
differencing_vhd = self._vmops._vhdutils.create_differencing_vhd
differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path)
self._vmops._vhdutils.get_vhd_info.assert_called_once_with(
fake_vhd_path)
if vhd_format is constants.DISK_FORMAT_VHD:
self.assertFalse(get_size.called)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
else:
get_size.assert_called_once_with(fake_vhd_path,
root_vhd_internal_size)
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size, is_file_max_size=False)
@mock.patch('hyperv.nova.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd(self, mock_get_cached_image, vhd_format,
is_rescue_vhd=False):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
rescue_image_id = (
mock.sentinel.rescue_image_id if is_rescue_vhd else None)
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(
context=self.context,
instance=mock_instance,
rescue_image_id=rescue_image_id)
self.assertEqual(fake_root_path, response)
mock_get_cached_image.assert_called_once_with(self.context,
mock_instance,
rescue_image_id)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format, is_rescue_vhd)
self._vmops._pathutils.copyfile.assert_called_once_with(
fake_vhd_path, fake_root_path)
get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size)
if is_rescue_vhd:
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
else:
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size,
is_file_max_size=False)
def test_create_root_vhd(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhd_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_rescue_vhd(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD,
is_rescue_vhd=True)
def test_create_root_vhdx_size_less_than_internal(self):
self._test_create_root_vhd_exception(
vhd_format=constants.DISK_FORMAT_VHD)
def test_is_resize_needed_exception(self):
inst = mock.MagicMock()
self.assertRaises(
vmutils.VHDResizeException, self._vmops._is_resize_needed,
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst)
def test_is_resize_needed_true(self):
inst = mock.MagicMock()
self.assertTrue(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst))
def test_is_resize_needed_false(self):
inst = mock.MagicMock()
self.assertFalse(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst))
def test_create_ephemeral_vhd(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.ephemeral_gb = self.FAKE_SIZE
best_supported = self._vmops._vhdutils.get_best_supported_vhd_format
best_supported.return_value = mock.sentinel.FAKE_FORMAT
self._vmops._pathutils.get_ephemeral_vhd_path.return_value = (
mock.sentinel.FAKE_PATH)
response = self._vmops.create_ephemeral_vhd(instance=mock_instance)
self._vmops._pathutils.get_ephemeral_vhd_path.assert_called_with(
mock_instance.name, mock.sentinel.FAKE_FORMAT)
self._vmops._vhdutils.create_dynamic_vhd.assert_called_with(
mock.sentinel.FAKE_PATH, mock_instance.ephemeral_gb * units.Gi,
mock.sentinel.FAKE_FORMAT)
self.assertEqual(mock.sentinel.FAKE_PATH, response)
@mock.patch('hyperv.nova.vmops.VMOps.destroy')
@mock.patch('hyperv.nova.vmops.VMOps.power_on')
@mock.patch('hyperv.nova.vmops.VMOps.attach_config_drive')
@mock.patch('hyperv.nova.vmops.VMOps._create_config_drive')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('hyperv.nova.vmops.VMOps.create_instance')
@mock.patch('hyperv.nova.vmops.VMOps.get_image_vm_generation')
@mock.patch('hyperv.nova.vmops.VMOps.create_ephemeral_vhd')
@mock.patch('hyperv.nova.vmops.VMOps._create_root_vhd')
@mock.patch('hyperv.nova.volumeops.VolumeOps.'
'ebs_root_in_block_devices')
@mock.patch('hyperv.nova.vmops.VMOps._delete_disk_files')
@mock.patch('hyperv.nova.vif.get_vif_driver')
def _test_spawn(self, mock_get_vif_driver, mock_delete_disk_files,
mock_ebs_root_in_block_devices, mock_create_root_vhd,
mock_create_ephemeral_vhd, mock_get_image_vm_gen,
mock_create_instance, mock_configdrive_required,
mock_create_config_drive, mock_attach_config_drive,
mock_power_on, mock_destroy, exists, boot_from_volume,
configdrive_required, fail):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_image_meta = mock.MagicMock()
fake_root_path = mock_create_root_vhd.return_value
fake_root_path = None if boot_from_volume else fake_root_path
fake_ephemeral_path = mock_create_ephemeral_vhd.return_value
fake_vm_gen = mock_get_image_vm_gen.return_value
fake_config_drive_path = mock_create_config_drive.return_value
fake_network_info = {'id': mock.sentinel.ID,
'address': mock.sentinel.ADDRESS}
self._vmops._vmutils.vm_exists.return_value = exists
mock_ebs_root_in_block_devices.return_value = boot_from_volume
mock_create_root_vhd.return_value = fake_root_path
mock_configdrive_required.return_value = configdrive_required
mock_create_instance.side_effect = fail
if exists:
self.assertRaises(exception.InstanceExists, self._vmops.spawn,
self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
elif fail is vmutils.HyperVException:
self.assertRaises(vmutils.HyperVException, self._vmops.spawn,
self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
mock_destroy.assert_called_once_with(mock_instance)
else:
self._vmops.spawn(self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
[fake_network_info], mock.sentinel.DEV_INFO)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
mock_ebs_root_in_block_devices.assert_called_once_with(
mock.sentinel.DEV_INFO)
if not boot_from_volume:
mock_create_root_vhd.assert_called_once_with(self.context,
mock_instance)
mock_create_ephemeral_vhd.assert_called_once_with(mock_instance)
mock_get_image_vm_gen.assert_called_once_with(fake_root_path,
mock_image_meta)
mock_create_instance.assert_called_once_with(
mock_instance, [fake_network_info], mock.sentinel.DEV_INFO,
fake_root_path, fake_ephemeral_path, fake_vm_gen,
mock_image_meta)
mock_configdrive_required.assert_called_once_with(mock_instance)
if configdrive_required:
mock_create_config_drive.assert_called_once_with(
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
[fake_network_info])
mock_attach_config_drive.assert_called_once_with(
mock_instance, fake_config_drive_path, fake_vm_gen)
mock_power_on.assert_called_once_with(
mock_instance, network_info=[fake_network_info])
def test_spawn(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=True, fail=None)
def test_spawn_instance_exists(self):
self._test_spawn(exists=True, boot_from_volume=False,
configdrive_required=True, fail=None)
def test_spawn_create_instance_exception(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=True,
fail=vmutils.HyperVException)
def test_spawn_not_required(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=False, fail=None)
def test_spawn_root_in_block(self):
self._test_spawn(exists=False, boot_from_volume=True,
configdrive_required=False, fail=None)
def test_spawn_no_admin_permissions(self):
self._vmops._vmutils.check_admin_permissions.side_effect = (
vmutils.HyperVException)
self.assertRaises(vmutils.HyperVException,
self._vmops.spawn,
self.context, mock.DEFAULT, mock.DEFAULT,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch('hyperv.nova.vif.get_vif_driver')
@mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs')
@mock.patch.object(volumeops.VolumeOps, 'attach_volumes')
@mock.patch.object(vmops.VMOps, '_attach_drive')
@mock.patch.object(vmops.VMOps, '_get_image_serial_port_settings')
@mock.patch.object(vmops.VMOps, '_create_vm_com_port_pipes')
@mock.patch.object(vmops.VMOps, '_configure_remotefx')
def _test_create_instance(self, mock_configure_remotefx, mock_create_pipes,
mock_get_port_settings, mock_attach_drive,
mock_attach_volumes, mock_set_qos_specs,
mock_get_vif_driver,
fake_root_path, fake_ephemeral_path,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1, remotefx=False):
mock_vif_driver = mock_get_vif_driver()
self.flags(enable_instance_metrics_collection=enable_instance_metrics,
group='hyperv')
fake_network_info = {'id': mock.sentinel.ID,
'address': mock.sentinel.ADDRESS}
mock_instance = fake_instance.fake_instance_obj(self.context)
instance_path = os.path.join(CONF.instances_path, mock_instance.name)
flavor = flavor_obj.Flavor(**test_flavor.fake_flavor)
if remotefx is True:
flavor.extra_specs['hyperv:remotefx'] = "1920x1200,2"
mock_instance.flavor = flavor
if remotefx is True and vm_gen == constants.VM_GEN_2:
self.assertRaises(vmutils.HyperVException,
self._vmops.create_instance,
instance=mock_instance,
network_info=[fake_network_info],
block_device_info=mock.sentinel.DEV_INFO,
root_vhd_path=fake_root_path,
eph_vhd_path=fake_ephemeral_path,
vm_gen=vm_gen,
image_meta=mock.sentinel.image_meta)
else:
self._vmops.create_instance(
instance=mock_instance,
network_info=[fake_network_info],
block_device_info=mock.sentinel.DEV_INFO,
root_vhd_path=fake_root_path,
eph_vhd_path=fake_ephemeral_path,
vm_gen=vm_gen,
image_meta=mock.sentinel.image_meta)
if remotefx is True:
mock_configure_remotefx.assert_called_once_with(
mock_instance,
flavor.extra_specs['hyperv:remotefx'])
self._vmops._vmutils.create_vm.assert_called_once_with(
mock_instance.name, mock_instance.memory_mb,
mock_instance.vcpus, CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio, vm_gen, instance_path,
[mock_instance.uuid])
expected = []
ctrl_type = vmops.VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
ctrl_disk_addr = 0
if fake_root_path:
expected.append(mock.call(mock_instance.name, fake_root_path,
0, ctrl_disk_addr, ctrl_type,
constants.DISK))
ctrl_disk_addr += 1
if fake_ephemeral_path:
expected.append(mock.call(mock_instance.name,
fake_ephemeral_path, 0,
ctrl_disk_addr,
ctrl_type, constants.DISK))
        # assert_has_calls actually verifies the recorded drive attachments;
        # has_calls on a mock is silently a no-op.
        mock_attach_drive.assert_has_calls(expected)
mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller
mock_create_scsi_ctrl.assert_called_once_with(mock_instance.name)
ebs_root = (
vm_gen is not constants.VM_GEN_2 and fake_root_path is None)
mock_attach_volumes.assert_called_once_with(mock.sentinel.DEV_INFO,
mock_instance.name,
ebs_root)
mock_get_port_settings.assert_called_with(mock.sentinel.image_meta)
mock_create_pipes.assert_called_once_with(
mock_instance, mock_get_port_settings.return_value)
self._vmops._vmutils.create_nic.assert_called_once_with(
mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
mock_vif_driver.plug.assert_called_once_with(mock_instance,
fake_network_info)
mock_enable = self._vmops._vmutils.enable_vm_metrics_collection
if enable_instance_metrics:
mock_enable.assert_called_once_with(mock_instance.name)
mock_set_qos_specs.assert_called_once_with(mock_instance)
def test_create_instance(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=True)
def test_create_instance_no_root_path(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=True)
def test_create_instance_no_ephemeral_path(self):
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=None,
enable_instance_metrics=True)
def test_create_instance_no_path(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False)
def test_create_instance_enable_instance_metrics_false(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=False)
def test_create_instance_gen2(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False,
vm_gen=constants.VM_GEN_2)
def test_create_instance_with_remote_fx(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False,
vm_gen=constants.VM_GEN_1,
remotefx=True)
def test_create_instance_with_remote_fx_gen2(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False,
vm_gen=constants.VM_GEN_2,
remotefx=True)
def test_attach_drive_vm_to_scsi(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_SCSI)
self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
constants.DISK)
def test_attach_drive_vm_to_ide(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_IDE)
self._vmops._vmutils.attach_ide_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.DISK)
def _check_get_image_vm_gen_except(self, image_prop):
image_meta = {"properties": {constants.IMAGE_PROP_VM_GEN: image_prop}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self.assertRaises(vmutils.HyperVException,
self._vmops.get_image_vm_generation,
mock.sentinel.FAKE_PATH,
image_meta)
def test_get_image_vm_generation_default(self):
image_meta = {"properties": {}}
self._vmops._hostutils.get_default_vm_generation.return_value = (
constants.IMAGE_PROP_VM_GEN_1)
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
image_meta)
self.assertEqual(constants.VM_GEN_1, response)
def test_get_image_vm_generation_gen2(self):
image_meta = {"properties": {
constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_2}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHDX)
response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
image_meta)
self.assertEqual(constants.VM_GEN_2, response)
def test_get_image_vm_generation_bad_prop(self):
self._check_get_image_vm_gen_except(mock.sentinel.FAKE_IMAGE_PROP)
def test_get_image_vm_generation_not_vhdx(self):
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHD)
self._check_get_image_vm_gen_except(constants.IMAGE_PROP_VM_GEN_2)
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
@mock.patch('nova.utils.execute')
def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
mock_InstanceMetadata, config_drive_format,
config_drive_cdrom, side_effect,
rescue=False):
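        # Parametrized helper: builds the config drive as an ISO and, when
        # config_drive_cdrom is False, converts it to a VHD with qemu-img and
        # removes the intermediate ISO. An unsupported config_drive_format
        # raises UnsupportedConfigDriveFormatException, and a make_drive
        # failure propagates as ProcessExecutionError.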
mock_instance = fake_instance.fake_instance_obj(self.context)
self.flags(config_drive_format=config_drive_format)
self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv')
self.flags(config_drive_inject_password=True, group='hyperv')
mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [
side_effect]
path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)
path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD)
def fake_get_configdrive_path(instance_name, disk_format,
rescue=False):
return (path_iso
if disk_format == constants.DVD_FORMAT else path_vhd)
mock_get_configdrive_path = self._vmops._pathutils.get_configdrive_path
mock_get_configdrive_path.side_effect = fake_get_configdrive_path
expected_get_configdrive_path_calls = [mock.call(mock_instance.name,
constants.DVD_FORMAT,
rescue=rescue)]
if not config_drive_cdrom:
expected_call = mock.call(mock_instance.name,
constants.DISK_FORMAT_VHD,
rescue=rescue)
expected_get_configdrive_path_calls.append(expected_call)
if config_drive_format != self.ISO9660:
self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
self._vmops._create_config_drive,
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO,
rescue)
elif side_effect is processutils.ProcessExecutionError:
self.assertRaises(processutils.ProcessExecutionError,
self._vmops._create_config_drive,
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO,
rescue)
else:
path = self._vmops._create_config_drive(mock_instance,
[mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO,
rescue)
mock_InstanceMetadata.assert_called_once_with(
mock_instance, content=[mock.sentinel.FILE],
extra_md={'admin_pass': mock.sentinel.PASSWORD},
network_info=mock.sentinel.NET_INFO)
mock_get_configdrive_path.assert_has_calls(
expected_get_configdrive_path_calls)
mock_ConfigDriveBuilder.assert_called_with(
instance_md=mock_InstanceMetadata())
mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive
mock_make_drive.assert_called_once_with(path_iso)
if not CONF.hyperv.config_drive_cdrom:
expected = path_vhd
mock_execute.assert_called_once_with(
CONF.hyperv.qemu_img_cmd,
'convert', '-f', 'raw', '-O', 'vpc',
path_iso, path_vhd, attempts=1)
self._vmops._pathutils.remove.assert_called_once_with(
os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO))
else:
expected = path_iso
self.assertEqual(expected, path)
def test_create_config_drive_cdrom(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=True,
side_effect=None)
def test_create_config_drive_vhd(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=None)
def test_create_rescue_config_drive_vhd(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=None,
rescue=True)
def test_create_config_drive_other_drive_format(self):
self._test_create_config_drive(config_drive_format=mock.sentinel.OTHER,
config_drive_cdrom=False,
side_effect=None)
def test_create_config_drive_execution_error(self):
self._test_create_config_drive(
config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=processutils.ProcessExecutionError)
def test_attach_config_drive_exception(self):
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(exception.InvalidDiskFormat,
self._vmops.attach_config_drive,
instance, 'C:/fake_instance_dir/configdrive.xxx',
constants.VM_GEN_1)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_1)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_IDE, constants.DISK)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive_gen2(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_2)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_SCSI, constants.DISK)
def test_detach_config_drive(self):
is_rescue_configdrive = True
mock_lookup_configdrive = (
self._vmops._pathutils.lookup_configdrive_path)
mock_lookup_configdrive.return_value = mock.sentinel.configdrive_path
self._vmops._detach_config_drive(mock.sentinel.instance_name,
rescue=is_rescue_configdrive,
delete=True)
mock_lookup_configdrive.assert_called_once_with(
mock.sentinel.instance_name,
rescue=is_rescue_configdrive)
self._vmops._vmutils.detach_vm_disk.assert_called_once_with(
mock.sentinel.instance_name, mock.sentinel.configdrive_path,
is_physical=False)
self._vmops._pathutils.remove.assert_called_once_with(
mock.sentinel.configdrive_path)
def test_delete_disk_files(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._delete_disk_files(mock_instance.name)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock_instance.name, create_dir=False, remove_dir=True)
@mock.patch('hyperv.nova.volumeops.VolumeOps.disconnect_volumes')
@mock.patch('hyperv.nova.vmops.VMOps._delete_disk_files')
@mock.patch('hyperv.nova.vmops.VMOps.power_off')
def test_destroy(self, mock_power_off, mock_delete_disk_files,
mock_disconnect_volumes):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.vm_exists.return_value = True
self._vmops.destroy(instance=mock_instance,
block_device_info=mock.sentinel.FAKE_BD_INFO)
self._vmops._vmutils.vm_exists.assert_called_with(
mock_instance.name)
mock_power_off.assert_called_once_with(mock_instance)
self._vmops._vmutils.destroy_vm.assert_called_once_with(
mock_instance.name)
mock_disconnect_volumes.assert_called_once_with(
mock.sentinel.FAKE_BD_INFO)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
def test_destroy_inexistent_instance(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.vm_exists.return_value = False
self._vmops.destroy(instance=mock_instance)
self.assertFalse(self._vmops._vmutils.destroy_vm.called)
@mock.patch('hyperv.nova.vmops.VMOps.power_off')
def test_destroy_exception(self, mock_power_off):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.destroy_vm.side_effect = vmutils.HyperVException
self._vmops._vmutils.vm_exists.return_value = True
self.assertRaises(vmutils.HyperVException,
self._vmops.destroy, mock_instance)
def test_reboot_hard(self):
self._test_reboot(vmops.REBOOT_TYPE_HARD,
constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_reboot_soft(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = True
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
constants.HYPERV_VM_STATE_ENABLED)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_reboot_soft_failed(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("hyperv.nova.vmops.VMOps.power_on")
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
mock_soft_shutdown.return_value = True
mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
instance, {}, vmops.REBOOT_TYPE_SOFT)
mock_soft_shutdown.assert_called_once_with(instance)
mock_power_on.assert_called_once_with(instance, network_info={})
def _test_reboot(self, reboot_type, vm_state):
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
self._vmops.reboot(instance, {}, reboot_type)
mock_set_state.assert_called_once_with(instance, vm_state)
@mock.patch("hyperv.nova.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = True
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_once_with(instance.name)
mock_wait_for_power_off.assert_called_once_with(
instance.name, self._FAKE_TIMEOUT)
self.assertTrue(result)
@mock.patch("time.sleep")
def test_soft_shutdown_failed(self, mock_sleep):
instance = fake_instance.fake_instance_obj(self.context)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.side_effect = vmutils.HyperVException(
"Expected failure.")
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm.assert_called_once_with(instance.name)
self.assertFalse(result)
@mock.patch("hyperv.nova.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.side_effect = [False, True]
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
calls = [mock.call(instance.name, 1),
mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertTrue(result)
@mock.patch("hyperv.nova.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = False
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
calls = [mock.call(instance.name, 1.5),
mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertFalse(result)
@mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
def test_pause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.pause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_PAUSED)
@mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
def test_unpause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.unpause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
def test_suspend(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.suspend(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_SUSPENDED)
@mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
def test_resume(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.resume(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
def _test_power_off(self, timeout):
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
self._vmops.power_off(instance, timeout)
serialops = self._vmops._serial_console_ops
serialops.stop_console_handler.assert_called_once_with(
instance.name)
mock_set_state.assert_called_once_with(
instance, constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_hard(self):
self._test_power_off(timeout=0)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_power_off_exception(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_power_off(timeout=1)
@mock.patch("hyperv.nova.vmops.VMOps._set_vm_state")
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
instance = fake_instance.fake_instance_obj(self.context)
mock_soft_shutdown.return_value = True
self._vmops.power_off(instance, 1, 0)
mock_soft_shutdown.assert_called_once_with(
instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(mock_set_state.called)
@mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
def test_power_on(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.power_on(mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch('hyperv.nova.volumeops.VolumeOps'
'.fix_instance_volume_disk_paths')
@mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
def test_power_on_having_block_devices(self, mock_set_vm_state,
mock_fix_instance_vol_paths):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.power_on(mock_instance, mock.sentinel.block_device_info)
mock_fix_instance_vol_paths.assert_called_once_with(
mock_instance.name, mock.sentinel.block_device_info)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch.object(vmops.VMOps, '_get_vif_driver')
def test_power_on_with_network_info(self, mock_get_vif_driver):
mock_instance = fake_instance.fake_instance_obj(self.context)
fake_vif1 = {'id': mock.sentinel.ID1,
'type': mock.sentinel.vif_type1}
fake_vif2 = {'id': mock.sentinel.ID2,
'type': mock.sentinel.vif_type2}
mock_network_info = [fake_vif1, fake_vif2]
fake_vif_driver = mock.MagicMock()
mock_get_vif_driver.return_value = fake_vif_driver
calls = [mock.call(mock_instance, fake_vif1),
mock.call(mock_instance, fake_vif2)]
self._vmops.power_on(mock_instance, network_info=mock_network_info)
fake_vif_driver.post_start.assert_has_calls(calls)
def _test_set_vm_state(self, state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._set_vm_state(mock_instance, state)
self._vmops._vmutils.set_vm_state.assert_called_once_with(
mock_instance.name, state)
def test_set_vm_state_disabled(self):
self._test_set_vm_state(state=constants.HYPERV_VM_STATE_DISABLED)
def test_set_vm_state_enabled(self):
self._test_set_vm_state(state=constants.HYPERV_VM_STATE_ENABLED)
def test_set_vm_state_reboot(self):
self._test_set_vm_state(state=constants.HYPERV_VM_STATE_REBOOT)
def test_set_vm_state_exception(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.set_vm_state.side_effect = vmutils.HyperVException
self.assertRaises(vmutils.HyperVException, self._vmops._set_vm_state,
mock_instance, mock.sentinel.STATE)
def test_get_vm_state(self):
summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
with mock.patch.object(self._vmops._vmutils,
'get_vm_summary_info') as mock_get_summary_info:
mock_get_summary_info.return_value = summary_info
response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
self.assertTrue(result)
@mock.patch.object(vmops.etimeout, "with_timeout")
def test_wait_for_power_off_false(self, mock_with_timeout):
mock_with_timeout.side_effect = etimeout.Timeout()
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(result)
def test_create_vm_com_port_pipes(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_serial_ports = {
1: constants.SERIAL_PORT_TYPE_RO,
2: constants.SERIAL_PORT_TYPE_RW
}
self._vmops._create_vm_com_port_pipes(mock_instance,
mock_serial_ports)
expected_calls = []
for port_number, port_type in mock_serial_ports.iteritems():
expected_pipe = r'\\.\pipe\%s_%s' % (mock_instance.uuid,
port_type)
expected_calls.append(mock.call(mock_instance.name,
port_number,
expected_pipe))
mock_set_conn = self._vmops._vmutils.set_vm_serial_port_connection
mock_set_conn.assert_has_calls(expected_calls)
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
with mock.patch.object(self._vmops._vmutils,
'list_instance_notes') as mock_list_notes:
mock_list_notes.return_value = [('fake_name', [fake_uuid])]
response = self._vmops.list_instance_uuids()
mock_list_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])
def test_copy_vm_dvd_disks(self):
fake_paths = [mock.sentinel.FAKE_DVD_PATH1,
mock.sentinel.FAKE_DVD_PATH2]
mock_copy = self._vmops._pathutils.copyfile
mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths
mock_get_dvd_disk_paths.return_value = fake_paths
self._vmops._pathutils.get_instance_dir.return_value = (
mock.sentinel.FAKE_DEST_PATH)
self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME,
mock.sentinel.FAKE_DEST_HOST)
mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME,
remote_server=mock.sentinel.FAKE_DEST_HOST)
        mock_copy.assert_has_calls(
            [mock.call(mock.sentinel.FAKE_DVD_PATH1,
                       mock.sentinel.FAKE_DEST_PATH),
             mock.call(mock.sentinel.FAKE_DVD_PATH2,
                       mock.sentinel.FAKE_DEST_PATH)])
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch.object(vmops.VMOps, '_create_root_vhd')
@mock.patch.object(vmops.VMOps, 'get_image_vm_generation')
@mock.patch.object(vmops.VMOps, '_attach_drive')
@mock.patch.object(vmops.VMOps, '_create_config_drive')
@mock.patch.object(vmops.VMOps, 'attach_config_drive')
@mock.patch.object(vmops.VMOps, '_detach_config_drive')
@mock.patch.object(vmops.VMOps, 'power_on')
def test_rescue_instance(self, mock_power_on,
mock_detach_config_drive,
mock_attach_config_drive,
mock_create_config_drive,
mock_attach_drive,
mock_get_image_vm_gen,
mock_create_root_vhd,
mock_configdrive_required):
mock_image_meta = {'id': mock.sentinel.rescue_image_id}
mock_vm_gen = constants.VM_GEN_2
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_configdrive_required.return_value = True
mock_create_root_vhd.return_value = mock.sentinel.rescue_vhd_path
mock_get_image_vm_gen.return_value = mock_vm_gen
self._vmops._vmutils.get_vm_gen.return_value = mock_vm_gen
self._vmops._pathutils.lookup_root_vhd_path.return_value = (
mock.sentinel.root_vhd_path)
mock_create_config_drive.return_value = (
mock.sentinel.rescue_configdrive_path)
self._vmops.rescue_instance(self.context,
mock_instance,
mock.sentinel.network_info,
mock_image_meta,
mock.sentinel.rescue_password)
self._vmops._vmutils.detach_vm_disk.assert_called_once_with(
mock_instance.name, mock.sentinel.root_vhd_path,
is_physical=False)
mock_attach_drive.assert_called_once_with(
mock_instance.name, mock.sentinel.rescue_vhd_path, 0,
self._vmops._ROOT_DISK_CTRL_ADDR,
vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen])
self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
mock_instance.name, mock.sentinel.root_vhd_path,
drive_type=constants.DISK)
mock_detach_config_drive.assert_called_once_with(mock_instance.name)
mock_create_config_drive.assert_called_once_with(
mock_instance,
injected_files=None,
admin_password=mock.sentinel.rescue_password,
network_info=mock.sentinel.network_info,
rescue=True)
mock_attach_config_drive.assert_called_once_with(
mock_instance, mock.sentinel.rescue_configdrive_path,
mock_vm_gen)
@mock.patch.object(vmops.VMOps, '_create_root_vhd')
@mock.patch.object(vmops.VMOps, 'get_image_vm_generation')
def _test_rescue_instance_exception(self, mock_get_image_vm_gen,
mock_create_root_vhd,
wrong_vm_gen=False,
boot_from_volume=False):
mock_vm_gen = constants.VM_GEN_1
image_vm_gen = (mock_vm_gen
if not wrong_vm_gen else constants.VM_GEN_2)
mock_image_meta = {'id': mock.sentinel.rescue_image_id}
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_get_image_vm_gen.return_value = image_vm_gen
self._vmops._vmutils.get_vm_gen.return_value = mock_vm_gen
self._vmops._pathutils.lookup_root_vhd_path.return_value = (
mock.sentinel.root_vhd_path if not boot_from_volume else None)
self.assertRaises(vmutils.HyperVException,
self._vmops.rescue_instance,
self.context, mock_instance,
mock.sentinel.network_info,
mock_image_meta,
mock.sentinel.rescue_password)
def test_rescue_instance_wrong_vm_gen(self):
# Test the case when the rescue image requires a different
# vm generation than the actual rescued instance.
self._test_rescue_instance_exception(wrong_vm_gen=True)
def test_rescue_instance_boot_from_volume(self):
# Rescuing instances booted from volume is not supported.
self._test_rescue_instance_exception(boot_from_volume=True)
@mock.patch.object(fileutils, 'delete_if_exists')
@mock.patch.object(vmops.VMOps, '_attach_drive')
@mock.patch.object(vmops.VMOps, 'attach_config_drive')
@mock.patch.object(vmops.VMOps, '_detach_config_drive')
@mock.patch.object(vmops.VMOps, 'power_on')
@mock.patch.object(vmops.VMOps, 'power_off')
def test_unrescue_instance(self, mock_power_on, mock_power_off,
mock_detach_config_drive,
mock_attach_configdrive,
mock_attach_drive,
mock_delete_if_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_vm_gen = constants.VM_GEN_2
self._vmops._vmutils.get_vm_gen.return_value = mock_vm_gen
self._vmops._pathutils.lookup_root_vhd_path.side_effect = (
mock.sentinel.root_vhd_path, mock.sentinel.rescue_vhd_path)
self._vmops._pathutils.lookup_configdrive_path.return_value = (
mock.sentinel.configdrive_path)
self._vmops.unrescue_instance(mock_instance)
self._vmops._pathutils.lookup_root_vhd_path.assert_has_calls(
[mock.call(mock_instance.name),
mock.call(mock_instance.name, rescue=True)])
self._vmops._vmutils.detach_vm_disk.assert_has_calls(
[mock.call(mock_instance.name,
mock.sentinel.root_vhd_path,
is_physical=False),
mock.call(mock_instance.name,
mock.sentinel.rescue_vhd_path,
is_physical=False)])
mock_attach_drive.assert_called_once_with(
mock_instance.name, mock.sentinel.root_vhd_path, 0,
self._vmops._ROOT_DISK_CTRL_ADDR,
vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen])
mock_detach_config_drive.assert_called_once_with(
mock_instance.name, rescue=True, delete=True)
mock_delete_if_exists.assert_called_once_with(
mock.sentinel.rescue_vhd_path)
mock_attach_configdrive.assert_called_once_with(
mock_instance, mock.sentinel.configdrive_path, mock_vm_gen)
mock_power_on.assert_called_once_with(mock_instance)
@mock.patch.object(vmops.VMOps, 'power_off')
def test_unrescue_instance_missing_root_image(self, mock_power_off):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.vm_state = vm_states.RESCUED
self._vmops._pathutils.lookup_root_vhd_path.return_value = None
self.assertRaises(vmutils.HyperVException,
self._vmops.unrescue_instance,
mock_instance)
def _test_configure_remotefx(self, exception=False):
self.flags(enable_remotefx=True, group='hyperv')
mock_instance = fake_instance.fake_instance_obj(self.context)
fake_resolution = "1920x1200"
fake_monitor_count = 3
fake_config = "%s,%s" % (fake_resolution, fake_monitor_count)
self._vmops._vmutils.enable_remotefx_video_adapter = mock.MagicMock()
enable_remotefx = self._vmops._vmutils.enable_remotefx_video_adapter
self._vmops._hostutils.check_server_feature = mock.MagicMock()
if exception:
self._vmops._hostutils.check_server_feature.return_value = False
self.assertRaises(vmutils.HyperVException,
self._vmops._configure_remotefx,
mock_instance, fake_config)
else:
self._vmops._configure_remotefx(mock_instance, fake_config)
enable_remotefx.assert_called_once_with(mock_instance.name,
fake_monitor_count,
fake_resolution)
def test_configure_remotefx_exception(self):
self._test_configure_remotefx(exception=True)
def test_configure_remotefx(self):
self._test_configure_remotefx()
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def _test_check_hotplug_is_available(self, mock_get_vm_state, vm_gen,
windows_version, vm_state):
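        # Parametrized helper: NIC hot-plug requires a generation 2 VM and a
        # host that passes the minimum Windows version check (the 6.3 case
        # fails it), otherwise InterfaceAttachFailed is raised; for an
        # eligible VM the check returns False while the VM is disabled and
        # True otherwise.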
fake_vm = fake_instance.fake_instance_obj(self.context)
mock_get_vm_state.return_value = vm_state
self._vmops._vmutils.get_vm_gen.return_value = vm_gen
fake_check_win_vers = self._vmops._hostutils.check_min_windows_version
if windows_version == self._WIN_VERSION_6_3:
fake_check_win_vers.return_value = False
else:
fake_check_win_vers.return_value = True
if (windows_version == self._WIN_VERSION_6_3 or
vm_gen == constants.VM_GEN_1):
self.assertRaises(exception.InterfaceAttachFailed,
self._vmops._check_hotplug_is_available, fake_vm)
else:
ret = self._vmops._check_hotplug_is_available(fake_vm)
if vm_state == constants.HYPERV_VM_STATE_DISABLED:
self.assertFalse(ret)
else:
self.assertTrue(ret)
def test_check_if_hotplug_is_available_gen1(self):
self._test_check_hotplug_is_available(vm_gen=constants.VM_GEN_1,
windows_version=self._WIN_VERSION_6_4,
vm_state=constants.HYPERV_VM_STATE_ENABLED)
def test_check_if_hotplug_is_available_gen2(self):
self._test_check_hotplug_is_available(vm_gen=constants.VM_GEN_2,
windows_version=self._WIN_VERSION_6_4,
vm_state=constants.HYPERV_VM_STATE_ENABLED)
def test_check_if_hotplug_is_available_win_6_3(self):
self._test_check_hotplug_is_available(vm_gen=constants.VM_GEN_2,
windows_version=self._WIN_VERSION_6_3,
vm_state=constants.HYPERV_VM_STATE_ENABLED)
def test_check_if_hotplug_is_available_vm_disabled(self):
self._test_check_hotplug_is_available(vm_gen=constants.VM_GEN_2,
windows_version=self._WIN_VERSION_6_4,
vm_state=constants.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vif_driver')
def _test_create_and_attach_interface(self, mock_get_vif_driver, hot_plug):
fake_vm = fake_instance.fake_instance_obj(self.context)
fake_vif = test_virtual_interface.fake_vif
fake_vif['type'] = mock.sentinel.VIF_TYPE
fake_vif_driver = mock_get_vif_driver.return_value
self._vmops._create_and_attach_interface(fake_vm, fake_vif, hot_plug)
self._vmops._vmutils.create_nic.assert_called_with(fake_vm.name,
fake_vif['id'], fake_vif['address'])
mock_get_vif_driver.assert_called_once_with(mock.sentinel.VIF_TYPE)
fake_vif_driver.plug.assert_called_once_with(fake_vm, fake_vif)
if hot_plug:
fake_vif_driver.post_start.assert_called_once_with(fake_vm,
fake_vif)
def test_create_and_attach_interface_hot_plugged(self):
self._test_create_and_attach_interface(hot_plug=True)
def test_create_and_attach_interface(self):
self._test_create_and_attach_interface(hot_plug=False)
@mock.patch.object(vmops.VMOps, '_check_hotplug_is_available')
@mock.patch.object(vmops.VMOps, '_create_and_attach_interface')
def _test_attach_interface(self, mock_create_and_attach_interface,
mock_check_hotplug_is_available, hot_plug):
mock_check_hotplug_is_available.return_value = hot_plug
self._vmops.attach_interface(mock.sentinel.FAKE_VM,
mock.sentinel.FAKE_VIF)
mock_check_hotplug_is_available.assert_called_once_with(
mock.sentinel.FAKE_VM)
mock_create_and_attach_interface.assert_called_once_with(
mock.sentinel.FAKE_VM, mock.sentinel.FAKE_VIF, hot_plug)
def test_attach_interface_hot_plugged(self):
self._test_attach_interface(hot_plug=True)
def test_attach_interface(self):
self._test_attach_interface(hot_plug=False)
@mock.patch.object(vmops.VMOps, '_get_vif_driver')
def test_detach_and_destroy_interface(self, mock_get_vif_driver):
fake_vm = fake_instance.fake_instance_obj(self.context)
fake_vif = test_virtual_interface.fake_vif
fake_vif['type'] = mock.sentinel.VIF_TYPE
fake_vif_driver = mock_get_vif_driver.return_value
self._vmops._detach_and_destroy_interface(fake_vm, fake_vif)
fake_vif_driver.unplug.assert_called_once_with(fake_vm, fake_vif)
self._vmops._vmutils.destroy_nic.assert_called_once_with(
fake_vm.name, fake_vif['id'])
@mock.patch.object(vmops.VMOps, '_check_hotplug_is_available')
@mock.patch.object(vmops.VMOps, '_detach_and_destroy_interface')
def test_detach_interface(self, mock_detach_and_destroy_interface,
mock_check_hotplug_is_available):
self._vmops.detach_interface(mock.sentinel.FAKE_VM,
mock.sentinel.FAKE_VIF)
mock_check_hotplug_is_available.assert_called_once_with(
mock.sentinel.FAKE_VM)
mock_detach_and_destroy_interface.assert_called_once_with(
mock.sentinel.FAKE_VM, mock.sentinel.FAKE_VIF)
def _mock_get_port_settings(self, logging_port, interactive_port):
mock_image_port_settings = {
constants.IMAGE_PROP_LOGGING_SERIAL_PORT: logging_port,
constants.IMAGE_PROP_INTERACTIVE_SERIAL_PORT: interactive_port
}
mock_image_meta = {'properties': mock_image_port_settings}
acceptable_ports = [1, 2]
expected_exception = not (logging_port in acceptable_ports and
interactive_port in acceptable_ports)
if expected_exception:
self.assertRaises(vmutils.HyperVException,
self._vmops._get_image_serial_port_settings,
mock_image_meta)
else:
return self._vmops._get_image_serial_port_settings(
mock_image_meta)
def test_get_image_serial_port_settings(self):
logging_port = 1
interactive_port = 2
ret_val = self._mock_get_port_settings(logging_port, interactive_port)
expected_serial_ports = {
logging_port: constants.SERIAL_PORT_TYPE_RO,
interactive_port: constants.SERIAL_PORT_TYPE_RW,
}
self.assertEqual(expected_serial_ports, ret_val)
def test_get_image_serial_port_settings_exception(self):
self._mock_get_port_settings(1, 3)
def test_get_image_serial_port_settings_single_port(self):
interactive_port = 1
ret_val = self._mock_get_port_settings(interactive_port,
interactive_port)
expected_serial_ports = {
interactive_port: constants.SERIAL_PORT_TYPE_RW
}
self.assertEqual(expected_serial_ports, ret_val)
def test_get_instance_local_disks(self):
fake_instance_dir = 'fake_instance_dir'
fake_local_disks = [os.path.join(fake_instance_dir, disk_name)
for disk_name in ['root.vhd', 'configdrive.iso']]
fake_instance_disks = ['fake_remote_disk'] + fake_local_disks
mock_get_storage_paths = self._vmops._vmutils.get_vm_storage_paths
mock_get_storage_paths.return_value = [fake_instance_disks, []]
mock_get_instance_dir = self._vmops._pathutils.get_instance_dir
mock_get_instance_dir.return_value = fake_instance_dir
ret_val = self._vmops._get_instance_local_disks(
mock.sentinel.instance_name)
self.assertEqual(fake_local_disks, ret_val)
@mock.patch.object(vmops.VMOps, '_get_storage_qos_specs')
@mock.patch.object(vmops.VMOps, '_get_instance_local_disks')
def test_set_instance_disk_qos_specs(self, mock_get_local_disks,
mock_get_qos_specs):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_local_disks = [mock.sentinel.root_vhd_path,
mock.sentinel.eph_vhd_path]
mock_get_local_disks.return_value = mock_local_disks
mock_set_qos_specs = self._vmops._vmutils.set_disk_qos_specs
mock_get_qos_specs.return_value = [mock.sentinel.min_iops,
mock.sentinel.max_iops]
self._vmops._set_instance_disk_qos_specs(mock_instance)
mock_get_local_disks.assert_called_once_with(mock_instance.name)
expected_calls = [mock.call(mock_instance.name, disk_path,
mock.sentinel.min_iops,
mock.sentinel.max_iops)
for disk_path in mock_local_disks]
mock_set_qos_specs.assert_has_calls(expected_calls)
@mock.patch.object(volumeops.VolumeOps, 'parse_disk_qos_specs')
def test_get_storage_qos_specs(self, mock_parse_specs):
fake_extra_specs = {'spec_key': 'spec_value',
'storage_qos:min_bytes_sec':
mock.sentinel.min_bytes_sec,
'storage_qos:max_bytes_sec':
mock.sentinel.max_bytes_sec}
mock_instance = mock.Mock(flavor={'extra_specs': fake_extra_specs})
ret_val = self._vmops._get_storage_qos_specs(mock_instance)
expected_qos_specs_dict = {
'min_bytes_sec': mock.sentinel.min_bytes_sec,
'max_bytes_sec': mock.sentinel.max_bytes_sec
}
self.assertEqual(mock_parse_specs.return_value, ret_val)
mock_parse_specs.assert_called_once_with(expected_qos_specs_dict)
|
|
"""
Search for software built from source to include in the blueprint as a tarball.
"""
import errno
import glob
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import subprocess
import tarfile
from blueprint import context_managers
from blueprint import util
def _source(b, r, dirname, old_cwd):
tmpname = os.path.join(os.getcwd(), dirname[1:].replace('/', '-'))
exclude = []
pattern_pip = re.compile(r'\.egg-info/installed-files.txt$')
pattern_egg = re.compile(r'\.egg(?:-info)?(?:/|$)')
pattern_pth = re.compile(
r'lib/python[^/]+/(?:dist|site)-packages/easy-install.pth$')
pattern_bin = re.compile(
r'EASY-INSTALL(?:-ENTRY)?-SCRIPT|This file was generated by RubyGems')
# Create a partial shallow copy of the directory.
for dirpath, dirnames, filenames in os.walk(dirname):
# Definitely ignore the shallow copy directory.
if dirpath.startswith(tmpname):
continue
# Determine if this entire directory should be ignored by default.
ignored = r.ignore_file(dirpath)
dirpath2 = os.path.normpath(
os.path.join(tmpname, os.path.relpath(dirpath, dirname)))
# Create this directory in the shallow copy with matching mode, owner,
# and owning group. Suggest running as `root` if this doesn't work.
os.mkdir(dirpath2)
s = os.lstat(dirpath)
try:
try:
os.lchown(dirpath2, s.st_uid, s.st_gid)
except OverflowError:
logging.warning('{0} has uid:gid {1}:{2} - using chown(1)'.
format(dirpath, s.st_uid, s.st_gid))
p = subprocess.Popen(['chown',
'{0}:{1}'.format(s.st_uid, s.st_gid),
dirpath2],
close_fds=True)
p.communicate()
os.chmod(dirpath2, s.st_mode)
except OSError as e:
logging.warning('{0} caused {1} - try running as root'.
format(dirpath, errno.errorcode[e.errno]))
return
for filename in filenames:
pathname = os.path.join(dirpath, filename)
if r.ignore_source(pathname, ignored):
continue
pathname2 = os.path.join(dirpath2, filename)
# Exclude files that are part of the RubyGems package.
for globname in (
os.path.join('/usr/lib/ruby/gems/*/gems/rubygems-update-*/lib',
pathname[1:]),
os.path.join('/var/lib/gems/*/gems/rubygems-update-*/lib',
pathname[1:])):
if 0 < len(glob.glob(globname)):
continue
# Remember the path to all of `pip`'s `installed_files.txt` files.
if pattern_pip.search(pathname):
exclude.extend([os.path.join(dirpath2, line.rstrip())
for line in open(pathname)])
# Likewise remember the path to Python eggs.
if pattern_egg.search(pathname):
exclude.append(pathname2)
# Exclude `easy_install`'s bookkeeping file, too.
if pattern_pth.search(pathname):
continue
            # Exclude executables placed by Python packages or RubyGems.
if pathname.startswith('/usr/local/bin/'):
try:
if pattern_bin.search(open(pathname).read()):
continue
except IOError as e:
pass
# Exclude share/applications/mimeinfo.cache, whatever that is.
if '/usr/local/share/applications/mimeinfo.cache' == pathname:
continue
# Clean up dangling symbolic links. This makes the assumption
# that no one intends to leave dangling symbolic links hanging
# around, which I think is a good assumption.
s = os.lstat(pathname)
if stat.S_ISLNK(s.st_mode):
try:
os.stat(pathname)
except OSError as e:
if errno.ENOENT == e.errno:
logging.warning('ignored dangling symbolic link {0}'.
format(pathname))
continue
# Hard link this file into the shallow copy. Suggest running as
# `root` if this doesn't work though in practice the check above
# will have already caught this problem.
try:
os.link(pathname, pathname2)
except OSError as e:
logging.warning('{0} caused {1} - try running as root'.
format(pathname, errno.errorcode[e.errno]))
return
# Unlink files that were remembered for exclusion above.
for pathname in exclude:
try:
os.unlink(pathname)
except OSError as e:
if e.errno not in (errno.EISDIR, errno.ENOENT):
raise e
# Remove empty directories. For any that hang around, match their
# access and modification times to the source, otherwise the hash of
# the tarball will not be deterministic.
for dirpath, dirnames, filenames in os.walk(tmpname, topdown=False):
try:
os.rmdir(dirpath)
except OSError:
s = os.lstat(os.path.join(dirname, os.path.relpath(dirpath,
tmpname)))
os.utime(dirpath, (s.st_atime, s.st_mtime))
    # If the shallow copy still exists, create a tarball named by its
# SHA1 sum and include it in the blueprint.
try:
tar = tarfile.open('tmp.tar', 'w')
tar.add(tmpname, '.')
except OSError:
return
finally:
tar.close()
sha1 = hashlib.sha1()
f = open('tmp.tar', 'r')
[sha1.update(buf) for buf in iter(lambda: f.read(4096), '')]
f.close()
tarname = '{0}.tar'.format(sha1.hexdigest())
shutil.move('tmp.tar', os.path.join(old_cwd, tarname))
b.add_source(dirname, tarname)
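# Illustrative sketch, not part of blueprint itself: the content-addressed
# naming above amounts to hashing the finished tarball in fixed-size chunks
# and renaming it after the hex digest. The helper name below is hypothetical
# and only restates that step in isolation (it reuses the hashlib import
# above).
def _sha1_tarname(pathname, chunk_size=4096):
    """Return the '<sha1>.tar' name for the tarball at `pathname`."""
    sha1 = hashlib.sha1()
    with open(pathname, 'rb') as f:
        for buf in iter(lambda: f.read(chunk_size), b''):
            sha1.update(buf)
    return '{0}.tar'.format(sha1.hexdigest())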
def sources(b, r):
logging.info('searching for software built from source')
for pathname, negate in r['source']:
if negate and os.path.isdir(pathname) \
and not r.ignore_source(pathname):
            # Note, before creating a working directory within pathname, what
            # its atime and mtime should be.
s = os.lstat(pathname)
# Create a working directory within pathname to avoid potential
# EXDEV when creating the shallow copy and tarball.
try:
with context_managers.mkdtemp(pathname) as c:
# Restore the parent of the working directory to its
# original atime and mtime, as if pretending the working
# directory never actually existed.
os.utime(pathname, (s.st_atime, s.st_mtime))
# Create the shallow copy and possibly tarball of the
# relevant parts of pathname.
_source(b, r, pathname, c.cwd)
# Once more restore the atime and mtime after the working
# directory is destroyed.
os.utime(pathname, (s.st_atime, s.st_mtime))
# If creating the temporary directory fails, bail with a warning.
except OSError as e:
logging.warning('{0} caused {1} - try running as root'.
format(pathname, errno.errorcode[e.errno]))
if 0 < len(b.sources):
b.arch = util.arch()
|
|
# pylint: disable=g-bad-file-header
# Copyright 2019 The dm_env Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Classes that describe numpy arrays."""
import inspect
from typing import Optional
import numpy as np
_INVALID_SHAPE = 'Expected shape %r but found %r'
_INVALID_DTYPE = 'Expected dtype %r but found %r'
_OUT_OF_BOUNDS = 'Values were not all within bounds %s <= %s <= %s'
_VAR_ARGS_NOT_ALLOWED = 'Spec subclasses must not accept *args.'
_VAR_KWARGS_NOT_ALLOWED = 'Spec subclasses must not accept **kwargs.'
_MINIMUM_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXIMUM = (
'All values in `minimum` must be less than or equal to their corresponding '
'value in `maximum`, got:\nminimum={minimum!r}\nmaximum={maximum!r}.')
_MINIMUM_INCOMPATIBLE_WITH_SHAPE = '`minimum` is incompatible with `shape`'
_MAXIMUM_INCOMPATIBLE_WITH_SHAPE = '`maximum` is incompatible with `shape`'
class Array:
"""Describes a numpy array or scalar shape and dtype.
An `Array` spec allows an API to describe the arrays that it accepts or
returns, before that array exists.
The equivalent version describing a `tf.Tensor` is `TensorSpec`.
"""
__slots__ = ('_shape', '_dtype', '_name')
__hash__ = None
def __init__(self, shape, dtype, name: Optional[str] = None):
"""Initializes a new `Array` spec.
Args:
shape: An iterable specifying the array shape.
dtype: numpy dtype or string specifying the array dtype.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
TypeError: If `shape` is not an iterable of elements convertible to int,
or if `dtype` is not convertible to a numpy dtype.
"""
self._shape = tuple(int(dim) for dim in shape)
self._dtype = np.dtype(dtype)
self._name = name
@property
def shape(self):
"""Returns a `tuple` specifying the array shape."""
return self._shape
@property
def dtype(self):
"""Returns a numpy dtype specifying the array dtype."""
return self._dtype
@property
def name(self):
"""Returns the name of the Array."""
return self._name
def __repr__(self):
return 'Array(shape={}, dtype={}, name={})'.format(self.shape,
repr(self.dtype),
repr(self.name))
def __eq__(self, other):
"""Checks if the shape and dtype of two specs are equal."""
if not isinstance(other, Array):
return False
return self.shape == other.shape and self.dtype == other.dtype
def __ne__(self, other):
return not self == other
def _fail_validation(self, message, *args):
message %= args
if self.name:
message += ' for spec %s' % self.name
raise ValueError(message)
def validate(self, value):
"""Checks if value conforms to this spec.
Args:
value: a numpy array or value convertible to one via `np.asarray`.
Returns:
value, converted if necessary to a numpy array.
Raises:
ValueError: if value doesn't conform to this spec.
"""
value = np.asarray(value)
if value.shape != self.shape:
self._fail_validation(_INVALID_SHAPE, self.shape, value.shape)
if value.dtype != self.dtype:
self._fail_validation(_INVALID_DTYPE, self.dtype, value.dtype)
return value
def generate_value(self):
"""Generate a test value which conforms to this spec."""
return np.zeros(shape=self.shape, dtype=self.dtype)
def _get_constructor_kwargs(self):
"""Returns constructor kwargs for instantiating a new copy of this spec."""
# Get the names and kinds of the constructor parameters.
params = inspect.signature(type(self)).parameters
# __init__ must not accept *args or **kwargs, since otherwise we won't be
# able to infer what the corresponding attribute names are.
kinds = {value.kind for value in params.values()}
if inspect.Parameter.VAR_POSITIONAL in kinds:
raise TypeError(_VAR_ARGS_NOT_ALLOWED)
elif inspect.Parameter.VAR_KEYWORD in kinds:
raise TypeError(_VAR_KWARGS_NOT_ALLOWED)
# Note that we assume direct correspondence between the names of constructor
# arguments and attributes.
return {name: getattr(self, name) for name in params.keys()}
def replace(self, **kwargs):
"""Returns a new copy of `self` with specified attributes replaced.
Args:
**kwargs: Optional attributes to replace.
Returns:
A new copy of `self`.
"""
all_kwargs = self._get_constructor_kwargs()
all_kwargs.update(kwargs)
return type(self)(**all_kwargs)
def __reduce__(self):
return Array, (self._shape, self._dtype, self._name)
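# Usage sketch, not part of dm_env: how calling code might consume an `Array`
# spec. The helper name is ours; it is wrapped in a function so nothing runs
# at import time.
def _example_array_usage():
  """Illustrates validate(), generate_value() and replace() on `Array`."""
  spec = Array(shape=(2, 3), dtype=np.float32, name='observation')
  value = spec.generate_value()  # Zeros of shape (2, 3) and dtype float32.
  spec.validate(value)  # Returns the value; raises ValueError on a mismatch.
  return spec.replace(name='obs')  # Copy of the spec with a new name.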
class BoundedArray(Array):
"""An `Array` spec that specifies minimum and maximum values.
Example usage:
```python
# Specifying the same minimum and maximum for every element.
spec = BoundedArray((3, 4), np.float64, minimum=0.0, maximum=1.0)
# Specifying a different minimum and maximum for each element.
spec = BoundedArray(
(2,), np.float64, minimum=[0.1, 0.2], maximum=[0.9, 0.9])
# Specifying the same minimum and a different maximum for each element.
spec = BoundedArray(
(3,), np.float64, minimum=-10.0, maximum=[4.0, 5.0, 3.0])
```
Bounds are meant to be inclusive. This is especially important for
integer types. The following spec will be satisfied by arrays
with values in the set {0, 1, 2}:
```python
spec = BoundedArray((3, 4), int, minimum=0, maximum=2)
```
Note that one or both bounds may be infinite. For example, the set of
non-negative floats can be expressed as:
```python
spec = BoundedArray((), np.float64, minimum=0.0, maximum=np.inf)
```
In this case `np.inf` would be considered valid, since the upper bound is
inclusive.
"""
__slots__ = ('_minimum', '_maximum')
__hash__ = None
def __init__(self, shape, dtype, minimum, maximum, name=None):
"""Initializes a new `BoundedArray` spec.
Args:
shape: An iterable specifying the array shape.
dtype: numpy dtype or string specifying the array dtype.
minimum: Number or sequence specifying the minimum element bounds
(inclusive). Must be broadcastable to `shape`.
maximum: Number or sequence specifying the maximum element bounds
(inclusive). Must be broadcastable to `shape`.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
ValueError: If `minimum` or `maximum` are not broadcastable to `shape`.
ValueError: If any values in `minimum` are greater than their
corresponding value in `maximum`.
TypeError: If the shape is not an iterable or if the `dtype` is an invalid
numpy dtype.
"""
super(BoundedArray, self).__init__(shape, dtype, name)
try:
bcast_minimum = np.broadcast_to(minimum, shape=shape)
except ValueError as numpy_exception:
raise ValueError(_MINIMUM_INCOMPATIBLE_WITH_SHAPE) from numpy_exception
try:
bcast_maximum = np.broadcast_to(maximum, shape=shape)
except ValueError as numpy_exception:
raise ValueError(_MAXIMUM_INCOMPATIBLE_WITH_SHAPE) from numpy_exception
if np.any(bcast_minimum > bcast_maximum):
raise ValueError(_MINIMUM_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXIMUM.format(
minimum=minimum, maximum=maximum))
self._minimum = np.array(minimum, dtype=self.dtype)
self._minimum.setflags(write=False)
self._maximum = np.array(maximum, dtype=self.dtype)
self._maximum.setflags(write=False)
@property
def minimum(self):
"""Returns a NumPy array specifying the minimum bounds (inclusive)."""
return self._minimum
@property
def maximum(self):
"""Returns a NumPy array specifying the maximum bounds (inclusive)."""
return self._maximum
def __repr__(self):
template = ('BoundedArray(shape={}, dtype={}, name={}, '
'minimum={}, maximum={})')
return template.format(self.shape, repr(self.dtype), repr(self.name),
self._minimum, self._maximum)
def __eq__(self, other):
if not isinstance(other, BoundedArray):
return False
return (super(BoundedArray, self).__eq__(other) and
(self.minimum == other.minimum).all() and
(self.maximum == other.maximum).all())
def validate(self, value):
value = np.asarray(value)
super(BoundedArray, self).validate(value)
if (value < self.minimum).any() or (value > self.maximum).any():
self._fail_validation(_OUT_OF_BOUNDS, self.minimum, value, self.maximum)
return value
def generate_value(self):
return (np.ones(shape=self.shape, dtype=self.dtype) *
self.dtype.type(self.minimum))
def __reduce__(self):
return BoundedArray, (self._shape, self._dtype, self._minimum,
self._maximum, self._name)
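# Usage sketch, not part of dm_env: validation behaviour of `BoundedArray`,
# complementing the construction examples in the class docstring. The helper
# name is ours.
def _example_bounded_array_usage():
  spec = BoundedArray((2,), np.float64, minimum=0.0, maximum=1.0)
  spec.validate([0.2, 0.9])  # Within bounds: returned as a numpy array.
  try:
    spec.validate([0.2, 1.5])  # 1.5 exceeds the maximum: raises ValueError.
  except ValueError:
    pass
  return spec.generate_value()  # Array filled with the minimum, i.e. [0., 0.].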
_NUM_VALUES_NOT_POSITIVE = '`num_values` must be a positive integer, got {}.'
_DTYPE_NOT_INTEGRAL = '`dtype` must be integral, got {}.'
_DTYPE_OVERFLOW = (
'`dtype` {} is not big enough to hold `num_values` ({}) without overflow.')
class DiscreteArray(BoundedArray):
"""Represents a discrete, scalar, zero-based space.
This is a special case of the parent `BoundedArray` class. It represents a
0-dimensional numpy array containing a single integer value between
0 and num_values - 1 (inclusive), and exposes a scalar `num_values` property
in addition to the standard `BoundedArray` interface.
For an example use-case, this can be used to define the action space of a
simple RL environment that accepts discrete actions.
"""
_REPR_TEMPLATE = (
'DiscreteArray(shape={self.shape}, dtype={self.dtype}, name={self.name}, '
'minimum={self.minimum}, maximum={self.maximum}, '
'num_values={self.num_values})')
__slots__ = ('_num_values',)
def __init__(self, num_values, dtype=np.int32, name=None):
"""Initializes a new `DiscreteArray` spec.
Args:
num_values: Integer specifying the number of possible values to represent.
dtype: The dtype of the array. Must be an integral type large enough to
hold `num_values` without overflow.
name: Optional string specifying the name of the array.
Raises:
ValueError: If `num_values` is not positive, if `dtype` is not integral,
or if `dtype` is not large enough to hold `num_values` without overflow.
"""
if num_values <= 0 or not np.issubdtype(type(num_values), np.integer):
raise ValueError(_NUM_VALUES_NOT_POSITIVE.format(num_values))
if not np.issubdtype(dtype, np.integer):
raise ValueError(_DTYPE_NOT_INTEGRAL.format(dtype))
num_values = int(num_values)
maximum = num_values - 1
dtype = np.dtype(dtype)
if np.min_scalar_type(maximum) > dtype:
raise ValueError(_DTYPE_OVERFLOW.format(dtype, num_values))
super(DiscreteArray, self).__init__(
shape=(),
dtype=dtype,
minimum=0,
maximum=maximum,
name=name)
self._num_values = num_values
@property
def num_values(self):
"""Returns the number of items."""
return self._num_values
def __repr__(self):
return self._REPR_TEMPLATE.format(self=self) # pytype: disable=duplicate-keyword-argument
def __reduce__(self):
return DiscreteArray, (self._num_values, self._dtype, self._name)
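# Usage sketch, not part of dm_env: a `DiscreteArray` describing the
# four-action space mentioned in the class docstring. The helper name is ours.
def _example_discrete_action_spec():
  action_spec = DiscreteArray(num_values=4, dtype=np.int32, name='action')
  action_spec.validate(np.int32(3))  # Scalars 0..3 are valid actions.
  return action_spec.generate_value()  # Scalar 0, the minimum.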
_VALID_STRING_TYPES = (str, bytes)
_INVALID_STRING_TYPE = (
'Expected `string_type` to be one of: {}, got: {{!r}}.'
.format(_VALID_STRING_TYPES))
_INVALID_ELEMENT_TYPE = (
'Expected all elements to be of type: %s. Got value: %r of type: %s.')
class StringArray(Array):
"""Represents an array of variable-length Python strings."""
__slots__ = ('_string_type',)
_REPR_TEMPLATE = (
'{self.__class__.__name__}(shape={self.shape}, '
'string_type={self.string_type}, name={self.name})')
def __init__(self, shape, string_type=str, name=None):
"""Initializes a new `StringArray` spec.
Args:
shape: An iterable specifying the array shape.
string_type: The native Python string type for each element; either
unicode or ASCII. Defaults to unicode.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
"""
if string_type not in _VALID_STRING_TYPES:
raise ValueError(_INVALID_STRING_TYPE.format(string_type))
self._string_type = string_type
super(StringArray, self).__init__(shape=shape, dtype=object, name=name)
@property
def string_type(self):
"""Returns the Python string type for each element."""
return self._string_type
def validate(self, value):
"""Checks if value conforms to this spec.
Args:
value: a numpy array or value convertible to one via `np.asarray`.
Returns:
value, converted if necessary to a numpy array.
Raises:
ValueError: if value doesn't conform to this spec.
"""
value = np.asarray(value, dtype=object)
if value.shape != self.shape:
self._fail_validation(_INVALID_SHAPE, self.shape, value.shape)
for item in value.flat:
if not isinstance(item, self.string_type):
self._fail_validation(
_INVALID_ELEMENT_TYPE, self.string_type, item, type(item))
return value
def generate_value(self):
"""Generate a test value which conforms to this spec."""
empty_string = self.string_type() # pylint: disable=not-callable
return np.full(shape=self.shape, dtype=self.dtype, fill_value=empty_string)
def __repr__(self):
return self._REPR_TEMPLATE.format(self=self) # pytype: disable=duplicate-keyword-argument
def __reduce__(self):
return type(self), (self.shape, self.string_type, self.name)
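# Usage sketch, not part of dm_env: a `StringArray` holding two unicode
# strings; validate() checks shape and element type rather than dtype. The
# helper name is ours.
def _example_string_array_usage():
  spec = StringArray(shape=(2,), string_type=str, name='tokens')
  return spec.validate(np.array(['foo', 'bar'], dtype=object))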
|
|
"""Test Alexa config."""
import contextlib
from homeassistant.components.cloud import ALEXA_SCHEMA, alexa_config
from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import async_fire_time_changed
async def test_alexa_config_expose_entity_prefs(hass, cloud_prefs):
"""Test Alexa config should expose using prefs."""
entity_conf = {"should_expose": False}
await cloud_prefs.async_update(
alexa_entity_configs={"light.kitchen": entity_conf},
alexa_default_expose=["light"],
)
conf = alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None)
assert not conf.should_expose("light.kitchen")
entity_conf["should_expose"] = True
assert conf.should_expose("light.kitchen")
entity_conf["should_expose"] = None
assert conf.should_expose("light.kitchen")
await cloud_prefs.async_update(
alexa_default_expose=["sensor"],
)
assert not conf.should_expose("light.kitchen")
async def test_alexa_config_report_state(hass, cloud_prefs):
"""Test Alexa config should expose using prefs."""
conf = alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None)
assert cloud_prefs.alexa_report_state is False
assert conf.should_report_state is False
assert conf.is_reporting_states is False
with patch.object(conf, "async_get_access_token", AsyncMock(return_value="hello")):
await cloud_prefs.async_update(alexa_report_state=True)
await hass.async_block_till_done()
assert cloud_prefs.alexa_report_state is True
assert conf.should_report_state is True
assert conf.is_reporting_states is True
await cloud_prefs.async_update(alexa_report_state=False)
await hass.async_block_till_done()
assert cloud_prefs.alexa_report_state is False
assert conf.should_report_state is False
assert conf.is_reporting_states is False
async def test_alexa_config_invalidate_token(hass, cloud_prefs, aioclient_mock):
"""Test Alexa config should expose using prefs."""
aioclient_mock.post(
"http://example/alexa_token",
json={
"access_token": "mock-token",
"event_endpoint": "http://example.com/alexa_endpoint",
"expires_in": 30,
},
)
conf = alexa_config.AlexaConfig(
hass,
ALEXA_SCHEMA({}),
cloud_prefs,
Mock(
alexa_access_token_url="http://example/alexa_token",
auth=Mock(async_check_token=AsyncMock()),
websession=hass.helpers.aiohttp_client.async_get_clientsession(),
),
)
token = await conf.async_get_access_token()
assert token == "mock-token"
assert len(aioclient_mock.mock_calls) == 1
token = await conf.async_get_access_token()
assert token == "mock-token"
assert len(aioclient_mock.mock_calls) == 1
assert conf._token_valid is not None
conf.async_invalidate_access_token()
assert conf._token_valid is None
token = await conf.async_get_access_token()
assert token == "mock-token"
assert len(aioclient_mock.mock_calls) == 2
@contextlib.contextmanager
def patch_sync_helper():
"""Patch sync helper.
In Py3.7 this would have been an async context manager.
"""
to_update = []
to_remove = []
def sync_helper(to_upd, to_rem):
to_update.extend([ent_id for ent_id in to_upd if ent_id not in to_update])
to_remove.extend([ent_id for ent_id in to_rem if ent_id not in to_remove])
return True
with patch("homeassistant.components.cloud.alexa_config.SYNC_DELAY", 0), patch(
"homeassistant.components.cloud.alexa_config.AlexaConfig._sync_helper",
side_effect=sync_helper,
):
yield to_update, to_remove
async def test_alexa_update_expose_trigger_sync(hass, cloud_prefs):
"""Test Alexa config responds to updating exposed entities."""
alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None)
with patch_sync_helper() as (to_update, to_remove):
await cloud_prefs.async_update_alexa_entity_config(
entity_id="light.kitchen", should_expose=True
)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow())
await hass.async_block_till_done()
assert to_update == ["light.kitchen"]
assert to_remove == []
with patch_sync_helper() as (to_update, to_remove):
await cloud_prefs.async_update_alexa_entity_config(
entity_id="light.kitchen", should_expose=False
)
await cloud_prefs.async_update_alexa_entity_config(
entity_id="binary_sensor.door", should_expose=True
)
await cloud_prefs.async_update_alexa_entity_config(
entity_id="sensor.temp", should_expose=True
)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow())
await hass.async_block_till_done()
assert sorted(to_update) == ["binary_sensor.door", "sensor.temp"]
assert to_remove == ["light.kitchen"]
async def test_alexa_entity_registry_sync(hass, mock_cloud_login, cloud_prefs):
"""Test Alexa config responds to entity registry."""
alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, hass.data["cloud"])
with patch_sync_helper() as (to_update, to_remove):
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "create", "entity_id": "light.kitchen"},
)
await hass.async_block_till_done()
assert to_update == ["light.kitchen"]
assert to_remove == []
with patch_sync_helper() as (to_update, to_remove):
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "remove", "entity_id": "light.kitchen"},
)
await hass.async_block_till_done()
assert to_update == []
assert to_remove == ["light.kitchen"]
with patch_sync_helper() as (to_update, to_remove):
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{
"action": "update",
"entity_id": "light.kitchen",
"changes": ["entity_id"],
"old_entity_id": "light.living_room",
},
)
await hass.async_block_till_done()
assert to_update == ["light.kitchen"]
assert to_remove == ["light.living_room"]
with patch_sync_helper() as (to_update, to_remove):
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "update", "entity_id": "light.kitchen", "changes": ["icon"]},
)
await hass.async_block_till_done()
assert to_update == []
assert to_remove == []
async def test_alexa_update_report_state(hass, cloud_prefs):
"""Test Alexa config responds to reporting state."""
alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None)
with patch(
"homeassistant.components.cloud.alexa_config.AlexaConfig.async_sync_entities",
) as mock_sync, patch(
"homeassistant.components.cloud.alexa_config.AlexaConfig.async_enable_proactive_mode",
):
await cloud_prefs.async_update(alexa_report_state=True)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 1
|
|
import json
import pytest
from CiscoEmailSecurity import Client
def get_fetch_data():
with open('./test_data.json', 'r') as f:
return json.loads(f.read())
test_data = get_fetch_data()
def test_date_to_cisco_date():
from CiscoEmailSecurity import date_to_cisco_date
res = date_to_cisco_date('2019-11-20 09:36:09')
assert res == '2019-11-20T09:36:09.000Z'
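# A minimal sketch (not the integration's actual code) of the conversion the
# test above pins down: the space separator becomes 'T' and a '.000Z' suffix
# is appended.
def _date_to_cisco_date_sketch(date_str):
    from datetime import datetime
    parsed = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
    return parsed.strftime('%Y-%m-%dT%H:%M:%S') + '.000Z'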
@pytest.mark.parametrize(
"limit, expected",
[
('', 20),
('100', 20),
('15', 15)
]
)
def test_set_limit(limit, expected):
from CiscoEmailSecurity import set_limit
res = set_limit(limit)
assert res == expected
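# A minimal sketch of the limit handling the parametrized cases above imply:
# an empty limit falls back to 20 and anything larger than 20 is capped at 20.
# Illustrative only, not necessarily the integration's implementation.
def _set_limit_sketch(limit):
    if not limit:
        return 20
    return min(int(limit), 20)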
def test_set_var_to_output_prefix():
from CiscoEmailSecurity import set_var_to_output_prefix
res = set_var_to_output_prefix('mail_incoming_traffic_summary')
assert res == 'MailIncomingTrafficSummary'
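# A minimal sketch of the snake_case -> CamelCase mapping exercised above;
# illustrative only, not necessarily how the integration implements it.
def _set_var_to_output_prefix_sketch(var_name):
    return ''.join(part.capitalize() for part in var_name.split('_'))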
def test_build_url_params_for_list_report():
"""
Given:
        Arguments To filter with.
When:
The function builds a URL filter from these arguments.
Then:
We check that the URL filter matches what the command asks for.
"""
from CiscoEmailSecurity import build_url_params_for_list_report
res = build_url_params_for_list_report(test_data['args_for_list_report'], 'reporting_system')
assert res == test_data['url_params_for_list_reports']
def test_build_url_params_for_list_messages():
"""
Given:
        Arguments To filter with.
When:
The function builds a URL filter from these arguments.
Then:
We check that the URL filter matches what the command asks for.
"""
from CiscoEmailSecurity import build_url_params_for_list_messages
res = build_url_params_for_list_messages(test_data['args_for_list_messages'])
assert res == test_data['url_params_for_list_messages']
def test_build_url_params_for_get_details():
"""
Given:
        Arguments To filter with.
When:
The function builds a URL filter from these arguments.
Then:
We check that the URL filter matches what the command asks for.
"""
from CiscoEmailSecurity import build_url_params_for_get_details
res = build_url_params_for_get_details(test_data['args_for_get_details'])
assert res == test_data['url_params_for_get_details']
def test_build_url_params_for_spam_quarantine():
"""
Given:
        Arguments To filter with.
When:
The function builds a URL filter from these arguments.
Then:
We check that the URL filter matches what the command asks for.
"""
from CiscoEmailSecurity import build_url_params_for_spam_quarantine
res = build_url_params_for_spam_quarantine(test_data['args_for_spam_quarantine'])
assert res == test_data['url_params_for_spam_quarantine']
def test_list_search_messages_command(requests_mock):
"""
Given:
Arguments for command - list_search_messages.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix, outputs_key_field)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_search_messages_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.get("https://ciscoemailsecurity/sma/api/v2.0/message-tracking/messages?"
"startDate=2017-02-14T09:51:46.000-0600.000Z&endDate=2017-02-14T09:51:46.000-0600.000Z"
"&searchOption=messages&ciscoHost=All_Hosts&offset=0&limit=20",
json=test_data['search_messages_response_data'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_search_messages_command(client, {"start_date": "2017-02-14T09:51:46.000-0600",
"end_date": "2017-02-14T09:51:46.000-0600"})
assert res.outputs == test_data['search_messages_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.Message'
assert res.outputs_key_field == 'attributes.mid'
def test_messages_to_human_readable():
"""
Given:
Messages response data.
When:
The function arranges the data and returns it in the Markdown table.
Then:
We check that the table that the function returns corresponds to the data that the function received.
"""
from CiscoEmailSecurity import messages_to_human_readable
res = messages_to_human_readable(test_data['search_messages_context'])
assert res == test_data['messages_human_readable']
def test_list_get_message_details_command(requests_mock):
"""
Given:
Arguments for command - list_get_message_details.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix, outputs_key_field)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_get_message_details_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.get("https://ciscoemailsecurity/sma/api/v2.0/message-tracking/details?"
"startDate=2017-02-14T09:51:46.000-0600.000Z&endDate=2017-02-14T09:51:46.000-0600.000Z&"
"mid=None&icid=None",
json=test_data['get_message_details_response_data'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_get_message_details_command(client, {"start_date": "2017-02-14T09:51:46.000-0600",
"end_date": "2017-02-14T09:51:46.000-0600"})
assert res.outputs == test_data['get_message_details_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.Message'
assert res.outputs_key_field == 'mid'
def test_message_to_human_readable():
"""
Given:
Message response data.
When:
The function arranges the data and returns it in the Markdown table.
Then:
We check that the table that the function returns corresponds to the data that the function received.
"""
from CiscoEmailSecurity import details_get_to_human_readable
res = details_get_to_human_readable(test_data['get_message_details_context'])
assert res == test_data['message_human_readable']
def test_list_search_spam_quarantine_command(requests_mock):
"""
Given:
Arguments for command - list_search_spam_quarantine.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix, outputs_key_field)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_search_spam_quarantine_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.get("https://ciscoemailsecurity/sma/api/v2.0/quarantine/messages"
"?startDate=2017-02-14T09:51:46.000-0600.000Z&endDate=2017-02-14T09:51:46.000-0600.000Z"
"&quarantineType=spam&offset=0&limit=20", json=test_data['search_spam_quarantine_response_data'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_search_spam_quarantine_command(client, {"start_date": "2017-02-14T09:51:46.000-0600",
"end_date": "2017-02-14T09:51:46.000-0600"})
assert res.outputs == test_data['search_spam_quarantine_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.SpamQuarantine'
assert res.outputs_key_field == 'mid'
def test_spam_quarantine_to_human_readable():
"""
Given:
Spam quarantine response data.
When:
The function arranges the data and returns it in the Markdown table.
Then:
We check that the table that the function returns corresponds to the data that the function received.
"""
from CiscoEmailSecurity import spam_quarantine_to_human_readable
res = spam_quarantine_to_human_readable(test_data['search_spam_quarantine_context'])
assert res == test_data['spam_quarantine_human_readable']
def test_list_get_quarantine_message_details_command(requests_mock):
"""
Given:
Arguments for command - get_quarantine_message_details.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix, outputs_key_field)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_get_quarantine_message_details_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.get("https://ciscoemailsecurity/sma/api/v2.0/quarantine/messages/details?mid=None"
"&quarantineType=spam", json=test_data['quarantine_message_details_response_data'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_get_quarantine_message_details_command(client, {"start_date": "2017-02-14T09:51:46.000-0600",
"end_date": "2017-02-14T09:51:46.000-0600"})
assert res.outputs == test_data['quarantine_message_details_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.QuarantineMessageDetail'
assert res.outputs_key_field == 'mid'
def test_quarantine_message_details_data_to_human_readable():
"""
Given:
Spam quarantine message details response data.
When:
The function arranges the data and returns it in the Markdown table.
Then:
We check that the table that the function returns corresponds to the data that the function received.
"""
from CiscoEmailSecurity import quarantine_message_details_data_to_human_readable
res = quarantine_message_details_data_to_human_readable(test_data['quarantine_message_context_to_human_readable'])
assert res == test_data['quarantine_message_details_human_readable']
def test_list_delete_quarantine_messages_command(requests_mock):
"""
Given:
Arguments for command - delete_quarantine_messages.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (readable_output, outputs_prefix)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_delete_quarantine_messages_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.delete("https://ciscoemailsecurity/sma/api/v2.0/quarantine/messages",
json=test_data['quarantine_delete_message_response_data'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_delete_quarantine_messages_command(client, {"messages_ids": "1234"})
assert res.readable_output == test_data['quarantine_delete_message_response_data']
def test_list_release_quarantine_messages_command(requests_mock):
"""
Given:
Arguments for command - release_quarantine_messages.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (readable_output, outputs_prefix)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_release_quarantine_messages_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/quarantine/messages",
json=test_data['quarantine_release_message_response_data'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_release_quarantine_messages_command(client, {"messages_ids": "1234"})
assert res.readable_output == test_data['quarantine_release_message_response_data']
def test_build_url_filter_for_get_list_entries():
"""
Given:
Arguments To filter with.
When:
The function builds a URL filter from these arguments.
Then:
We check that the URL filter matches what the command asks for.
"""
from CiscoEmailSecurity import build_url_filter_for_get_list_entries
res = build_url_filter_for_get_list_entries({"list_type": "safelist", "view_by": "bla", "order_by": "bla"})
assert res == "?action=view&limit=20&offset=0&quarantineType=spam&orderDir=desc&viewBy=bla&orderBy=bla"
def test_list_entries_get_command(requests_mock):
"""
Given:
Arguments for command - list_entries_get.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_entries_get_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.get("https://ciscoemailsecurity/sma/api/v2.0/quarantine/safelist",
json=test_data['get_list_entries_response'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_entries_get_command(client, {"list_type": "safelist", "limit": "25", "order_by": "recipient",
"view_by": "recipient"})
assert res.outputs == test_data['get_list_entries_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.ListEntry.Safelist'
assert res.outputs_key_field == 'Safelist'
def test_build_request_body_for_add_list_entries():
"""
Given:
        Arguments To filter with.
When:
The function builds a request body from these arguments.
Then:
We check that the request body matches what the command asks for.
"""
from CiscoEmailSecurity import build_request_body_for_add_list_entries
res_request_body = build_request_body_for_add_list_entries({"list_type": "safelist",
"action": "add", "recipient_addresses":
"user.com,user.com",
"sender_list": "acme.com",
"view_by": "recipient"})
assert res_request_body == {"action": "add", "quarantineType": "spam", "viewBy": "recipient",
"recipientAddresses": ["user.com", "user.com"], "senderList": ["acme.com"]}
def test_list_entries_add_command(requests_mock):
"""
Given:
Arguments for command - list_entries_add.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_entries_add_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/quarantine/safelist",
json=test_data['add_list_entries_response'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_entries_add_command(client, {"list_type": "safelist", "action": "add", "limit": "25",
"recipient_addresses": "user.com,user.com",
"sender_list": "acme.com", "view_by": "recipient"})
assert res.readable_output == test_data['add_list_entries_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.listEntry.Safelist'
assert res.outputs_key_field == 'acme.com'
def test_build_request_body_for_delete_list_entries():
"""
Given:
        Arguments To filter with.
When:
The function builds a request body from these arguments.
Then:
We check that the request body matches what the command asks for.
"""
from CiscoEmailSecurity import build_request_body_for_delete_list_entries
res_request_body = build_request_body_for_delete_list_entries({"list_type": "safelist",
"sender_list": "acme.com",
"view_by": "recipient"})
assert res_request_body == {"quarantineType": "spam", "viewBy": "recipient", "senderList": ["acme.com"]}
def test_list_entries_delete_command(requests_mock):
"""
Given:
Arguments for command - list_entries_add.
When:
The API gives us results according to the arguments we sent.
Then:
We check that what is in context (outputs, outputs_prefix)
is what should be according to the arguments we sent to the API.
"""
from CiscoEmailSecurity import list_entries_delete_command
requests_mock.post("https://ciscoemailsecurity/sma/api/v2.0/login", json=test_data['data_for_login'])
requests_mock.delete("https://ciscoemailsecurity/sma/api/v2.0/quarantine/safelist",
json=test_data['delete_list_entries_response'])
client = Client({"credentials": {"identifier": "a", "password": "b"}, "base_url": "https://ciscoemailsecurity/",
"insecure": False, "proxy": False, "timeout": "2000"})
res = list_entries_delete_command(client, {"list_type": "safelist", "sender_list": "acme.com",
"view_by": "recipient"})
assert res.readable_output == test_data['delete_list_entries_context']
assert res.outputs_prefix == 'CiscoEmailSecurity.listEntry.Safelist'
assert res.outputs_key_field == 'acme.com'
|
|
from __future__ import division
import abc
import os.path as path
import numpy as np
from menpo.image import ShapeImage
from menpo.io.base import (Importer, find_alternative_files,
map_filepath_to_importer)
from scipy.spatial import Delaunay
from menpo.transform.affine import Scale
import re
class SpatialImageImporter(Importer):
r"""
Base class for importing depth images. Depth images are defined by file
types whereby the data lies explicitly on a grid. This grid pattern means
the data can be interpreted as an image.
Parameters
----------
filepath : string
An absolute filepath
"""
def __init__(self, filepath):
super(SpatialImageImporter, self).__init__(filepath)
self.attempted_texture_search = False
self.relative_texture_path = None
self.trilist = None
self.mask = None
self.tcoords = None
self.shape_image = None
def _build_texture_and_landmark_importers(self):
r"""
Search for a texture and landmark file in the same directory as the
mesh. If they exist, create importers for them.
"""
if self.texture_path is None or not path.exists(self.texture_path):
self.texture_importer = None
else:
# This import is here to avoid circular dependencies
from menpo.io.extensions import image_types
self.texture_importer = map_filepath_to_importer(self.texture_path,
image_types)
def _search_for_texture(self):
r"""
Tries to find a texture with the same name as the depth image.
Returns
--------
relative_texture_path : string
The relative path to the texture or ``None`` if one can't be found
"""
# Stop searching every single time we access the property
self.attempted_texture_search = True
# This import is here to avoid circular dependencies
from menpo.io.extensions import image_types
try:
return find_alternative_files('texture', self.filepath,
image_types)
except ImportError:
return None
@property
def texture_path(self):
"""
Get the absolute path to the texture. Returns None if one can't be
        found. Makes its best effort to find an appropriate texture by
searching for textures with the same name as the mesh. Will only
search for the path the first time ``texture_path`` is invoked.
Sets the ``self.relative_texture_path`` attribute.
Returns
-------
texture_path : string
Absolute filepath to the texture
"""
# Try find a texture path if we can
if self.relative_texture_path is None and \
not self.attempted_texture_search:
self.relative_texture_path = self._search_for_texture()
try:
return path.join(self.folder, self.relative_texture_path)
except AttributeError:
return None
@abc.abstractmethod
def _build_image_and_mesh(self):
r"""
Abstract method that handles actually building an image. This involves
reading the image from disk and doing any necessary processing.
Should set the ``self.image`` attribute.
"""
pass
@abc.abstractmethod
def _process_landmarks(self, original_image, landmark_group):
r"""
Abstract method that allows the landmarks to be transformed between
the original image space and the depth image space. The space may
be different because a texture mapped image is unlikely to be equal
in size to the mesh.
Parameters
----------
original_image : :class:`menpo.image.base.Image`
The original image that the landmarks belong to
        landmark_group : :class:`menpo.landmark.base.LandmarkGroup`
The landmark group to transform
Returns
-------
transformed_group : :class:`menpo.landmark.base.LandmarkGroup`
The transformed landmark group.
"""
pass
def build(self):
r"""
Overrides the :meth:`build <menpo.io.base.Importer.build>` method.
Builds the image and landmarks. Assigns the landmark set to the image.
Returns
-------
image : :class:`menpo.image.base.Image`
The image object with landmarks attached if they exist
"""
# Build the image as defined by the overridden method and then search
# for valid landmarks that may have been defined by the importer
self._build_image_and_mesh()
self._build_texture_and_landmark_importers()
if self.texture_importer is not None:
texture = self.texture_importer.build()
else:
texture = None
self.image = ShapeImage(self.shape_image, mask=self.mask,
trilist=self.trilist, tcoords=self.tcoords,
texture=texture)
return self.image
class BNTImporter(SpatialImageImporter):
r"""
Allows importing the BNT file format from the bosphorus dataset.
This reads in the 5 channels (3D coordinates and texture coordinates),
splits them appropriately and then triangulates the ``x`` and ``y``
coordinates to create a surface. The texture path is also given in the file
format.
The file format specifies a 'bad_value' which is used to denote inaccurate
data points. This value is replaced with ``np.nan``. The mesh is
created with only those values that are not NaN. The depth image contains
all values.
Parameters
----------
filepath : string
Absolute filepath of the mesh.
"""
def __init__(self, filepath):
# Setup class before super class call
super(BNTImporter, self).__init__(filepath)
def _build_image_and_mesh(self):
r"""
Read the file in and parse appropriately. Includes reading the texture
path.
"""
with open(self.filepath, 'rb') as f:
            # Read the header: the image dimensions and the sentinel value used
            # to mark invalid points. Cast to plain Python scalars so they can
            # be used directly as counts and shape entries below.
            n_rows = int(np.fromfile(f, dtype=np.uint16, count=1)[0])
            n_cols = int(np.fromfile(f, dtype=np.uint16, count=1)[0])
            bad_value = float(np.fromfile(f, dtype=np.float64, count=1)[0])
            # Get integers and convert to a valid string
            image_path_len = int(np.fromfile(f, dtype=np.uint16, count=1)[0])
            texture_path = np.fromfile(f, dtype=np.uint8, count=image_path_len)
            texture_path = ''.join(map(chr, texture_path))
            # Get data and reshape (reshape in an odd order due to Matlab's
            # Fortran ordering). First three columns are 3D coordinates
            # and last two are 2D texture coordinates. The length field holds
            # the total number of doubles stored (n_rows * n_cols * 5).
            coords_len = int(np.fromfile(f, dtype=np.uint32, count=1)[0])
            data = np.fromfile(f, dtype=np.float64, count=coords_len)
            data = data.reshape([5, coords_len // 5]).T
# Get the 3D coordinates
shape_pixels = data[:, :3]
# We want to remove the bad values because otherwise the mesh is not
# renderable. We do this by replacing the bad value values with nan
shape_pixels[shape_pixels == bad_value] = np.nan
# The image contains all coordinates
# Must be flipped LR due to Fortran ordering from Matlab
        # Must be flipped upside down due to image vs mesh ordering
self.shape_image = np.fliplr(np.reshape(shape_pixels[:, :3][::-1],
[n_rows, n_cols, 3]))
self.mask = ~np.any(np.isnan(self.shape_image), axis=-1)
        # Use only those coordinates which do not contain NaNs
valid_points = ~np.isnan(shape_pixels).any(axis=1)
# Apparently the texture coordinates are upside down?
#self.tcoords = data[:, -2:][valid_points]
#self.tcoords[:, 1] = 1.0 - self.tcoords[:, 1]
# struggling to interpret these - let the ShapeImage build them
# instead.
self.tcoords = None
self.relative_texture_path = texture_path
class FIMImporter(SpatialImageImporter):
r"""
Allows importing floating point images as depth images.
This reads in the shape in to 3 channels and then triangulates the
``x`` and ``y`` coordinates to create a surface. An example of this
datatype is the aligned BU4D dataset.
Parameters
----------
filepath : string
Absolute filepath of the mesh.
"""
def __init__(self, filepath):
# Setup class before super class call
super(FIMImporter, self).__init__(filepath)
def _build_image_and_mesh(self):
r"""
Read the file and parse it as necessary. Since the data lies on a grid
we can triangulate the 2D coordinates to get a valid triangulation.
The format does not specify texture coordinates.
"""
with open(self.filepath, 'rb') as f:
size = np.fromfile(f, dtype=np.uint32, count=3)
            data = np.fromfile(f, dtype=np.float32, count=np.prod(size))
data = data.reshape([size[0], size[1], size[2]])
# Replace the zero buffer values with nan so that the image renders
# nicely
data[data == 0] = np.nan
self.shape_image = data
self.mask = ~np.isnan(self.shape_image)
class ABSImporter(SpatialImageImporter):
r"""
Allows importing the ABS file format from the FRGC dataset.
The z-min value is stripped from the mesh to make it renderable.
Parameters
----------
filepath : string
Absolute filepath of the mesh.
"""
def __init__(self, filepath):
# Setup class before super class call
super(ABSImporter, self).__init__(filepath)
def _build_image_and_mesh(self):
r"""
Read in the file and remove the z-min. Triangulate the 2D gridded
coordinates to create a valid triangulation.
"""
with open(self.filepath, 'r') as f:
            # Parse the image dimensions from the header; they are needed
            # below to reshape the flattened data.
# Assume first result for regexes
re_rows = re.compile(u'([0-9]+) rows')
n_rows = int(re_rows.findall(f.readline())[0])
re_cols = re.compile(u'([0-9]+) columns')
n_cols = int(re_cols.findall(f.readline())[0])
# This also loads the mask
# >>> image_data[:, 0]
image_data = np.loadtxt(self.filepath, skiprows=3, unpack=True)
# Replace the lowest value with nan so that we can render properly
image_data[image_data == np.min(image_data)] = np.nan
self.shape_image = np.reshape(image_data[:, 1:], [n_rows, n_cols, 3])
self.mask = np.reshape(image_data[:, 0], [n_rows, n_cols])
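# A minimal driver sketch (not part of menpo itself) showing how the concrete
# importers above are typically used: pick a class by file extension and call
# build() to obtain a ShapeImage. The extension mapping here is an assumption
# made purely for illustration.
def import_depth_image(filepath):
    importers = {'.bnt': BNTImporter, '.fim': FIMImporter, '.abs': ABSImporter}
    ext = path.splitext(filepath)[1].lower()
    importer = importers[ext](filepath)
    return importer.build()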
|
|
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import shutil
sys.dont_write_bytecode = True # prevent creation of .pyc files
from benchexec import container
from benchexec import containerexecutor
from benchexec import filehierarchylimit
from benchexec.runexecutor import RunExecutor
from benchexec import runexecutor
from benchexec import util
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
try:
unichr(0)
except NameError:
unichr = chr
here = os.path.dirname(__file__)
base_dir = os.path.join(here, '..')
bin_dir = os.path.join(base_dir, 'bin')
runexec = os.path.join(bin_dir, 'runexec')
python = 'python2' if sys.version_info[0] == 2 else 'python3'
class TestRunExecutor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
logging.disable(logging.CRITICAL)
if not hasattr(cls, 'assertRegex'):
cls.assertRegex = cls.assertRegexpMatches
if not hasattr(cls, 'assertRaisesRegex'):
cls.assertRaisesRegex = cls.assertRaisesRegexp
def setUp(self, *args, **kwargs):
self.runexecutor = RunExecutor(*args, **kwargs)
def execute_run(self, *args, **kwargs):
(output_fd, output_filename) = tempfile.mkstemp('.log', 'output_', text=True)
try:
result = self.runexecutor.execute_run(list(args), output_filename, **kwargs)
output_lines = os.read(output_fd, 4096).decode().splitlines()
return (result, output_lines)
finally:
os.close(output_fd)
os.remove(output_filename)
def execute_run_extern(self, *args, **kwargs):
(output_fd, output_filename) = tempfile.mkstemp('.log', 'output_', text=True)
try:
runexec_output = subprocess.check_output(
args=[python, runexec] + list(args) + ['--output', output_filename],
stderr=DEVNULL,
**kwargs
).decode()
output_lines = os.read(output_fd, 4096).decode().splitlines()
except subprocess.CalledProcessError as e:
print(e.output.decode())
raise e
finally:
os.close(output_fd)
os.remove(output_filename)
result={key.strip(): value.strip() for (key, _, value) in (line.partition('=') for line in runexec_output.splitlines())}
return (result, output_lines)
def check_command_in_output(self, output, cmd):
self.assertEqual(output[0], cmd, 'run output misses executed command')
def check_result_keys(self, result, *additional_keys):
expected_keys = {'cputime', 'walltime', 'memory', 'exitcode',
'cpuenergy',
'blkio-read', 'blkio-write',
}
expected_keys.update(additional_keys)
for key in result.keys():
if key.startswith('cputime-cpu'):
self.assertRegex(key, '^cputime-cpu[0-9]+$',
"unexpected result entry '{}={}'".format(key, result[key]))
elif key.startswith('cpuenergy-'):
self.assertRegex(key, '^cpuenergy-pkg[0-9]+(-(core|uncore|dram|psys))?$',
"unexpected result entry '{}={}'".format(key, result[key]))
else:
self.assertIn(key, expected_keys,
"unexpected result entry '{}={}'".format(key, result[key]))
def check_exitcode(self, result, exitcode, msg=None):
self.assertEqual(int(result['exitcode']), exitcode, msg)
def test_command_output(self):
if not os.path.exists('/bin/echo'):
self.skipTest('missing /bin/echo')
(_, output) = self.execute_run('/bin/echo', 'TEST_TOKEN')
self.check_command_in_output(output, '/bin/echo TEST_TOKEN')
self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output')
for line in output[1:-1]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_command_error_output(self):
if not os.path.exists('/bin/echo'):
self.skipTest('missing /bin/echo')
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
def execute_Run_intern(*args, **kwargs):
(error_fd, error_filename) = tempfile.mkstemp('.log', 'error_', text=True)
try:
(_, output_lines) = self.execute_run(*args, error_filename=error_filename, **kwargs)
error_lines = os.read(error_fd, 4096).decode().splitlines()
return (output_lines, error_lines)
finally:
os.close(error_fd)
os.remove(error_filename)
(output_lines, error_lines) = execute_Run_intern('/bin/sh', '-c', '/bin/echo ERROR_TOKEN >&2')
self.assertEqual(error_lines[-1], 'ERROR_TOKEN', 'run error output misses command output')
for line in output_lines[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
for line in error_lines[1:-1]:
self.assertRegex(line, '^-*$', 'unexpected text in run error output')
(output_lines, error_lines) = execute_Run_intern('/bin/echo', 'OUT_TOKEN')
self.check_command_in_output(output_lines, '/bin/echo OUT_TOKEN')
self.check_command_in_output(error_lines, '/bin/echo OUT_TOKEN')
self.assertEqual(output_lines[-1], 'OUT_TOKEN', 'run output misses command output')
for line in output_lines[1:-1]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
for line in error_lines[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run error output')
def test_command_result(self):
if not os.path.exists('/bin/echo'):
self.skipTest('missing /bin/echo')
(result, _) = self.execute_run('/bin/echo', 'TEST_TOKEN')
self.check_exitcode(result, 0, 'exit code of /bin/echo is not zero')
self.assertAlmostEqual(result['walltime'], 0.2, delta=0.2, msg='walltime of /bin/echo not as expected')
self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of /bin/echo not as expected')
self.check_result_keys(result)
def test_cputime_hardlimit(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i',
hardtimelimit=1)
self.check_exitcode(result, 9, 'exit code of killed process is not 9')
if 'terminationreason' in result:
# not produced currently if killed by ulimit
self.assertEqual(result['terminationreason'], 'cputime', 'termination reason is not "cputime"')
self.assertAlmostEqual(result['walltime'], 1.4, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed')
self.assertAlmostEqual(result['cputime'], 1.4, delta=0.5, msg='cputime is not approximately the time after which the process should have been killed')
self.check_result_keys(result, 'terminationreason')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_cputime_softlimit(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
try:
(result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i',
softtimelimit=1)
except SystemExit as e:
self.assertEqual(str(e), 'Soft time limit cannot be specified without cpuacct cgroup.')
self.skipTest(e)
self.check_exitcode(result, 15, 'exit code of killed process is not 15')
self.assertEqual(result['terminationreason'], 'cputime-soft', 'termination reason is not "cputime-soft"')
self.assertAlmostEqual(result['walltime'], 4, delta=3, msg='walltime is not approximately the time after which the process should have been killed')
self.assertAlmostEqual(result['cputime'], 4, delta=3, msg='cputime is not approximately the time after which the process should have been killed')
self.check_result_keys(result, 'terminationreason')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_walltime_limit(self):
if not os.path.exists('/bin/sleep'):
self.skipTest('missing /bin/sleep')
try:
(result, output) = self.execute_run('/bin/sleep', '10', walltimelimit=1)
except SystemExit as e:
self.assertEqual(str(e), 'Wall time limit is not implemented for systems without cpuacct cgroup.')
self.skipTest(e)
self.check_exitcode(result, 9, 'exit code of killed process is not 9')
self.assertEqual(result['terminationreason'], 'walltime', 'termination reason is not "walltime"')
self.assertAlmostEqual(result['walltime'], 4, delta=3, msg='walltime is not approximately the time after which the process should have been killed')
self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of /bin/sleep is not approximately zero')
self.check_result_keys(result, 'terminationreason')
self.check_command_in_output(output, '/bin/sleep 10')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_cputime_walltime_limit(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i',
hardtimelimit=1, walltimelimit=5)
self.check_exitcode(result, 9, 'exit code of killed process is not 9')
if 'terminationreason' in result:
# not produced currently if killed by ulimit
self.assertEqual(result['terminationreason'], 'cputime', 'termination reason is not "cputime"')
self.assertAlmostEqual(result['walltime'], 1.4, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed')
self.assertAlmostEqual(result['cputime'], 1.4, delta=0.5, msg='cputime is not approximately the time after which the process should have been killed')
self.check_result_keys(result, 'terminationreason')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_all_timelimits(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
try:
(result, output) = self.execute_run('/bin/sh', '-c', 'i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i',
softtimelimit=1, hardtimelimit=2, walltimelimit=5)
except SystemExit as e:
self.assertEqual(str(e), 'Soft time limit cannot be specified without cpuacct cgroup.')
self.skipTest(e)
self.check_exitcode(result, 15, 'exit code of killed process is not 15')
self.assertEqual(result['terminationreason'], 'cputime-soft', 'termination reason is not "cputime-soft"')
self.assertAlmostEqual(result['walltime'], 1.4, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed')
self.assertAlmostEqual(result['cputime'], 1.4, delta=0.5, msg='cputime is not approximately the time after which the process should have been killed')
self.check_result_keys(result, 'terminationreason')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_input_is_redirected_from_devnull(self):
if not os.path.exists('/bin/cat'):
self.skipTest('missing /bin/cat')
try:
(result, output) = self.execute_run('/bin/cat', walltimelimit=1)
except SystemExit as e:
self.assertEqual(str(e), 'Wall time limit is not implemented for systems without cpuacct cgroup.')
self.skipTest(e)
self.check_exitcode(result, 0, 'exit code of process is not 0')
self.assertAlmostEqual(result['walltime'], 0.2, delta=0.2, msg='walltime of "/bin/cat < /dev/null" is not approximately zero')
self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of "/bin/cat < /dev/null" is not approximately zero')
self.check_result_keys(result)
self.check_command_in_output(output, '/bin/cat')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_input_is_redirected_from_file(self):
if not os.path.exists('/bin/cat'):
self.skipTest('missing /bin/cat')
with tempfile.TemporaryFile() as tmp:
tmp.write(b'TEST_TOKEN')
tmp.flush()
tmp.seek(0)
try:
(result, output) = self.execute_run('/bin/cat', stdin=tmp, walltimelimit=1)
except SystemExit as e:
self.assertEqual(str(e), 'Wall time limit is not implemented for systems without cpuacct cgroup.')
self.skipTest(e)
self.check_exitcode(result, 0, 'exit code of process is not 0')
self.assertAlmostEqual(result['walltime'], 0.2, delta=0.2, msg='walltime of "/bin/cat < /dev/null" is not approximately zero')
self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of "/bin/cat < /dev/null" is not approximately zero')
self.check_result_keys(result)
self.check_command_in_output(output, '/bin/cat')
self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output')
for line in output[1:-1]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_input_is_redirected_from_stdin(self):
if not os.path.exists('/bin/cat'):
self.skipTest('missing /bin/cat')
(output_fd, output_filename) = tempfile.mkstemp('.log', 'output_', text=True)
cmd = [python, runexec, '--input', '-', '--output', output_filename, '--walltime', '1', '/bin/cat']
try:
process = subprocess.Popen(args=cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=DEVNULL)
try:
runexec_output, unused_err = process.communicate(b'TEST_TOKEN')
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
print(runexec_output.decode())
raise subprocess.CalledProcessError(retcode, cmd, output=runexec_output)
output = os.read(output_fd, 4096).decode().splitlines()
finally:
os.close(output_fd)
os.remove(output_filename)
result={key.strip(): value.strip() for (key, _, value) in (line.partition('=') for line in runexec_output.decode().splitlines())}
self.check_exitcode(result, 0, 'exit code of process is not 0')
self.assertAlmostEqual(float(result['walltime'].rstrip('s')), 0.2, delta=0.2, msg='walltime of "/bin/cat < /dev/null" is not approximately zero')
self.assertAlmostEqual(float(result['cputime'].rstrip('s')), 0.2, delta=0.2, msg='cputime of "/bin/cat < /dev/null" is not approximately zero')
self.check_result_keys(result, 'returnvalue')
self.check_command_in_output(output, '/bin/cat')
self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output')
for line in output[1:-1]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_append_environment_variable(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(_, output) = self.execute_run('/bin/sh', '-c', 'echo $PATH')
path = output[-1]
(_, output) = self.execute_run('/bin/sh', '-c', 'echo $PATH', environments={'additionalEnv': {'PATH': ':TEST_TOKEN'}})
self.assertEqual(output[-1], path + ':TEST_TOKEN')
def test_new_environment_variable(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(_, output) = self.execute_run('/bin/sh', '-c', 'echo $PATH', environments={'newEnv': {'PATH': '/usr/bin'}})
self.assertEqual(output[-1], '/usr/bin')
def test_stop_run(self):
if not os.path.exists('/bin/sleep'):
self.skipTest('missing /bin/sleep')
thread = _StopRunThread(1, self.runexecutor)
thread.start()
(result, output) = self.execute_run('/bin/sleep', '10')
thread.join()
self.check_exitcode(result, 9, 'exit code of killed process is not 9')
self.assertEqual(result['terminationreason'], 'killed', 'termination reason is not "killed"')
self.assertAlmostEqual(result['walltime'], 1, delta=0.5, msg='walltime is not approximately the time after which the process should have been killed')
self.assertAlmostEqual(result['cputime'], 0.2, delta=0.2, msg='cputime of /bin/sleep is not approximately zero')
self.check_result_keys(result, 'terminationreason')
self.check_command_in_output(output, '/bin/sleep 10')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_reduce_file_size_empty_file(self):
with tempfile.NamedTemporaryFile() as tmp:
runexecutor._reduce_file_size_if_necessary(tmp.name, 0)
self.assertEqual(os.path.getsize(tmp.name), 0)
def test_reduce_file_size_empty_file2(self):
with tempfile.NamedTemporaryFile() as tmp:
runexecutor._reduce_file_size_if_necessary(tmp.name, 500)
self.assertEqual(os.path.getsize(tmp.name), 0)
def test_reduce_file_size_long_line_not_truncated(self):
with tempfile.NamedTemporaryFile(mode='wt') as tmp:
content = 'Long line ' * 500
tmp.write(content)
tmp.flush()
runexecutor._reduce_file_size_if_necessary(tmp.name, 500)
with open(tmp.name, 'rt') as tmp2:
self.assertMultiLineEqual(tmp2.read(), content)
REDUCE_WARNING_MSG = "WARNING: YOUR LOGFILE WAS TOO LONG, SOME LINES IN THE MIDDLE WERE REMOVED."
REDUCE_OVERHEAD = 100
def test_reduce_file_size(self):
with tempfile.NamedTemporaryFile(mode='wt') as tmp:
line = 'Some text\n'
tmp.write(line * 500)
tmp.flush()
limit = 500
runexecutor._reduce_file_size_if_necessary(tmp.name, limit)
self.assertLessEqual(os.path.getsize(tmp.name), limit + self.REDUCE_OVERHEAD)
with open(tmp.name, 'rt') as tmp2:
new_content = tmp2.read()
self.assertIn(self.REDUCE_WARNING_MSG, new_content)
self.assertTrue(new_content.startswith(line))
self.assertTrue(new_content.endswith(line))
def test_reduce_file_size_limit_zero(self):
with tempfile.NamedTemporaryFile(mode='wt') as tmp:
line = 'Some text\n'
tmp.write(line * 500)
tmp.flush()
runexecutor._reduce_file_size_if_necessary(tmp.name, 0)
self.assertLessEqual(os.path.getsize(tmp.name), self.REDUCE_OVERHEAD)
with open(tmp.name, 'rt') as tmp2:
new_content = tmp2.read()
self.assertIn(self.REDUCE_WARNING_MSG, new_content)
self.assertTrue(new_content.startswith(line))
def test_integration(self):
if not os.path.exists('/bin/echo'):
self.skipTest('missing /bin/echo')
(result, output) = self.execute_run_extern('/bin/echo', 'TEST_TOKEN')
self.check_exitcode(result, 0, 'exit code of /bin/echo is not zero')
self.check_result_keys(result, 'returnvalue')
self.check_command_in_output(output, '/bin/echo TEST_TOKEN')
self.assertEqual(output[-1], 'TEST_TOKEN', 'run output misses command output')
for line in output[1:-1]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_home_and_tmp_is_separate(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(result, output) = self.execute_run('/bin/sh', '-c', 'echo $HOME $TMPDIR')
self.check_exitcode(result, 0, 'exit code of /bin/sh is not zero')
self.assertRegex(output[-1], '/BenchExec_run_[^/]*/home .*/BenchExec_run_[^/]*/tmp',
'HOME or TMPDIR variable does not contain expected temporary directory')
def test_temp_dirs_are_removed(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(result, output) = self.execute_run('/bin/sh', '-c', 'echo $HOME $TMPDIR')
self.check_exitcode(result, 0, 'exit code of /bin/sh is not zero')
home_dir = output[-1].split(' ')[0]
temp_dir = output[-1].split(' ')[1]
self.assertFalse(os.path.exists(home_dir),
'temporary home directory {} was not cleaned up'.format(home_dir))
self.assertFalse(os.path.exists(temp_dir),
'temporary temp directory {} was not cleaned up'.format(temp_dir))
def test_home_is_writable(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
(result, output) = self.execute_run('/bin/sh', '-c', 'touch $HOME/TEST_FILE')
self.check_exitcode(
result, 0, 'Failed to write to $HOME/TEST_FILE, output was\n{}'.format(output))
def test_no_cleanup_temp(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
self.setUp(cleanup_temp_dir=False) # create RunExecutor with desired parameter
(result, output) = self.execute_run('/bin/sh', '-c', 'echo "$TMPDIR"; echo "" > "$TMPDIR/test"')
self.check_exitcode(result, 0, 'exit code of /bin/sh is not zero')
temp_dir = output[-1]
test_file = os.path.join(temp_dir, 'test')
subprocess.check_call(self.runexecutor._build_cmdline(['test', '-f', test_file]))
self.assertEqual('tmp', os.path.basename(temp_dir), 'unexpected name of temp dir')
self.assertNotEqual('/tmp', temp_dir, 'temp dir should not be the global temp dir')
subprocess.check_call(self.runexecutor._build_cmdline(['rm', '-r', os.path.dirname(temp_dir)]))
def test_require_cgroup_invalid(self):
self.assertRaisesRegex(SystemExit, '.*invalid.*',
lambda: RunExecutor(additional_cgroup_subsystems=['invalid']))
def test_require_cgroup_cpu(self):
try:
self.setUp(additional_cgroup_subsystems=['cpu'])
except SystemExit as e:
self.skipTest(e)
if not os.path.exists('/bin/cat'):
self.skipTest('missing /bin/cat')
(result, output) = self.execute_run('/bin/cat', '/proc/self/cgroup')
self.check_exitcode(result, 0, 'exit code of /bin/cat is not zero')
for line in output:
if re.match('^[0-9]*:([^:]*,)?cpu(,[^:]*)?:/(.*/)?benchmark_.*$',line):
return # Success
self.fail('Not in expected cgroup for subsystem cpu:\n' + '\n'.join(output))
def test_set_cgroup_cpu_shares(self):
if not os.path.exists('/bin/echo'):
self.skipTest('missing /bin/echo')
try:
self.setUp(additional_cgroup_subsystems=['cpu'])
except SystemExit as e:
self.skipTest(e)
(result, _) = self.execute_run('/bin/echo',
cgroupValues={('cpu', 'shares'): 42})
self.check_exitcode(result, 0, 'exit code of /bin/echo is not zero')
# Just assert that execution was successful,
# testing that the value was actually set is much more difficult.
class TestRunExecutorWithSudo(TestRunExecutor):
"""
Run tests using the sudo mode of RunExecutor, if possible.
sudo is typically set up to allow executing as our own user,
so we try that. Note that this will not catch all problems,
for example if we forget to use "sudo kill" to send a signal
and instead send it directly, but requiring a second user for tests
would not be good, either.
"""
# Use user name defined in environment variable if present,
# or fall back to current user (sudo always allows this).
    # sudo allows referring to numerical uids with '#'.
user = os.environ.get('BENCHEXEC_TEST_USER', '#' + str(os.getuid()))
def setUp(self, *args, **kwargs):
try:
self.runexecutor = RunExecutor(user=self.user, *args, **kwargs)
except SystemExit as e:
# sudo seems not to be available
self.skipTest(e)
def check_exitcode(self, result, expected, msg=None):
actual = int(result['exitcode'])
if expected == 15 and actual == 0:
# On Ubuntu 16.04, sudo returns 0 if process is killed with signal 15
return
# Using sudo may affect the exit code:
# what was the returnsignal is now the returnvalue.
# The distinction between returnsignal and returnvalue of the actual
# process is lost.
# If the returnsignal (of the sudo process) is 0,
# we replace the exit code with the mixed returnsignal/returnvalue of
# the actual process (with bit for core dump cleared).
returnsignal = actual & 0x7F
returnvalue = (actual >> 8) & 0x7F
if returnsignal == 0:
actual = returnvalue
self.assertEqual(actual, expected, msg)
def check_command_in_output(self, output, cmd):
self.assertTrue(output[0].endswith(cmd), 'run output misses executed command')
def test_detect_new_files_in_home(self):
if not os.path.exists('/bin/mktemp'):
self.skipTest('missing /bin/mktemp')
home_dir = runexecutor._get_user_account_info(self.user).pw_dir
tmp_file_pattern = '.BenchExec_test_runexecutor_XXXXXXXXXX'
(result, output) = self.execute_run(
'/bin/mktemp', '--tmpdir=' + home_dir, tmp_file_pattern)
try:
self.check_exitcode(result, 0, 'exit code of /bin/mktemp is not zero')
tmp_file = output[-1]
self.assertIn(os.path.relpath(tmp_file, home_dir),
self.runexecutor.check_for_new_files_in_home(),
'runexecutor failed to detect new temporary file in home directory')
finally:
subprocess.check_call(self.runexecutor._build_cmdline(['rm', tmp_file]))
def test_append_environment_variable(self):
# sudo-mode has a suboptimal implementation for additionalEnv:
        # Environment variables that are not explicitly modified are cleared
        # completely, and sudo then sets PATH itself. If PATH is specified in
        # additionalEnv, we copy the value from the current process (which
        # differs from what sudo would set) and append the given string.
pass
class TestRunExecutorWithContainer(TestRunExecutor):
def setUp(self, *args, **kwargs):
try:
container.execute_in_namespace(lambda: 0)
except OSError as e:
self.skipTest("Namespaces not supported: {}".format(os.strerror(e.errno)))
self.runexecutor = RunExecutor(
use_namespaces=True,
dir_modes={"/": containerexecutor.DIR_READ_ONLY,
"/tmp": containerexecutor.DIR_HIDDEN},
container_system_config=False,
*args, **kwargs)
def execute_run(self, *args, **kwargs):
return super(TestRunExecutorWithContainer, self).execute_run(workingDir="/tmp", *args, **kwargs)
def test_home_and_tmp_is_separate(self):
self.skipTest("not relevant in container")
def test_temp_dirs_are_removed(self):
self.skipTest("not relevant in container")
def test_home_is_writable(self):
self.skipTest("needs container_system_config=True and thus overlay mode")
def test_no_cleanup_temp(self):
self.skipTest("not relevant in container")
def check_result_files(self, shell_cmd, result_files_patterns, expected_result_files):
output_dir = tempfile.mkdtemp("", "output_")
try:
result, output = self.execute_run("/bin/sh", "-c", shell_cmd,
output_dir=output_dir,
result_files_patterns=result_files_patterns)
self.assertNotIn("terminationreason", result)
self.assertEqual(result["exitcode"], 0,
"exit code of {} is not zero,\nresult was {!r},\noutput was\n{}"
.format(" ".join(shell_cmd), result, "\n".join(output)))
result_files = []
for root, unused_dirs, files in os.walk(output_dir):
for file in files:
result_files.append(os.path.relpath(os.path.join(root, file), output_dir))
expected_result_files.sort()
result_files.sort()
self.assertListEqual(result_files, expected_result_files,
"\nList of retrieved result files differs from expected list,\n"
"result was {!r},\noutput was\n{}".format(result, "\n".join(output)))
finally:
shutil.rmtree(output_dir, ignore_errors=True)
def test_result_file_simple(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["."], ["TEST_FILE"])
def test_result_file_recursive(self):
self.check_result_files("mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE", ["."],
["TEST_DIR/TEST_FILE"])
def test_result_file_multiple(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE; echo TEST_TOKEN > TEST_FILE2", ["."],
["TEST_FILE", "TEST_FILE2"])
def test_result_file_symlink(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE; ln -s TEST_FILE TEST_LINK", ["."],
["TEST_FILE"])
def test_result_file_no_match(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["NO_MATCH"], [])
def test_result_file_no_pattern(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", [], [])
def test_result_file_empty_pattern(self):
self.assertRaises(ValueError,
lambda: self.check_result_files("echo TEST_TOKEN > TEST_FILE", [""], []))
def test_result_file_partial_match(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE",
["TEST_DIR"], ["TEST_DIR/TEST_FILE"])
def test_result_file_multiple_patterns(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; "
"echo TEST_TOKEN > TEST_FILE2; "
"mkdir TEST_DIR; "
"echo TEST_TOKEN > TEST_DIR/TEST_FILE; ",
["TEST_FILE", "TEST_DIR/TEST_FILE"], ["TEST_FILE", "TEST_DIR/TEST_FILE"])
def test_result_file_wildcard(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; "
"echo TEST_TOKEN > TEST_FILE2; "
"echo TEST_TOKEN > TEST_NOFILE; ",
["TEST_FILE*"], ["TEST_FILE", "TEST_FILE2"])
def test_result_file_absolute_pattern(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["/"], ["tmp/TEST_FILE"])
def test_result_file_absolute_and_pattern(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE",
["TEST_FILE", "/tmp/TEST_DIR", ], ["tmp/TEST_FILE", "tmp/TEST_DIR/TEST_FILE"])
def test_result_file_relative_traversal(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["foo/../TEST_FILE"], ["TEST_FILE"])
def test_result_file_illegal_relative_traversal(self):
self.assertRaises(ValueError,
lambda: self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["foo/../../bar"], []))
def test_result_file_recursive_pattern(self):
        if util.maybe_recursive_iglob != glob.iglob:
self.skipTest("missing recursive glob.iglob")
self.check_result_files(
"mkdir -p TEST_DIR/TEST_DIR; "
"echo TEST_TOKEN > TEST_FILE.txt; "
"echo TEST_TOKEN > TEST_DIR/TEST_FILE.txt; "
"echo TEST_TOKEN > TEST_DIR/TEST_DIR/TEST_FILE.txt; ",
["**/*.txt"],
["TEST_FILE.txt", "TEST_DIR/TEST_FILE.txt", "TEST_DIR/TEST_DIR/TEST_FILE.txt"])
def test_file_count_limit(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
filehierarchylimit._CHECK_INTERVAL_SECONDS = 0.1
(result, output) = self.execute_run('/bin/sh', '-c', 'for i in $(seq 1 10000); do touch $i; done',
files_count_limit=100, result_files_patterns=None)
        self.check_exitcode(result, 9, 'exit code of killed process is not 9')
self.assertEqual(result['terminationreason'], 'files-count', 'termination reason is not "files-count"')
self.check_result_keys(result, 'terminationreason')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
def test_file_size_limit(self):
if not os.path.exists('/bin/sh'):
self.skipTest('missing /bin/sh')
filehierarchylimit._CHECK_INTERVAL_SECONDS = 0.1
(result, output) = self.execute_run('/bin/sh', '-c', 'for i in $(seq 1 100000); do echo $i >> TEST_FILE; done',
files_size_limit=100, result_files_patterns=None)
        self.check_exitcode(result, 9, 'exit code of killed process is not 9')
self.assertEqual(result['terminationreason'], 'files-size', 'termination reason is not "files-size"')
self.check_result_keys(result, 'terminationreason')
for line in output[1:]:
self.assertRegex(line, '^-*$', 'unexpected text in run output')
class _StopRunThread(threading.Thread):
def __init__(self, delay, runexecutor):
super(_StopRunThread, self).__init__()
self.daemon = True
self.delay = delay
self.runexecutor = runexecutor
def run(self):
time.sleep(self.delay)
self.runexecutor.stop()
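# Hedged usage sketch (not part of the original test suite): _StopRunThread can be
# used to abort a long-running command from another thread. The keyword name
# ``output_filename`` below is an assumption about RunExecutor.execute_run(); adjust
# it if the actual signature differs.
def _example_stop_run_after_delay(runexecutor, output_log='/tmp/stop_run_example.log'):
    stopper = _StopRunThread(delay=1, runexecutor=runexecutor)
    stopper.start()
    # The sleep is much longer than the delay, so the run should be terminated by
    # runexecutor.stop() and report a termination reason such as 'killed'.
    return runexecutor.execute_run(['/bin/sleep', '10'], output_filename=output_log)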
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implements the SSH v2 key agent protocol. This protocol is documented in the
SSH source code, in the file
U{PROTOCOL.agent<http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent>}.
Maintainer: Paul Swartz
"""
import struct
from twisted.conch.ssh.common import NS, getNS, getMP
from twisted.conch.error import ConchError, MissingKeyStoreError
from twisted.conch.ssh import keys
from twisted.internet import defer, protocol
class SSHAgentClient(protocol.Protocol):
"""
The client side of the SSH agent protocol. This is equivalent to
ssh-add(1) and can be used with either ssh-agent(1) or the SSHAgentServer
protocol, also in this package.
"""
def __init__(self):
self.buf = ''
self.deferreds = []
def dataReceived(self, data):
self.buf += data
while 1:
if len(self.buf) <= 4:
return
packLen = struct.unpack('!L', self.buf[:4])[0]
if len(self.buf) < 4 + packLen:
return
packet, self.buf = self.buf[4:4 + packLen], self.buf[4 + packLen:]
reqType = ord(packet[0])
d = self.deferreds.pop(0)
if reqType == AGENT_FAILURE:
d.errback(ConchError('agent failure'))
elif reqType == AGENT_SUCCESS:
d.callback('')
else:
d.callback(packet)
def sendRequest(self, reqType, data):
        pack = struct.pack('!LB', len(data) + 1, reqType) + data
self.transport.write(pack)
d = defer.Deferred()
self.deferreds.append(d)
return d
def requestIdentities(self):
"""
@return: A L{Deferred} which will fire with a list of all keys found in
the SSH agent. The list of keys is comprised of (public key blob,
comment) tuples.
"""
d = self.sendRequest(AGENTC_REQUEST_IDENTITIES, '')
d.addCallback(self._cbRequestIdentities)
return d
def _cbRequestIdentities(self, data):
"""
Unpack a collection of identities into a list of tuples comprised of
public key blobs and comments.
"""
if ord(data[0]) != AGENT_IDENTITIES_ANSWER:
raise ConchError('unexpected response: %i' % ord(data[0]))
numKeys = struct.unpack('!L', data[1:5])[0]
keys = []
data = data[5:]
for i in range(numKeys):
blob, data = getNS(data)
comment, data = getNS(data)
keys.append((blob, comment))
return keys
def addIdentity(self, blob, comment = ''):
"""
Add a private key blob to the agent's collection of keys.
"""
req = blob
req += NS(comment)
return self.sendRequest(AGENTC_ADD_IDENTITY, req)
def signData(self, blob, data):
"""
Request that the agent sign the given C{data} with the private key
which corresponds to the public key given by C{blob}. The private
key should have been added to the agent already.
@type blob: C{str}
@type data: C{str}
@return: A L{Deferred} which fires with a signature for given data
created with the given key.
"""
req = NS(blob)
req += NS(data)
req += '\000\000\000\000' # flags
return self.sendRequest(AGENTC_SIGN_REQUEST, req).addCallback(self._cbSignData)
def _cbSignData(self, data):
if ord(data[0]) != AGENT_SIGN_RESPONSE:
raise ConchError('unexpected data: %i' % ord(data[0]))
signature = getNS(data[1:])[0]
return signature
def removeIdentity(self, blob):
"""
Remove the private key corresponding to the public key in blob from the
running agent.
"""
req = NS(blob)
return self.sendRequest(AGENTC_REMOVE_IDENTITY, req)
def removeAllIdentities(self):
"""
Remove all keys from the running agent.
"""
return self.sendRequest(AGENTC_REMOVE_ALL_IDENTITIES, '')
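# Hedged usage sketch (not part of the original module): connect SSHAgentClient to a
# running ssh-agent over the UNIX socket named by SSH_AUTH_SOCK and list the held
# keys. Assumes an agent is running and the environment variable is set.
def _example_list_agent_keys():
    import os
    from twisted.internet import reactor
    from twisted.internet.protocol import ClientCreator

    def _got_client(client):
        d = client.requestIdentities()
        # Each identity is a (public key blob, comment) tuple; return the comments.
        d.addCallback(lambda identities: [comment for _blob, comment in identities])
        return d

    cc = ClientCreator(reactor, SSHAgentClient)
    d = cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
    d.addCallback(_got_client)
    return d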
class SSHAgentServer(protocol.Protocol):
"""
The server side of the SSH agent protocol. This is equivalent to
ssh-agent(1) and can be used with either ssh-add(1) or the SSHAgentClient
protocol, also in this package.
"""
def __init__(self):
self.buf = ''
def dataReceived(self, data):
self.buf += data
while 1:
if len(self.buf) <= 4:
return
packLen = struct.unpack('!L', self.buf[:4])[0]
if len(self.buf) < 4 + packLen:
return
packet, self.buf = self.buf[4:4 + packLen], self.buf[4 + packLen:]
reqType = ord(packet[0])
reqName = messages.get(reqType, None)
if not reqName:
self.sendResponse(AGENT_FAILURE, '')
else:
f = getattr(self, 'agentc_%s' % reqName)
if getattr(self.factory, 'keys', None) is None:
self.sendResponse(AGENT_FAILURE, '')
raise MissingKeyStoreError()
f(packet[1:])
def sendResponse(self, reqType, data):
pack = struct.pack('!LB', len(data) + 1, reqType) + data
self.transport.write(pack)
def agentc_REQUEST_IDENTITIES(self, data):
"""
Return all of the identities that have been added to the server
"""
assert data == ''
numKeys = len(self.factory.keys)
resp = []
resp.append(struct.pack('!L', numKeys))
for key, comment in self.factory.keys.itervalues():
resp.append(NS(key.blob())) # yes, wrapped in an NS
resp.append(NS(comment))
self.sendResponse(AGENT_IDENTITIES_ANSWER, ''.join(resp))
def agentc_SIGN_REQUEST(self, data):
"""
Data is a structure with a reference to an already added key object and
some data that the clients wants signed with that key. If the key
object wasn't loaded, return AGENT_FAILURE, else return the signature.
"""
blob, data = getNS(data)
if blob not in self.factory.keys:
return self.sendResponse(AGENT_FAILURE, '')
signData, data = getNS(data)
assert data == '\000\000\000\000'
self.sendResponse(AGENT_SIGN_RESPONSE, NS(self.factory.keys[blob][0].sign(signData)))
def agentc_ADD_IDENTITY(self, data):
"""
Adds a private key to the agent's collection of identities. On
subsequent interactions, the private key can be accessed using only the
corresponding public key.
"""
# need to pre-read the key data so we can get past it to the comment string
keyType, rest = getNS(data)
if keyType == 'ssh-rsa':
nmp = 6
elif keyType == 'ssh-dss':
nmp = 5
else:
raise keys.BadKeyError('unknown blob type: %s' % keyType)
rest = getMP(rest, nmp)[-1] # ignore the key data for now, we just want the comment
comment, rest = getNS(rest) # the comment, tacked onto the end of the key blob
k = keys.Key.fromString(data, type='private_blob') # not wrapped in NS here
self.factory.keys[k.blob()] = (k, comment)
self.sendResponse(AGENT_SUCCESS, '')
def agentc_REMOVE_IDENTITY(self, data):
"""
Remove a specific key from the agent's collection of identities.
"""
blob, _ = getNS(data)
k = keys.Key.fromString(blob, type='blob')
del self.factory.keys[k.blob()]
self.sendResponse(AGENT_SUCCESS, '')
def agentc_REMOVE_ALL_IDENTITIES(self, data):
"""
Remove all keys from the agent's collection of identities.
"""
assert data == ''
self.factory.keys = {}
self.sendResponse(AGENT_SUCCESS, '')
# v1 messages that we ignore because we don't keep v1 keys
# open-ssh sends both v1 and v2 commands, so we have to
# do no-ops for v1 commands or we'll get "bad request" errors
def agentc_REQUEST_RSA_IDENTITIES(self, data):
"""
v1 message for listing RSA1 keys; superseded by
agentc_REQUEST_IDENTITIES, which handles different key types.
"""
self.sendResponse(AGENT_RSA_IDENTITIES_ANSWER, struct.pack('!L', 0))
def agentc_REMOVE_RSA_IDENTITY(self, data):
"""
v1 message for removing RSA1 keys; superseded by
agentc_REMOVE_IDENTITY, which handles different key types.
"""
self.sendResponse(AGENT_SUCCESS, '')
def agentc_REMOVE_ALL_RSA_IDENTITIES(self, data):
"""
v1 message for removing all RSA1 keys; superseded by
agentc_REMOVE_ALL_IDENTITIES, which handles different key types.
"""
self.sendResponse(AGENT_SUCCESS, '')
AGENTC_REQUEST_RSA_IDENTITIES = 1
AGENT_RSA_IDENTITIES_ANSWER = 2
AGENT_FAILURE = 5
AGENT_SUCCESS = 6
AGENTC_REMOVE_RSA_IDENTITY = 8
AGENTC_REMOVE_ALL_RSA_IDENTITIES = 9
AGENTC_REQUEST_IDENTITIES = 11
AGENT_IDENTITIES_ANSWER = 12
AGENTC_SIGN_REQUEST = 13
AGENT_SIGN_RESPONSE = 14
AGENTC_ADD_IDENTITY = 17
AGENTC_REMOVE_IDENTITY = 18
AGENTC_REMOVE_ALL_IDENTITIES = 19
messages = {}
for name, value in locals().copy().items():
if name[:7] == 'AGENTC_':
messages[value] = name[7:] # doesn't handle doubles
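# Hedged sketch (not part of the original module): the ``messages`` table maps the
# numeric AGENTC_* request codes to the suffix of the handler method that
# SSHAgentServer.dataReceived dispatches to.
def _example_handler_name(req_type=AGENTC_REQUEST_IDENTITIES):
    return 'agentc_%s' % messages[req_type]  # -> 'agentc_REQUEST_IDENTITIES'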
|
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import rdflib
import logging
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
logger = logging.getLogger('hierarchy_manager')
class HierarchyManager(object):
"""Store hierarchical relationships between different types of entities.
Used to store, e.g., entity hierarchies (proteins and protein families)
and modification hierarchies (serine phosphorylation vs. phosphorylation).
Parameters
----------
rdf_file : string
Path to the RDF file containing the hierarchy.
Attributes
----------
graph : instance of `rdflib.Graph`
The RDF graph containing the hierarchy.
"""
prefixes = """
PREFIX rn: <http://sorger.med.harvard.edu/indra/relations/>
"""
def __init__(self, rdf_file, build_closure=True):
"""Initialize with the path to an RDF file"""
self.graph = rdflib.Graph()
self.graph.parse(rdf_file, format='nt')
self.isa_closure = {}
self.partof_closure = {}
self.components = {}
if build_closure:
self.build_transitive_closures()
# Build reverse lookup dict from the entity hierarchy
self._children = {}
logger.info('Generating reverse lookup table for families')
all_children = set(self.isa_closure.keys()).union(
self.partof_closure.keys())
for child in all_children:
parents = self.get_parents(child)
for parent in parents:
children_list = self._children.get(parent, [])
children_list.append(child)
self._children[parent] = children_list
def build_transitive_closures(self):
"""Build the transitive closures of the hierarchy.
This method constructs dictionaries which contain terms in the
hierarchy as keys and either all the "isa+" or "partof+" related terms
as values.
"""
component_counter = 0
for rel, tc_dict in (('isa', self.isa_closure),
('partof', self.partof_closure)):
qstr = self.prefixes + """
SELECT ?x ?y WHERE {{
{{?x rn:{0}+ ?y .}}
}}
""".format(rel)
res = self.graph.query(qstr)
for x, y in res:
xs = x.toPython()
ys = y.toPython()
try:
tc_dict[xs].append(ys)
except KeyError:
tc_dict[xs] = [ys]
xcomp = self.components.get(xs)
ycomp = self.components.get(ys)
if xcomp is None:
if ycomp is None:
# Neither x nor y are in a component so we start a
# new component and assign x and y to the same
# component
self.components[xs] = component_counter
self.components[ys] = component_counter
component_counter += 1
else:
# Because y is already part of an existing component
# we assign its component to x
self.components[xs] = ycomp
else:
if ycomp is None:
# Because x is already part of an existing component
# we assign its component to y
self.components[ys] = xcomp
else:
# This is a special case in which both x and y are
# parts of components
# If they are in the same component then there's
# nothing further to do
if xcomp == ycomp:
continue
else:
remove_component = max(xcomp, ycomp)
joint_component = min(xcomp, ycomp)
for k, v in self.components.items():
if v == remove_component:
self.components[k] = joint_component
@lru_cache(maxsize=100000)
def find_entity(self, x):
"""
Get the entity that has the specified name (or synonym).
Parameters
----------
x : string
Name or synonym for the target entity.
"""
qstr = self.prefixes + """
SELECT ?x WHERE {{
?x rn:hasName "{0}" .
}}
""".format(x)
res = self.graph.query(qstr)
if list(res):
en = list(res)[0][0].toPython()
return en
else:
return None
def isa(self, ns1, id1, ns2, id2):
"""Indicate whether one entity has an "isa" relationship to another.
Parameters
----------
ns1 : string
Namespace code for an entity.
id1 : string
URI for an entity.
ns2 : string
Namespace code for an entity.
id2 : string
URI for an entity.
Returns
-------
bool
True if t1 has an "isa" relationship with t2, either directly or
through a series of intermediates; False otherwise.
"""
# if id2 is None, or both are None, then it's by definition isa:
if id2 is None or (id2 is None and id1 is None):
return True
# If only id1 is None, then it cannot be isa
elif id1 is None:
return False
if self.isa_closure:
term1 = self.get_uri(ns1, id1)
term2 = self.get_uri(ns2, id2)
ec = self.isa_closure.get(term1)
if ec is not None and term2 in ec:
return True
else:
return False
else:
return self.query_rdf(id1, 'rn:isa+', id2)
def partof(self, ns1, id1, ns2, id2):
"""Indicate whether one entity is physically part of another.
Parameters
----------
ns1 : string
Namespace code for an entity.
id1 : string
URI for an entity.
ns2 : string
Namespace code for an entity.
id2 : string
URI for an entity.
Returns
-------
bool
True if t1 has a "partof" relationship with t2, either directly or
through a series of intermediates; False otherwise.
"""
        # if id2 is None, or both are None, then it's by definition partof:
if id2 is None or (id2 is None and id1 is None):
return True
        # If only id1 is None, then it cannot be partof
elif id1 is None:
return False
if self.partof_closure:
term1 = self.get_uri(ns1, id1)
term2 = self.get_uri(ns2, id2)
ec = self.partof_closure.get(term1)
if ec is not None and term2 in ec:
return True
else:
return False
else:
return self.query_rdf(id1, 'rn:partof+', id2)
def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
immediate_parents = set(self.isa_closure.get(uri, [])).union(
set(self.partof_closure.get(uri, [])))
if type == 'immediate':
return immediate_parents
all_parents = set()
for parent in immediate_parents:
grandparents = self.get_parents(parent, type='all')
all_parents = all_parents.union(grandparents)
all_parents = all_parents.union(immediate_parents)
if type == 'all':
return all_parents
else:
top_parents = set()
for parent in all_parents:
if not self.get_parents(parent, type='immediate'):
top_parents.add(parent)
return top_parents
return
def get_children(self, uri):
"""Return all (not just immediate) children of a given entry.
Parameters
----------
uri : str
The URI of the entry whose children are to be returned. See the
get_uri method to construct this URI from a name space and id.
"""
children = self._children.get(uri, [])
return children
@lru_cache(maxsize=100000)
def query_rdf(self, id1, rel, id2):
term1 = self.find_entity(id1)
term2 = self.find_entity(id2)
qstr = self.prefixes + """
SELECT (COUNT(*) as ?s) WHERE {{
<{}> {} <{}> .
}}
""".format(term1, rel, term2)
res = self.graph.query(qstr)
count = [r[0] for r in res][0]
if count.toPython() == 1:
return True
else:
return False
@staticmethod
def get_uri(ns, id):
if ns == 'HGNC':
return 'http://identifiers.org/hgnc.symbol/' + id
elif ns == 'UP':
return 'http://identifiers.org/uniprot/' + id
elif ns == 'BE' or ns == 'INDRA':
return 'http://sorger.med.harvard.edu/indra/entities/' + id
else:
raise ValueError('Unknown namespace %s' % ns)
# Load the default entity and modification hierarchies
entity_file_path = os.path.join(os.path.dirname(__file__),
'../resources/entity_hierarchy.rdf')
mod_file_path = os.path.join(os.path.dirname(__file__),
'../resources/modification_hierarchy.rdf')
act_file_path = os.path.join(os.path.dirname(__file__),
'../resources/activity_hierarchy.rdf')
ccomp_file_path = os.path.join(os.path.dirname(__file__),
'../resources/cellular_component_hierarchy.rdf')
"""Default entity hierarchy loaded from the RDF file at
`resources/entity_hierarchy.rdf`."""
entity_hierarchy = HierarchyManager(entity_file_path, build_closure=True)
"""Default modification hierarchy loaded from the RDF file at
`resources/modification_hierarchy.rdf`."""
modification_hierarchy = HierarchyManager(mod_file_path, build_closure=True)
"""Default activity hierarchy loaded from the RDF file at
`resources/activity_hierarchy.rdf`."""
activity_hierarchy = HierarchyManager(act_file_path, build_closure=True)
"""Default cellular_component hierarchy loaded from the RDF file at
`resources/cellular_component_hierarchy.rdf`."""
ccomp_hierarchy = HierarchyManager(ccomp_file_path, build_closure=False)
hierarchies = {'entity': entity_hierarchy,
'modification': modification_hierarchy,
'activity': activity_hierarchy,
'cellular_component': ccomp_hierarchy}
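# Hedged usage sketch (not part of the original module): query the default entity
# hierarchy. The HGNC/BE identifiers below are illustrative placeholders; actual
# results depend on the contents of the bundled RDF files.
def _example_entity_hierarchy_lookup():
    hm = entity_hierarchy
    child_uri = hm.get_uri('HGNC', 'MAPK1')
    # Immediate parents of the entry, if it appears in the hierarchy at all.
    parents = hm.get_parents(child_uri, type='immediate')
    # True if MAPK1 "isa" the MAPK family entry, directly or transitively.
    is_family_member = hm.isa('HGNC', 'MAPK1', 'BE', 'MAPK')
    return parents, is_family_member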
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST conversion templates.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
class ContextAdjuster(gast.NodeTransformer):
"""Adjusts the ctx field of nodes to ensure consistency.
This transformer can change the ctx fields of a variable, tuple and other
AST elements that allow one, based on whether the element is being read or
written.
"""
def __init__(self, override_value):
self._ctx_override = override_value
def visit(self, node):
original_override = self._ctx_override
node = super(ContextAdjuster, self).visit(node)
if hasattr(node, 'ctx'):
assert node.ctx is not None, 'node {} has ctx unset'.format(node)
self._ctx_override = original_override
return node
def _apply_override(self, node):
if self._ctx_override is not None:
node.ctx = self._ctx_override()
def visit_Attribute(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node = self.generic_visit(node)
return node
def visit_Tuple(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_List(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Name(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Call(self, node):
self._apply_override(node)
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Dict(self, node):
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Subscript(self, node):
node.value = self.visit(node.value)
self._ctx_override = None
return self.generic_visit(node)
def visit_comprehension(self, node):
self._ctx_override = None
return self.generic_visit(node)
class ReplaceTransformer(gast.NodeTransformer):
"""Replace AST nodes."""
def __init__(self, replacements):
"""Create a new ReplaceTransformer.
Args:
replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by.
"""
self.replacements = replacements
self.in_replacements = False
self.preserved_annos = {
anno.Basic.ORIGIN,
anno.Basic.SKIP_PROCESSING,
anno.Static.ORIG_DEFINITIONS,
'extra_test',
}
def _prepare_replacement(self, replaced, key):
"""Prepares a replacement AST that's safe to swap in for a node.
Args:
replaced: ast.AST, the node being replaced
key: Hashable, the key of the replacement AST
Returns:
ast.AST, the replacement AST
"""
repl = self.replacements[key]
new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
if isinstance(new_nodes, gast.AST):
new_nodes = [new_nodes]
return new_nodes
def visit_Expr(self, node):
# When replacing a placeholder with an entire statement, the replacement
# must stand on its own and not be wrapped in an Expr.
new_value = self.visit(node.value)
if new_value is node.value:
return node
return new_value
def visit_keyword(self, node):
if node.arg not in self.replacements:
return self.generic_visit(node)
repl = self._prepare_replacement(node, node.arg)
if isinstance(repl, gast.keyword):
return repl
elif (repl and isinstance(repl, (list, tuple)) and
all(isinstance(r, gast.keyword) for r in repl)):
return repl
# TODO(mdan): We may allow replacing with a string as well.
# For example, if one wanted to replace foo with bar in foo=baz, then
# we could allow changing just node arg, so that we end up with bar=baz.
raise ValueError(
'a keyword argument may only be replaced by another keyword or a '
'non-empty list of keywords. Found: {} for keyword {}'.format(
repl, node.arg))
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
if node.name not in self.replacements:
return node
repl = self.replacements[node.name]
if not isinstance(repl, (gast.Name, ast.Name)):
raise ValueError(
'a function name can only be replaced by a Name node. Found: %s' %
repl)
node.name = repl.id
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
'An attribute can only be replaced by a Name node. Found: %s' % repl)
node.attr = repl.id
return node
def visit_Name(self, node):
if node.id not in self.replacements:
return node
new_nodes = self._prepare_replacement(node, node.id)
if not new_nodes:
return new_nodes
# Preserve the target context.
adjuster = ContextAdjuster(type(node.ctx))
for n in new_nodes:
if hasattr(n, 'ctx'):
adjuster.visit(n)
if len(new_nodes) == 1:
new_nodes, = new_nodes
return new_nodes
def _convert_to_ast(n):
"""Converts from a known data type to AST."""
if isinstance(n, str):
# Note: the node will receive the ctx value from the template, see
# ReplaceTransformer.visit_Name.
return gast.Name(id=n, ctx=None, annotation=None)
if isinstance(n, qual_names.QN):
return n.ast()
if isinstance(n, list):
return [_convert_to_ast(e) for e in n]
if isinstance(n, tuple):
return tuple(_convert_to_ast(e) for e in n)
return n
def replace(template, **replacements):
"""Replaces placeholders in a Python template.
  AST Name and Tuple nodes always receive the context that is inferred from
  the template. However, when replacing more complex nodes (that can potentially
  contain Name children), the caller is responsible for setting the
  appropriate context.
  Args:
    template: A string representing Python code. Any symbol name that appears
      in the template code can be used as a placeholder.
**replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by. String values are also
supported as a shorthand for AST Name nodes with the respective ID.
Returns:
An AST node or list of AST nodes with the replacements made. If the
template was a function, a list will be returned. If the template was a
node, the same node will be returned. If the template was a string, an
AST node will be returned (a `Module` node in the case of a multi-line
string, an `Expr` node otherwise).
Raises:
ValueError: if the arguments are incorrect.
"""
if not isinstance(template, str):
raise ValueError('Expected string template, got %s' % type(template))
tree = parser.parse_str(textwrap.dedent(template))
for k in replacements:
replacements[k] = _convert_to_ast(replacements[k])
results = ReplaceTransformer(replacements).visit(tree).body
if isinstance(results, list):
return [qual_names.resolve(r) for r in results]
return qual_names.resolve(results)
def replace_as_expression(template, **replacements):
"""Variant of replace that generates expressions, instead of code blocks."""
replacement = replace(template, **replacements)
if len(replacement) != 1:
raise ValueError(
'single expression expected; for more general templates use replace')
node = replacement[0]
node = qual_names.resolve(node)
if isinstance(node, gast.Expr):
return node.value
elif isinstance(node, gast.Name):
return node
raise ValueError(
'the template is expected to generate an expression or a name node;'
' instead found %s' % node)
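# Hedged usage sketch (not part of the original module): fill a simple assignment
# template. The placeholder names ("target", "source") are arbitrary; string
# replacement values are shorthand for AST Name nodes.
def _example_fill_template():
  nodes = replace('target = source + 1', target='x', source='y')
  # ``nodes`` is a list of AST statements equivalent to parsing "x = y + 1".
  return nodes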
|
|
#!/usr/bin/env python
import os, logging, json
from exceptions import KeyError
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.escape import json_encode, json_decode
from tornado.options import define, options
from sphericalmercator import SphericalMercator
import cache, safe64
try:
import mapnik2 as mapnik
except ImportError:
import mapnik
if not hasattr(mapnik,'Envelope'):
mapnik.Envelope = mapnik.Box2d
define('port', default=8888,
help='run on the given port', type=int)
define('buffer_size', default=128,
help='mapnik buffer size', type=int)
define('tilesize', default=256,
help='the size of generated tiles', type=int)
define('inspect', default=False,
help='open inspection endpoints for data', type=bool)
define('geojson', default=False,
help='allow output of GeoJSON', type=bool)
define('tile_cache', default=True,
help='enable development tile cache', type=bool)
define('tile_cache_dir', default='tiles',
help='tile cache dir', type=str)
define('map_cache_dir', default='mapfiles',
help='tile cache dir', type=str)
define('point_query', default=True,
help='enable point query', type=bool)
class TileLive(object):
def rle_encode(self, l):
""" encode a list of strings with run-length compression """
from itertools import groupby
return ["%d:%s" % (len(list(group)), name) for name, group in groupby(l)]
def layer_by_id(self, mapnik_map, layer_id):
"""
find a layer in a map, given a map id that a user puts as a param
"""
try:
layer = filter(
lambda l:
l.datasource.params().as_dict().get('id', False) ==
layer_id,
mapnik_map.layers)[0]
return layer
        except IndexError:
raise Exception('Layer not found')
def jsonp(self, json, jsoncallback):
""" serve a page with an optional jsonp callback """
if jsoncallback:
json = "%s(%s)" % (jsoncallback, json)
self.set_header('Content-Type', 'text/javascript')
else:
json = "%s" % json
self.set_header('Content-Type', 'application/json')
self.write(json)
return json
def fString(self, mapfile_64, z, x, y):
""" GridTiles now use predetermined callbacks that can be done on both sides """
return "%s_%d_%d_%d" % (mapfile_64.replace('=', '_'), z, x, y)
class DataTileHandler(tornado.web.RequestHandler, TileLive):
""" serve GeoJSON tiles created by metawriters """
@tornado.web.asynchronous
def get(self, layout, mapfile_64, z, x, y, filetype):
self.z, self.x, self.y = map(int, [z, x, y])
self.filetype = filetype
self.mapfile_64 = mapfile_64
code_string = self.fString(self.mapfile_64, self.z, self.x, self.y)
if options.tile_cache and self.application._tile_cache.contains(self.mapfile_64,
"%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)):
self.set_header('Content-Type', 'text/javascript')
self.write(self.application._tile_cache.get(self.mapfile_64,
"%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)))
self.finish()
return
        self.application._map_cache.get(self.mapfile_64, self, self.async_callback(self.async_get))
def async_get(self, mapnik_map):
envelope = self.application._merc.xyz_to_envelope(self.x, self.y, self.z)
mapnik_map.zoom_to_box(envelope)
mapnik_map.buffer_size = options.buffer_size
try:
# TODO: this makes dangerous assumptions about the content of the file string
mapnik_map.set_metawriter_property('tile_dir',
                self.application._tile_cache.local_dir(self.mapfile_64, ''))
mapnik_map.set_metawriter_property('z', str(self.z))
mapnik_map.set_metawriter_property('x', str(self.x))
mapnik_map.set_metawriter_property('y', str(self.y))
url = "%d/%d/%d.%s" % (self.z, self.x, self.y, 'png')
            self.application._tile_cache.prepare_dir(self.mapfile_64, url)
            code_string = self.fString(self.mapfile_64, self.z, self.x, self.y)
            mapnik.render_to_file(mapnik_map,
                self.application._tile_cache.local_url(self.mapfile_64, url))
            self.set_header('Content-Type', 'text/javascript')
jsonp_str = "%s(%s)" % (code_string, json_encode({
                'features': json_decode(str(self.application._tile_cache.get(self.mapfile_64,
"%d/%d/%d.%s" % (self.z, self.x, self.y, 'json')))),
'code_string': code_string}))
            self.application._tile_cache.set(self.mapfile_64,
"%d/%d/%d.%s" % (self.z, self.x, self.y, 'json'), jsonp_str)
self.write(self.application._tile_cache.get(self.mapfile_64,
"%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)))
self.finish()
except RuntimeError:
            logging.error('Map for %s failed to render, cache reset', self.mapfile_64)
            self.application._map_cache.remove(self.mapfile_64)
# Retry exactly once to re-render this tile.
if not hasattr(self, 'retry'):
self.retry = True
                self.get('tile', self.mapfile_64, self.z, self.x, self.y, self.filetype)
class GridTileHandler(tornado.web.RequestHandler, TileLive):
""" serve gridded tile data """
@tornado.web.asynchronous
def get(self, layout, mapfile_64, z, x, y, join_field_64):
self.z, self.x, self.y = map(int, [z, x, y])
        self.join_field_64 = join_field_64
self.join_field = safe64.decode(join_field_64)
self.filetype = 'grid.json'
self.mapfile_64 = mapfile_64
if options.tile_cache and self.application._tile_cache.contains(self.mapfile_64,
"%d/%d/%d.%s.%s" % (self.z, self.x, self.y, self.join_field, self.filetype)):
logging.info('serving from cache')
self.set_header('Content-Type', 'text/javascript')
self.write(self.application._tile_cache.get(self.mapfile_64,
"%d/%d/%d.%s.%s" % (self.z,
self.x,
self.y,
self.join_field,
self.filetype)))
self.finish()
return
self.application._map_cache.get(self.mapfile_64,
self,
self.async_callback(self.async_get))
def async_get(self, mapnik_map):
envelope = self.application._merc.xyz_to_envelope(self.x, self.y, self.z)
mapnik_map.zoom_to_box(envelope)
mapnik_map.buffer_size = options.buffer_size
code_string = self.fString(self.mapfile_64, self.z, self.x, self.y)
try:
fg = [] # feature grid
for y in range(0, 256, 4):
for x in range(0, 256, 4):
featureset = mapnik_map.query_map_point(0,x,y)
added = False
for feature in featureset.features:
fg.append(feature[self.join_field])
added = True
if not added:
fg.append('')
jsonp_str = self.jsonp({
'features': str('|'.join(self.rle_encode(fg))),
'code_string': code_string
}, code_string)
logging.info('wrote jsonp')
json_url = "%d/%d/%d.%s.%s" % (self.z,
self.x,
self.y,
self.join_field_64,
self.filetype)
self.application._tile_cache.set(self.mapfile_64, json_url, jsonp_str)
self.finish()
except RuntimeError:
logging.error('Map for %s failed to render, cache reset', self.mapfile_64)
self.application._map_cache.remove(self.mapfile_64)
# Retry exactly once to re-render this tile.
if not hasattr(self, 'retry'):
self.retry = True
                self.get('tile', self.mapfile_64, self.z, self.x, self.y, self.join_field_64)
class TileHandler(tornado.web.RequestHandler, TileLive):
""" handle all tile requests """
@tornado.web.asynchronous
def get(self, layout, mapfile, z, x, y, filetype):
self.z, self.x, self.y = map(int, [z, x, y])
self.filetype = filetype
self.tms_style = (layout == 'tms')
self.mapfile = mapfile
if options.tile_cache and self.application._tile_cache.contains(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, filetype)):
self.set_header('Content-Type', 'image/png')
self.write(self.application._tile_cache.get(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)))
self.finish()
return
self.application._map_cache.get(self.mapfile,
self,
self.async_callback(self.async_get))
def async_get(self, mapnik_map):
envelope = self.application._merc.xyz_to_envelope(self.x,
self.y,
self.z,
self.tms_style)
mapnik_map.zoom_to_box(envelope)
mapnik_map.buffer_size = options.buffer_size
try:
if options.tile_cache:
mapnik_map.set_metawriter_property('tile_dir',
self.application._tile_cache.local_dir(self.mapfile, ''))
mapnik_map.set_metawriter_property('z', str(self.z))
mapnik_map.set_metawriter_property('x', str(self.x))
mapnik_map.set_metawriter_property('y', str(self.y))
url = "%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)
self.application._tile_cache.prepare_dir(self.mapfile, url)
mapnik.render_to_file(mapnik_map,
self.application._tile_cache.local_url(self.mapfile, url))
if self.application._tile_cache.contains(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)):
self.set_header('Content-Type', 'image/png')
self.write(self.application._tile_cache.get(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, self.filetype)))
if self.application._tile_cache.contains(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, 'json')):
code_string = self.fString(self.mapfile, self.z, self.x, self.y)
jsonp_str = "%s(%s)" % (code_string, json_encode({
'features': json_decode(str(self.application._tile_cache.get(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, 'json')))),
'code_string': code_string}))
self.application._tile_cache.set(self.mapfile,
"%d/%d/%d.%s" % (self.z, self.x, self.y, 'json'), jsonp_str)
self.finish()
return
else:
im = mapnik.Image(options.tilesize, options.tilesize)
mapnik.render(mapnik_map, im)
self.set_header('Content-Type', 'image/png')
im_data = im.tostring('png')
self.write(im_data)
self.finish()
return
except RuntimeError:
logging.error('Map for %s failed to render, cache reset', self.mapfile)
self.application._map_cache.remove(self.mapfile)
# Retry exactly once to re-render this tile.
if not hasattr(self, 'retry'):
self.retry = True
                self.get('tms' if self.tms_style else 'tile', self.mapfile, self.z, self.x, self.y, self.filetype)
class MainHandler(tornado.web.RequestHandler):
""" home page, of little consequence """
def get(self):
self.render('home.html')
class Application(tornado.web.Application):
""" routers and settings for TileLite """
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/(tile|zxy)/([^/]+)/([0-9]+)/([0-9]+)/([0-9]+)\.(png|jpg|gif)", TileHandler),
(r"/(tms)/([^/]+)/([0-9]+)/([0-9]+)/([0-9]+)\.(png|jpg|gif)", TileHandler),
]
if options.inspect:
import inspect
handlers.extend(inspect.handlers)
if options.point_query:
import point_query
handlers.extend(point_query.handlers)
if options.tile_cache:
# since metawriters are only written on render_to_file, the
# tile cache must be enabled to use their output
self._tile_cache = cache.TileCache(directory=str(options.tile_cache_dir))
handlers.extend([
(r"/(zxy|tile)/([^/]+)/([0-9]+)/([0-9]+)/([0-9]+)\.(json)", DataTileHandler),
(r"/(zxy|tile)/([^/]+)/([0-9]+)/([0-9]+)/([0-9]+)\.([^/\.]+)\.grid\.json", GridTileHandler)])
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), 'templates'),
static_path=os.path.join(os.path.dirname(__file__), 'static'),
)
tornado.web.Application.__init__(self, handlers, **settings)
self._merc = SphericalMercator(levels=23, size=256)
self._mercator = mapnik.Projection("+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over")
self._map_cache = cache.MapCache(directory=str(options.map_cache_dir))
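# Hedged usage sketch (not part of the original module): once the server is running,
# tiles are requested by layout/mapfile/z/x/y path. The base64-encoded mapfile
# segment below is a placeholder.
def _example_fetch_tile(mapfile_64='<base64-mapfile>', z=0, x=0, y=0):
    client = tornado.httpclient.HTTPClient()
    url = 'http://localhost:%d/tile/%s/%d/%d/%d.png' % (options.port, mapfile_64, z, x, y)
    return client.fetch(url)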
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
|
|
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db.models import Q
from djblets.webapi.errors import DOES_NOT_EXIST, WebAPIError
from djblets.webapi.fields import (BooleanFieldType,
ChoiceFieldType,
DateTimeFieldType,
DictFieldType,
IntFieldType,
ResourceFieldType,
StringFieldType)
from reviewboard.reviews.models import BaseComment
from reviewboard.webapi.base import ImportExtraDataError, WebAPIResource
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
class BaseCommentResource(MarkdownFieldsMixin, WebAPIResource):
"""Base class for comment resources.
Provides common fields and functionality for all comment resources.
"""
added_in = '1.6'
fields = {
'id': {
'type': IntFieldType,
'description': 'The numeric ID of the comment.',
},
'extra_data': {
'type': DictFieldType,
'description': 'Extra data as part of the comment. This depends '
'on what is being commented on, and may be '
'used in conjunction with an extension.',
'added_in': '2.0',
},
'issue_opened': {
'type': BooleanFieldType,
'description': 'Whether or not a comment opens an issue.',
},
'issue_status': {
'type': ChoiceFieldType,
'choices': tuple(BaseComment.ISSUE_STRING_TO_STATUS.keys()),
'description': 'The status of an issue.',
},
'public': {
'type': BooleanFieldType,
'description': 'Whether or not the comment is part of a public '
'review.',
'added_in': '2.0',
},
'text': {
'type': StringFieldType,
'description': 'The comment text.',
'supports_text_types': True,
'added_in': '2.0',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The mode for the comment text field.',
'added_in': '2.0',
},
'timestamp': {
'type': DateTimeFieldType,
'description': 'The date and time that the comment was made.',
'added_in': '2.0',
},
'user': {
'type': ResourceFieldType,
'resource': 'reviewboard.webapi.resources.user.UserResource',
'description': 'The user who made the comment.',
'added_in': '2.0',
},
}
# Common field definitions for create/update requests
_COMMON_REQUIRED_CREATE_FIELDS = {
'text': {
'type': StringFieldType,
'description': 'The comment text.',
'supports_text_types': True,
'added_in': '2.0',
},
}
_COMMON_OPTIONAL_CREATE_FIELDS = {
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The content type for the comment text field. '
'The default is ``plain``.',
'added_in': '2.0',
},
}
_COMMON_OPTIONAL_UPDATE_FIELDS = {
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'text': {
'type': StringFieldType,
'description': 'The comment text.',
'supports_text_types': True,
'added_in': '2.0',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The new content type for the comment text field. '
'The default is to leave the type unchanged.',
'added_in': '2.0',
},
}
# Field definitions for top-level comment create/update requests
REQUIRED_CREATE_FIELDS = _COMMON_REQUIRED_CREATE_FIELDS
OPTIONAL_CREATE_FIELDS = dict({
'issue_opened': {
'type': BooleanFieldType,
'description': 'Whether the comment opens an issue.',
'added_in': '2.0',
},
}, **_COMMON_OPTIONAL_CREATE_FIELDS)
OPTIONAL_UPDATE_FIELDS = dict({
'issue_opened': {
'type': BooleanFieldType,
'description': 'Whether or not the comment opens an issue.',
'added_in': '2.0',
},
'issue_status': {
'type': ChoiceFieldType,
'choices': tuple(BaseComment.ISSUE_STRING_TO_STATUS.keys()),
'description': 'The status of an open issue.',
'added_in': '2.0',
},
}, **_COMMON_OPTIONAL_UPDATE_FIELDS)
# Field definitions for comment reply create/update requests
REPLY_REQUIRED_CREATE_FIELDS = dict({
'reply_to_id': {
'type': IntFieldType,
'description': 'The ID of the comment being replied to.',
},
}, **_COMMON_REQUIRED_CREATE_FIELDS)
REPLY_OPTIONAL_CREATE_FIELDS = _COMMON_OPTIONAL_CREATE_FIELDS
REPLY_OPTIONAL_UPDATE_FIELDS = _COMMON_OPTIONAL_UPDATE_FIELDS
def serialize_issue_status_field(self, obj, **kwargs):
return BaseComment.issue_status_to_string(obj.issue_status)
def has_access_permissions(self, request, obj, *args, **kwargs):
return obj.is_accessible_by(request.user)
def has_modify_permissions(self, request, obj, *args, **kwargs):
return obj.is_mutable_by(request.user)
def has_delete_permissions(self, request, obj, *args, **kwargs):
return obj.is_mutable_by(request.user)
def create_comment(self,
review,
fields,
text,
comments_m2m,
issue_opened=False,
text_type=MarkdownFieldsMixin.TEXT_TYPE_PLAIN,
extra_fields={},
save=True,
**kwargs):
"""Create a comment based on the requested data.
This will construct a comment of the type represented by the resource,
setting the issue states, text, extra_data, and any additional fields
provided by the caller.
Args:
review (reviewboard.reviews.models.review.Review):
The review owning the comment.
fields (list of unicode):
The model fields that can be set through the API.
text (unicode):
The comment text.
comments_m2m (django.db.models.ManyToManyField):
The review's comments relation, where the new comment will
be added.
issue_opened (bool, optional):
Whether this comment opens an issue.
text_type (unicode, optional):
The text type for the comment. This defaults to plain text.
extra_fields (dict, optional):
Extra fields from the request not otherwise handled by the
API resource. Any ``extra_data`` modifications from this will
be applied to the comment.
save (bool, optional):
Whether or not to save the field and update ``comments_m2m``.
If ``False``, the caller is responsible for performing the
save.
**kwargs (dict):
Keyword arguments representing additional fields handled by
the API resource. Any that are also listed in ``fields`` will
be set on the model.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
comment_kwargs = {
'issue_opened': bool(issue_opened),
'rich_text': text_type == self.TEXT_TYPE_MARKDOWN,
'text': text.strip(),
}
for field in fields:
comment_kwargs[field] = kwargs.get(field)
new_comment = self.model(**comment_kwargs)
try:
self.import_extra_data(new_comment, new_comment.extra_data,
extra_fields)
except ImportExtraDataError as e:
return e.error_payload
if issue_opened:
new_comment.issue_status = BaseComment.OPEN
else:
new_comment.issue_status = None
if save:
new_comment.save()
comments_m2m.add(new_comment)
return 201, {
self.item_result_key: new_comment,
}
def create_or_update_comment_reply(self, request, comment, reply,
comments_m2m, default_attrs={},
*args, **kwargs):
"""Create a reply to a comment based on the requested data.
If there's an existing reply to a comment, that one will be updated
instead.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
            comment (reviewboard.reviews.models.base_comment.BaseComment):
The comment being replied to.
reply (reviewboard.reviews.models.review.Review):
The review reply owning the comment.
comments_m2m (django.db.models.ManyToManyField):
The reply's comments relation, where the new comment will
be added.
default_attrs (dict, optional):
Default attributes to add to the new comment reply, if an
existing one does not exist.
*args (tuple):
Positional arguments from the caller.
**kwargs (dict):
Keyword arguments from the caller.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
q = self._get_queryset(request, *args, **kwargs)
q = q.filter(Q(reply_to=comment) & Q(review=reply))
try:
new_comment = q.get()
# This already exists. Go ahead and update, but we're going to
# redirect the user to the right place.
is_new = False
except self.model.DoesNotExist:
new_comment = self.model(reply_to=comment, **default_attrs)
is_new = True
rsp = self.update_comment(request=request,
review=reply,
comment=new_comment,
is_reply=True,
**kwargs)
if isinstance(rsp, WebAPIError):
return rsp
data = rsp[1]
if is_new:
comments_m2m.add(new_comment)
reply.save()
return 201, data
else:
return 303, data, {
'Location': self.get_href(new_comment, request, *args,
**kwargs)
}
def update_comment(self, request, review, comment, update_fields=(),
extra_fields={}, is_reply=False, **kwargs):
"""Update an existing comment based on the requested data.
This will modify a comment, setting new fields requested by the caller.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
review (reviewboard.reviews.models.review.Review):
The review owning the comment.
comment (reviewboard.reviews.models.base_comment.BaseComment):
The comment to update.
update_fields (list of unicode, optional):
The model fields that can be updated through the API.
extra_fields (dict, optional):
Extra fields from the request not otherwise handled by the
API resource. Any ``extra_data`` modifications from this will
be applied to the comment.
is_reply (bool, optional):
Whether this is a reply to another comment.
**kwargs (dict):
Keyword arguments representing additional fields handled by
the API resource. Any that are also listed in ``fields`` will
be set on the model.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
if is_reply:
if not resources.review_reply.has_modify_permissions(request,
review):
return self.get_no_access_error(request)
else:
# Determine whether or not we're updating the issue status.
if self.should_update_issue_status(comment, **kwargs):
return self.update_issue_status(request, self, **kwargs)
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
# If we've updated the comment from having no issue opened,
# to having an issue opened, we need to set the issue status
# to OPEN.
if not comment.issue_opened and kwargs.get('issue_opened', False):
comment.issue_status = BaseComment.OPEN
# If we've updated the comment from having an issue opened to
# having no issue opened, set the issue status back to null.
if comment.issue_opened and not kwargs.get('issue_opened', True):
comment.issue_status = None
for field in ('issue_opened',) + update_fields:
value = kwargs.get(field, None)
if value is not None:
if isinstance(value, str):
value = value.strip()
setattr(comment, field, value)
self.set_text_fields(comment, 'text', **kwargs)
if not is_reply:
try:
self.import_extra_data(comment, comment.extra_data,
extra_fields)
except ImportExtraDataError as e:
return e.error_payload
comment.save()
return 200, {
self.item_result_key: comment,
}
def update_issue_status(self, request, comment_resource, *args, **kwargs):
"""Updates the issue status for a comment.
Handles all of the logic for updating an issue status.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
comment = comment_resource.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
# Check permissions to change the issue status
if not comment.can_change_issue_status(request.user):
return self.get_no_access_error(request)
# We can only update the status of an issue if an issue has been
# opened
if not comment.issue_opened:
raise PermissionDenied
comment._review_request = review_request
issue_status = \
BaseComment.issue_string_to_status(kwargs.get('issue_status'))
# If the issue requires verification, ensure that only people who are
# authorized can close it.
if (comment.require_verification and
issue_status in (BaseComment.RESOLVED, BaseComment.DROPPED) and
comment.issue_status in (BaseComment.OPEN,
BaseComment.VERIFYING_RESOLVED,
BaseComment.VERIFYING_DROPPED) and
not comment.can_verify_issue_status(request.user)):
return self.get_no_access_error(request)
# We can only update the status of the issue
comment.issue_status = issue_status
comment.save(update_fields=['issue_status'])
last_activity_time = \
review_request.get_last_activity_info()['timestamp']
return 200, {
comment_resource.item_result_key: comment,
'last_activity_time': last_activity_time.isoformat(),
}
def should_update_issue_status(self, comment, issue_status=None,
issue_opened=None, **kwargs):
"""Returns True if the comment should have its issue status updated.
Determines if a comment should have its issue status updated based
on the current state of the comment, the review, and the arguments
passed in the request.
"""
if not issue_status:
return False
issue_status = BaseComment.issue_string_to_status(issue_status)
return (comment.review.get().public and
(comment.issue_opened or issue_opened) and
issue_status != comment.issue_status)
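# Hedged usage sketch (not part of this module): how a concrete comment resource
# might call create_comment(). ``resource`` is assumed to be an instance of a
# BaseCommentResource subclass, and ``comments_m2m`` whichever comments relation
# that subclass manages on the review.
def _example_create_plain_comment(resource, review, comments_m2m):
    return resource.create_comment(
        review=review,
        fields=(),
        text='This condition looks inverted.',
        comments_m2m=comments_m2m,
        issue_opened=True)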
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy backend for the IPython notebook (vnc approach).
We aim to have:
* ipynb_static - export visualization to a static notebook
* ipynb_vnc - vnc-approach: render in Python, send result to JS as png
* ipynb_webgl - send gl commands to JS and execute in webgl context
"""
from __future__ import division
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from .. import Application, Canvas
from ...util import logger
#from ...util.event import Event # For timer
# Imports for screenshot
# Perhaps we should refactor these to have just one import
from ...gloo.util import _screenshot
from ...io import _make_png
from base64 import b64encode
# Import for displaying Javascript on notebook
import os.path as op
# -------------------------------------------------------------------- init ---
capability = dict( # things that can be set by the backend
    title=True,  # But it only applies to the dummy window :P
    size=True,  # We cannot possibly say we don't, because Canvas always sets it
    position=True,  # Ditto
    show=True,  # Note: we don't allow this, but all scripts call show ...
vsync=False,
resizable=True, # Yes, you can set to not be resizable (it always is)
decorate=False,
fullscreen=False,
context=True,
multi_window=True,
scroll=True,
parent=False,
always_on_top=False,
)
def _set_config(c):
_app.backend_module._set_config(c)
# Init dummy objects needed to import this module without errors.
# These are all overwritten with imports from IPython (on success)
DOMWidget = object
Unicode = Int = Float = Bool = lambda *args, **kwargs: None
# Create our "backend" backend; The toolkit that is going to provide a
# canvas (e.g. OpenGL context) so we can render images.
# Note that if IPython has already loaded a GUI backend, vispy is
# probably going to use that as well, because it prefers loaded backends.
try:
# Explicitly use default (avoid using test-app)
_app = Application('default')
except Exception:
_msg = 'ipynb_vnc backend relies on a core backend'
available, testable, why_not, which = False, False, _msg, None
else:
# Try importing IPython
try:
import IPython
if IPython.version_info < (2,):
            raise RuntimeError('ipynb_vnc backend needs IPython version >= 2.0')
from IPython.html.widgets import DOMWidget
from IPython.utils.traitlets import Unicode, Int, Float, Bool
from IPython.display import display, Javascript
from IPython.html.nbextensions import install_nbextension
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
else:
available, testable, why_not = True, False, None
which = _app.backend_module.which
print(' NOTE: this backend requires the Chromium browser')
# Use that backend's shared context
KEYMAP = _app.backend_module.KEYMAP
# ------------------------------------------------------------- application ---
# todo: maybe trigger something in JS on any of these methods?
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._backend2 = _app._backend
def _vispy_get_backend_name(self):
realname = self._backend2._vispy_get_backend_name()
return 'ipynb_vnc (via %s)' % realname
def _vispy_process_events(self):
return self._backend2._vispy_process_events()
def _vispy_run(self):
pass # We run in IPython, so we don't run!
#return self._backend2._vispy_run()
def _vispy_quit(self):
return self._backend2._vispy_quit()
def _vispy_get_native_app(self):
return self._backend2._vispy_get_native_app()
# ------------------------------------------------------------------ canvas ---
class CanvasBackend(BaseCanvasBackend):
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
self._initialized = False
# Test kwargs
# if kwargs['size']:
# raise RuntimeError('ipynb_vnc Canvas is not resizable')
# if kwargs['position']:
# raise RuntimeError('ipynb_vnc Canvas is not positionable')
if not kwargs['decorate']:
raise RuntimeError('ipynb_vnc Canvas is not decoratable (or not)')
if kwargs['vsync']:
raise RuntimeError('ipynb_vnc Canvas does not support vsync')
if kwargs['fullscreen']:
raise RuntimeError('ipynb_vnc Canvas does not support fullscreen')
# Create real canvas. It is a backend to this backend
kwargs.pop('vispy_canvas', None)
kwargs['autoswap'] = False
canvas = Canvas(app=_app, **kwargs) # Pass kwargs to underlying canvas
self._backend2 = canvas.native
# Connect to events of canvas to keep up to date with size and draws
canvas.events.draw.connect(self._on_draw)
canvas.events.resize.connect(self._on_resize)
# Show the widget, we will hide it after the first time it's drawn
self._backend2._vispy_set_visible(True)
self._need_draw = False
# Prepare Javascript code by displaying on notebook
self._prepare_js()
# Create IPython Widget
self._widget = Widget(self._gen_event, size=canvas.size)
def _vispy_warmup(self):
return self._backend2._vispy_warmup()
def _vispy_set_current(self):
return self._backend2._vispy_set_current()
def _vispy_swap_buffers(self):
return self._backend2._vispy_swap_buffers()
def _vispy_set_title(self, title):
return self._backend2._vispy_set_title(title)
        #logger.warning('IPython notebook canvas has no title.')
def _vispy_set_size(self, w, h):
#logger.warn('IPython notebook canvas cannot be resized.')
res = self._backend2._vispy_set_size(w, h)
self._backend2._vispy_set_visible(True)
return res
def _vispy_set_position(self, x, y):
logger.warning('IPython notebook canvas cannot be repositioned.')
def _vispy_set_visible(self, visible):
#self._backend2._vispy_set_visible(visible)
if not visible:
logger.warning('IPython notebook canvas cannot be hidden.')
else:
display(self._widget)
def _vispy_update(self):
self._need_draw = True
return self._backend2._vispy_update()
def _vispy_close(self):
self._need_draw = False
self._widget.quit()
return self._backend2._vispy_close()
def _vispy_get_position(self):
return 0, 0
def _vispy_get_size(self):
return self._backend2._vispy_get_size()
def _on_resize(self, event=None):
# Event handler that is called by the underlying canvas
if self._vispy_canvas is None:
return
size = self._backend2._vispy_get_size()
self._widget.size = size
self._vispy_canvas.events.resize(size=size)
def _on_draw(self, event=None):
# Event handler that is called by the underlying canvas
if self._vispy_canvas is None:
return
# Handle initialization
if not self._initialized:
self._initialized = True
#self._vispy_canvas.events.add(timer=Event)
self._vispy_canvas.events.initialize()
self._on_resize()
# We are drawn, so no need for a redraw
self._need_draw = False
# We hide the widget once it has received a paint event. So at
# initialization and after a resize the widget is briefly visible.
# Now that it is hidden the widget is unlikely to receive paint
# events anymore, so we need to force repaints from now on, via
# a trigger from JS.
self._backend2._vispy_set_visible(False)
# Normal behavior
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
# Save the encoded screenshot image to widget
self._save_screenshot()
def _save_screenshot(self):
# Take the screenshot
img = _screenshot()
# Convert to PNG and encode
self._widget.value = b64encode(_make_png(img))
    # Generate vispy events according to incoming JS events
def _gen_event(self, ev):
if self._vispy_canvas is None:
return
ev = ev.get("event")
# Parse and generate event
if ev.get("name") == "MouseEvent":
mouse = ev.get("properties")
# Generate
if mouse.get("type") == "mouse_move":
self._vispy_mouse_move(native=mouse,
pos=mouse.get("pos"),
modifiers=mouse.get("modifiers"),
)
elif mouse.get("type") == "mouse_press":
self._vispy_mouse_press(native=mouse,
pos=mouse.get("pos"),
button=mouse.get("button"),
modifiers=mouse.get("modifiers"),
)
elif mouse.get("type") == "mouse_release":
self._vispy_mouse_release(native=mouse,
pos=mouse.get("pos"),
button=mouse.get("button"),
modifiers=mouse.get("modifiers"),
)
elif mouse.get("type") == "mouse_wheel":
self._vispy_canvas.events.mouse_wheel(native=mouse,
delta=mouse.get("delta"),
pos=mouse.get("pos"),
                                                      modifiers=mouse.get("modifiers"),
)
elif ev.get("name") == "KeyEvent":
key = ev.get("properties")
if key.get("type") == "key_press":
self._vispy_canvas.events.key_press(native=key,
key=key.get("key"),
text=key.get("text"),
                                                    modifiers=key.get("modifiers"),
)
elif key.get("type") == "key_release":
self._vispy_canvas.events.key_release(native=key,
key=key.get("key"),
text=key.get("text"),
                                                      modifiers=key.get("modifiers"),
)
elif ev.get("name") == "PollEvent": # Ticking from front-end (JS)
            # Although the event originates from JS, this is basically
            # a poll event from IPython's event loop, which we use to
            # update the backend app and draw stuff if necessary. If we
            # manage to make IPython process GUI app events directly,
            # this "JS timer" should not be necessary.
self._vispy_canvas.app.process_events()
if self._need_draw:
self._on_draw()
# Generate a timer event on every poll from JS
# AK: no, just use app.Timer as usual!
#self._vispy_canvas.events.timer(type="timer")
def _prepare_js(self):
pkgdir = op.dirname(__file__)
install_nbextension([op.join(pkgdir, '../../html/static/js')])
script = 'IPython.load_extensions("js/vispy");'
display(Javascript(script))
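    # Illustrative sketch (not part of the original backend): the shape of
    # the message that the JS front-end is assumed to send and that
    # _gen_event() above unpacks. Field names follow the parsing code; the
    # concrete values are made up.
    #
    #   example_msg = {
    #       "event": {
    #           "name": "MouseEvent",        # or "KeyEvent" / "PollEvent"
    #           "properties": {
    #               "type": "mouse_press",   # mouse_move/press/release/wheel
    #               "pos": (10, 20),
    #               "button": 1,
    #               "modifiers": (),
    #           },
    #       },
    #   }
    #   # canvas_backend._gen_event(example_msg) would then emit the
    #   # corresponding vispy mouse_press event.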
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend):
def __init__(self, vispy_timer):
self._backend2 = _app.backend_module.TimerBackend(vispy_timer)
def _vispy_start(self, interval):
return self._backend2._vispy_start(interval)
def _vispy_stop(self):
return self._backend2._vispy_stop()
def _vispy_timeout(self):
return self._backend2._vispy_timeout()
# ---------------------------------------------------------- IPython Widget ---
class Widget(DOMWidget):
_view_name = Unicode("Widget", sync=True)
# Define the custom state properties to sync with the front-end
format = Unicode('png', sync=True)
width = Int(sync=True)
height = Int(sync=True)
interval = Float(sync=True)
is_closing = Bool(sync=True)
value = Unicode(sync=True)
def __init__(self, gen_event, **kwargs):
super(Widget, self).__init__(**kwargs)
self.size = kwargs["size"]
self.interval = 50.0
self.gen_event = gen_event
self.on_msg(self._handle_event_msg)
def _handle_event_msg(self, _, content):
# If closing, don't bother generating the event
if not self.is_closing:
self.gen_event(content)
@property
def size(self):
return self.width, self.height
@size.setter
def size(self, size):
self.width, self.height = size
def quit(self):
self.is_closing = True
self.close()
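# Illustrative sketch (not part of the original module): Widget.size is just
# a convenience wrapper around the synced ``width``/``height`` traits, so the
# front-end only ever sees two plain integers. Assuming an IPython kernel is
# running and ``gen_event`` is any callable (mirroring how CanvasBackend
# constructs the widget above):
#
#   w = Widget(lambda msg: None, size=(800, 600))
#   assert (w.width, w.height) == (800, 600)
#   w.size = (400, 300)   # updates both traits in one assignment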
|
|
import os
import shutil
from ..instrumenters import InstrumentPlugin
from ...metrics import formatting
from ..collectl import subsystems
from ..collectl import cli
from ..collectl import processes
from galaxy import util
from galaxy.util import directory_hash
import logging
log = logging.getLogger( __name__ )
DEFAULT_PROCFILT_ON = "username" # By default, only grab statistics for user
                                 # processes (as identified by username).
DEFAULT_SUBSYSTEMS = "process"
DEFAULT_FLUSH_INTERVAL = "0" # Set to zero to flush every collection.
FORMATTED_RESOURCE_TITLES = {
"PCT": "Percent CPU Usage",
"RSYS": "Disk Reads",
"WSYS": "Disk Writes",
}
EMPTY_COLLECTL_FILE_MESSAGE = "Skipping process summary due to empty file... job probably did not run long enough for collectl to gather data."
class CollectlFormatter( formatting.JobMetricFormatter ):
def format( self, key, value ):
if key == "pid":
return ( "Process ID", int( value ) )
elif key == "raw_log_path":
return ( "Relative Path of Full Collectl Log", value )
elif key == "process_max_AccumT":
return ( "Job Runtime (System+User)", formatting.seconds_to_str( float( value ) ) )
else:
_, stat_type, resource_type = key.split( "_", 2 )
if resource_type.startswith( "Vm"):
value_str = "%s KB" % int( value )
elif resource_type in [ "RSYS", "WSYS" ] and stat_type in [ "count", "max", "sum" ]:
value_str = "%d (# system calls)" % int( value )
else:
value_str = str( value )
resource_title = FORMATTED_RESOURCE_TITLES.get( resource_type, resource_type )
return ( "%s (%s)" % ( resource_title, stat_type ), value_str )
class CollectlPlugin( InstrumentPlugin ):
""" Run collectl along with job to capture system and/or process data
according to specified collectl subsystems.
"""
plugin_type = "collectl"
formatter = CollectlFormatter()
def __init__( self, **kwargs ):
self.__configure_paths( kwargs )
self.__configure_subsystems( kwargs )
saved_logs_path = kwargs.get( "saved_logs_path", None )
if "app" in kwargs:
saved_logs_path = kwargs[ "app" ].config.resolve_path( saved_logs_path )
self.saved_logs_path = saved_logs_path
self.__configure_collectl_recorder_args( kwargs )
self.summarize_process_data = util.asbool( kwargs.get( "summarize_process_data", True ) )
self.log_collectl_program_output = util.asbool( kwargs.get( "log_collectl_program_output", False ) )
if self.summarize_process_data:
if subsystems.get_subsystem( "process" ) not in self.subsystems:
raise Exception( "Collectl plugin misconfigured - cannot summarize_process_data without process subsystem being enabled." )
process_statistics = kwargs.get( "process_statistics", None )
# None will let processes module use default set of statistics
# defined there.
self.process_statistics = processes.parse_process_statistics( process_statistics )
def pre_execute_instrument( self, job_directory ):
commands = []
# Capture PID of process so we can walk its ancestors when building
# statistics for the whole job.
commands.append( '''echo "$$" > '%s' ''' % self.__pid_file( job_directory ) )
# Run collectl in record mode to capture process and system level
# statistics according to supplied subsystems.
commands.append( self.__collectl_record_command( job_directory ) )
return commands
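    # Illustrative sketch (not part of the plugin): the list returned by
    # pre_execute_instrument() has roughly the shape below. The exact file
    # names come from _instrument_file_path() and the configured collectl
    # arguments, so the paths here are placeholders.
    #
    #   [ '''echo "$$" > '<job_dir>/<pid file>' ''',
    #     '<collectl record command> > /dev/null 2>&1 &' ]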
def post_execute_instrument( self, job_directory ):
commands = []
        # collectl dies when the job script completes; perhaps capture the pid
        # of collectl above and check whether it is still alive, to track
        # whether collectl ran successfully through the whole job.
return commands
def job_properties( self, job_id, job_directory ):
pid = open( self.__pid_file( job_directory ), "r" ).read().strip()
contents = os.listdir( job_directory )
try:
rel_path = filter( self._is_instrumented_collectl_log, contents )[ 0 ]
path = os.path.join( job_directory, rel_path )
except IndexError:
message = "Failed to find collectl log in directory %s, files were %s" % ( job_directory, contents )
raise Exception( message )
properties = dict(
pid=int( pid ),
)
if self.saved_logs_path:
destination_rel_dir = os.path.join( *directory_hash.directory_hash_id( job_id ) )
destination_rel_path = os.path.join( destination_rel_dir, rel_path )
destination_path = os.path.join( self.saved_logs_path, destination_rel_path )
destination_dir = os.path.dirname( destination_path )
if not os.path.isdir( destination_dir ):
os.makedirs( destination_dir )
shutil.copyfile( path, destination_path )
properties[ "raw_log_path" ] = destination_rel_path
if self.summarize_process_data:
# Run collectl in playback and generate statistics of interest
summary_statistics = self.__summarize_process_data( pid, path )
for statistic, value in summary_statistics:
properties[ "process_%s" % "_".join( statistic ) ] = value
return properties
def __configure_paths( self, kwargs ):
        # 95% of the time I would expect collectl to just be installed with apt
        # or yum, but if it is manually installed or not on the PATH, allow
# configuration of explicit path - and allow path to be different
# between galaxy job handler (local_collectl_path) and compute node
# (remote_collectl_path).
collectl_path = kwargs.get( "collectl_path", "collectl" )
self.remote_collectl_path = kwargs.get( "remote_collectl_path", collectl_path )
self.local_collectl_path = kwargs.get( "local_collectl_path", collectl_path )
def __configure_subsystems( self, kwargs ):
raw_subsystems_str = kwargs.get( "subsystems", DEFAULT_SUBSYSTEMS )
raw_subsystems = util.listify( raw_subsystems_str, do_strip=True )
self.subsystems = map( subsystems.get_subsystem, raw_subsystems )
def __configure_collectl_recorder_args( self, kwargs ):
collectl_recorder_args = kwargs.copy()
# Allow deployer to configure separate system and process intervals,
        # but if they specify just one, use it for both. The thinking here is
        # that this plugin's most useful feature is the process-level
        # information, so this is likely what the deployer is attempting to
        # configure.
if "interval" in kwargs and "interval2" not in kwargs:
collectl_recorder_args[ "interval2" ] = kwargs[ "interval"]
if "flush" not in kwargs:
collectl_recorder_args[ "flush" ] = DEFAULT_FLUSH_INTERVAL
procfilt_on = kwargs.get( "procfilt_on", DEFAULT_PROCFILT_ON ).lower()
# Calculate explicit arguments, rest can just be passed through from
# constructor arguments.
explicit_args = dict(
collectl_path=self.remote_collectl_path,
procfilt=procfilt_argument( procfilt_on ),
subsystems=self.subsystems,
)
collectl_recorder_args.update( explicit_args )
self.collectl_recorder_args = collectl_recorder_args
def __summarize_process_data( self, pid, collectl_log_path ):
playback_cli_args = dict(
collectl_path=self.local_collectl_path,
playback_path=collectl_log_path,
sep="9"
)
if not os.stat( collectl_log_path ).st_size:
log.debug( EMPTY_COLLECTL_FILE_MESSAGE )
return [ ]
playback_cli = cli.CollectlCli( **playback_cli_args )
return processes.generate_process_statistics( playback_cli, pid, self.process_statistics )
def __collectl_recorder_cli( self, job_directory ):
cli_args = self.collectl_recorder_args.copy()
cli_args[ "destination_path" ] = self._instrument_file_path( job_directory, "log" )
return cli.CollectlCli( **cli_args )
def __collectl_record_command( self, job_directory ):
collectl_cli = self.__collectl_recorder_cli( job_directory )
if self.log_collectl_program_output:
redirect_to = self._instrument_file_path( job_directory, "program_output" )
else:
redirect_to = "/dev/null"
return "%s > %s 2>&1 &" % (
collectl_cli.build_command_line(),
redirect_to,
)
def __pid_file( self, job_directory ):
return self._instrument_file_path( job_directory, "pid" )
def _is_instrumented_collectl_log( self, filename ):
prefix = self._instrument_file_name( "log" )
return filename.startswith( prefix ) and filename.endswith( ".raw.gz" )
def procfilt_argument( procfilt_on ):
if procfilt_on == "username":
return "U$USER"
elif procfilt_on == "uid":
return "u$UID"
else:
        # Ensure it is empty or "none"
        if procfilt_on and procfilt_on.lower() != "none":
            raise Exception( "Invalid procfilt_on argument encountered" )
return ""
__all__ = [ 'CollectlPlugin' ]
|
|
"""
Copyright (c) 2008-2015, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import unittest
import os, glob
from .test_all import db, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
class DBEnv(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
def tearDown(self):
self.env.close()
del self.env
test_support.rmtree(self.homeDir)
class DBEnv_general(DBEnv) :
def test_get_open_flags(self) :
flags = db.DB_CREATE | db.DB_INIT_MPOOL
self.env.open(self.homeDir, flags)
self.assertEqual(flags, self.env.get_open_flags())
def test_get_open_flags2(self) :
flags = db.DB_CREATE | db.DB_INIT_MPOOL | \
db.DB_INIT_LOCK | db.DB_THREAD
self.env.open(self.homeDir, flags)
self.assertEqual(flags, self.env.get_open_flags())
def test_lk_partitions(self) :
for i in [10, 20, 40] :
self.env.set_lk_partitions(i)
self.assertEqual(i, self.env.get_lk_partitions())
def test_getset_intermediate_dir_mode(self) :
self.assertEqual(None, self.env.get_intermediate_dir_mode())
for mode in ["rwx------", "rw-rw-rw-", "rw-r--r--"] :
self.env.set_intermediate_dir_mode(mode)
self.assertEqual(mode, self.env.get_intermediate_dir_mode())
self.assertRaises(db.DBInvalidArgError,
self.env.set_intermediate_dir_mode, "abcde")
def test_thread(self) :
for i in [16, 100, 1000] :
self.env.set_thread_count(i)
self.assertEqual(i, self.env.get_thread_count())
def test_cache_max(self) :
for size in [64, 128] :
size = size*1024*1024 # Megabytes
self.env.set_cache_max(0, size)
size2 = self.env.get_cache_max()
self.assertEqual(0, size2[0])
self.assertTrue(size <= size2[1])
self.assertTrue(2*size > size2[1])
def test_mutex_stat(self) :
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK)
stat = self.env.mutex_stat()
self.assertTrue("mutex_inuse_max" in stat)
def test_lg_filemode(self) :
for i in [0o600, 0o660, 0o666] :
self.env.set_lg_filemode(i)
self.assertEqual(i, self.env.get_lg_filemode())
def test_mp_max_openfd(self) :
for i in [17, 31, 42] :
self.env.set_mp_max_openfd(i)
self.assertEqual(i, self.env.get_mp_max_openfd())
def test_mp_max_write(self) :
for i in [100, 200, 300] :
for j in [1, 2, 3] :
j *= 1000000
self.env.set_mp_max_write(i, j)
v=self.env.get_mp_max_write()
self.assertEqual((i, j), v)
def test_invalid_txn(self) :
# This environment doesn't support transactions
self.assertRaises(db.DBInvalidArgError, self.env.txn_begin)
def test_mp_mmapsize(self) :
for i in [16, 32, 64] :
i *= 1024*1024
self.env.set_mp_mmapsize(i)
self.assertEqual(i, self.env.get_mp_mmapsize())
def test_tmp_dir(self) :
for i in ["a", "bb", "ccc"] :
self.env.set_tmp_dir(i)
self.assertEqual(i, self.env.get_tmp_dir())
def test_flags(self) :
self.env.set_flags(db.DB_AUTO_COMMIT, 1)
self.assertEqual(db.DB_AUTO_COMMIT, self.env.get_flags())
self.env.set_flags(db.DB_TXN_NOSYNC, 1)
self.assertEqual(db.DB_AUTO_COMMIT | db.DB_TXN_NOSYNC,
self.env.get_flags())
self.env.set_flags(db.DB_AUTO_COMMIT, 0)
self.assertEqual(db.DB_TXN_NOSYNC, self.env.get_flags())
self.env.set_flags(db.DB_TXN_NOSYNC, 0)
self.assertEqual(0, self.env.get_flags())
def test_lk_max_objects(self) :
for i in [1000, 2000, 3000] :
self.env.set_lk_max_objects(i)
self.assertEqual(i, self.env.get_lk_max_objects())
def test_lk_max_locks(self) :
for i in [1000, 2000, 3000] :
self.env.set_lk_max_locks(i)
self.assertEqual(i, self.env.get_lk_max_locks())
def test_lk_max_lockers(self) :
for i in [1000, 2000, 3000] :
self.env.set_lk_max_lockers(i)
self.assertEqual(i, self.env.get_lk_max_lockers())
def test_lg_regionmax(self) :
for i in [128, 256, 1000] :
i = i*1024*1024
self.env.set_lg_regionmax(i)
j = self.env.get_lg_regionmax()
self.assertTrue(i <= j)
self.assertTrue(2*i > j)
def test_lk_detect(self) :
flags= [db.DB_LOCK_DEFAULT, db.DB_LOCK_EXPIRE, db.DB_LOCK_MAXLOCKS,
db.DB_LOCK_MINLOCKS, db.DB_LOCK_MINWRITE,
db.DB_LOCK_OLDEST, db.DB_LOCK_RANDOM, db.DB_LOCK_YOUNGEST]
flags.append(db.DB_LOCK_MAXWRITE)
for i in flags :
self.env.set_lk_detect(i)
self.assertEqual(i, self.env.get_lk_detect())
def test_lg_dir(self) :
for i in ["a", "bb", "ccc", "dddd"] :
self.env.set_lg_dir(i)
self.assertEqual(i, self.env.get_lg_dir())
def test_lg_bsize(self) :
log_size = 70*1024
self.env.set_lg_bsize(log_size)
self.assertTrue(self.env.get_lg_bsize() >= log_size)
self.assertTrue(self.env.get_lg_bsize() < 4*log_size)
self.env.set_lg_bsize(4*log_size)
self.assertTrue(self.env.get_lg_bsize() >= 4*log_size)
def test_setget_data_dirs(self) :
dirs = ("a", "b", "c", "d")
for i in dirs :
self.env.set_data_dir(i)
self.assertEqual(dirs, self.env.get_data_dirs())
def test_setget_cachesize(self) :
cachesize = (0, 512*1024*1024, 3)
self.env.set_cachesize(*cachesize)
self.assertEqual(cachesize, self.env.get_cachesize())
cachesize = (0, 1*1024*1024, 5)
self.env.set_cachesize(*cachesize)
cachesize2 = self.env.get_cachesize()
self.assertEqual(cachesize[0], cachesize2[0])
self.assertEqual(cachesize[2], cachesize2[2])
        # Berkeley DB expands the cache by 25% to account for overhead
        # if the cache is small.
self.assertEqual(125, int(100.0*cachesize2[1]/cachesize[1]))
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
cachesize = (0, 2*1024*1024, 1)
self.assertRaises(db.DBInvalidArgError,
self.env.set_cachesize, *cachesize)
cachesize3 = self.env.get_cachesize()
self.assertEqual(cachesize2[0], cachesize3[0])
self.assertEqual(cachesize2[2], cachesize3[2])
# In Berkeley DB 5.1, the cachesize can change when opening the Env
self.assertTrue(cachesize2[1] <= cachesize3[1])
def test_set_cachesize_dbenv_db(self) :
# You can not configure the cachesize using
# the database handle, if you are using an environment.
d = db.DB(self.env)
self.assertRaises(db.DBInvalidArgError,
d.set_cachesize, 0, 1024*1024, 1)
def test_setget_shm_key(self) :
shm_key=137
self.env.set_shm_key(shm_key)
self.assertEqual(shm_key, self.env.get_shm_key())
self.env.set_shm_key(shm_key+1)
self.assertEqual(shm_key+1, self.env.get_shm_key())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        # If we try to reconfigure the shm key after opening the
        # environment, Berkeley DB would core dump.
self.assertRaises(db.DBInvalidArgError,
self.env.set_shm_key, shm_key)
self.assertEqual(shm_key+1, self.env.get_shm_key())
def test_mutex_setget_max(self) :
v = self.env.mutex_get_max()
v2 = v*2+1
self.env.mutex_set_max(v2)
self.assertEqual(v2, self.env.mutex_get_max())
self.env.mutex_set_max(v)
self.assertEqual(v, self.env.mutex_get_max())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_max, v2)
def test_mutex_setget_increment(self) :
v = self.env.mutex_get_increment()
v2 = 127
self.env.mutex_set_increment(v2)
self.assertEqual(v2, self.env.mutex_get_increment())
self.env.mutex_set_increment(v)
self.assertEqual(v, self.env.mutex_get_increment())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_increment, v2)
def test_mutex_setget_tas_spins(self) :
self.env.mutex_set_tas_spins(0) # Default = BDB decides
v = self.env.mutex_get_tas_spins()
v2 = v*2+1
self.env.mutex_set_tas_spins(v2)
self.assertEqual(v2, self.env.mutex_get_tas_spins())
self.env.mutex_set_tas_spins(v)
self.assertEqual(v, self.env.mutex_get_tas_spins())
# In this case, you can change configuration
# after opening the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.env.mutex_set_tas_spins(v2)
def test_mutex_setget_align(self) :
v = self.env.mutex_get_align()
v2 = 64
if v == 64 :
v2 = 128
self.env.mutex_set_align(v2)
self.assertEqual(v2, self.env.mutex_get_align())
# Requires a nonzero power of two
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_align, 0)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_align, 17)
self.env.mutex_set_align(2*v2)
self.assertEqual(2*v2, self.env.mutex_get_align())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_align, v2)
class DBEnv_log(DBEnv) :
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG)
def test_log_file(self) :
log_file = self.env.log_file((1, 1))
self.assertEqual("log.0000000001", log_file[-14:])
    # The version with transactions is checked in another test class
def test_log_printf(self) :
msg = "This is a test..."
self.env.log_printf(msg)
logc = self.env.log_cursor()
self.assertTrue(msg in (logc.last()[1]))
if db.version() >= (4, 7) :
def test_log_config(self) :
self.env.log_set_config(db.DB_LOG_DSYNC | db.DB_LOG_ZERO, 1)
self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC))
self.assertTrue(self.env.log_get_config(db.DB_LOG_ZERO))
self.env.log_set_config(db.DB_LOG_ZERO, 0)
self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC))
self.assertFalse(self.env.log_get_config(db.DB_LOG_ZERO))
class DBEnv_log_txn(DBEnv) :
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_INIT_TXN)
if db.version() < (5, 2) :
def test_tx_max(self) :
txns=[]
def tx() :
for i in range(self.env.get_tx_max()) :
txns.append(self.env.txn_begin())
tx()
self.assertRaises(MemoryError, tx)
# Abort the transactions before garbage collection,
# to avoid "warnings".
for i in txns :
i.abort()
    # The version without transactions is checked in another test class
def test_log_printf(self) :
msg = "This is a test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
txn.commit()
logc = self.env.log_cursor()
logc.last() # Skip the commit
self.assertTrue(msg in (logc.prev()[1]))
msg = "This is another test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
txn.abort() # Do not store the new message
logc.last() # Skip the abort
self.assertTrue(msg not in (logc.prev()[1]))
msg = "This is a third test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
        txn.commit() # Store the new message
logc.last() # Skip the commit
self.assertTrue(msg in (logc.prev()[1]))
class DBEnv_memp(DBEnv):
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG)
self.db = db.DB(self.env)
self.db.open("test", db.DB_HASH, db.DB_CREATE, 0o660)
def tearDown(self):
self.db.close()
del self.db
DBEnv.tearDown(self)
def test_memp_1_trickle(self) :
self.db.put("hi", "bye")
self.assertTrue(self.env.memp_trickle(100) > 0)
# Preserve the order, do "memp_trickle" test first
def test_memp_2_sync(self) :
self.db.put("hi", "bye")
self.env.memp_sync() # Full flush
# Nothing to do...
self.assertTrue(self.env.memp_trickle(100) == 0)
self.db.put("hi", "bye2")
self.env.memp_sync((1, 0)) # NOP, probably
# Something to do... or not
self.assertTrue(self.env.memp_trickle(100) >= 0)
self.db.put("hi", "bye3")
self.env.memp_sync((123, 99)) # Full flush
# Nothing to do...
self.assertTrue(self.env.memp_trickle(100) == 0)
def test_memp_stat_1(self) :
stats = self.env.memp_stat() # No param
self.assertTrue(len(stats)==2)
self.assertTrue("cache_miss" in stats[0])
stats = self.env.memp_stat(db.DB_STAT_CLEAR) # Positional param
self.assertTrue("cache_miss" in stats[0])
stats = self.env.memp_stat(flags=0) # Keyword param
self.assertTrue("cache_miss" in stats[0])
def test_memp_stat_2(self) :
stats=self.env.memp_stat()[1]
        self.assertTrue(len(stats) == 1)
self.assertTrue("test" in stats)
self.assertTrue("page_in" in stats["test"])
class DBEnv_logcursor(DBEnv):
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_INIT_TXN)
txn = self.env.txn_begin()
self.db = db.DB(self.env)
self.db.open("test", db.DB_HASH, db.DB_CREATE, 0o660, txn=txn)
txn.commit()
for i in ["2", "8", "20"] :
txn = self.env.txn_begin()
self.db.put(key = i, data = i*int(i), txn=txn)
txn.commit()
def tearDown(self):
self.db.close()
del self.db
DBEnv.tearDown(self)
def _check_return(self, value) :
self.assertTrue(isinstance(value, tuple))
self.assertEqual(len(value), 2)
self.assertTrue(isinstance(value[0], tuple))
self.assertEqual(len(value[0]), 2)
self.assertTrue(isinstance(value[0][0], int))
self.assertTrue(isinstance(value[0][1], int))
self.assertTrue(isinstance(value[1], str))
# Preserve test order
def test_1_first(self) :
logc = self.env.log_cursor()
v = logc.first()
self._check_return(v)
self.assertTrue((1, 1) < v[0])
self.assertTrue(len(v[1])>0)
def test_2_last(self) :
logc = self.env.log_cursor()
lsn_first = logc.first()[0]
v = logc.last()
self._check_return(v)
self.assertTrue(lsn_first < v[0])
def test_3_next(self) :
logc = self.env.log_cursor()
lsn_last = logc.last()[0]
self.assertEqual(next(logc), None)
lsn_first = logc.first()[0]
v = next(logc)
self._check_return(v)
self.assertTrue(lsn_first < v[0])
self.assertTrue(lsn_last > v[0])
v2 = next(logc)
self.assertTrue(v2[0] > v[0])
self.assertTrue(lsn_last > v2[0])
v3 = next(logc)
self.assertTrue(v3[0] > v2[0])
self.assertTrue(lsn_last > v3[0])
def test_4_prev(self) :
logc = self.env.log_cursor()
lsn_first = logc.first()[0]
self.assertEqual(logc.prev(), None)
lsn_last = logc.last()[0]
v = logc.prev()
self._check_return(v)
self.assertTrue(lsn_first < v[0])
self.assertTrue(lsn_last > v[0])
v2 = logc.prev()
self.assertTrue(v2[0] < v[0])
self.assertTrue(lsn_first < v2[0])
v3 = logc.prev()
self.assertTrue(v3[0] < v2[0])
self.assertTrue(lsn_first < v3[0])
def test_5_current(self) :
logc = self.env.log_cursor()
logc.first()
v = next(logc)
self.assertEqual(v, logc.current())
def test_6_set(self) :
logc = self.env.log_cursor()
logc.first()
v = next(logc)
self.assertNotEqual(v, next(logc))
self.assertNotEqual(v, next(logc))
self.assertEqual(v, logc.set(v[0]))
def test_explicit_close(self) :
logc = self.env.log_cursor()
logc.close()
self.assertRaises(db.DBCursorClosedError, logc.__next__)
def test_implicit_close(self) :
logc = [self.env.log_cursor() for i in range(10)]
        self.env.close() # This close should also close all the cursors in its tree
for i in logc :
self.assertRaises(db.DBCursorClosedError, i.__next__)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBEnv_general))
suite.addTest(unittest.makeSuite(DBEnv_memp))
suite.addTest(unittest.makeSuite(DBEnv_logcursor))
suite.addTest(unittest.makeSuite(DBEnv_log))
suite.addTest(unittest.makeSuite(DBEnv_log_txn))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
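# Illustrative sketch (not part of the test suite): the configuration pattern
# most of these tests exercise -- tuning calls are accepted before
# DBEnv.open() and raise DBInvalidArgError afterwards. ``home_dir`` is a
# placeholder for an environment directory.
#
#   env = db.DBEnv()
#   env.set_cachesize(0, 2*1024*1024, 1)   # OK, environment not open yet
#   env.open(home_dir, db.DB_CREATE | db.DB_INIT_MPOOL)
#   env.set_cachesize(0, 4*1024*1024, 1)   # raises db.DBInvalidArgError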
|
|
from collections import OrderedDict
import mock
from nose.tools import *
from website.notifications.events.base import Event, register, event_registry
from website.notifications.events.files import (
FileAdded, FileRemoved, FolderCreated, FileUpdated,
AddonFileCopied, AddonFileMoved, AddonFileRenamed,
)
from website.notifications.events import utils
from addons.base import signals
from framework.auth import Auth
from osf_tests import factories
from tests.base import OsfTestCase, NotificationTestCase
email_transactional = 'email_transactional'
email_digest = 'email_digest'
class TestEventNotImplemented(OsfTestCase):
"""
Test non-implemented errors
"""
@register("not_implemented")
class NotImplementedEvent(Event):
pass
def setUp(self):
super(TestEventNotImplemented, self).setUp()
self.user = factories.UserFactory()
self.auth = Auth(user=self.user)
self.node = factories.ProjectFactory(creator=self.user)
self.event = self.NotImplementedEvent(self.user, self.node, 'not_implemented')
@raises(NotImplementedError)
def test_text(self):
text = self.event.text_message
@raises(NotImplementedError)
def test_html(self):
html = self.event.html_message
@raises(NotImplementedError)
def test_url(self):
url = self.event.url
@raises(NotImplementedError)
def test_event(self):
event = self.event.event_type
class TestListOfFiles(OsfTestCase):
"""
List files given a list
"""
def setUp(self):
super(TestListOfFiles, self).setUp()
self.tree = {
'kind': 'folder',
'path': 'a',
'children': [
{
'kind': 'folder',
'path': 'b',
'children': [
{
'kind': 'file',
'path': 'e'
},
{
'kind': 'file',
'path': 'f'
}
]
},
{
'kind': 'file',
'path': 'c'
},
{
'kind': 'file',
'path': 'd'
}
]
}
def test_list_of_files(self):
assert_equal(['e', 'f', 'c', 'd'], utils.list_of_files(self.tree))
class TestEventExists(OsfTestCase):
# Add all possible called events here to ensure that the Event class can
# call them.
def setUp(self):
super(TestEventExists, self).setUp()
self.user = factories.UserFactory()
self.consolidate_auth = Auth(user=self.user)
self.node = factories.ProjectFactory(creator=self.user)
def test_get_file_updated(self):
# Event gets FileUpdated from file_updated
event = event_registry['file_updated'](self.user, self.node, 'file_updated', payload=file_payload)
assert_is_instance(event, FileUpdated)
def test_get_file_added(self):
# Event gets FileAdded from file_added
event = event_registry['file_added'](self.user, self.node, 'file_added', payload=file_payload)
assert_is_instance(event, FileAdded)
def test_get_file_removed(self):
# Event gets FileRemoved from file_removed
event = event_registry['file_removed'](self.user, self.node, 'file_removed', payload=file_deleted_payload)
assert_is_instance(event, FileRemoved)
def test_get_folder_created(self):
# Event gets FolderCreated from folder_created
event = event_registry['folder_created'](self.user, self.node, 'folder_created', payload=folder_created_payload)
assert_is_instance(event, FolderCreated)
def test_get_file_moved(self):
# Event gets AddonFileMoved from addon_file_moved
file_moved_payload = file_move_payload(self.node, self.node)
event = event_registry['addon_file_moved'](self.user, self.node, 'addon_file_moved', payload=file_moved_payload)
assert_is_instance(event, AddonFileMoved)
def test_get_file_copied(self):
# Event gets AddonFileCopied from addon_file_copied
file_copied_payload = file_copy_payload(self.node, self.node)
event = event_registry['addon_file_copied'](self.user, self.node, 'addon_file_copied',
payload=file_copied_payload)
assert_is_instance(event, AddonFileCopied)
def test_get_file_renamed(self):
        # Event gets AddonFileRenamed from addon_file_renamed
file_rename_payload = file_renamed_payload()
event = event_registry['addon_file_renamed'](self.user, self.node, 'addon_file_renamed',
payload=file_rename_payload)
assert_is_instance(event, AddonFileRenamed)
class TestSignalEvent(OsfTestCase):
def setUp(self):
super(TestSignalEvent, self).setUp()
self.user = factories.UserFactory()
self.auth = Auth(user=self.user)
self.node = factories.ProjectFactory(creator=self.user)
@mock.patch('website.notifications.events.files.FileAdded.perform')
def test_event_signal(self, mock_perform):
signals.file_updated.send(
user=self.user, node=self.node, event_type='file_added', payload=file_payload
)
assert_true(mock_perform.called)
class TestFileUpdated(OsfTestCase):
def setUp(self):
super(TestFileUpdated, self).setUp()
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
# subscription
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + 'file_updated',
owner=self.project,
event_name='file_updated',
)
self.sub.save()
self.event = event_registry['file_updated'](self.user_2, self.project, 'file_updated', payload=file_payload)
def test_info_formed_correct(self):
assert_equal('{}_file_updated'.format(wb_path), self.event.event_type)
assert_equal('updated file "<b>{}</b>".'.format(materialized.lstrip('/')), self.event.html_message)
assert_equal('updated file "{}".'.format(materialized.lstrip('/')), self.event.text_message)
@mock.patch('website.notifications.emails.notify')
def test_file_updated(self, mock_notify):
self.event.perform()
# notify('exd', 'file_updated', 'user', self.project, timezone.now())
assert_true(mock_notify.called)
class TestFileAdded(NotificationTestCase):
def setUp(self):
super(TestFileAdded, self).setUp()
self.user = factories.UserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory()
self.project_subscription = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.project_subscription.save()
self.user2 = factories.UserFactory()
self.event = event_registry['file_added'](self.user2, self.project, 'file_added', payload=file_payload)
def test_info_formed_correct(self):
assert_equal('{}_file_updated'.format(wb_path), self.event.event_type)
assert_equal('added file "<b>{}</b>".'.format(materialized.lstrip('/')), self.event.html_message)
assert_equal('added file "{}".'.format(materialized.lstrip('/')), self.event.text_message)
@mock.patch('website.notifications.emails.notify')
def test_file_added(self, mock_notify):
self.event.perform()
# notify('exd', 'file_updated', 'user', self.project, timezone.now())
assert_true(mock_notify.called)
class TestFileRemoved(NotificationTestCase):
def setUp(self):
super(TestFileRemoved, self).setUp()
self.user = factories.UserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory()
self.project_subscription = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.project_subscription.save()
self.user2 = factories.UserFactory()
self.event = event_registry['file_removed'](
self.user2, self.project, 'file_removed', payload=file_deleted_payload
)
def test_info_formed_correct_file(self):
assert_equal('file_updated', self.event.event_type)
assert_equal('removed file "<b>{}</b>".'.format(materialized.lstrip('/')), self.event.html_message)
assert_equal('removed file "{}".'.format(materialized.lstrip('/')), self.event.text_message)
def test_info_formed_correct_folder(self):
assert_equal('file_updated', self.event.event_type)
self.event.payload['metadata']['materialized'] += u'/'
assert_equal(u'removed folder "<b>{}/</b>".'.format(materialized.lstrip('/')), self.event.html_message)
assert_equal(u'removed folder "{}/".'.format(materialized.lstrip('/')), self.event.text_message)
@mock.patch('website.notifications.emails.notify')
def test_file_removed(self, mock_notify):
self.event.perform()
# notify('exd', 'file_updated', 'user', self.project, timezone.now())
assert_true(mock_notify.called)
class TestFolderCreated(NotificationTestCase):
def setUp(self):
super(TestFolderCreated, self).setUp()
self.user = factories.UserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory()
self.project_subscription = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.project_subscription.save()
self.user2 = factories.UserFactory()
self.event = event_registry['folder_created'](
self.user2, self.project, 'folder_created', payload=folder_created_payload
)
def test_info_formed_correct(self):
assert_equal('file_updated', self.event.event_type)
assert_equal('created folder "<b>Three/</b>".', self.event.html_message)
assert_equal('created folder "Three/".', self.event.text_message)
@mock.patch('website.notifications.emails.notify')
def test_folder_added(self, mock_notify):
self.event.perform()
assert_true(mock_notify.called)
class TestFolderFileRenamed(OsfTestCase):
def setUp(self):
super(TestFolderFileRenamed, self).setUp()
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
# subscription
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + 'file_updated',
owner=self.project,
event_name='file_updated',
)
self.sub.save()
# Payload
file_renamed_payload = file_move_payload(self.project, self.project)
self.event = event_registry['addon_file_renamed'](
self.user_1, self.project, 'addon_file_renamed',
payload=file_renamed_payload
)
self.sub.email_digest.add(self.user_2)
self.sub.save()
def test_rename_file_html(self):
self.event.payload['destination']['materialized'] = "/One/Paper14.txt"
assert_equal(self.event.html_message, 'renamed file "<b>/One/Paper13.txt</b>" to "<b>/One/Paper14.txt</b>".')
def test_rename_folder_html(self):
self.event.payload['destination']['kind'] = 'folder'
self.event.payload['destination']['materialized'] = "/One/Two/Four"
self.event.payload['source']['materialized'] = "/One/Two/Three"
assert_equal(self.event.html_message, 'renamed folder "<b>/One/Two/Three</b>" to "<b>/One/Two/Four</b>".')
def test_rename_file_text(self):
self.event.payload['destination']['materialized'] = "/One/Paper14.txt"
assert_equal(self.event.text_message, 'renamed file "/One/Paper13.txt" to "/One/Paper14.txt".')
def test_rename_folder_text(self):
self.event.payload['destination']['kind'] = 'folder'
self.event.payload['destination']['materialized'] = "/One/Two/Four"
self.event.payload['source']['materialized'] = "/One/Two/Three"
assert_equal(self.event.text_message, 'renamed folder "/One/Two/Three" to "/One/Two/Four".')
class TestFileMoved(NotificationTestCase):
def setUp(self):
super(TestFileMoved, self).setUp()
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.user_3 = factories.AuthUserFactory()
self.user_4 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
self.private_node = factories.NodeFactory(parent=self.project, is_public=False, creator=self.user_1)
# Payload
file_moved_payload = file_move_payload(self.private_node, self.project)
self.event = event_registry['addon_file_moved'](
self.user_2, self.private_node, 'addon_file_moved', payload=file_moved_payload
)
# Subscriptions
# for parent node
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.sub.save()
# for private node
self.private_sub = factories.NotificationSubscriptionFactory(
_id=self.private_node._id + '_file_updated',
owner=self.private_node,
event_name='file_updated'
)
self.private_sub.save()
# for file subscription
self.file_sub = factories.NotificationSubscriptionFactory(
_id='{pid}_{wbid}_file_updated'.format(
pid=self.project._id,
wbid=self.event.waterbutler_id
),
owner=self.project,
event_name='xyz42_file_updated'
)
self.file_sub.save()
def test_info_formed_correct(self):
# Move Event: Ensures data is correctly formatted
assert_equal('{}_file_updated'.format(wb_path), self.event.event_type)
# assert_equal('moved file "<b>{}</b>".', self.event.html_message)
# assert_equal('created folder "Three/".', self.event.text_message)
@mock.patch('website.notifications.emails.store_emails')
def test_user_performing_action_no_email(self, mock_store):
        # Move Event: Makes sure the user who performed the action is not
# included in the notifications
self.sub.email_digest.add(self.user_2)
self.sub.save()
self.event.perform()
assert_equal(0, mock_store.call_count)
@mock.patch('website.notifications.emails.store_emails')
def test_perform_store_called_once(self, mock_store):
# Move Event: Tests that store_emails is called once from perform
self.sub.email_transactional.add(self.user_1)
self.sub.save()
self.event.perform()
assert_equal(1, mock_store.call_count)
@mock.patch('website.notifications.emails.store_emails')
def test_perform_store_one_of_each(self, mock_store):
# Move Event: Tests that store_emails is called 3 times, one in
# each category
self.sub.email_transactional.add(self.user_1)
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
self.project.add_contributor(self.user_4, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.file_sub.email_digest.add(self.user_4)
self.file_sub.save()
self.event.perform()
assert_equal(3, mock_store.call_count)
@mock.patch('website.notifications.emails.store_emails')
def test_remove_user_sent_once(self, mock_store):
        # Move Event: Regression test ensuring a removed user is removed only once
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.file_sub.email_digest.add(self.user_3)
self.file_sub.save()
self.event.perform()
assert_equal(1, mock_store.call_count)
class TestFileCopied(NotificationTestCase):
# Test the copying of files
def setUp(self):
super(TestFileCopied, self).setUp()
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.user_3 = factories.AuthUserFactory()
self.user_4 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
self.private_node = factories.NodeFactory(parent=self.project, is_public=False, creator=self.user_1)
# Payload
file_copied_payload = file_copy_payload(self.private_node, self.project)
self.event = event_registry['addon_file_copied'](
self.user_2, self.private_node, 'addon_file_copied',
payload=file_copied_payload
)
# Subscriptions
# for parent node
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.sub.save()
# for private node
self.private_sub = factories.NotificationSubscriptionFactory(
_id=self.private_node._id + '_file_updated',
owner=self.private_node,
event_name='file_updated'
)
self.private_sub.save()
# for file subscription
self.file_sub = factories.NotificationSubscriptionFactory(
_id='{pid}_{wbid}_file_updated'.format(
pid=self.project._id,
wbid=self.event.waterbutler_id
),
owner=self.project,
event_name='xyz42_file_updated'
)
self.file_sub.save()
def test_info_correct(self):
# Move Event: Ensures data is correctly formatted
assert_equal('{}_file_updated'.format(wb_path), self.event.event_type)
assert_equal(('copied file "<b>One/Paper13.txt</b>" from OSF Storage'
' in Consolidate to "<b>Two/Paper13.txt</b>" in OSF'
' Storage in Consolidate.'), self.event.html_message)
assert_equal(('copied file "One/Paper13.txt" from OSF Storage'
' in Consolidate to "Two/Paper13.txt" in OSF'
' Storage in Consolidate.'), self.event.text_message)
@mock.patch('website.notifications.emails.store_emails')
def test_copied_one_of_each(self, mock_store):
        # Copy Event: Tests that store_emails is called 2 times -- two of the
        # subscribed users have permissions, one does not
self.sub.email_transactional.add(self.user_1)
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
self.project.add_contributor(self.user_4, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.file_sub.email_digest.add(self.user_4)
self.file_sub.save()
self.event.perform()
assert_equal(2, mock_store.call_count)
@mock.patch('website.notifications.emails.store_emails')
def test_user_performing_action_no_email(self, mock_store):
        # Copy Event: Makes sure the user who performed the action is not
        # included in the notifications
self.sub.email_digest.add(self.user_2)
self.sub.save()
self.event.perform()
assert_equal(0, mock_store.call_count)
class TestCategorizeUsers(NotificationTestCase):
def setUp(self):
super(TestCategorizeUsers, self).setUp()
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.user_3 = factories.AuthUserFactory()
self.user_4 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
self.private_node = factories.NodeFactory(
parent=self.project, is_public=False, creator=self.user_1
)
# Payload
file_moved_payload = file_move_payload(self.private_node, self.project)
self.event = event_registry['addon_file_moved'](
self.user_2, self.private_node, 'addon_file_moved',
payload=file_moved_payload
)
# Subscriptions
# for parent node
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.sub.save()
# for private node
self.private_sub = factories.NotificationSubscriptionFactory(
_id=self.private_node._id + '_file_updated',
owner=self.private_node,
event_name='file_updated'
)
self.private_sub.save()
# for file subscription
self.file_sub = factories.NotificationSubscriptionFactory(
_id='{pid}_{wbid}_file_updated'.format(
pid=self.project._id,
wbid=self.event.waterbutler_id
),
owner=self.project,
event_name='xyz42_file_updated'
)
self.file_sub.save()
def test_warn_user(self):
# Tests that a user with a sub in the origin node gets a warning that
# they are no longer tracking the file.
self.sub.email_transactional.add(self.user_1)
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
self.private_sub.none.add(self.user_3)
self.private_sub.save()
moved, warn, removed = utils.categorize_users(
self.event.user, self.event.event_type, self.event.source_node,
self.event.event_type, self.event.node
)
assert_equal({email_transactional: [], email_digest: [self.user_3._id], 'none': []}, warn)
assert_equal({email_transactional: [self.user_1._id], email_digest: [], 'none': []}, moved)
def test_moved_user(self):
# Doesn't warn a user with two different subs, but does send a
# moved email
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
self.private_sub.email_transactional.add(self.user_3)
self.private_sub.save()
moved, warn, removed = utils.categorize_users(
self.event.user, self.event.event_type, self.event.source_node,
self.event.event_type, self.event.node
)
assert_equal({email_transactional: [], email_digest: [], 'none': []}, warn)
assert_equal({email_transactional: [self.user_3._id], email_digest: [], 'none': []}, moved)
def test_remove_user(self):
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.file_sub.email_transactional.add(self.user_3)
self.file_sub.save()
moved, warn, removed = utils.categorize_users(
self.event.user, self.event.event_type, self.event.source_node,
self.event.event_type, self.event.node
)
assert_equal({email_transactional: [self.user_3._id], email_digest: [], 'none': []}, removed)
def test_node_permissions(self):
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'])
self.private_sub.email_digest.add(self.user_3, self.user_4)
remove = {email_transactional: [], email_digest: [], 'none': []}
warn = {email_transactional: [], email_digest: [self.user_3._id, self.user_4._id], 'none': []}
subbed, remove = utils.subscriptions_node_permissions(
self.private_node,
warn,
remove
)
assert_equal({email_transactional: [], email_digest: [self.user_3._id], 'none': []}, subbed)
assert_equal({email_transactional: [], email_digest: [self.user_4._id], 'none': []}, remove)
class TestSubscriptionManipulations(OsfTestCase):
def setUp(self):
super(TestSubscriptionManipulations, self).setUp()
self.emails_1 = {
email_digest: ['a1234', 'b1234', 'c1234'],
email_transactional: ['d1234', 'e1234', 'f1234'],
'none': ['g1234', 'h1234', 'i1234']
}
self.emails_2 = {
email_digest: ['j1234'],
email_transactional: ['k1234'],
'none': ['l1234']
}
self.emails_3 = {
email_digest: ['b1234', 'c1234'],
email_transactional: ['e1234', 'f1234'],
'none': ['h1234', 'i1234']
}
self.emails_4 = {
email_digest: ['d1234', 'i1234'],
email_transactional: ['b1234'],
'none': []
}
self.diff_1_3 = {email_transactional: ['d1234'], 'none': ['g1234'], email_digest: ['a1234']}
self.union_1_2 = {'email_transactional': ['e1234', 'd1234', 'k1234', 'f1234'],
'none': ['h1234', 'g1234', 'i1234', 'l1234'],
'email_digest': ['j1234', 'b1234', 'a1234', 'c1234']}
self.dup_1_3 = {email_transactional: ['e1234', 'f1234'], 'none': ['h1234', 'g1234'],
'email_digest': ['a1234', 'c1234']}
def test_subscription_user_difference(self):
result = utils.subscriptions_users_difference(self.emails_1, self.emails_3)
assert_equal(self.diff_1_3, result)
def test_subscription_user_union(self):
result = utils.subscriptions_users_union(self.emails_1, self.emails_2)
assert_equal(self.union_1_2, result)
def test_remove_duplicates(self):
result = utils.subscriptions_users_remove_duplicates(
self.emails_1, self.emails_4, remove_same=False
)
assert_equal(self.dup_1_3, result)
def test_remove_duplicates_true(self):
result = utils.subscriptions_users_remove_duplicates(
self.emails_1, self.emails_1, remove_same=True
)
assert_equal({email_digest: [], email_transactional: [], 'none': ['h1234', 'g1234', 'i1234']}, result)
wb_path = u'5581cb50a24f710b0f4623f9'
materialized = u'/One/Paper13.txt'
provider = u'osfstorage'
name = u'Paper13.txt'
file_payload = OrderedDict([
(u'action', u'update'),
(u'auth', OrderedDict([
(u'email', u'tgn6m@osf.io'), (u'id', u'tgn6m'), (u'name', u'aab')])),
(u'metadata', OrderedDict([
(u'contentType', None),
(u'etag', u'10485efa4069bb94d50588df2e7466a079d49d4f5fd7bf5b35e7c0d5b12d76b7'),
(u'extra', OrderedDict([
(u'downloads', 0),
(u'version', 30)])),
(u'kind', u'file'),
(u'materialized', materialized),
(u'modified', u'Wed, 24 Jun 2015 10:45:01 '),
(u'name', name),
(u'path', wb_path),
(u'provider', provider),
(u'size', 2008)])),
(u'provider', provider),
(u'time', 1435157161.979904)])
file_deleted_payload = OrderedDict([
(u'action', u'delete'),
(u'auth', OrderedDict([
(u'email', u'tgn6m@osf.io'), (u'id', u'tgn6m'), (u'name', u'aab')])),
(u'metadata', OrderedDict([
(u'materialized', materialized),
(u'path', materialized)])), # Deleted files don't get wb_paths
(u'provider', u'osfstorage'),
(u'time', 1435157876.690203)])
folder_created_payload = OrderedDict([
(u'action', u'create_folder'),
(u'auth', OrderedDict([
(u'email', u'tgn6m@osf.io'), (u'id', u'tgn6m'), (u'name', u'aab')])),
(u'metadata', OrderedDict([
(u'etag', u'5caf8ab73c068565297e455ebce37fd64b6897a2284ec9d7ecba8b6093082bcd'),
(u'extra', OrderedDict()),
(u'kind', u'folder'),
(u'materialized', u'/Three/'),
(u'name', u'Three'),
(u'path', u'558ac595a24f714eff336d66/'),
(u'provider', u'osfstorage')])),
(u'provider', u'osfstorage'),
(u'time', 1435157969.475282)])
def file_move_payload(new_node, old_node):
return OrderedDict([
(u'action', u'move'),
(u'auth', OrderedDict([
(u'email', 'Bob'), (u'id', 'bob2'), (u'name', 'Bob')])),
(u'destination', OrderedDict([
(u'contentType', None),
(u'etag', u'10485efa4069bb94d50588df2e7466a079d49d4f5fd7bf5b35e7c0d5b12d76b7'),
(u'extra', OrderedDict([
(u'downloads', 0),
(u'version', 30)])),
(u'kind', u'file'),
(u'materialized', materialized),
(u'modified', None),
(u'name', name),
(u'nid', str(new_node)),
(u'path', wb_path),
(u'provider', provider),
(u'size', 2008),
('url', '/project/nhgts/files/osfstorage/5581cb50a24f710b0f4623f9/'),
('node', {'url': '/{}/'.format(new_node._id), '_id': new_node._id, 'title': u'Consolidate2'}),
('addon', 'OSF Storage')])),
(u'source', OrderedDict([
(u'materialized', materialized),
(u'name', u'Paper13.txt'),
(u'nid', str(old_node)),
(u'path', materialized), # Not wb path
(u'provider', provider),
('url', '/project/nhgts/files/osfstorage/One/Paper13.txt/'),
('node', {'url': '/{}/'.format(old_node._id), '_id': old_node._id, 'title': u'Consolidate'}),
('addon', 'OSF Storage')])),
(u'time', 1435158051.204264),
('node', u'nhgts'),
('project', None)])
def file_copy_payload(new_node, old_node):
return OrderedDict([
(u'action', u'copy'),
(u'auth', OrderedDict([
(u'email', u'tgn6m@osf.io'),
(u'id', u'tgn6m'),
(u'name', u'aab')])),
(u'destination', OrderedDict([
(u'contentType', None),
(u'etag', u'16075ae3e546971003095beef8323584de40b1fcbf52ed4bb9e7f8547e322824'),
(u'extra', OrderedDict([
(u'downloads', 0),
(u'version', 30)])),
(u'kind', u'file'),
(u'materialized', u'Two/Paper13.txt'),
(u'modified', None),
(u'name', u'Paper13.txt'),
(u'nid', u'nhgts'),
(u'path', wb_path),
(u'provider', u'osfstorage'),
(u'size', 2008),
('url', '/project/nhgts/files/osfstorage/558ac45da24f714eff336d59/'),
('node', {'url': '/nhgts/', '_id': old_node._id, 'title': u'Consolidate'}),
('addon', 'OSF Storage')])),
(u'source', OrderedDict([
(u'materialized', u'One/Paper13.txt'),
(u'name', u'Paper13.txt'),
(u'nid', u'nhgts'),
(u'path', u'One/Paper13.txt'),
(u'provider', u'osfstorage'),
('url', '/project/nhgts/files/osfstorage/One/Paper13.txt/'),
('node', {'url': '/nhgts/', '_id': new_node._id, 'title': u'Consolidate'}),
('addon', 'OSF Storage')])),
(u'time', 1435157658.036183),
('node', u'nhgts'),
('project', None)])
def file_renamed_payload():
return OrderedDict([
(u'action', u'move'),
(u'auth', OrderedDict([
(u'email', u'tgn6m@osf.io'),
(u'id', u'tgn6m'),
(u'name', u'aab')])),
(u'destination', OrderedDict([
(u'contentType', None),
(u'etag', u'0e9bfddcb5a59956ae60e93f32df06b174ad33b53d8a2f2cd08c780cf34a9d93'),
(u'extra', OrderedDict([
(u'downloads', 0),
(u'hashes', OrderedDict([
(u'md5', u'79a64594dd446674ce1010007ac2bde7'),
(u'sha256', u'bf710301e591f6f5ce35aa8971cfc938b39dae0fedcb9915656dded6ad025580')])),
(u'version', 1)])),
(u'kind', u'file'),
(u'materialized', u'Fibery/file2.pdf'),
(u'modified', u'2015-05-07T10:54:32'),
(u'name', u'file2.pdf'),
(u'nid', u'wp6xv'),
(u'path', u'/55f07134a24f71b2a24f4812'),
(u'provider', u'osfstorage'),
(u'size', 21209),
('url', '/project/wp6xv/files/osfstorage/55f07134a24f71b2a24f4812/'),
('node', {'url': '/wp6xv/', '_id': u'wp6xv', 'title': u'File_Notify4'}),
('addon', 'OSF Storage')])),
(u'source', OrderedDict([
(u'materialized', u'Fibery/!--i--2.pdf'),
(u'name', u'!--i--2.pdf'), (u'nid', u'wp6xv'),
(u'path', u'Fibery/!--i--2.pdf'),
(u'provider', u'osfstorage'),
('url', '/project/wp6xv/files/osfstorage/Fibery/%21--i--2.pdf/'),
('node', {'url': '/wp6xv/', '_id': u'wp6xv', 'title': u'File_Notify4'}),
('addon', 'OSF Storage')])),
(u'time', 1441905340.876648),
('node', u'wp6xv'),
('project', None)])
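# Hedged usage sketch (not part of the original fixtures): these factories are
# presumably consumed by tests that parse waterbutler-style file event payloads.
# `new_node` and `old_node` stand for hypothetical project fixtures.
#
#   payload = file_copy_payload(new_node, old_node)
#   assert payload['action'] == 'copy'
#   assert payload['destination']['provider'] == 'osfstorage'
#
#   renamed = file_renamed_payload()
#   assert renamed['source']['name'] == '!--i--2.pdf'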
|
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# scripts/readelf.py
#
# A clone of 'readelf' in Python, based on the pyelftools library
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
import os, sys
from optparse import OptionParser
import string
# For running from development directory. It should take precedence over the
# installed pyelftools.
sys.path.insert(0, '.')
from elftools import __version__
from elftools.common.exceptions import ELFError
from elftools.common.py3compat import (
ifilter, byte2int, bytes2str, itervalues, str2bytes)
from elftools.elf.elffile import ELFFile
from elftools.elf.dynamic import DynamicSection, DynamicSegment
from elftools.elf.enums import ENUM_D_TAG
from elftools.elf.segments import InterpSegment
from elftools.elf.sections import SymbolTableSection
from elftools.elf.gnuversions import (
GNUVerSymSection, GNUVerDefSection,
GNUVerNeedSection,
)
from elftools.elf.relocation import RelocationSection
from elftools.elf.descriptions import (
describe_ei_class, describe_ei_data, describe_ei_version,
describe_ei_osabi, describe_e_type, describe_e_machine,
describe_e_version_numeric, describe_p_type, describe_p_flags,
describe_sh_type, describe_sh_flags,
describe_symbol_type, describe_symbol_bind, describe_symbol_visibility,
describe_symbol_shndx, describe_reloc_type, describe_dyn_tag,
describe_ver_flags,
)
from elftools.elf.constants import E_FLAGS
from elftools.dwarf.dwarfinfo import DWARFInfo
from elftools.dwarf.descriptions import (
describe_reg_name, describe_attr_value, set_global_machine_arch,
describe_CFI_instructions, describe_CFI_register_rule,
describe_CFI_CFA_rule,
)
from elftools.dwarf.constants import (
DW_LNS_copy, DW_LNS_set_file, DW_LNE_define_file)
from elftools.dwarf.callframe import CIE, FDE
class ReadElf(object):
""" display_* methods are used to emit output into the output stream
"""
def __init__(self, file, output):
""" file:
stream object with the ELF file to read
output:
output stream to write to
"""
self.elffile = ELFFile(file)
self.output = output
# Lazily initialized if a debug dump is requested
self._dwarfinfo = None
self._versioninfo = None
def display_file_header(self):
""" Display the ELF file header
"""
self._emitline('ELF Header:')
self._emit(' Magic: ')
self._emitline(' '.join('%2.2x' % byte2int(b)
for b in self.elffile.e_ident_raw))
header = self.elffile.header
e_ident = header['e_ident']
self._emitline(' Class: %s' %
describe_ei_class(e_ident['EI_CLASS']))
self._emitline(' Data: %s' %
describe_ei_data(e_ident['EI_DATA']))
self._emitline(' Version: %s' %
describe_ei_version(e_ident['EI_VERSION']))
self._emitline(' OS/ABI: %s' %
describe_ei_osabi(e_ident['EI_OSABI']))
self._emitline(' ABI Version: %d' %
e_ident['EI_ABIVERSION'])
self._emitline(' Type: %s' %
describe_e_type(header['e_type']))
self._emitline(' Machine: %s' %
describe_e_machine(header['e_machine']))
self._emitline(' Version: %s' %
describe_e_version_numeric(header['e_version']))
self._emitline(' Entry point address: %s' %
self._format_hex(header['e_entry']))
self._emit(' Start of program headers: %s' %
header['e_phoff'])
self._emitline(' (bytes into file)')
self._emit(' Start of section headers: %s' %
header['e_shoff'])
self._emitline(' (bytes into file)')
self._emitline(' Flags: %s%s' %
(self._format_hex(header['e_flags']),
self.decode_flags(header['e_flags'])))
self._emitline(' Size of this header: %s (bytes)' %
header['e_ehsize'])
self._emitline(' Size of program headers: %s (bytes)' %
header['e_phentsize'])
self._emitline(' Number of program headers: %s' %
header['e_phnum'])
self._emitline(' Size of section headers: %s (bytes)' %
header['e_shentsize'])
self._emitline(' Number of section headers: %s' %
header['e_shnum'])
self._emitline(' Section header string table index: %s' %
header['e_shstrndx'])
def decode_flags(self, flags):
description = ""
if self.elffile['e_machine'] == "EM_ARM":
if flags & E_FLAGS.EF_ARM_HASENTRY:
description += ", has entry point"
version = flags & E_FLAGS.EF_ARM_EABIMASK
if version == E_FLAGS.EF_ARM_EABI_VER5:
description += ", Version5 EABI"
return description
def display_program_headers(self, show_heading=True):
""" Display the ELF program headers.
If show_heading is True, displays the heading for this information
(Elf file type is...)
"""
self._emitline()
if self.elffile.num_segments() == 0:
self._emitline('There are no program headers in this file.')
return
elfheader = self.elffile.header
if show_heading:
self._emitline('Elf file type is %s' %
describe_e_type(elfheader['e_type']))
self._emitline('Entry point is %s' %
self._format_hex(elfheader['e_entry']))
# readelf weirdness - why isn't e_phoff printed as hex? (for section
# headers, it is...)
self._emitline('There are %s program headers, starting at offset %s' % (
elfheader['e_phnum'], elfheader['e_phoff']))
self._emitline()
self._emitline('Program Headers:')
# Now comes the table of program headers with their attributes. Note
# that due to different formatting constraints of 32-bit and 64-bit
# addresses, there are some conditions on elfclass here.
#
# First comes the table heading
#
if self.elffile.elfclass == 32:
self._emitline(' Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align')
else:
self._emitline(' Type Offset VirtAddr PhysAddr')
self._emitline(' FileSiz MemSiz Flags Align')
# Now the entries
#
for segment in self.elffile.iter_segments():
self._emit(' %-14s ' % describe_p_type(segment['p_type']))
if self.elffile.elfclass == 32:
self._emitline('%s %s %s %s %s %-3s %s' % (
self._format_hex(segment['p_offset'], fieldsize=6),
self._format_hex(segment['p_vaddr'], fullhex=True),
self._format_hex(segment['p_paddr'], fullhex=True),
self._format_hex(segment['p_filesz'], fieldsize=5),
self._format_hex(segment['p_memsz'], fieldsize=5),
describe_p_flags(segment['p_flags']),
self._format_hex(segment['p_align'])))
else: # 64
self._emitline('%s %s %s' % (
self._format_hex(segment['p_offset'], fullhex=True),
self._format_hex(segment['p_vaddr'], fullhex=True),
self._format_hex(segment['p_paddr'], fullhex=True)))
self._emitline(' %s %s %-3s %s' % (
self._format_hex(segment['p_filesz'], fullhex=True),
self._format_hex(segment['p_memsz'], fullhex=True),
describe_p_flags(segment['p_flags']),
# lead0x set to False for p_align, to mimic readelf.
# No idea why the difference from 32-bit mode :-|
self._format_hex(segment['p_align'], lead0x=False)))
if isinstance(segment, InterpSegment):
self._emitline(' [Requesting program interpreter: %s]' %
bytes2str(segment.get_interp_name()))
# Sections to segments mapping
#
if self.elffile.num_sections() == 0:
# No sections? We're done
return
self._emitline('\n Section to Segment mapping:')
self._emitline(' Segment Sections...')
for nseg, segment in enumerate(self.elffile.iter_segments()):
self._emit(' %2.2d ' % nseg)
for section in self.elffile.iter_sections():
if ( not section.is_null() and
segment.section_in_segment(section)):
self._emit('%s ' % bytes2str(section.name))
self._emitline('')
def display_section_headers(self, show_heading=True):
""" Display the ELF section headers
"""
elfheader = self.elffile.header
if show_heading:
self._emitline('There are %s section headers, starting at offset %s' % (
elfheader['e_shnum'], self._format_hex(elfheader['e_shoff'])))
self._emitline('\nSection Header%s:' % (
's' if elfheader['e_shnum'] > 1 else ''))
# Different formatting constraints of 32-bit and 64-bit addresses
#
if self.elffile.elfclass == 32:
self._emitline(' [Nr] Name Type Addr Off Size ES Flg Lk Inf Al')
else:
self._emitline(' [Nr] Name Type Address Offset')
self._emitline(' Size EntSize Flags Link Info Align')
# Now the entries
#
for nsec, section in enumerate(self.elffile.iter_sections()):
self._emit(' [%2u] %-17.17s %-15.15s ' % (
nsec, bytes2str(section.name), describe_sh_type(section['sh_type'])))
if self.elffile.elfclass == 32:
self._emitline('%s %s %s %s %3s %2s %3s %2s' % (
self._format_hex(section['sh_addr'], fieldsize=8, lead0x=False),
self._format_hex(section['sh_offset'], fieldsize=6, lead0x=False),
self._format_hex(section['sh_size'], fieldsize=6, lead0x=False),
self._format_hex(section['sh_entsize'], fieldsize=2, lead0x=False),
describe_sh_flags(section['sh_flags']),
section['sh_link'], section['sh_info'],
section['sh_addralign']))
else: # 64
self._emitline(' %s %s' % (
self._format_hex(section['sh_addr'], fullhex=True, lead0x=False),
self._format_hex(section['sh_offset'],
fieldsize=16 if section['sh_offset'] > 0xffffffff else 8,
lead0x=False)))
self._emitline(' %s %s %3s %2s %3s %s' % (
self._format_hex(section['sh_size'], fullhex=True, lead0x=False),
self._format_hex(section['sh_entsize'], fullhex=True, lead0x=False),
describe_sh_flags(section['sh_flags']),
section['sh_link'], section['sh_info'],
section['sh_addralign']))
self._emitline('Key to Flags:')
self._emit(' W (write), A (alloc), X (execute), M (merge), S (strings)')
if self.elffile['e_machine'] in ('EM_X86_64', 'EM_L1OM'):
self._emitline(', l (large)')
else:
self._emitline()
self._emitline(' I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)')
self._emitline(' O (extra OS processing required) o (OS specific), p (processor specific)')
def display_symbol_tables(self):
""" Display the symbol tables contained in the file
"""
self._init_versioninfo()
for section in self.elffile.iter_sections():
if not isinstance(section, SymbolTableSection):
continue
if section['sh_entsize'] == 0:
self._emitline("\nSymbol table '%s' has a sh_entsize of zero!" % (
bytes2str(section.name)))
continue
self._emitline("\nSymbol table '%s' contains %s entries:" % (
bytes2str(section.name), section.num_symbols()))
if self.elffile.elfclass == 32:
self._emitline(' Num: Value Size Type Bind Vis Ndx Name')
else: # 64
self._emitline(' Num: Value Size Type Bind Vis Ndx Name')
for nsym, symbol in enumerate(section.iter_symbols()):
version_info = ''
# readelf doesn't display version info for Solaris versioning
if (section['sh_type'] == 'SHT_DYNSYM' and
self._versioninfo['type'] == 'GNU'):
version = self._symbol_version(nsym)
if (version['name'] != bytes2str(symbol.name) and
version['index'] not in ('VER_NDX_LOCAL',
'VER_NDX_GLOBAL')):
if version['filename']:
# external symbol
version_info = '@%(name)s (%(index)i)' % version
else:
# internal symbol
if version['hidden']:
version_info = '@%(name)s' % version
else:
version_info = '@@%(name)s' % version
# symbol names are truncated to 25 chars, similarly to readelf
self._emitline('%6d: %s %5d %-7s %-6s %-7s %4s %.25s%s' % (
nsym,
self._format_hex(
symbol['st_value'], fullhex=True, lead0x=False),
symbol['st_size'],
describe_symbol_type(symbol['st_info']['type']),
describe_symbol_bind(symbol['st_info']['bind']),
describe_symbol_visibility(symbol['st_other']['visibility']),
describe_symbol_shndx(symbol['st_shndx']),
bytes2str(symbol.name),
version_info))
def display_dynamic_tags(self):
""" Display the dynamic tags contained in the file
"""
has_dynamic_sections = False
for section in self.elffile.iter_sections():
if not isinstance(section, DynamicSection):
continue
has_dynamic_sections = True
self._emitline("\nDynamic section at offset %s contains %s entries:" % (
self._format_hex(section['sh_offset']),
section.num_tags()))
self._emitline(" Tag Type Name/Value")
padding = 20 + (8 if self.elffile.elfclass == 32 else 0)
for tag in section.iter_tags():
if tag.entry.d_tag == 'DT_NEEDED':
parsed = 'Shared library: [%s]' % bytes2str(tag.needed)
elif tag.entry.d_tag == 'DT_RPATH':
parsed = 'Library rpath: [%s]' % bytes2str(tag.rpath)
elif tag.entry.d_tag == 'DT_RUNPATH':
parsed = 'Library runpath: [%s]' % bytes2str(tag.runpath)
elif tag.entry.d_tag == 'DT_SONAME':
parsed = 'Library soname: [%s]' % bytes2str(tag.soname)
elif tag.entry.d_tag.endswith(('SZ', 'ENT')):
parsed = '%i (bytes)' % tag['d_val']
elif tag.entry.d_tag.endswith(('NUM', 'COUNT')):
parsed = '%i' % tag['d_val']
elif tag.entry.d_tag == 'DT_PLTREL':
s = describe_dyn_tag(tag.entry.d_val)
if s.startswith('DT_'):
s = s[3:]
parsed = '%s' % s
else:
parsed = '%#x' % tag['d_val']
self._emitline(" %s %-*s %s" % (
self._format_hex(ENUM_D_TAG.get(tag.entry.d_tag, tag.entry.d_tag),
fullhex=True, lead0x=True),
padding,
'(%s)' % (tag.entry.d_tag[3:],),
parsed))
if not has_dynamic_sections:
# readelf only prints this if there is at least one segment
if self.elffile.num_segments():
self._emitline("\nThere is no dynamic section in this file.")
def display_relocations(self):
""" Display the relocations contained in the file
"""
has_relocation_sections = False
for section in self.elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
has_relocation_sections = True
self._emitline("\nRelocation section '%s' at offset %s contains %s entries:" % (
bytes2str(section.name),
self._format_hex(section['sh_offset']),
section.num_relocations()))
if section.is_RELA():
self._emitline(" Offset Info Type Sym. Value Sym. Name + Addend")
else:
self._emitline(" Offset Info Type Sym.Value Sym. Name")
# The symbol table section pointed to in sh_link
symtable = self.elffile.get_section(section['sh_link'])
for rel in section.iter_relocations():
hexwidth = 8 if self.elffile.elfclass == 32 else 12
self._emit('%s %s %-17.17s' % (
self._format_hex(rel['r_offset'],
fieldsize=hexwidth, lead0x=False),
self._format_hex(rel['r_info'],
fieldsize=hexwidth, lead0x=False),
describe_reloc_type(
rel['r_info_type'], self.elffile)))
if rel['r_info_sym'] == 0:
self._emitline()
continue
symbol = symtable.get_symbol(rel['r_info_sym'])
# Some symbols have zero 'st_name', so instead what's used is
# the name of the section they point at
if symbol['st_name'] == 0:
symsec = self.elffile.get_section(symbol['st_shndx'])
symbol_name = symsec.name
else:
symbol_name = symbol.name
self._emit(' %s %s%22.22s' % (
self._format_hex(
symbol['st_value'],
fullhex=True, lead0x=False),
' ' if self.elffile.elfclass == 32 else '',
bytes2str(symbol_name)))
if section.is_RELA():
self._emit(' %s %x' % (
'+' if rel['r_addend'] >= 0 else '-',
abs(rel['r_addend'])))
self._emitline()
if not has_relocation_sections:
self._emitline('\nThere are no relocations in this file.')
def display_version_info(self):
""" Display the version info contained in the file
"""
self._init_versioninfo()
if not self._versioninfo['type']:
self._emitline("\nNo version information found in this file.")
return
for section in self.elffile.iter_sections():
if isinstance(section, GNUVerSymSection):
self._print_version_section_header(
section, 'Version symbols', lead0x=False)
num_symbols = section.num_symbols()
# Symbol version info are printed four by four entries
for idx_by_4 in range(0, num_symbols, 4):
self._emit(' %03x:' % idx_by_4)
for idx in range(idx_by_4, min(idx_by_4 + 4, num_symbols)):
symbol_version = self._symbol_version(idx)
if symbol_version['index'] == 'VER_NDX_LOCAL':
version_index = 0
version_name = '(*local*)'
elif symbol_version['index'] == 'VER_NDX_GLOBAL':
version_index = 1
version_name = '(*global*)'
else:
version_index = symbol_version['index']
version_name = '(%(name)s)' % symbol_version
visibility = 'h' if symbol_version['hidden'] else ' '
self._emit('%4x%s%-13s' % (
version_index, visibility, version_name))
self._emitline()
elif isinstance(section, GNUVerDefSection):
self._print_version_section_header(
section, 'Version definition', indent=2)
offset = 0
for verdef, verdaux_iter in section.iter_versions():
verdaux = next(verdaux_iter)
name = verdaux.name
if verdef['vd_flags']:
flags = describe_ver_flags(verdef['vd_flags'])
# Mimic exactly the readelf output
flags += ' '
else:
flags = 'none'
self._emitline(' %s: Rev: %i Flags: %s Index: %i'
' Cnt: %i Name: %s' % (
self._format_hex(offset, fieldsize=6,
alternate=True),
verdef['vd_version'], flags, verdef['vd_ndx'],
verdef['vd_cnt'], bytes2str(name)))
verdaux_offset = (
offset + verdef['vd_aux'] + verdaux['vda_next'])
for idx, verdaux in enumerate(verdaux_iter, start=1):
self._emitline(' %s: Parent %i: %s' %
(self._format_hex(verdaux_offset, fieldsize=4),
idx, bytes2str(verdaux.name)))
verdaux_offset += verdaux['vda_next']
offset += verdef['vd_next']
elif isinstance(section, GNUVerNeedSection):
self._print_version_section_header(section, 'Version needs')
offset = 0
for verneed, verneed_iter in section.iter_versions():
self._emitline(' %s: Version: %i File: %s Cnt: %i' % (
self._format_hex(offset, fieldsize=6,
alternate=True),
verneed['vn_version'], bytes2str(verneed.name),
verneed['vn_cnt']))
vernaux_offset = offset + verneed['vn_aux']
for idx, vernaux in enumerate(verneed_iter, start=1):
if vernaux['vna_flags']:
flags = describe_ver_flags(vernaux['vna_flags'])
# Mimic exactly the readelf output
flags += ' '
else:
flags = 'none'
self._emitline(
' %s: Name: %s Flags: %s Version: %i' % (
self._format_hex(vernaux_offset, fieldsize=4),
bytes2str(vernaux.name), flags,
vernaux['vna_other']))
vernaux_offset += vernaux['vna_next']
offset += verneed['vn_next']
def display_hex_dump(self, section_spec):
""" Display a hex dump of a section. section_spec is either a section
number or a name.
"""
section = self._section_from_spec(section_spec)
if section is None:
self._emitline("Section '%s' does not exist in the file!" % (
section_spec))
return
self._emitline("\nHex dump of section '%s':" % bytes2str(section.name))
self._note_relocs_for_section(section)
addr = section['sh_addr']
data = section.data()
dataptr = 0
while dataptr < len(data):
bytesleft = len(data) - dataptr
# chunks of 16 bytes per line
linebytes = 16 if bytesleft > 16 else bytesleft
self._emit(' %s ' % self._format_hex(addr, fieldsize=8))
for i in range(16):
if i < linebytes:
self._emit('%2.2x' % byte2int(data[dataptr + i]))
else:
self._emit(' ')
if i % 4 == 3:
self._emit(' ')
for i in range(linebytes):
c = data[dataptr + i : dataptr + i + 1]
if byte2int(c[0]) >= 32 and byte2int(c[0]) < 0x7f:
self._emit(bytes2str(c))
else:
self._emit(bytes2str(b'.'))
self._emitline()
addr += linebytes
dataptr += linebytes
self._emitline()
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = self._section_from_spec(section_spec)
if section is None:
self._emitline("Section '%s' does not exist in the file!" % (
section_spec))
return
self._emitline("\nString dump of section '%s':" % bytes2str(section.name))
found = False
data = section.data()
dataptr = 0
while dataptr < len(data):
while ( dataptr < len(data) and
not (32 <= byte2int(data[dataptr]) <= 127)):
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
found = True
self._emitline(' [%6x] %s' % (
dataptr, bytes2str(data[dataptr:endptr])))
dataptr = endptr
if not found:
self._emitline(' No strings found in this section.')
else:
self._emitline()
def display_debug_dump(self, dump_what):
""" Dump a DWARF section
"""
self._init_dwarfinfo()
if self._dwarfinfo is None:
return
set_global_machine_arch(self.elffile.get_machine_arch())
if dump_what == 'info':
self._dump_debug_info()
elif dump_what == 'decodedline':
self._dump_debug_line_programs()
elif dump_what == 'frames':
self._dump_debug_frames()
elif dump_what == 'frames-interp':
self._dump_debug_frames_interp()
else:
self._emitline('debug dump not yet supported for "%s"' % dump_what)
def _format_hex(self, addr, fieldsize=None, fullhex=False, lead0x=True,
alternate=False):
""" Format an address into a hexadecimal string.
fieldsize:
Size of the hexadecimal field (with leading zeros to fit the
address into. For example with fieldsize=8, the format will
be %08x
If None, the minimal required field size will be used.
fullhex:
If True, override fieldsize to set it to the maximal size
needed for the elfclass
lead0x:
If True, leading 0x is added
alternate:
If True, override lead0x to emulate the alternate
hexadecimal form specified in format string with the #
character: only non-zero values are prefixed with 0x.
This form is used by readelf.
"""
if alternate:
if addr == 0:
lead0x = False
else:
lead0x = True
fieldsize -= 2
s = '0x' if lead0x else ''
if fullhex:
fieldsize = 8 if self.elffile.elfclass == 32 else 16
if fieldsize is None:
field = '%x'
else:
field = '%' + '0%sx' % fieldsize
return s + field % addr
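# Illustrative examples (not in the original source) of how the options above
# combine, assuming a 64-bit ELF (elfclass == 64):
#   self._format_hex(0x400bc0)                             -> '0x400bc0'
#   self._format_hex(0x400bc0, fieldsize=8)                -> '0x00400bc0'
#   self._format_hex(0x400bc0, fullhex=True, lead0x=False) -> '0000000000400bc0'
#   self._format_hex(0x10, alternate=True, fieldsize=6)    -> '0x0010'
#   self._format_hex(0, alternate=True, fieldsize=6)       -> '000000'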
def _print_version_section_header(self, version_section, name, lead0x=True,
indent=1):
""" Print a section header of one version related section (versym,
verneed or verdef) with some options to accommodate readelf's
little differences between each header (e.g. indentation
and 0x prefixing).
"""
if hasattr(version_section, 'num_versions'):
num_entries = version_section.num_versions()
else:
num_entries = version_section.num_symbols()
self._emitline("\n%s section '%s' contains %s entries:" %
(name, bytes2str(version_section.name), num_entries))
self._emitline('%sAddr: %s Offset: %s Link: %i (%s)' % (
' ' * indent,
self._format_hex(
version_section['sh_addr'], fieldsize=16, lead0x=lead0x),
self._format_hex(
version_section['sh_offset'], fieldsize=6, lead0x=True),
version_section['sh_link'],
bytes2str(
self.elffile.get_section(version_section['sh_link']).name)
)
)
def _init_versioninfo(self):
""" Search and initialize informations about version related sections
and the kind of versioning used (GNU or Solaris).
"""
if self._versioninfo is not None:
return
self._versioninfo = {'versym': None, 'verdef': None,
'verneed': None, 'type': None}
for section in self.elffile.iter_sections():
if isinstance(section, GNUVerSymSection):
self._versioninfo['versym'] = section
elif isinstance(section, GNUVerDefSection):
self._versioninfo['verdef'] = section
elif isinstance(section, GNUVerNeedSection):
self._versioninfo['verneed'] = section
elif isinstance(section, DynamicSection):
for tag in section.iter_tags():
if tag['d_tag'] == 'DT_VERSYM':
self._versioninfo['type'] = 'GNU'
break
if not self._versioninfo['type'] and (
self._versioninfo['verneed'] or self._versioninfo['verdef']):
self._versioninfo['type'] = 'Solaris'
def _symbol_version(self, nsym):
""" Return a dict containing information on the
or None if no version information is available
"""
self._init_versioninfo()
symbol_version = dict.fromkeys(('index', 'name', 'filename', 'hidden'))
if (not self._versioninfo['versym'] or
nsym >= self._versioninfo['versym'].num_symbols()):
return None
symbol = self._versioninfo['versym'].get_symbol(nsym)
index = symbol.entry['ndx']
if not index in ('VER_NDX_LOCAL', 'VER_NDX_GLOBAL'):
index = int(index)
if self._versioninfo['type'] == 'GNU':
# In GNU versioning mode, the highest bit is used to
# store whether the symbol is hidden or not
if index & 0x8000:
index &= ~0x8000
symbol_version['hidden'] = True
if (self._versioninfo['verdef'] and
index <= self._versioninfo['verdef'].num_versions()):
_, verdaux_iter = \
self._versioninfo['verdef'].get_version(index)
symbol_version['name'] = bytes2str(next(verdaux_iter).name)
else:
verneed, vernaux = \
self._versioninfo['verneed'].get_version(index)
symbol_version['name'] = bytes2str(vernaux.name)
symbol_version['filename'] = bytes2str(verneed.name)
symbol_version['index'] = index
return symbol_version
def _section_from_spec(self, spec):
""" Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
"""
try:
num = int(spec)
if num < self.elffile.num_sections():
return self.elffile.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elffile.get_section_by_name(str2bytes(spec))
def _note_relocs_for_section(self, section):
""" If there are relocation sections pointing to the givne section,
emit a note about it.
"""
for relsec in self.elffile.iter_sections():
if isinstance(relsec, RelocationSection):
info_idx = relsec['sh_info']
if self.elffile.get_section(info_idx) == section:
self._emitline(' Note: This section has relocations against it, but these have NOT been applied to this dump.')
return
def _init_dwarfinfo(self):
""" Initialize the DWARF info contained in the file and assign it to
self._dwarfinfo.
Leave self._dwarfinfo at None if no DWARF info was found in the file
"""
if self._dwarfinfo is not None:
return
if self.elffile.has_dwarf_info():
self._dwarfinfo = self.elffile.get_dwarf_info()
else:
self._dwarfinfo = None
def _dump_debug_info(self):
""" Dump the debugging info section.
"""
self._emitline('Contents of the .debug_info section:\n')
# Offset of the .debug_info section in the stream
section_offset = self._dwarfinfo.debug_info_sec.global_offset
for cu in self._dwarfinfo.iter_CUs():
self._emitline(' Compilation Unit @ offset %s:' %
self._format_hex(cu.cu_offset))
self._emitline(' Length: %s (%s)' % (
self._format_hex(cu['unit_length']),
'%s-bit' % cu.dwarf_format()))
self._emitline(' Version: %s' % cu['version']),
self._emitline(' Abbrev Offset: %s' % (
self._format_hex(cu['debug_abbrev_offset']))),
self._emitline(' Pointer Size: %s' % cu['address_size'])
# The nesting depth of each DIE within the tree of DIEs must be
# displayed. To implement this, a counter is incremented each time
# the current DIE has children, and decremented when a null die is
# encountered. Due to the way the DIE tree is serialized, this will
# correctly reflect the nesting depth
#
die_depth = 0
for die in cu.iter_DIEs():
self._emitline(' <%s><%x>: Abbrev Number: %s%s' % (
die_depth,
die.offset,
die.abbrev_code,
(' (%s)' % die.tag) if not die.is_null() else ''))
if die.is_null():
die_depth -= 1
continue
for attr in itervalues(die.attributes):
name = attr.name
# Unknown attribute values are passed-through as integers
if isinstance(name, int):
name = 'Unknown AT value: %x' % name
self._emitline(' <%2x> %-18s: %s' % (
attr.offset,
name,
describe_attr_value(
attr, die, section_offset)))
if die.has_children:
die_depth += 1
self._emitline()
def _dump_debug_line_programs(self):
""" Dump the (decoded) line programs from .debug_line
The programs are dumped in the order of the CUs they belong to.
"""
self._emitline('Decoded dump of debug contents of section .debug_line:\n')
for cu in self._dwarfinfo.iter_CUs():
lineprogram = self._dwarfinfo.line_program_for_CU(cu)
cu_filename = bytes2str(lineprogram['file_entry'][0].name)
if len(lineprogram['include_directory']) > 0:
dir_index = lineprogram['file_entry'][0].dir_index
if dir_index > 0:
dir = lineprogram['include_directory'][dir_index - 1]
else:
dir = b'.'
cu_filename = '%s/%s' % (bytes2str(dir), cu_filename)
self._emitline('CU: %s:' % cu_filename)
self._emitline('File name Line number Starting address')
# Print each state's file, line and address information. For some
# instructions other output is needed to be compatible with
# readelf.
for entry in lineprogram.get_entries():
state = entry.state
if state is None:
# Special handling for commands that don't set a new state
if entry.command == DW_LNS_set_file:
file_entry = lineprogram['file_entry'][entry.args[0] - 1]
if file_entry.dir_index == 0:
# current directory
self._emitline('\n./%s:[++]' % (
bytes2str(file_entry.name)))
else:
self._emitline('\n%s/%s:' % (
bytes2str(lineprogram['include_directory'][file_entry.dir_index - 1]),
bytes2str(file_entry.name)))
elif entry.command == DW_LNE_define_file:
self._emitline('%s:' % (
bytes2str(lineprogram['include_directory'][entry.args[0].dir_index])))
elif not state.end_sequence:
# readelf doesn't print the state after end_sequence
# instructions. I think it's a bug but to be compatible
# I don't print them too.
self._emitline('%-35s %11d %18s' % (
bytes2str(lineprogram['file_entry'][state.file - 1].name),
state.line,
'0' if state.address == 0 else
self._format_hex(state.address)))
if entry.command == DW_LNS_copy:
# Another readelf oddity...
self._emitline()
def _dump_debug_frames(self):
""" Dump the raw frame information from .debug_frame
"""
if not self._dwarfinfo.has_CFI():
return
self._emitline('Contents of the .debug_frame section:')
for entry in self._dwarfinfo.CFI_entries():
if isinstance(entry, CIE):
self._emitline('\n%08x %s %s CIE' % (
entry.offset,
self._format_hex(entry['length'], fullhex=True, lead0x=False),
self._format_hex(entry['CIE_id'], fullhex=True, lead0x=False)))
self._emitline(' Version: %d' % entry['version'])
self._emitline(' Augmentation: "%s"' % bytes2str(entry['augmentation']))
self._emitline(' Code alignment factor: %u' % entry['code_alignment_factor'])
self._emitline(' Data alignment factor: %d' % entry['data_alignment_factor'])
self._emitline(' Return address column: %d' % entry['return_address_register'])
self._emitline()
else: # FDE
self._emitline('\n%08x %s %s FDE cie=%08x pc=%s..%s' % (
entry.offset,
self._format_hex(entry['length'], fullhex=True, lead0x=False),
self._format_hex(entry['CIE_pointer'], fullhex=True, lead0x=False),
entry.cie.offset,
self._format_hex(entry['initial_location'], fullhex=True, lead0x=False),
self._format_hex(
entry['initial_location'] + entry['address_range'],
fullhex=True, lead0x=False)))
self._emit(describe_CFI_instructions(entry))
self._emitline()
def _dump_debug_frames_interp(self):
""" Dump the interpreted (decoded) frame information from .debug_frame
"""
if not self._dwarfinfo.has_CFI():
return
self._emitline('Contents of the .debug_frame section:')
for entry in self._dwarfinfo.CFI_entries():
if isinstance(entry, CIE):
self._emitline('\n%08x %s %s CIE "%s" cf=%d df=%d ra=%d' % (
entry.offset,
self._format_hex(entry['length'], fullhex=True, lead0x=False),
self._format_hex(entry['CIE_id'], fullhex=True, lead0x=False),
bytes2str(entry['augmentation']),
entry['code_alignment_factor'],
entry['data_alignment_factor'],
entry['return_address_register']))
ra_regnum = entry['return_address_register']
else: # FDE
self._emitline('\n%08x %s %s FDE cie=%08x pc=%s..%s' % (
entry.offset,
self._format_hex(entry['length'], fullhex=True, lead0x=False),
self._format_hex(entry['CIE_pointer'], fullhex=True, lead0x=False),
entry.cie.offset,
self._format_hex(entry['initial_location'], fullhex=True, lead0x=False),
self._format_hex(entry['initial_location'] + entry['address_range'],
fullhex=True, lead0x=False)))
ra_regnum = entry.cie['return_address_register']
# Print the heading row for the decoded table
self._emit(' LOC')
self._emit(' ' if entry.structs.address_size == 4 else ' ')
self._emit(' CFA ')
# Decode the table and look at the registers it describes.
# We build reg_order here to match readelf's order. In particular,
# registers are sorted by their number, and the register matching
# ra_regnum is always listed last with a special heading.
decoded_table = entry.get_decoded()
reg_order = sorted(ifilter(
lambda r: r != ra_regnum,
decoded_table.reg_order))
# Headings for the registers
for regnum in reg_order:
self._emit('%-6s' % describe_reg_name(regnum))
self._emitline('ra ')
# Now include ra_regnum in reg_order to print its values similarly
# to the other registers.
reg_order.append(ra_regnum)
for line in decoded_table.table:
self._emit(self._format_hex(
line['pc'], fullhex=True, lead0x=False))
self._emit(' %-9s' % describe_CFI_CFA_rule(line['cfa']))
for regnum in reg_order:
if regnum in line:
s = describe_CFI_register_rule(line[regnum])
else:
s = 'u'
self._emit('%-6s' % s)
self._emitline()
self._emitline()
def _emit(self, s=''):
""" Emit an object to output
"""
self.output.write(str(s))
def _emitline(self, s=''):
""" Emit an object to output, followed by a newline
"""
self.output.write(str(s) + '\n')
SCRIPT_DESCRIPTION = 'Display information about the contents of ELF format files'
VERSION_STRING = '%%prog: based on pyelftools %s' % __version__
def main(stream=None):
# parse the command-line arguments and invoke ReadElf
optparser = OptionParser(
usage='usage: %prog [options] <elf-file>',
description=SCRIPT_DESCRIPTION,
add_help_option=False, # -h is a real option of readelf
prog='readelf.py',
version=VERSION_STRING)
optparser.add_option('-d', '--dynamic',
action='store_true', dest='show_dynamic_tags',
help='Display the dynamic section')
optparser.add_option('-H', '--help',
action='store_true', dest='help',
help='Display this information')
optparser.add_option('-h', '--file-header',
action='store_true', dest='show_file_header',
help='Display the ELF file header')
optparser.add_option('-l', '--program-headers', '--segments',
action='store_true', dest='show_program_header',
help='Display the program headers')
optparser.add_option('-S', '--section-headers', '--sections',
action='store_true', dest='show_section_header',
help="Display the sections' headers")
optparser.add_option('-e', '--headers',
action='store_true', dest='show_all_headers',
help='Equivalent to: -h -l -S')
optparser.add_option('-s', '--symbols', '--syms',
action='store_true', dest='show_symbols',
help='Display the symbol table')
optparser.add_option('-r', '--relocs',
action='store_true', dest='show_relocs',
help='Display the relocations (if present)')
optparser.add_option('-x', '--hex-dump',
action='store', dest='show_hex_dump', metavar='<number|name>',
help='Dump the contents of section <number|name> as bytes')
optparser.add_option('-p', '--string-dump',
action='store', dest='show_string_dump', metavar='<number|name>',
help='Dump the contents of section <number|name> as strings')
optparser.add_option('-V', '--version-info',
action='store_true', dest='show_version_info',
help='Display the version sections (if present)')
optparser.add_option('--debug-dump',
action='store', dest='debug_dump_what', metavar='<what>',
help=(
'Display the contents of DWARF debug sections. <what> can be ' +
'one of {info,decodedline,frames,frames-interp}'))
options, args = optparser.parse_args()
if options.help or len(args) == 0:
optparser.print_help()
sys.exit(0)
if options.show_all_headers:
do_file_header = do_section_header = do_program_header = True
else:
do_file_header = options.show_file_header
do_section_header = options.show_section_header
do_program_header = options.show_program_header
with open(args[0], 'rb') as file:
try:
readelf = ReadElf(file, stream or sys.stdout)
if do_file_header:
readelf.display_file_header()
if do_section_header:
readelf.display_section_headers(
show_heading=not do_file_header)
if do_program_header:
readelf.display_program_headers(
show_heading=not do_file_header)
if options.show_dynamic_tags:
readelf.display_dynamic_tags()
if options.show_symbols:
readelf.display_symbol_tables()
if options.show_relocs:
readelf.display_relocations()
if options.show_version_info:
readelf.display_version_info()
if options.show_hex_dump:
readelf.display_hex_dump(options.show_hex_dump)
if options.show_string_dump:
readelf.display_string_dump(options.show_string_dump)
if options.debug_dump_what:
readelf.display_debug_dump(options.debug_dump_what)
except ELFError as ex:
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1)
def profile_main():
# Run 'main' redirecting its output to readelfout.txt
# Saves profiling information in readelf.profile
PROFFILE = 'readelf.profile'
import cProfile
cProfile.run('main(open("readelfout.txt", "w"))', PROFFILE)
# Dig in some profiling stats
import pstats
p = pstats.Stats(PROFFILE)
p.sort_stats('cumulative').print_stats(25)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
main()
#profile_main()
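# Hedged usage sketch (paths are illustrative, not from the original script):
#   $ python scripts/readelf.py -h -l -S /bin/ls
#   $ python scripts/readelf.py --debug-dump=info ./a.out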
|
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import map, range, zip
import errno
from itertools import product
from os import getcwd, makedirs, path
import random
import cv2
from keras import backend, layers, models
from keras.applications import vgg16
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
__author__ = 'Gregory Giecold'
__copyright__ = 'Copyright 2017-2022 Gregory Giecold and contributors'
__credit__ = 'Gregory Giecold'
__status__ = 'beta'
__version__ = '0.1.0'
__all__ = ['class_activation_preprocess', 'class_activation_heatmap',
'display_filters', 'display_intermediate_activations',
'maximally_responsive_pattern', 'to_valid_img', 'trial_1',
'trial_2', 'trial_3']
def class_activation_heatmap(file_path, architecture='vgg16',
class_idx=None, odir=getcwd()):
"""This function implements the idea exposed in
"Grad-CAM: Visual Explanation from Deep Networks via "
"Gradient-based Localization', Selvaraju, Cogswell et al.,
arXiv:1610.02391 [cs.CV]. The gist of it consists in weighting,
given an input image, a spatial map of different channel activations
by the gradient of a class with respect to those channels.
The said channels are assumed to be part of a specific feature map;
in the present implementation, this feature map is enforced to be the last
layer of the convolutional base of an Inception, ResNet,
VGG16, VGG19 or Xception architecture (as specified by the eponymous
parameter).
"""
# The following is required because we are making use
# of functions present in Keras for decoding those
# model's class predictions, as well as functions
# wrapping some of the image preprocessing steps
# peculiar to each of those models
assert architecture in ('inception_v3', 'resnet50', 'vgg16',
'vgg19', 'xception')
module = getattr(__import__('keras.applications'), 'applications')
if architecture.startswith('vgg'):
cls = getattr(getattr(module, architecture), architecture.upper())
elif architecture == 'inception_v3':
cls = getattr(getattr(module, architecture), 'InceptionV3')
elif architecture == 'resnet50':
cls = getattr(getattr(module, architecture), 'ResNet50')
else:
cls = getattr(getattr(module, architecture), architecture.capitalize())
without_top_model = cls(include_top=False, weights='imagenet')
# The 'without_top_model' is the convolutional base of the model. It
# makes it easier to access the last convolution layer via an index
# instead of having to specify its name.
model = cls(weights='imagenet')
model.summary()
img = class_activation_preprocess(file_path, model.name)
predictions = model.predict(img)
decoded_predictions = getattr(getattr(module, model.name),
'decode_predictions')(predictions, top=5)[0]
print("\n\n\nThe top 5 classes predicted for this image, "
"and their probabilities are as follows:\n", decoded_predictions)
# If class_idx defaults to None, then the class with largest predicted
# probability for the input image will be selected to display
# the corresponding activation heatmap super-imposed on that image
if class_idx is None:
class_idx = np.argmax(predictions[0])
else:
assert isinstance(class_idx, int) and 0 <= class_idx < 1000
class_output = model.output[:, class_idx]
last_convolution_layer = without_top_model.get_layer(index=-2)
last_convolution_layer = model.get_layer(last_convolution_layer.name)
class_gradients = backend.gradients(class_output,
last_convolution_layer.output)[0]
pooled_class_gradients = backend.mean(class_gradients, axis=(0, 1, 2))
func = backend.function([model.input], [pooled_class_gradients,
last_convolution_layer.output[0]])
pooled_class_gradient_values, last_convolution_layer_output = func([img])
for channel, value in enumerate(pooled_class_gradient_values):
last_convolution_layer_output[:, :, channel] *= value
class_activation_heatmap = np.mean(last_convolution_layer_output, axis=-1)
class_activation_heatmap = np.maximum(class_activation_heatmap, 0)
class_activation_heatmap /= np.max(class_activation_heatmap)
plt.matshow(class_activation_heatmap)
with open(path.join(odir, 'class_{}_heatmap.png'.format(class_idx)), 'wb') as fh:
plt.savefig(fh)
img = cv2.imread(file_path)
class_activation_heatmap = cv2.resize(class_activation_heatmap,
(img.shape[1], img.shape[0]))
class_activation_heatmap = np.uint8(255 * class_activation_heatmap)
class_activation_heatmap = cv2.applyColorMap(
class_activation_heatmap,
cv2.COLORMAP_JET)
img = img + 0.4 * class_activation_heatmap
cv2.imwrite(path.join(odir, 'class_{}_superimposed_heatmap.png'.format(
class_idx)), img)
plt.show()
def class_activation_preprocess(file_path, architecture='vgg16'):
"""The preprocessing steps embodied in the present function
assume that the model whose class activation heatmap
we want to display was trained on input images
of the same format as those fed to the convnet whose
architecture is specified as one of this function's parameters.
VGG16 was trained on images of size 224 * 224, with
some further preprocessing summed up in the function
'keras.applications.vgg16.preprocess_input'.
"""
assert path.isfile(file_path)
from keras.preprocessing import image
img = image.load_img(file_path, target_size=(224, 224))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
module = getattr(__import__('keras.applications'), 'applications')
img = getattr(getattr(module, architecture), 'preprocess_input')(img)
return img
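# Minimal usage sketch (assumption: 'raven.jpg' is a hypothetical local image;
# in this module the real path is built by trial_3):
#   img = class_activation_preprocess('raven.jpg', architecture='vgg16')
#   # img.shape == (1, 224, 224, 3), already run through vgg16.preprocess_input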
def maximally_responsive_pattern(model, layer_name, filter_index=0,
img_size=64, num_iterations=50, step_size=1.0):
assert isinstance(model, models.Model)
layer_output = model.get_layer(layer_name).output
# Following is the loss function whose value we are to maximize.
# Namely, starting from a blank image, we will proceed to doing
# gradient ascent in input space in order to maximize the response
# of this filter.
loss_function = backend.mean(layer_output[:, :, :, filter_index])
gradients = backend.gradients(loss_function, model.input)[0]
# Gradient normalization trick:
gradients /= (backend.sqrt(backend.mean(backend.square(gradients))) + 1e-4)
func = backend.function([model.input], [loss_function, gradients])
input_tensor = 128.0 + 20 * np.random.random((1, img_size, img_size, 3))
for iteration in range(num_iterations):
_, gradient_values = func([input_tensor])
input_tensor += step_size * gradient_values
img = to_valid_img(input_tensor[0])
return img
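# Minimal sketch of the gradient-ascent idea described above (the layer and
# filter choices are illustrative; trial_2 drives the real calls):
#   base = vgg16.VGG16(include_top=False, weights='imagenet')
#   pattern = maximally_responsive_pattern(base, 'block3_conv1', filter_index=0,
#                                          img_size=64, num_iterations=40)
#   plt.imshow(pattern)  # the input pattern this filter responds to most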
def to_valid_img(arr):
arr -= arr.mean()
arr /= (arr.std() + 1e-4)
arr *= 0.1
arr += 0.5
arr = np.clip(arr, 0, 1)
arr *= 255
arr = np.clip(arr, 0, 255).astype('uint8')
return arr
def display_filters(model, layer_name, img_size=64,
margin=5, grid_size=8, odir=getcwd()):
assert isinstance(model, models.Model)
grid = np.zeros((grid_size * img_size + (grid_size - 1) * margin,
grid_size * img_size + (grid_size - 1) * margin,
3))
for row, column in product(range(grid_size), range(grid_size)):
picture = maximally_responsive_pattern(
model, layer_name,
column + grid_size * row,
img_size=img_size)
column_begin = column * (img_size + margin)
column_end = column_begin + img_size
row_begin = row * (img_size + margin)
row_end = row_begin + img_size
grid[column_begin:column_end, row_begin:row_end, :] = picture
plt.figure(figsize=(20, 20))
plt.imshow(grid)
with open(path.join(odir, layer_name + '.png'), 'wb') as fh:
plt.savefig(fh)
plt.show()
def display_intermediate_activations(model, file_path, num_layers=8,
height=150, width=150, odir=getcwd()):
assert isinstance(model, models.Model)
assert path.isfile(file_path)
from keras.preprocessing import image
img = image.load_img(file_path, target_size=(height, width))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0) / 255.0
plt.imshow(img[0])
with open(path.join(odir, 'cat.png'), 'wb') as fh:
plt.savefig(fh)
layer_names = list()
layer_outputs = list()
for layer in model.layers[:num_layers]:
if isinstance(layer, layers.Conv2D):
layer_names.append(layer.name)
layer_outputs.append(layer.output)
intermediate_activations_model = models.Model(
inputs=model.input, outputs=layer_outputs)
activations = intermediate_activations_model.predict(img)
for layer_name, activation in zip(layer_names, activations):
_, height, _, num_filters = activation.shape
num_columns = num_filters // 16
grid = np.zeros((num_columns * height, 16 * height))
for column, row in product(range(num_columns), range(16)):
picture = activation[0, :, :, row + 16 * column]
picture = StandardScaler().fit_transform(picture)
picture *= 64
picture += 128
picture = np.clip(picture, 0, 255).astype('uint8')
grid[column * height:(column + 1) * height,
row * height:(row + 1) * height] = picture
plt.figure(figsize=(grid.shape[1] / float(height),
grid.shape[0] / float(height)))
plt.title(layer_name)
plt.grid(False)
plt.imshow(grid, aspect='auto', cmap='viridis')
with open(path.join(odir, layer_name + '.png'), 'wb') as fh:
plt.savefig(fh)
plt.show()
def trial_1(odir):
print("Displaying the activations of every channel in every "
"intermediate layer activation on a randomly-selected "
"cat picture from the catsVsDogs test set:\n\n\n")
model = models.load_model('catsVsDogs_small_convnet_experiment_1.h5')
# Convnet trained on 2000 images of dogs and cats;
# no pre-training involved
model.summary()
file_path = path.join(path.dirname(getcwd()),
'data', 'catsVsDogs_small', 'test', 'cats',
'cat.{}.jpg'.format(random.randint(1500, 1999)))
display_intermediate_activations(
model, file_path,
odir=path.join(odir, 'intermediate_activations')
)
def trial_2(odir):
print("\n\n\nDisplaying the response patterns of the first 64 filters "
"in the first layer of each convolution block of the VGG16 "
"deep neural network architecture pre-trained on the "
"ImageNet dataset:\n\n\n")
model = vgg16.VGG16(include_top=False, weights='imagenet')
model.summary()
for i in range(1, 6):
layer_name = 'block{}_conv1'.format(i)
display_filters(
model, layer_name,
odir=path.join(odir, 'filter_patterns')
)
def trial_3(odir):
print("\n\n\nDisplaying a class activation map, i.e. a heatmap of 'class "
"activation' over an input image that for a particular class "
"indicates how important each location in that image is to that "
"classifying that image or an object within as representative "
"of that class. We are being tricky in submitting an image "
"of a raven that the neural network finds ambiguous to classifiy "
"(the top 2 predicted classes are 'vulture' and 'magpie').\n")
file_path = path.join(path.dirname(getcwd()), 'data', 'raven.jpg')
class_activation_heatmap(file_path, architecture='vgg16',
odir=path.join(odir, 'class_activation_maps'))
def main():
try:
odir = path.join(path.dirname(getcwd()), 'output',
'visualizing_convnets')
makedirs(odir)
for name in ('class_activation_maps', 'filter_patterns',
'intermediate_activations'):
subdir = path.join(odir, name)
makedirs(subdir)
except OSError as err:
if err.errno != errno.EEXIST:
raise
trial_1(odir)
trial_2(odir)
trial_3(odir)
if __name__ == '__main__':
main()
|
|
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
HeaderID Extension for Python-Markdown
======================================
Auto-generate id attributes for HTML headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header #"
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header">Some Header</h1>
All header IDs are unique:
>>> text = '''
... #Header
... #Header
... #Header'''
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="header">Header</h1>
<h1 id="header_1">Header</h1>
<h1 id="header_2">Header</h1>
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> print md
<h3 id="some-header">Some Header</h3>
<h4 id="next-level">Next Level</h4>
Works with inline markup.
>>> text = '#Some *Header* with [markup](http://example.com).'
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header-with-markup">Some <em>Header</em> with <a href="http://example.com">markup</a>.</h1>
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Another Header'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> print md
<h1>Some Header</h1>
<h1>Another Header</h1>
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> print md
<h2>A Header</h2>
Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/header_id.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
import re
import logging
import unicodedata
logger = logging.getLogger('MARKDOWN')
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub(r'[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub(r'[%s\s]+' % separator, separator, value)
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
ids.add(id)
return id
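# Illustrative behaviour (not part of the original module):
#   ids = set()
#   unique('header', ids)  # -> 'header'
#   unique('header', ids)  # -> 'header_1'
#   unique('header', ids)  # -> 'header_2'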
def itertext(elem):
""" Loop through all children and return text only.
Reimplements method of same name added to ElementTree in Python 2.7
"""
if elem.text:
yield elem.text
for e in elem:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
class HeaderIdTreeprocessor(Treeprocessor):
""" Assign IDs to headers. """
IDs = set()
def run(self, doc):
start_level, force_id = self._get_meta()
slugify = self.config['slugify']
sep = self.config['separator']
for elem in doc.getiterator():
if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if force_id:
if "id" in elem.attrib:
id = elem.get('id')
else:
id = slugify(''.join(itertext(elem)), sep)
elem.set('id', unique(id, self.IDs))
if start_level:
level = int(elem.tag[-1]) + start_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level']) - 1
force = self._str2bool(self.config['forceid'])
if hasattr(self.md, 'Meta'):
if 'header_level' in self.md.Meta:
level = int(self.md.Meta['header_level'][0]) - 1
if 'header_forceid' in self.md.Meta:
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
class HeaderIdExtension(Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.'],
'separator' : ['-', 'Word separator.'],
'slugify' : [slugify, 'Callable to generate anchors'],
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdTreeprocessor()
self.processor.md = md
self.processor.config = self.getConfigs()
if 'attr_list' in md.treeprocessors.keys():
# insert after attr_list treeprocessor
md.treeprocessors.add('headerid', self.processor, '>attr_list')
else:
# insert after 'prettify' treeprocessor.
md.treeprocessors.add('headerid', self.processor, '>prettify')
def reset(self):
self.processor.IDs = set()
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)
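# Hedged usage sketch, mirroring the docstring examples at the top of this file:
#   import markdown
#   html = markdown.markdown('# Some Header #', ['headerid(level=2)'])
#   # -> '<h2 id="some-header">Some Header</h2>'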
|
|
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
"""
The :mod:`gruvi.dbus` module implements a D-BUS client and server.
The implementation uses parts of the `txdbus
<https://github.com/cocagne/txdbus>`_ project. A cut down copy of txdbus,
containing only those parts needed by Gruvi, is available as ``gruvi.txdbus``.
You need this if you are providing a message handler (see below).
Both a client and a server/bus-side implementation are provided. The bus-side
implementation is very bare bones and apart from the "Hello" message it does
not implement any of the "org.freedesktop.DBus" interface. It also does not
implement any message routing. The server side is provided mostly for testing
purposes (but it could serve as the basis for a real D-BUS server).
The client side of a D-BUS connection is implemented by :class:`DbusClient` and
the server/bus-side by :class:`DbusServer`. Both implement a procedural
interface. Messages can be sent using e.g. :meth:`DbusClient.send_message` or
:meth:`DbusClient.call_method`. An object-oriented interface that represents
D-BUS objects as Python objects, like the one txdbus provides, is currently not
available. The procedural interface can be used as a basis for your own
object-oriented interface though.
To receive notifications or to respond to method calls, you need to provide a
*message handler* to the client or the server constructor. The signature of the
message handler is: ``message_handler(message, protocol)``. Here, the *message*
argument is an instance of ``gruvi.txdbus.DbusMessages``, and the
*protocol* will be the :class:`DbusProtocol` instance for the current
connection.
Message handlers run in their own fiber, which allows them to call into
switchpoints. There is one fiber for every connection.
Usage example::
client = gruvi.DbusClient()
client.connect('session')
result = client.call_method('org.freedesktop.DBus', '/org/freedesktop/DBus',
'org.freedesktop.DBus', 'ListNames')
for name in result[0]:
print('Name: {}'.format(name))
"""
from __future__ import absolute_import, print_function
import os
import struct
import binascii
import codecs
import functools
import six
import pyuv
from . import compat
from .hub import switchpoint, switch_back
from .util import delegate_method
from .sync import Event
from .transports import TransportError
from .protocols import ProtocolError, MessageProtocol
from .stream import Stream
from .endpoints import Client, Server
from .address import saddr
from .vendor import txdbus
__all__ = ['DbusError', 'DbusMethodCallError', 'DbusProtocol', 'DbusClient', 'DbusServer']
class DbusError(ProtocolError):
"""Exception that is raised in case of D-BUS protocol errors."""
class DbusMethodCallError(DbusError):
"""Exception that is raised when a error reply is received for a D-BUS
method call."""
def __init__(self, method, reply):
message = 'error calling {!r} method ({})'.format(method, reply.error_name)
super(DbusMethodCallError, self).__init__(message)
self._error = reply.error_name
self._args = tuple(reply.body) if reply.body else ()
@property
def error(self):
return self._error
@property
def args(self):
return self._args
def parse_dbus_address(address):
"""Parse a D-BUS address string into a list of addresses."""
if address == 'session':
address = os.environ.get('DBUS_SESSION_BUS_ADDRESS')
if not address:
raise ValueError('$DBUS_SESSION_BUS_ADDRESS not set')
elif address == 'system':
address = os.environ.get('DBUS_SYSTEM_BUS_ADDRESS',
'unix:path=/var/run/dbus/system_bus_socket')
addresses = []
for addr in address.split(';'):
p1 = addr.find(':')
if p1 == -1:
raise ValueError('illegal address string: {}'.format(addr))
kind = addr[:p1]
args = dict((kv.split('=') for kv in addr[p1+1:].split(',')))
if kind == 'unix':
if 'path' in args:
addr = args['path']
elif 'abstract' in args:
addr = '\0' + args['abstract']
else:
raise ValueError('require "path" or "abstract" for unix')
elif kind == 'tcp':
if 'host' not in args or 'port' not in args:
raise ValueError('require "host" and "port" for tcp')
addr = (args['host'], int(args['port']))
else:
raise ValueError('unknown transport: {}'.format(kind))
addresses.append(addr)
return addresses
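# Illustrative examples (hypothetical addresses):
#
#   parse_dbus_address('unix:path=/run/user/1000/bus')
#   # -> ['/run/user/1000/bus']
#   parse_dbus_address('tcp:host=localhost,port=12345')
#   # -> [('localhost', 12345)]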
class TxdbusAuthenticator(object):
"""A adapter to use the txdbus client and server authenticators with our
transports and protocols."""
# For testing, cookie_dir is set to a temporary path. Otherwise, txdbus
# uses ~/.dbus-keyrings as specified in the spec.
cookie_dir = None
def __init__(self, transport, server_side, server_guid=None):
self._transport = transport
self._server_side = server_side
if self._server_side:
self._authenticator = txdbus.BusAuthenticator(server_guid)
self._authenticator.authenticators['DBUS_COOKIE_SHA1'].keyring_dir = self.cookie_dir
else:
self._authenticator = txdbus.ClientAuthenticator()
self._authenticator.cookie_dir = self.cookie_dir
self._authenticator.beginAuthentication(self)
def sendAuthMessage(self, message):
# Called by the txdbus authenticators
message = message.encode('ascii') + b'\r\n'
self._transport.write(message)
@property
def _unix_creds(self):
# Used by txdbus.BusExternalAuthenticator
return self._transport.get_extra_info('unix_creds')
def handleAuthMessage(self, line):
# Called by our protocol
self._authenticator.handleAuthMessage(line)
def authenticationSucceeded(self):
"""Return whether the authentication succeeded."""
return self._authenticator.authenticationSucceeded()
def getMechanismName(self):
"""Return the authentication mechanism name."""
if self._server_side:
mech = self._authenticator.current_mech
return mech.getMechanismName() if mech else None
else:
return getattr(self._authenticator, 'authMech', None)
def getUserName(self):
"""Return the authenticated user name (server side)."""
if not self._server_side:
return
mech = self._authenticator.current_mech
return mech.getUserName() if mech else None
def getGUID(self):
"""Return the GUID of the authenticated server."""
return self._authenticator.getGUID()
def parse_dbus_header(header):
"""Parse a D-BUS header. Return the message size."""
if six.indexbytes(header, 0) == ord('l'):
endian = '<'
elif six.indexbytes(header, 0) == ord('B'):
endian = '>'
else:
raise ValueError('illegal endianness')
if not 1 <= six.indexbytes(header, 1) <= 4:
raise ValueError('illegal message type')
if struct.unpack(endian + 'I', header[8:12])[0] == 0:
raise ValueError('illegal serial number')
harrlen = struct.unpack(endian + 'I', header[12:16])[0]
padlen = (8 - harrlen) % 8
bodylen = struct.unpack(endian + 'I', header[4:8])[0]
return 16 + harrlen + padlen + bodylen
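# Illustrative example: a header declaring a body length of 52 and a
# header-field array length of 109 yields 16 + 109 + 3 (padding to the next
# 8-byte boundary) + 52 = 180 bytes.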
def new_server_guid():
"""Return a new GUID for a server."""
return binascii.hexlify(os.urandom(16)).decode('ascii')
class DbusProtocol(MessageProtocol):
"""D-BUS Protocol."""
# According to the D-BUS spec the max message size is 128MB. However, since
# we want to limit memory usage, we are much more conservative here.
max_message_size = 128*1024
# Maximum size for an authentication line
max_line_size = 1000
_next_unique_name = 0
S_CREDS_BYTE, S_AUTHENTICATE, S_MESSAGE_HEADER, S_MESSAGE = range(4)
def __init__(self, message_handler=None, server_side=False, server_guid=None, timeout=None):
super(DbusProtocol, self).__init__(message_handler, timeout=timeout)
self._server_side = server_side
self._name_acquired = Event()
self._buffer = bytearray()
self._method_calls = {}
self._authenticator = None
if self._server_side:
self._server_guid = server_guid or new_server_guid()
self._unique_name = ':{}'.format(self._next_unique_name)
type(self)._next_unique_name += 1
else:
self._server_guid = None
self._unique_name = None
self._state = None
@property
def server_guid(self):
return self._server_guid
def connection_made(self, transport):
# Protocol callback
super(DbusProtocol, self).connection_made(transport)
# The client initiates by sending a '\0' byte, as per the D-BUS spec.
if self._server_side:
self._state = self.S_CREDS_BYTE
else:
self._state = self.S_AUTHENTICATE
self._transport.write(b'\0')
self._writer = Stream(transport, 'w')
self._authenticator = TxdbusAuthenticator(transport, self._server_side, self._server_guid)
self._message_size = 0
def connection_lost(self, exc):
# Protocol callback
super(DbusProtocol, self).connection_lost(exc)
if self._error is None:
self._error = TransportError('connection lost')
for notify in self._method_calls.values():
if isinstance(notify, switch_back):
notify.throw(self._error)
self._method_calls.clear()
self._name_acquired.set()
self._authenticator = None # break cycle
def on_creds_byte(self, byte):
if byte != 0:
self._error = DbusError('first byte needs to be zero')
return False
self._state = self.S_AUTHENTICATE
return True
def on_partial_auth_line(self, line):
if len(line) > self.max_line_size:
self._error = DbusError('auth line too long ({} bytes)'.format(len(line)))
return False
return True
def on_auth_line(self, line):
if not self.on_partial_auth_line(line):
return False
if line[-2:] != b'\r\n':
self._error = DbusError('auth line does not end with \\r\\n')
return False
try:
line = codecs.decode(line[:-2], 'ascii') # codecs.decode allows memoryview
except UnicodeDecodeError as e:
self._error = DbusError('auth line contains non-ascii chars')
return False
try:
self._authenticator.handleAuthMessage(line)
except txdbus.DBusAuthenticationFailed as e:
self._error = DbusError('authentication failed: {!s}'.format(e))
return False
if self._authenticator.authenticationSucceeded():
if not self._server_side:
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
'org.freedesktop.DBus', 'org.freedesktop.DBus')
self._transport.write(message.rawMessage)
self._method_calls[message.serial] = self.on_hello_response
self._state = self.S_MESSAGE_HEADER
self._server_guid = self._authenticator.getGUID()
return True
def on_hello_response(self, message):
self._unique_name = message.body[0]
self._name_acquired.set()
def on_message_header(self, header):
try:
size = parse_dbus_header(header)
except ValueError:
self._error = DbusError('invalid message header')
return False
if size > self.max_message_size:
self._error = DbusError('message too large ({} bytes)'.format(size))
return False
self._message_size = size
self._state = self.S_MESSAGE
return True
def on_message(self, message):
try:
parsed = txdbus.parseMessage(message)
except (txdbus.MarshallingError, struct.error) as e:
self._error = DbusError('parseMessage() error: {!s}'.format(e))
return False
if self._server_side and not self._name_acquired.is_set():
if isinstance(parsed, txdbus.MethodCallMessage) \
and parsed.member == 'Hello' \
and parsed.path == '/org/freedesktop/DBus' \
and parsed.interface == 'org.freedesktop.DBus' \
and parsed.destination == 'org.freedesktop.DBus':
response = txdbus.MethodReturnMessage(parsed.serial, signature='s',
body=[self._unique_name])
self._name_acquired.set()
self._transport.write(response.rawMessage)
else:
self._error = DbusError('Hello method not called')
return False
elif isinstance(parsed, (txdbus.MethodReturnMessage, txdbus.ErrorMessage)) \
and getattr(parsed, 'reply_serial', 0) in self._method_calls:
notify = self._method_calls.pop(parsed.reply_serial)
notify(parsed)
elif self._dispatcher:
self._queue.put_nowait(parsed)
else:
mtype = type(parsed).__name__[:-7].lower()
info = ' {!r}'.format(getattr(parsed, 'member', getattr(parsed, 'error_name', '')))
self._log.warning('no handler, ignoring inbound {}{}', mtype, info)
self._state = self.S_MESSAGE_HEADER
return True
def prepend_buffer(self, buf):
if self._buffer:
self._buffer.extend(buf)
buf = self._buffer
self._buffer = bytearray()
return memoryview(buf)
def data_received(self, data):
view = memoryview(data)
offset = 0
while offset != len(data):
if self._state == self.S_CREDS_BYTE:
credsbyte = six.indexbytes(view, offset)
offset += 1
if not self.on_creds_byte(credsbyte):
break
if self._state == self.S_AUTHENTICATE:
pos = data.find(b'\n', offset)
if pos == -1:
self._buffer.extend(view[offset:])
self.on_partial_auth_line(self._buffer)
break
line = self.prepend_buffer(view[offset:pos+1])
offset = pos+1
if not self.on_auth_line(line):
break
if self._state == self.S_MESSAGE_HEADER:
needbytes = 16 - len(self._buffer)
if len(data) - offset < needbytes:
self._buffer.extend(view[offset:])
break
header = self.prepend_buffer(view[offset:offset+needbytes])
if not self.on_message_header(header):
break
offset += len(header)
self._buffer.extend(header)
if self._state == self.S_MESSAGE:
needbytes = self._message_size - len(self._buffer)
if len(data) - offset < needbytes:
self._buffer.extend(view[offset:])
break
message = self.prepend_buffer(view[offset:offset+needbytes])
offset += needbytes
if not self.on_message(message):
break
self._maybe_pause_transport()
if self._error:
self._transport.close()
return
@switchpoint
def get_unique_name(self):
"""Return the unique name of the D-BUS connection."""
self._name_acquired.wait()
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise DbusError('not connected')
return self._unique_name
@switchpoint
def send_message(self, message):
"""Send a D-BUS message.
The *message* argument must be a ``gruvi.txdbus.DbusMessage`` instance.
"""
if not isinstance(message, txdbus.DbusMessage):
raise TypeError('message: expecting DbusMessage instance (got {!r})'
.format(type(message).__name__))
self._name_acquired.wait()
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise DbusError('not connected')
self._writer.write(message.rawMessage)
@switchpoint
def call_method(self, service, path, interface, method, signature=None,
args=None, no_reply=False, auto_start=False, timeout=-1):
"""Call a D-BUS method and wait for its reply.
This method calls the D-BUS method with name *method* that resides on
the object at bus address *service*, at path *path*, on interface
*interface*.
The *signature* and *args* are optional arguments that can be used to
add parameters to the method call. The signature is a D-BUS signature
string, while *args* must be a sequence of python types that can be
converted into the types specified by the signature. See the `D-BUS
specification
<http://dbus.freedesktop.org/doc/dbus-specification.html>`_ for a
reference on signature strings.
The flags *no_reply* and *auto_start* control the NO_REPLY_EXPECTED and
NO_AUTO_START flags on the D-BUS message.
The return value is the result of the D-BUS method call. This will be a
possibly empty sequence of values.
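For example, to list the names registered on the bus (mirroring the module
level usage example; the same method is available on :class:`DbusClient`
through delegation)::

    result = client.call_method('org.freedesktop.DBus', '/org/freedesktop/DBus',
                                'org.freedesktop.DBus', 'ListNames')
    names = result[0]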
"""
message = txdbus.MethodCallMessage(path, method, interface=interface,
destination=service, signature=signature, body=args,
expectReply=not no_reply, autoStart=auto_start)
serial = message.serial
if timeout == -1:
timeout = self._timeout
try:
with switch_back(timeout) as switcher:
self._method_calls[serial] = switcher
self.send_message(message)
args, _ = self._hub.switch()
finally:
self._method_calls.pop(serial, None)
response = args[0]
assert response.reply_serial == serial
if isinstance(response, txdbus.ErrorMessage):
raise DbusMethodCallError(method, response)
args = tuple(response.body) if response.body else ()
return args
class DbusClient(Client):
"""A D-BUS client."""
def __init__(self, message_handler=None, timeout=30):
"""
The *message_handler* argument specifies an optional message handler.
The optional *timeout* argument specifies a default timeout for
protocol operations in seconds.
"""
protocol_factory = functools.partial(DbusProtocol, message_handler)
super(DbusClient, self).__init__(protocol_factory, timeout)
@switchpoint
def connect(self, address='session'):
"""Connect to *address* and wait until the connection is established.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to connect to the D-BUS
session and system bus, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusClient, self).connect(addr)
except pyuv.error.UVError:
continue
break
else:
raise DbusError('could not connect to any address')
# Wait for authentication to complete
self.get_unique_name()
protocol = Client.protocol
delegate_method(protocol, DbusProtocol.get_unique_name)
delegate_method(protocol, DbusProtocol.send_message)
delegate_method(protocol, DbusProtocol.call_method)
class DbusServer(Server):
"""A D-BUS server."""
def __init__(self, message_handler, timeout=30):
"""
The *message_handler* argument specifies the message handler.
The optional *timeout* argument specifies a default timeout for
protocol operations in seconds.
"""
protocol_factory = functools.partial(DbusProtocol, message_handler,
server_side=True)
super(DbusServer, self).__init__(protocol_factory, timeout)
@switchpoint
def listen(self, address='session'):
"""Start listening on *address* for new connection.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to listen on the D-BUS
session and system bus, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusServer, self).listen(addr)
except pyuv.error.UVError:
self._log.error('skipping address {}', saddr(addr))
|
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
import json
import re
import socket
import unicodedata
from base64 import standard_b64decode, urlsafe_b64decode
from collections import OrderedDict
from decimal import Decimal as DecimalType
from typing import Any, Callable, Optional, Sequence, Text, Union
from uuid import UUID
from xml.etree.ElementTree import Element, tostring
# noinspection PyCompatibility
import regex
from six import PY2, PY3, binary_type, moves as compat, \
python_2_unicode_compatible, text_type
from filters.base import BaseFilter, Type
from filters.simple import MaxLength
__all__ = [
'Base64Decode',
'ByteString',
'CaseFold',
'IpAddress',
'JsonDecode',
'MaxBytes',
'Regex',
'Split',
'Strip',
'Unicode',
'Uuid',
]
class Base64Decode(BaseFilter):
"""
Decodes an incoming value using the Base64 algo.
"""
CODE_INVALID = 'not_base64'
templates = {
CODE_INVALID: 'Base64-encoded value expected.',
}
def __init__(self):
super(Base64Decode, self).__init__()
self.whitespace_re = regex.compile(b'[ \t\r\n]+', regex.ASCII)
self.base64_re = regex.compile(b'^[-+_/A-Za-z0-9=]+$', regex.ASCII)
def _apply(self, value):
value = self._filter(value, Type(binary_type)) # type: binary_type
if self._has_errors:
return None
# Strip out whitespace.
# Technically, whitespace is not part of the Base64 alphabet,
# but virtually every implementation allows it.
value = self.whitespace_re.sub(b'', value)
# Check for invalid characters.
# Note that Python 3's b64decode does this for us, but we also
# have to support Python 2.
# https://docs.python.org/3/library/base64.html#base64.b64decode
if not self.base64_re.match(value):
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
)
# Check to see if we are working with a URL-safe dialect.
# https://en.wikipedia.org/wiki/Base64#URL_applications
if (b'_' in value) or (b'-' in value):
# You can't mix dialects, silly!
if (b'+' in value) or (b'/' in value):
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
)
url_safe = True
else:
url_safe = False
# Normalize padding.
# http://stackoverflow.com/a/9807138/
value = value.rstrip(b'=')
value += (b'=' * (4 - (len(value) % 4)))
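# e.g. b'SGVsbG8' (7 chars) gets a single b'=' appended and then
# decodes to b'Hello'.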
try:
return (
urlsafe_b64decode(value)
if url_safe
else standard_b64decode(value)
)
except TypeError:
return self._invalid_value(value, self.CODE_INVALID, exc_info=True)
# noinspection SpellCheckingInspection
class CaseFold(BaseFilter):
"""
Applies case folding to an incoming string, allowing you to perform
case-insensitive comparisons.
The result tends to be lowercase, but it is recommended that you
NOT treat CaseFold as a Unicode-aware lowercase filter! The
proper way to lowercase a string is very much locale-dependent.
Note that the built-in :py:meth:`str.upper` and
:py:meth:`str.lower` methods tend to do a pretty good job of properly
changing the case of unicode strings.
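For example, ``'Straße'.casefold()`` yields ``'strasse'``, while
``'Straße'.lower()`` keeps the sharp s and yields ``'straße'``.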
References:
- http://www.w3.org/International/wiki/Case_folding
- https://docs.python.org/3/library/stdtypes.html#str.lower
- https://docs.python.org/3/library/stdtypes.html#str.upper
"""
def _apply(self, value):
value = self._filter(value, Type(text_type)) # type: Text
if self._has_errors:
return None
# In Python 3, case folding is supported natively.
# In Python 2, this is the best we can do.
# https://docs.python.org/3/library/stdtypes.html#str.casefold
if PY3:
# noinspection PyUnresolvedReferences
return value.casefold()
else:
# noinspection PyUnresolvedReferences
from py2casefold import casefold
return casefold(value)
@python_2_unicode_compatible
class IpAddress(BaseFilter):
"""
Validates an incoming value as an IPv[46] address.
"""
CODE_INVALID = 'not_ip_address'
templates = {
CODE_INVALID: 'This value is not a valid {ip_type} address.',
}
def __init__(self, ipv4=True, ipv6=False):
# type: (bool, bool) -> None
super(IpAddress, self).__init__()
self.ipv4 = ipv4
self.ipv6 = ipv6
def __str__(self):
return '{type}(ipv4={ipv4!r}, ipv6={ipv6!r})'.format(
type = type(self).__name__,
ipv4 = self.ipv4,
ipv6 = self.ipv6,
)
@property
def ip_type(self):
# type: () -> Text
"""
Returns the IP address versions that this Filter accepts.
"""
return '/'.join(filter(None, [
'IPv4' if self.ipv4 else None,
'IPv6' if self.ipv6 else None,
]))
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
# http://stackoverflow.com/a/4017219
if self.ipv4:
try:
socket.inet_pton(socket.AF_INET, value)
except socket.error:
pass
else:
return value
if self.ipv6:
try:
n = socket.inet_pton(socket.AF_INET6, value)
except socket.error:
pass
else:
# Convert the binary value back into a string
# representation so that the end result is
# normalized.
# https://en.wikipedia.org/wiki/IPv6_address#Presentation
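# e.g. '2001:0db8:0000:0000:0000:0000:0000:0001' typically
# round-trips to '2001:db8::1'.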
return socket.inet_ntop(socket.AF_INET6, n)
# If we get here, we failed the above checks (or the Filter is
# configured not to allow anything through).
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
template_vars = {
'ip_type': self.ip_type
},
)
class JsonDecode(BaseFilter):
"""
Interprets the value as JSON.
JSON objects are converted to OrderedDict instances so that key
order is preserved.
"""
CODE_INVALID = 'not_json'
templates = {
CODE_INVALID: 'This value is not valid JSON.',
}
def __init__(self, decoder=json.loads):
# type: (Callable[..., Any]) -> None
super(JsonDecode, self).__init__()
self.decoder = decoder
def _apply(self, value):
value = self._filter(value, Type(text_type)) # type: Text
if self._has_errors:
return None
try:
# :see: http://stackoverflow.com/a/6921760
return self.decoder(value, object_pairs_hook=OrderedDict)
except ValueError:
return self._invalid_value(value, self.CODE_INVALID, exc_info=True)
@python_2_unicode_compatible
class MaxBytes(BaseFilter):
"""
Ensures that an incoming string value is small enough to fit into a
specified number of bytes when encoded.
Note: The resulting value is a byte string, even if you provide a
unicode.
"""
CODE_TOO_LONG = 'too_long'
templates = {
CODE_TOO_LONG:
'Value is too long (must be < {max_bytes} '
'bytes when encoded using {encoding}).',
}
def __init__(
self,
max_bytes,
truncate = True,
prefix = '',
encoding = 'utf-8',
):
# type: (int, bool, Text, Text) -> None
"""
:param max_bytes:
Max number of bytes to allow.
:param truncate:
Whether to truncate values that are too long.
Set this to ``False`` to save system resources when you
know that you will reject values that are too long.
:param prefix:
Prefix to apply to truncated values.
Ignored when ``truncate`` is ``False``.
:param encoding:
The character encoding to check against.
Note: This filter is optimized for UTF-8.
"""
super(MaxBytes, self).__init__()
self.encoding = encoding
self.max_bytes = max_bytes
self.prefix = prefix
self.truncate = truncate
def __str__(self):
return '{type}({max_bytes!r}, encoding={encoding!r})'.format(
type = type(self).__name__,
max_bytes = self.max_bytes,
encoding = self.encoding,
)
def _apply(self, value):
"""
:return:
Returns bytes, truncated to the correct length.
Note: Might be a bit shorter than the max length, to avoid
orphaning a multibyte sequence.
"""
value = self._filter(
value = value,
filter_chain = (
Type((binary_type, text_type,))
| Unicode(encoding=self.encoding)
),
) # type: Text
if self._has_errors:
return None
str_value = value.encode(self.encoding)
if len(str_value) > self.max_bytes:
replacement = (
self.truncate_string(
# Ensure that we convert back to unicode before
# adding the prefix, just in case `self.encoding`
# indicates a codec that uses a BOM.
value = self.prefix + value,
max_bytes = self.max_bytes,
encoding = self.encoding,
)
if self.truncate
else None
)
return self._invalid_value(
value = value,
reason = self.CODE_TOO_LONG,
replacement = replacement,
context = {
'encoding': self.encoding,
'max_bytes': self.max_bytes,
'prefix': self.prefix,
'truncate': self.truncate,
},
)
return str_value
@staticmethod
def truncate_string(value, max_bytes, encoding):
# type: (Text, int, Text) -> binary_type
"""
Truncates a string value to the specified number of bytes.
:return:
Returns bytes, truncated to the correct length.
Note: Might be a bit shorter than `max_bytes`, to avoid
orphaning a multibyte sequence.
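Illustrative example: truncating ``'日本語'`` (9 bytes in UTF-8) to
``max_bytes=7`` returns the 6-byte encoding of ``'日本'``, because keeping
the 7th byte would orphan the lead byte of ``'語'``.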
"""
# Convert to bytearray so that we get the same handling in
# Python 2 and Python 3.
bytes_ = bytearray(value.encode(encoding))
# Truncating the value is a bit tricky, as we have to be
# careful not to leave an unterminated multibyte sequence.
if encoding.lower() in ['utf-8', 'utf8']:
#
# This code works a bit faster than the generic routine
# (see below) because we only have to inspect up to 4
# bytes from the end of the encoded value instead of
# having to repeatedly decode the entire string.
#
# But, it only works for UTF-8.
#
truncated = bytes_[0:max_bytes]
# Walk backwards through the string until we hit certain
# sequences.
for i, o in enumerate(reversed(truncated), start=1):
# If the final byte is not part of a multibyte
# sequence, then we can stop right away; there is no
# need to remove anything.
if (i < 2) and (o < 0b10000000):
break
# If this byte is a leading byte (the first byte in a
# multibyte sequence), determine how many bytes we
# need to strip off the end of the string so that we
# can decode it back into a unicode if needed.
if o >= 0b11000000:
# Note: Assuming max 4 bytes per sequence.
# Should be good enough until extraterrestrial
# languages are encountered.
seq_length = (
4 if o >= 0b11110000 else
3 if o >= 0b11100000 else
2
)
# Now that we know how many bytes are in the final
# sequence, check to see if it is complete, and
# discard it if it is incomplete.
if seq_length != i:
truncated = truncated[0:-i]
break
# Else, we have a continuation byte. Continue walking
# backwards through the string.
return truncated
else:
trim = 0
while True:
# Progressively chop bytes off the end of the string
# until we have something that can be successfully
# decoded using the specified encoding.
truncated = bytes_[0:max_bytes - trim]
try:
truncated.decode(encoding)
except UnicodeDecodeError:
trim += 1
else:
return binary_type(truncated)
# We should never get here, but just in case, we need
# to ensure the loop eventually terminates (Python
# won't error if ``max_bytes - trim`` goes negative,
# since the slice operator accepts negative values).
if trim >= max_bytes:
raise ValueError(
'Unable to truncate {bytes_!r} to {max_bytes} '
'bytes when encoded using {encoding}.'.format(
bytes_ = bytes_,
max_bytes = max_bytes,
encoding = encoding,
),
)
@python_2_unicode_compatible
class Regex(BaseFilter):
"""
Matches a regular expression in the value.
IMPORTANT: This filter returns a LIST of all sequences in the
input value that matched the regex!
IMPORTANT: This Filter uses the ``regex`` library, which behaves
slightly differently than Python's ``re`` library.
If you've never used ``regex`` before, try it; you'll never want to
go back!
References:
- https://pypi.python.org/pypi/regex
"""
CODE_INVALID = 'malformed'
templates = {
CODE_INVALID:
'Value does not match regular expression {pattern}.',
}
# noinspection PyProtectedMember
def __init__(self, pattern):
# type: (Union[Text, regex._pattern_type, re._pattern_type]) -> None
"""
:param pattern:
String pattern, or pre-compiled regex.
IMPORTANT: If you specify your own compiled regex, be sure to
add the ``UNICODE`` flag for Unicode support!
"""
super(Regex, self).__init__()
self.regex = (
pattern
if isinstance(pattern, (regex._pattern_type, re._pattern_type))
else regex.compile(pattern, regex.UNICODE)
)
def __str__(self):
return '{type}({pattern!r})'.format(
type = type(self).__name__,
pattern = self.regex.pattern,
)
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
matches = [
match.group(0)
for match in self.regex.finditer(value)
]
if not matches:
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
template_vars = {
'pattern': self.regex.pattern,
},
)
return matches
@python_2_unicode_compatible
class Split(BaseFilter):
"""
Splits an incoming string into parts.
The result is either a list or an OrderedDict, depending on whether
you specify keys to map to the result.
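For example (illustrative pattern and keys), splitting ``'x, y'`` with
``pattern=r',\s*'`` and ``keys=('a', 'b', 'c')`` yields
``OrderedDict([('a', 'x'), ('b', 'y'), ('c', None)])``; missing values are
padded with ``None`` via ``zip_longest``.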
"""
# noinspection PyProtectedMember
def __init__(self, pattern, keys=None):
# type: (Union[Text, regex._pattern_type, re._pattern_type], Optional[Sequence[Text]]) -> None
"""
:param pattern:
Regex used to split incoming string values.
IMPORTANT: If you specify your own compiled regex, be sure
to add the ``UNICODE`` flag for Unicode support!
:param keys:
If set, the resulting list will be converted into an
OrderedDict, using the specified keys.
IMPORTANT: If ``keys`` is set, the split value's length
must be less than or equal to ``len(keys)``.
"""
super(Split, self).__init__()
self.regex = (
pattern
if isinstance(pattern, (regex._pattern_type, re._pattern_type))
else regex.compile(pattern, regex.UNICODE)
)
self.keys = keys
def __str__(self):
return '{type}({pattern!r}, keys={keys!r})'.format(
type = type(self).__name__,
pattern = self.regex.pattern,
keys = self.keys,
)
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
split = self.regex.split(value)
if self.keys:
# The split value can have at most as many items as
# ``self.keys``.
split = self._filter(split, MaxLength(len(self.keys)))
if self._has_errors:
return None
return OrderedDict(compat.zip_longest(self.keys, split))
else:
return split
@python_2_unicode_compatible
class Strip(BaseFilter):
"""
Strips characters (whitespace and non-printables by default) from
the end(s) of a string.
IMPORTANT: This Filter uses the ``regex`` library, which behaves
slightly differently than Python's ``re`` library.
If you've never used ``regex`` before, try it; you'll never want to
go back!
"""
def __init__(self, leading=r'[\p{C}\s]+', trailing=r'[\p{C}\s]+'):
# type: (Text, Text) -> None
"""
:param leading:
Regex to match at the start of the string.
:param trailing:
Regex to match at the end of the string.
"""
super(Strip, self).__init__()
if leading:
self.leading = regex.compile(
r'^{pattern}'.format(pattern=leading),
regex.UNICODE,
)
else:
self.leading = None
if trailing:
self.trailing = regex.compile(
r'{pattern}$'.format(pattern=trailing),
regex.UNICODE,
)
else:
self.trailing = None
def __str__(self):
return '{type}(leading={leading!r}, trailing={trailing!r})'.format(
type = type(self).__name__,
leading = self.leading.pattern if self.leading else None,
trailing = self.trailing.pattern if self.trailing else None,
)
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
if self.leading:
value = self.leading.sub('', value)
if self.trailing:
value = self.trailing.sub('', value)
return value
@python_2_unicode_compatible
class Unicode(BaseFilter):
"""
Converts a value into a unicode string.
Note: By default, additional normalization is applied to the
resulting value. See the initializer docstring for more info.
References:
- https://docs.python.org/2/howto/unicode.html
- https://en.wikipedia.org/wiki/Unicode_equivalence
"""
CODE_DECODE_ERROR = 'wrong_encoding'
templates = {
CODE_DECODE_ERROR: 'This value cannot be decoded using {encoding}.',
}
def __init__(self, encoding='utf-8', normalize=True):
# type: (Text, bool) -> None
"""
:param encoding:
Used to decode non-unicode values.
:param normalize:
Whether to normalize the resulting value:
- Convert to NFC form.
- Remove non-printable characters.
- Convert all line endings to unix-style ('\n').
"""
super(Unicode, self).__init__()
self.encoding = encoding
self.normalize = normalize
if self.normalize:
#
# Compile the regex that we will use to remove non-
# printables from the resulting unicode.
# http://www.regular-expressions.info/unicode.html#category
#
# Note: using a double negative so that we can exclude
# newlines, which are technically considered control chars.
# http://stackoverflow.com/a/3469155
#
self.npr = regex.compile(r'[^\P{C}\s]+', regex.UNICODE)
def __str__(self):
return '{type}(encoding={encoding!r})'.format(
type = type(self).__name__,
encoding = self.encoding,
)
def _apply(self, value):
try:
if isinstance(value, text_type):
decoded = value
elif isinstance(value, binary_type):
decoded = value.decode(self.encoding)
elif isinstance(value, bool):
decoded = text_type(int(value))
# In Python 3, ``bytes(<int>)`` does weird things.
# https://www.python.org/dev/peps/pep-0467/
elif isinstance(value, (int, float)):
decoded = text_type(value)
elif isinstance(value, DecimalType):
decoded = format(value, 'f')
elif isinstance(value, Element):
# There's no way (that I know of) to get
# :py:meth:`ElementTree.tostring` to return a unicode.
decoded = tostring(value, 'utf-8').decode('utf-8')
elif (
PY2 and hasattr(value, '__str__')
or PY3 and hasattr(value, '__bytes__')
):
decoded = binary_type(value).decode(self.encoding)
else:
decoded = text_type(value)
except UnicodeDecodeError:
return self._invalid_value(
value = value,
reason = self.CODE_DECODE_ERROR,
exc_info = True,
template_vars = {
'encoding': self.encoding,
},
)
if self.normalize:
return (
# Return the final string in composed form.
# https://en.wikipedia.org/wiki/Unicode_equivalence
unicodedata.normalize('NFC',
# Remove non-printables.
self.npr.sub('', decoded)
)
# Normalize line endings.
# http://stackoverflow.com/a/1749887
.replace('\r\n', '\n')
.replace('\r', '\n')
)
else:
return decoded
class ByteString(Unicode):
"""
Converts a value into a byte string, encoded as UTF-8.
IMPORTANT: This filter returns bytes objects, not bytearrays!
"""
def __init__(self, encoding='utf-8', normalize=False):
# type: (Text, bool) -> None
"""
:param encoding:
Used to decode non-unicode values.
:param normalize:
Whether to normalize the unicode value before converting
back into bytes:
- Convert to NFC form.
- Remove non-printable characters.
- Convert all line endings to unix-style ('\n').
Note that ``normalize`` is ``False`` by default for
:py:class:`ByteString`, but ``True`` by default for
:py:class:`Unicode`.
"""
super(ByteString, self).__init__(encoding, normalize)
# noinspection SpellCheckingInspection
def _apply(self, value):
decoded = super(ByteString, self)._apply(value) # type: Text
#
# No need to catch UnicodeEncodeErrors here; UTF-8 can handle
# any unicode value.
#
# Technically, we could get this error if we encounter a code
# point beyond U+10FFFF (the highest valid code point in the
# Unicode standard).
#
# However, it's not possible to create a `unicode` object with
# an invalid code point, so we wouldn't even be able to get
# this far if the incoming value contained a character that
# can't be represented using UTF-8.
#
# Note that in some versions of Python, it is possible (albeit
# really difficult) to trick Python into creating unicode
# objects with invalid code points, but it generally requires
# using specific codecs that aren't UTF-8.
#
# Example of exploit and release notes from the Python release
# (2.7.6) that fixes the issue:
#
# - https://gist.github.com/rspeer/7559750
# - https://hg.python.org/cpython/raw-file/99d03261c1ba/Misc/NEWS
#
# Normally we return ``None`` if we get any errors, but in this
# case, we'll let the superclass method decide.
return decoded if self._has_errors else decoded.encode('utf-8')
@python_2_unicode_compatible
class Uuid(BaseFilter):
"""
Interprets an incoming value as a UUID.
"""
CODE_INVALID = 'not_uuid'
CODE_WRONG_VERSION = 'wrong_version'
templates = {
CODE_INVALID: 'This value is not a well-formed UUID.',
CODE_WRONG_VERSION:
'v{incoming} UUID not allowed (expected v{expected}).',
}
def __init__(self, version=None):
# type: (Optional[int]) -> None
"""
:param version:
If specified, requires the resulting UUID to match the
specified version.
References:
- https://en.wikipedia.org/wiki/Uuid#RFC_4122_Variant
"""
super(Uuid, self).__init__()
self.version = version
def __str__(self):
return '{type}(version={version!r})'.format(
type = type(self).__name__,
version = self.version,
)
def _apply(self, value):
value = self._filter(value, Type((text_type, UUID,))) # type: Union[Text, UUID]
if self._has_errors:
return None
try:
uuid = (
value
if isinstance(value, UUID)
else UUID(hex=value)
)
except ValueError:
return self._invalid_value(value, self.CODE_INVALID, exc_info=True)
else:
if self.version not in (None, uuid.version):
return self._invalid_value(
value = text_type(uuid),
reason = self.CODE_WRONG_VERSION,
context = {
'expected': self.version,
'incoming': uuid.version,
},
)
return uuid
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
from werkzeug.wrappers import Response
import frappe
import frappe.utils
import frappe.sessions
from frappe.utils import cint
from frappe import _, is_whitelisted
from frappe.utils.response import build_response
from frappe.utils.csvutils import build_csv_response
from frappe.utils.image import optimize_image
from mimetypes import guess_type
from frappe.core.doctype.server_script.server_script_utils import get_server_script_map
ALLOWED_MIMETYPES = ('image/png', 'image/jpeg', 'application/pdf', 'application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.oasis.opendocument.text', 'application/vnd.oasis.opendocument.spreadsheet')
def handle():
"""handle request"""
cmd = frappe.local.form_dict.cmd
data = None
if cmd != 'login':
data = execute_cmd(cmd)
# data can be an empty string or list which are valid responses
if data is not None:
if isinstance(data, Response):
# method returns a response object, pass it on
return data
# add the response to `message` label
frappe.response['message'] = data
return build_response("json")
def execute_cmd(cmd, from_async=False):
"""execute a request as python module"""
for hook in frappe.get_hooks("override_whitelisted_methods", {}).get(cmd, []):
# override using the first hook
cmd = hook
break
# via server script
server_script = get_server_script_map().get('_api', {}).get(cmd)
if server_script:
return run_server_script(server_script)
try:
method = get_attr(cmd)
except Exception as e:
frappe.throw(_('Failed to get method for command {0} with {1}').format(cmd, e))
if from_async:
method = method.queue
if method != run_doc_method:
is_whitelisted(method)
is_valid_http_method(method)
return frappe.call(method, **frappe.form_dict)
def run_server_script(server_script):
response = frappe.get_doc('Server Script', server_script).execute_method()
# some server scripts return output using flags (empty dict by default),
# while others directly modify frappe.response
# return flags if not empty dict (this overwrites frappe.response.message)
if response != {}:
return response
def is_valid_http_method(method):
if frappe.flags.in_safe_exec:
return
http_method = frappe.local.request.method
if http_method not in frappe.allowed_http_methods_for_whitelisted_func[method]:
throw_permission_error()
def throw_permission_error():
frappe.throw(_("Not permitted"), frappe.PermissionError)
@frappe.whitelist(allow_guest=True)
def version():
return frappe.__version__
@frappe.whitelist(allow_guest=True)
def logout():
frappe.local.login_manager.logout()
frappe.db.commit()
@frappe.whitelist(allow_guest=True)
def web_logout():
frappe.local.login_manager.logout()
frappe.db.commit()
frappe.respond_as_web_page(_("Logged Out"), _("You have been successfully logged out"),
indicator_color='green')
@frappe.whitelist()
def uploadfile():
ret = None
try:
if frappe.form_dict.get('from_form'):
try:
ret = frappe.get_doc({
"doctype": "File",
"attached_to_name": frappe.form_dict.docname,
"attached_to_doctype": frappe.form_dict.doctype,
"attached_to_field": frappe.form_dict.docfield,
"file_url": frappe.form_dict.file_url,
"file_name": frappe.form_dict.filename,
"is_private": frappe.utils.cint(frappe.form_dict.is_private),
"content": frappe.form_dict.filedata,
"decode": True
})
ret.save()
except frappe.DuplicateEntryError:
# ignore duplicate entry; roll back and continue
ret = None
frappe.db.rollback()
else:
if frappe.form_dict.get('method'):
method = frappe.get_attr(frappe.form_dict.method)
is_whitelisted(method)
ret = method()
except Exception:
frappe.errprint(frappe.utils.get_traceback())
frappe.response['http_status_code'] = 500
ret = None
return ret
@frappe.whitelist(allow_guest=True)
def upload_file():
user = None
if frappe.session.user == 'Guest':
if frappe.get_system_settings('allow_guests_to_upload_files'):
ignore_permissions = True
else:
return
else:
user = frappe.get_doc("User", frappe.session.user)
ignore_permissions = False
files = frappe.request.files
is_private = frappe.form_dict.is_private
doctype = frappe.form_dict.doctype
docname = frappe.form_dict.docname
fieldname = frappe.form_dict.fieldname
file_url = frappe.form_dict.file_url
folder = frappe.form_dict.folder or 'Home'
method = frappe.form_dict.method
filename = frappe.form_dict.file_name
optimize = frappe.form_dict.optimize
content = None
if 'file' in files:
file = files['file']
content = file.stream.read()
filename = file.filename
content_type = guess_type(filename)[0]
if optimize and content_type.startswith("image/"):
args = {
"content": content,
"content_type": content_type
}
if frappe.form_dict.max_width:
args["max_width"] = int(frappe.form_dict.max_width)
if frappe.form_dict.max_height:
args["max_height"] = int(frappe.form_dict.max_height)
content = optimize_image(**args)
frappe.local.uploaded_file = content
frappe.local.uploaded_filename = filename
if not file_url and (frappe.session.user == "Guest" or (user and not user.has_desk_access())):
filetype = guess_type(filename)[0]
if filetype not in ALLOWED_MIMETYPES:
frappe.throw(_("You can only upload JPG, PNG, PDF, or Microsoft documents."))
if method:
method = frappe.get_attr(method)
is_whitelisted(method)
return method()
else:
ret = frappe.get_doc({
"doctype": "File",
"attached_to_doctype": doctype,
"attached_to_name": docname,
"attached_to_field": fieldname,
"folder": folder,
"file_name": filename,
"file_url": file_url,
"is_private": cint(is_private),
"content": content
})
ret.save(ignore_permissions=ignore_permissions)
return ret
def get_attr(cmd):
"""get method object from cmd"""
if '.' in cmd:
method = frappe.get_attr(cmd)
else:
method = globals()[cmd]
frappe.log("method:" + cmd)
return method
@frappe.whitelist(allow_guest=True)
def ping():
return "pong"
def run_doc_method(method, docs=None, dt=None, dn=None, arg=None, args=None):
"""run a whitelisted controller method"""
import json
import inspect
if not args:
args = arg or ""
if dt: # not called from a doctype (from a page)
if not dn:
dn = dt # single
doc = frappe.get_doc(dt, dn)
else:
if isinstance(docs, str):
docs = json.loads(docs)
doc = frappe.get_doc(docs)
doc._original_modified = doc.modified
doc.check_if_latest()
if not doc or not doc.has_permission("read"):
throw_permission_error()
try:
args = json.loads(args)
except ValueError:
args = args
method_obj = getattr(doc, method)
fn = getattr(method_obj, '__func__', method_obj)
is_whitelisted(fn)
is_valid_http_method(fn)
fnargs = inspect.getfullargspec(method_obj).args
if not fnargs or (len(fnargs)==1 and fnargs[0]=="self"):
response = doc.run_method(method)
elif "args" in fnargs or not isinstance(args, dict):
response = doc.run_method(method, args)
else:
response = doc.run_method(method, **args)
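# e.g. a hypothetical controller method `def set_status(self, status)` called
# with args={"status": "Open"} is dispatched via the **args branch above.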
frappe.response.docs.append(doc)
if response is None:
return
# build output as csv
if cint(frappe.form_dict.get('as_csv')):
build_csv_response(response, _(doc.doctype).replace(' ', ''))
return
frappe.response['message'] = response
# for backwards compatibility
runserverobj = run_doc_method
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Daniel Kraft
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test the merge-mining RPC interface:
# getauxblock, createauxblock, submitauxblock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
)
from test_framework.auxpow import reverseHex
from test_framework.auxpow_testing import (
computeAuxpow,
getCoinbaseAddr,
mineAuxpowBlockWithMethods,
)
from decimal import Decimal
class AuxpowMiningTest (BitcoinTestFramework):
def set_test_params (self):
self.num_nodes = 2
def skip_test_if_missing_module (self):
self.skip_if_no_wallet ()
def add_options (self, parser):
parser.add_argument ("--segwit", dest="segwit", default=False,
action="store_true",
help="Test behaviour with SegWit active")
def run_test (self):
# Activate segwit if requested.
if self.options.segwit:
self.generate (self.nodes[0], 500)
self.sync_all ()
# Test with getauxblock and createauxblock/submitauxblock.
self.test_getauxblock ()
self.test_create_submit_auxblock ()
def test_common (self, create, submit):
"""
Common test code that is shared between the tests for getauxblock and the
createauxblock / submitauxblock method pair.
"""
# Verify data that can be found in another way.
auxblock = create ()
assert_equal (auxblock['chainid'], 1)
assert_equal (auxblock['height'], self.nodes[0].getblockcount () + 1)
assert_equal (auxblock['previousblockhash'],
self.nodes[0].getblockhash (auxblock['height'] - 1))
# Calling again should give the same block.
auxblock2 = create ()
assert_equal (auxblock2, auxblock)
# If we receive a new block, the old hash will be replaced.
self.sync_all ()
self.generate (self.nodes[1], 1)
self.sync_all ()
auxblock2 = create ()
assert auxblock['hash'] != auxblock2['hash']
assert_raises_rpc_error (-8, 'block hash unknown', submit,
auxblock['hash'], "x")
# Invalid format for auxpow.
assert_raises_rpc_error (-1, None, submit,
auxblock2['hash'], "x")
# Invalidate the block again, send a transaction and query for the
# auxblock to solve that contains the transaction.
self.generate (self.nodes[0], 1)
addr = self.nodes[1].get_deterministic_priv_key ().address
txid = self.nodes[0].sendtoaddress (addr, 1)
self.sync_all ()
assert_equal (self.nodes[1].getrawmempool (), [txid])
auxblock = create ()
target = reverseHex (auxblock['_target'])
# Cross-check target value with GBT to make explicitly sure that it is
# correct (not just implicitly by successfully mining blocks for it
# later on).
gbt = self.nodes[0].getblocktemplate ({"rules": ["segwit"]})
assert_equal (target, gbt['target'].encode ("ascii"))
# Compute invalid auxpow.
apow = computeAuxpow (auxblock['hash'], target, False)
res = submit (auxblock['hash'], apow)
assert not res
# Compute and submit valid auxpow.
apow = computeAuxpow (auxblock['hash'], target, True)
res = submit (auxblock['hash'], apow)
assert res
# Make sure that the block is indeed accepted.
self.sync_all ()
assert_equal (self.nodes[1].getrawmempool (), [])
height = self.nodes[1].getblockcount ()
assert_equal (height, auxblock['height'])
assert_equal (self.nodes[1].getblockhash (height), auxblock['hash'])
# Call getblock and verify the auxpow field.
data = self.nodes[1].getblock (auxblock['hash'])
assert 'auxpow' in data
auxJson = data['auxpow']
assert_equal (auxJson['chainindex'], 0)
assert_equal (auxJson['merklebranch'], [])
assert_equal (auxJson['chainmerklebranch'], [])
assert_equal (type (auxJson['parentblock']), dict)
# The non-verbose block header RPC should contain the auxpow at the end.
data = self.nodes[1].getblockheader (auxblock['hash'], False)
assert_equal (data[-160:], apow[-160:])
# Also previous blocks should have 'auxpow', since all blocks (also
# those generated by "generate") are merge-mined.
oldHash = self.nodes[1].getblockhash (100)
data = self.nodes[1].getblock (oldHash)
assert 'auxpow' in data
# Check that it paid correctly to the first node.
t = self.nodes[0].listtransactions ("*", 1)
assert_equal (len (t), 1)
t = t[0]
assert_equal (t['category'], "immature")
assert_equal (t['blockhash'], auxblock['hash'])
assert t['generated']
assert_greater_than_or_equal (t['amount'], Decimal ("1"))
assert_equal (t['confirmations'], 1)
# Verify the coinbase script. Ensure that it includes the block height
# to make the coinbase tx unique. The expected block height is around
# 200, so that the serialisation of the CScriptNum ends in an extra 00.
# The vector has length 2, which makes up for 02XX00 as the serialised
# height. Check this. (With segwit, the height is different, so we skip
# this for simplicity.)
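# For instance, height 202 serialises to the CScriptNum bytes ca 00 (0xca has
# its high bit set, so a trailing 00 byte is appended), giving the pushed
# script prefix 02ca00 that the check below expects.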
if not self.options.segwit:
blk = self.nodes[1].getblock (auxblock['hash'])
tx = self.nodes[1].getrawtransaction (blk['tx'][0], True, blk['hash'])
coinbase = tx['vin'][0]['coinbase']
assert_equal ("02%02x00" % auxblock['height'], coinbase[0 : 6])
def test_getauxblock (self):
"""
Test the getauxblock method.
"""
create = self.nodes[0].getauxblock
submit = self.nodes[0].getauxblock
self.test_common (create, submit)
# Ensure that the payout address is changed from one block to the next.
hash1 = mineAuxpowBlockWithMethods (create, submit)
hash2 = mineAuxpowBlockWithMethods (create, submit)
self.sync_all ()
addr1 = getCoinbaseAddr (self.nodes[1], hash1)
addr2 = getCoinbaseAddr (self.nodes[1], hash2)
assert addr1 != addr2
info = self.nodes[0].getaddressinfo (addr1)
assert info['ismine']
info = self.nodes[0].getaddressinfo (addr2)
assert info['ismine']
def test_create_submit_auxblock (self):
"""
Test the createauxblock / submitauxblock method pair.
"""
# Check for errors with wrong parameters.
assert_raises_rpc_error (-1, None, self.nodes[0].createauxblock)
assert_raises_rpc_error (-5, "Invalid coinbase payout address",
self.nodes[0].createauxblock,
"this_an_invalid_address")
# Fix a coinbase address and construct methods for it.
addr1 = self.nodes[0].get_deterministic_priv_key ().address
def create ():
return self.nodes[0].createauxblock (addr1)
submit = self.nodes[0].submitauxblock
# Run common tests.
self.test_common (create, submit)
# Ensure that the payout address is the one which we specify
hash1 = mineAuxpowBlockWithMethods (create, submit)
hash2 = mineAuxpowBlockWithMethods (create, submit)
self.sync_all ()
actual1 = getCoinbaseAddr (self.nodes[1], hash1)
actual2 = getCoinbaseAddr (self.nodes[1], hash2)
assert_equal (actual1, addr1)
assert_equal (actual2, addr1)
# Ensure that different payout addresses will generate different auxblocks
addr2 = self.nodes[1].get_deterministic_priv_key ().address
auxblock1 = self.nodes[0].createauxblock(addr1)
auxblock2 = self.nodes[0].createauxblock(addr2)
assert auxblock1['hash'] != auxblock2['hash']
if __name__ == '__main__':
AuxpowMiningTest ().main ()
|
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import grey_dilation
from skimage import img_as_float
from skimage import color
from skimage import exposure
from skimage.util.dtype import dtype_limits
__all__ = ['imshow_all', 'imshow_with_histogram', 'mean_filter_demo',
'mean_filter_interactive_demo', 'plot_cdf', 'plot_histogram']
# Gray-scale images should actually be gray!
plt.rcParams['image.cmap'] = 'gray'
#--------------------------------------------------------------------------
# Custom `imshow` functions
#--------------------------------------------------------------------------
def imshow_rgb_shifted(rgb_image, shift=100, ax=None):
"""Plot each RGB layer with an x, y shift."""
if ax is None:
ax = plt.gca()
height, width, n_channels = rgb_image.shape
x = y = 0
for i_channel, channel in enumerate(iter_channels(rgb_image)):
image = np.zeros((height, width, n_channels), dtype=channel.dtype)
image[:, :, i_channel] = channel
ax.imshow(image, extent=[x, x+width, y, y+height], alpha=0.7)
x += shift
y += shift
# `imshow` fits the extents of the last image shown, so we need to rescale.
ax.autoscale()
ax.set_axis_off()
def imshow_all(*images, **kwargs):
""" Plot a series of images side-by-side.
Convert all images to float so that images have a common intensity range.
Parameters
----------
limits : str
Control the intensity limits. By default, 'image' is used, which sets the
min/max intensities to the min/max of all images. Setting `limits` to
'dtype' can also be used if you want to preserve the image exposure.
titles : list of str
Titles for subplots. If the length of titles is less than the number
of images, empty strings are appended.
kwargs : dict
Additional keyword-arguments passed to `imshow`.
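Examples
--------
A hypothetical call using the parameters described above::

    imshow_all(original, filtered, titles=['original', 'filtered'],
               limits='dtype', size=4)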
"""
images = [img_as_float(img) for img in images]
titles = kwargs.pop('titles', [])
if len(titles) != len(images):
titles = list(titles) + [''] * (len(images) - len(titles))
limits = kwargs.pop('limits', 'image')
if limits == 'image':
kwargs.setdefault('vmin', min(img.min() for img in images))
kwargs.setdefault('vmax', max(img.max() for img in images))
elif limits == 'dtype':
vmin, vmax = dtype_limits(images[0])
kwargs.setdefault('vmin', vmin)
kwargs.setdefault('vmax', vmax)
nrows, ncols = kwargs.get('shape', (1, len(images)))
size = nrows * kwargs.pop('size', 5)
width = size * len(images)
if nrows > 1:
width /= nrows * 1.33
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, size))
for ax, img, label in zip(axes.ravel(), images, titles):
ax.imshow(img, **kwargs)
ax.set_title(label)
def imshow_with_histogram(image, **kwargs):
""" Plot an image side-by-side with its histogram.
- Plot the image next to the histogram
- Plot each RGB channel separately (if input is color)
- Automatically flatten channels
- Select reasonable bins based on the image's dtype
See `plot_histogram` for information on how the histogram is plotted.
"""
width, height = plt.rcParams['figure.figsize']
fig, (ax_image, ax_hist) = plt.subplots(ncols=2, figsize=(2*width, height))
kwargs.setdefault('cmap', plt.cm.gray)
ax_image.imshow(image, **kwargs)
plot_histogram(image, ax=ax_hist)
# pretty it up
ax_image.set_axis_off()
match_axes_height(ax_image, ax_hist)
return ax_image, ax_hist
#--------------------------------------------------------------------------
# Helper functions
#--------------------------------------------------------------------------
def match_axes_height(ax_src, ax_dst):
""" Match the axes height of two axes objects.
The height of `ax_dst` is synced to that of `ax_src`.
"""
# HACK: plot geometry isn't set until the plot is drawn
plt.draw()
dst = ax_dst.get_position()
src = ax_src.get_position()
ax_dst.set_position([dst.xmin, src.ymin, dst.width, src.height])
def plot_cdf(image, ax=None):
ax = ax if ax is not None else plt.gca()
img_cdf, bins = exposure.cumulative_distribution(image)
ax.plot(bins, img_cdf, 'r')
ax.set_ylabel("Fraction of pixels below intensity")
def plot_histogram(image, ax=None, **kwargs):
""" Plot the histogram of an image (gray-scale or RGB) on `ax`.
Calculate histogram using `skimage.exposure.histogram` and plot as filled
line. If an image has a 3rd dimension, assume it's RGB and plot each
channel separately.
"""
ax = ax if ax is not None else plt.gca()
if image.ndim == 2:
_plot_histogram(ax, image, color='black', **kwargs)
elif image.ndim == 3:
# `channel` is the red, green, or blue channel of the image.
for channel, channel_color in zip(iter_channels(image), 'rgb'):
_plot_histogram(ax, channel, color=channel_color, **kwargs)
def _plot_histogram(ax, image, alpha=0.3, **kwargs):
# Use skimage's histogram function which has nice defaults for
# integer and float images.
hist, bin_centers = exposure.histogram(image)
ax.fill_between(bin_centers, hist, alpha=alpha, **kwargs)
ax.set_xlabel('intensity')
ax.set_ylabel('# pixels')
def iter_channels(color_image):
"""Yield color channels of an image."""
# Roll array-axis so that we iterate over the color channels of an image.
for channel in np.rollaxis(color_image, -1):
yield channel
#--------------------------------------------------------------------------
# Convolution Demo
#--------------------------------------------------------------------------
def mean_filter_demo(image, vmax=1):
mean_factor = 1.0 / 9.0 # This assumes a 3x3 kernel.
iter_kernel_and_subimage = iter_kernel(image)
image_cache = []
def mean_filter_step(i_step):
while i_step >= len(image_cache):
filtered = image if i_step == 0 else image_cache[-1][1]
filtered = filtered.copy()
(i, j), mask, subimage = next(iter_kernel_and_subimage)
filter_overlay = color.label2rgb(mask, image, bg_label=0,
colors=('yellow', 'red'))
filtered[i, j] = np.sum(mean_factor * subimage)
image_cache.append((filter_overlay, filtered))
imshow_all(*image_cache[i_step], vmax=vmax)
plt.show()
return mean_filter_step
def mean_filter_interactive_demo(image):
from IPython.html import widgets
mean_filter_step = mean_filter_demo(image)
step_slider = widgets.IntSliderWidget(min=0, max=image.size-1, value=0)
widgets.interact(mean_filter_step, i_step=step_slider)
def iter_kernel(image, size=1):
""" Yield position, kernel mask, and image for each pixel in the image.
The kernel mask has a 2 at the center pixel and 1 around it. The actual
width of the kernel is 2*size + 1.
"""
width = 2*size + 1
for (i, j), pixel in iter_pixels(image):
mask = np.zeros(image.shape, dtype='int16')
mask[i, j] = 1
mask = grey_dilation(mask, size=width)
mask[i, j] = 2
subimage = image[bounded_slice((i, j), image.shape[:2], size=size)]
yield (i, j), mask, subimage
def iter_pixels(image):
""" Yield pixel position (row, column) and pixel intensity. """
height, width = image.shape[:2]
for i in range(height):
for j in range(width):
yield (i, j), image[i, j]
def bounded_slice(center, xy_max, size=1, i_min=0):
slices = []
for i, i_max in zip(center, xy_max):
slices.append(slice(max(i - size, i_min), min(i + size + 1, i_max)))
return slices
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Migrate dashboard position_json data from V1 to V2
Revision ID: bebcf3fed1fe
Revises: fc480c87706c
Create Date: 2018-07-22 11:59:07.025119
"""
# revision identifiers, used by Alembic.
import collections
import json
import sys
import uuid
from functools import reduce
from alembic import op
from sqlalchemy import Column, ForeignKey, Integer, String, Table, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from superset import db
revision = "bebcf3fed1fe"
down_revision = "fc480c87706c"
Base = declarative_base()
BACKGROUND_TRANSPARENT = "BACKGROUND_TRANSPARENT"
CHART_TYPE = "DASHBOARD_CHART_TYPE"
COLUMN_TYPE = "DASHBOARD_COLUMN_TYPE"
DASHBOARD_GRID_ID = "DASHBOARD_GRID_ID"
DASHBOARD_GRID_TYPE = "DASHBOARD_GRID_TYPE"
DASHBOARD_HEADER_ID = "DASHBOARD_HEADER_ID"
DASHBOARD_HEADER_TYPE = "DASHBOARD_HEADER_TYPE"
DASHBOARD_ROOT_ID = "DASHBOARD_ROOT_ID"
DASHBOARD_ROOT_TYPE = "DASHBOARD_ROOT_TYPE"
DASHBOARD_VERSION_KEY = "DASHBOARD_VERSION_KEY"
MARKDOWN_TYPE = "DASHBOARD_MARKDOWN_TYPE"
ROW_TYPE = "DASHBOARD_ROW_TYPE"
GRID_COLUMN_COUNT = 12
GRID_MIN_COLUMN_COUNT = 1
GRID_MIN_ROW_UNITS = 5
GRID_RATIO = 4.0
NUMBER_OF_CHARTS_PER_ROW = 3
MAX_RECURSIVE_LEVEL = 6
ROW_HEIGHT = 8
TOTAL_COLUMNS = 48
DEFAULT_CHART_WIDTH = int(TOTAL_COLUMNS / NUMBER_OF_CHARTS_PER_ROW)
MAX_VALUE = sys.maxsize
class Slice(Base):
"""Declarative class to do query in upgrade"""
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
params = Column(Text)
viz_type = Column(String(250))
dashboard_slices = Table(
"dashboard_slices",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
Column("slice_id", Integer, ForeignKey("slices.id")),
)
class Dashboard(Base):
"""Declarative class to do query in upgrade"""
__tablename__ = "dashboards"
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(Text)
slices = relationship("Slice", secondary=dashboard_slices, backref="dashboards")
def is_v2_dash(positions):
return (
isinstance(positions, dict) and positions.get("DASHBOARD_VERSION_KEY") == "v2"
)
def get_boundary(positions):
top = MAX_VALUE
left = MAX_VALUE
bottom = 0
right = 0
for position in positions:
top = min(position["row"], top)
left = min(position["col"], left)
bottom = max(position["row"] + position["size_y"], bottom)
right = max(position["col"] + position["size_x"], right)
return {"top": top, "bottom": bottom, "left": left, "right": right}
def generate_id():
return uuid.uuid4().hex[:8]
def has_overlap(positions, xAxis=True):
sorted_positions = (
sorted(positions[:], key=lambda pos: pos["col"])
if xAxis
else sorted(positions[:], key=lambda pos: pos["row"])
)
result = False
for idx, position in enumerate(sorted_positions):
if idx < len(sorted_positions) - 1:
if xAxis:
result = (
position["col"] + position["size_x"]
> sorted_positions[idx + 1]["col"]
)
else:
result = (
position["row"] + position["size_y"]
> sorted_positions[idx + 1]["row"]
)
if result:
break
return result
def get_empty_layout():
return {
DASHBOARD_VERSION_KEY: "v2",
DASHBOARD_ROOT_ID: {
"type": DASHBOARD_ROOT_TYPE,
"id": DASHBOARD_ROOT_ID,
"children": [DASHBOARD_GRID_ID],
},
DASHBOARD_GRID_ID: {
"type": DASHBOARD_GRID_TYPE,
"id": DASHBOARD_GRID_ID,
"children": [],
},
}
def get_header_component(title):
return {
"id": DASHBOARD_HEADER_ID,
"type": DASHBOARD_HEADER_TYPE,
"meta": {"text": title},
}
def get_row_container():
return {
"type": ROW_TYPE,
"id": "DASHBOARD_ROW_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"background": BACKGROUND_TRANSPARENT},
}
def get_col_container():
return {
"type": COLUMN_TYPE,
"id": "DASHBOARD_COLUMN_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"background": BACKGROUND_TRANSPARENT},
}
def get_chart_holder(position):
size_x = position["size_x"]
size_y = position["size_y"]
slice_id = position["slice_id"]
slice_name = position.get("slice_name")
code = position.get("code")
width = max(GRID_MIN_COLUMN_COUNT, int(round(size_x / GRID_RATIO)))
height = max(
GRID_MIN_ROW_UNITS, int(round(((size_y / GRID_RATIO) * 100) / ROW_HEIGHT))
)
if code is not None:
markdown_content = " " # white-space markdown
if len(code):
markdown_content = code
elif slice_name.strip():
markdown_content = "##### {}".format(slice_name)
return {
"type": MARKDOWN_TYPE,
"id": "DASHBOARD_MARKDOWN_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"width": width, "height": height, "code": markdown_content},
}
return {
"type": CHART_TYPE,
"id": "DASHBOARD_CHART_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"width": width, "height": height, "chartId": int(slice_id)},
}
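# A minimal sketch (hypothetical position data, not part of the migration) of
# the v1 -> v2 size conversion in get_chart_holder: a 16x16 v1 position is
# divided by GRID_RATIO to get 4 grid columns, and its height is rescaled by
# 100 / ROW_HEIGHT after the same division, giving 50 row units.
if __name__ == "__main__":
    demo_holder = get_chart_holder({"size_x": 16, "size_y": 16, "slice_id": 7})
    assert demo_holder["meta"] == {"width": 4, "height": 50, "chartId": 7}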
def get_children_max(children, attr, root):
return max([root[childId]["meta"][attr] for childId in children])
def get_children_sum(children, attr, root):
return reduce((lambda sum, childId: sum + root[childId]["meta"][attr]), children, 0)
# Find columns that are wider than GRID_MIN_COLUMN_COUNT and in which every
# row still has charts whose width can be reduced (see the sketch after
# can_reduce_column_width below).
def get_wide_column_ids(children, root):
return list(
filter(lambda childId: can_reduce_column_width(root[childId], root), children)
)
def is_wide_leaf_component(component):
return (
component["type"] in [CHART_TYPE, MARKDOWN_TYPE]
and component["meta"]["width"] > GRID_MIN_COLUMN_COUNT
)
def can_reduce_column_width(column_component, root):
return (
column_component["type"] == COLUMN_TYPE
and column_component["meta"]["width"] > GRID_MIN_COLUMN_COUNT
and all(
[
is_wide_leaf_component(root[childId])
or (
root[childId]["type"] == ROW_TYPE
and all(
[
is_wide_leaf_component(root[id])
for id in root[childId]["children"]
]
)
)
for childId in column_component["children"]
]
)
)
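# A minimal sketch (hypothetical layout, not part of the migration): a column
# counts as "wide" when it is wider than GRID_MIN_COLUMN_COUNT and every child
# chart (directly or inside a row) is still wide enough to give up grid units.
if __name__ == "__main__":
    demo_root = {
        "DASHBOARD_CHART_TYPE-demo": {"type": CHART_TYPE, "meta": {"width": 3}},
        "DASHBOARD_COLUMN_TYPE-demo": {
            "type": COLUMN_TYPE,
            "meta": {"width": 3},
            "children": ["DASHBOARD_CHART_TYPE-demo"],
        },
    }
    assert can_reduce_column_width(demo_root["DASHBOARD_COLUMN_TYPE-demo"], demo_root)
    assert get_wide_column_ids(["DASHBOARD_COLUMN_TYPE-demo"], demo_root) == [
        "DASHBOARD_COLUMN_TYPE-demo"
    ]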
def reduce_row_width(row_component, root):
wide_leaf_component_ids = list(
filter(
lambda childId: is_wide_leaf_component(root[childId]),
row_component["children"],
)
)
widest_chart_id = None
widest_width = 0
for component_id in wide_leaf_component_ids:
if root[component_id]["meta"]["width"] > widest_width:
widest_width = root[component_id]["meta"]["width"]
widest_chart_id = component_id
if widest_chart_id:
root[widest_chart_id]["meta"]["width"] -= 1
return get_children_sum(row_component["children"], "width", root)
def reduce_component_width(component):
if is_wide_leaf_component(component):
component["meta"]["width"] -= 1
return component["meta"]["width"]
def convert(positions, level, parent, root):
if len(positions) == 0:
return
if len(positions) == 1 or level >= MAX_RECURSIVE_LEVEL:
# special treatment for single chart dash:
# always wrap chart inside a row
if parent["type"] == DASHBOARD_GRID_TYPE:
row_container = get_row_container()
root[row_container["id"]] = row_container
parent["children"].append(row_container["id"])
parent = row_container
chart_holder = get_chart_holder(positions[0])
root[chart_holder["id"]] = chart_holder
parent["children"].append(chart_holder["id"])
return
current_positions = positions[:]
boundary = get_boundary(current_positions)
top = boundary["top"]
bottom = boundary["bottom"]
left = boundary["left"]
right = boundary["right"]
# find row dividers
layers = []
current_row = top + 1
while len(current_positions) and current_row <= bottom:
upper = []
lower = []
is_row_divider = True
for position in current_positions:
row = position["row"]
size_y = position["size_y"]
if row + size_y <= current_row:
lower.append(position)
continue
elif row >= current_row:
upper.append(position)
continue
is_row_divider = False
break
if is_row_divider:
current_positions = upper[:]
layers.append(lower)
current_row += 1
    # Each layer is a list of positions belonging to the same row section;
    # they can be a plain list of charts, arranged in columns, or a mix of both.
for layer in layers:
if len(layer) == 0:
continue
if len(layer) == 1 and parent["type"] == COLUMN_TYPE:
chart_holder = get_chart_holder(layer[0])
root[chart_holder["id"]] = chart_holder
parent["children"].append(chart_holder["id"])
continue
# create a new row
row_container = get_row_container()
root[row_container["id"]] = row_container
parent["children"].append(row_container["id"])
current_positions = layer[:]
if not has_overlap(current_positions):
# this is a list of charts in the same row
sorted_by_col = sorted(current_positions, key=lambda pos: pos["col"])
for position in sorted_by_col:
chart_holder = get_chart_holder(position)
root[chart_holder["id"]] = chart_holder
row_container["children"].append(chart_holder["id"])
else:
# this row has columns, find col dividers
current_col = left + 1
while len(current_positions) and current_col <= right:
upper = []
lower = []
is_col_divider = True
for position in current_positions:
col = position["col"]
size_x = position["size_x"]
if col + size_x <= current_col:
lower.append(position)
continue
elif col >= current_col:
upper.append(position)
continue
is_col_divider = False
break
if is_col_divider:
                    # a single chart in this column:
                    # add it to the parent container without creating a new column container
if len(lower) == 1:
chart_holder = get_chart_holder(lower[0])
root[chart_holder["id"]] = chart_holder
row_container["children"].append(chart_holder["id"])
else:
# create new col container
col_container = get_col_container()
root[col_container["id"]] = col_container
if not has_overlap(lower, False):
sorted_by_row = sorted(lower, key=lambda pos: pos["row"])
for position in sorted_by_row:
chart_holder = get_chart_holder(position)
root[chart_holder["id"]] = chart_holder
col_container["children"].append(chart_holder["id"])
else:
convert(lower, level + 2, col_container, root)
# add col meta
if len(col_container["children"]):
row_container["children"].append(col_container["id"])
col_container["meta"]["width"] = get_children_max(
col_container["children"], "width", root
)
current_positions = upper[:]
current_col += 1
# add row meta
row_container["meta"]["width"] = get_children_sum(
row_container["children"], "width", root
)
def convert_to_layout(positions):
root = get_empty_layout()
convert(positions, 0, root[DASHBOARD_GRID_ID], root)
# remove row's width, height and col's height from its meta data
# and make sure every row's width <= GRID_COLUMN_COUNT
# Each item is a dashboard component:
# row_container, or col_container, or chart_holder
for item in root.values():
if not isinstance(item, dict):
continue
if ROW_TYPE == item["type"]:
meta = item["meta"]
if meta.get("width", 0) > GRID_COLUMN_COUNT:
current_width = meta["width"]
while current_width > GRID_COLUMN_COUNT and len(
list(
filter(
lambda childId: is_wide_leaf_component(root[childId]),
item["children"],
)
)
):
current_width = reduce_row_width(item, root)
                # Because v1 chart sizes are rounded to the nearest v2 grid
                # count, the overall row width may still exceed GRID_COLUMN_COUNT.
                # So this extra step checks the row width and reduces chart or
                # column widths where needed and where possible.
if current_width > GRID_COLUMN_COUNT:
has_wide_columns = True
while has_wide_columns:
col_ids = get_wide_column_ids(item["children"], root)
idx = 0
                        # a 2nd loop is needed since the same column may be reduced multiple times
while idx < len(col_ids) and current_width > GRID_COLUMN_COUNT:
current_column = col_ids[idx]
for childId in root[current_column]["children"]:
if root[childId]["type"] == ROW_TYPE:
root[childId]["meta"]["width"] = reduce_row_width(
root[childId], root
)
else:
root[childId]["meta"][
"width"
] = reduce_component_width(root[childId])
root[current_column]["meta"]["width"] = get_children_max(
root[current_column]["children"], "width", root
)
current_width = get_children_sum(
item["children"], "width", root
)
idx += 1
has_wide_columns = (
len(get_wide_column_ids(item["children"], root))
and current_width > GRID_COLUMN_COUNT
)
meta.pop("width", None)
return root
def merge_position(position, bottom_line, last_column_start):
col = position["col"]
size_x = position["size_x"]
size_y = position["size_y"]
end_column = len(bottom_line) if col + size_x > last_column_start else col + size_x
    # find indexes i where i >= col and bottom_line[i] > bottom_line[col]
taller_indexes = [
i
for i, value in enumerate(bottom_line)
if (i >= col and value > bottom_line[col])
]
current_row_value = bottom_line[col]
    # if there is not enough space to fit the current position, start from the
    # tallest bottom-line value in the span
if len(taller_indexes) > 0 and (taller_indexes[0] - col + 1) < size_x:
current_row_value = max(bottom_line[col : col + size_x])
    # add this position's size_y on top of the current row value
for i in range(col, end_column):
bottom_line[i] = current_row_value + size_y
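# A minimal sketch (hypothetical data, not part of the migration) of how
# merge_position maintains a "skyline" of occupied rows in bottom_line:
# placing a 4x2 chart at col=1 on an empty board raises the bottom line for
# columns 1-4 from 0 to 2 and leaves the rest untouched.
if __name__ == "__main__":
    demo_bottom_line = [MAX_VALUE] + [0] * TOTAL_COLUMNS
    merge_position({"col": 1, "size_x": 4, "size_y": 2}, demo_bottom_line, 45)
    assert demo_bottom_line[1:5] == [2, 2, 2, 2]
    assert demo_bottom_line[5] == 0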
# In the original position data, many positions have problematic row attributes;
# for example, the same position may be assigned to more than one chart.
# The convert function relies on row and col ids to split the whole dashboard into
# nested rows and columns, so bad row ids lead to many empty spaces or to several
# charts overlapping in the same row.
# This function reads positions row by row, then, based on the previous chart's
# col id, width and height, re-calculates the next position's row id.
def scan_dashboard_positions_data(positions):
positions_by_row_id = {}
for position in positions:
row = position["row"]
position["col"] = min(position["col"], TOTAL_COLUMNS)
if not positions_by_row_id.get(row):
positions_by_row_id[row] = []
positions_by_row_id[row].append(position)
bottom_line = [0] * (TOTAL_COLUMNS + 1)
# col index always starts from 1, set a large number for [0] as placeholder
bottom_line[0] = MAX_VALUE
last_column_start = max([position["col"] for position in positions])
# ordered_raw_positions are arrays of raw positions data sorted by row id
ordered_raw_positions = []
row_ids = sorted(positions_by_row_id.keys())
for row_id in row_ids:
ordered_raw_positions.append(positions_by_row_id[row_id])
updated_positions = []
while len(ordered_raw_positions):
next_row = ordered_raw_positions.pop(0)
next_col = 1
while len(next_row):
            # special treatment for the same (row, col) being assigned to more
            # than one chart: add one additional row and place the wider chart first
            available_columns_index = [
                i for i, e in enumerate(next_row) if e["col"] == next_col
            ]
if len(available_columns_index):
idx = available_columns_index[0]
if len(available_columns_index) > 1:
idx = sorted(
available_columns_index,
key=lambda x: next_row[x]["size_x"],
reverse=True,
)[0]
next_position = next_row.pop(idx)
merge_position(next_position, bottom_line, last_column_start + 1)
next_position["row"] = (
bottom_line[next_position["col"]] - next_position["size_y"]
)
updated_positions.append(next_position)
next_col += next_position["size_x"]
else:
next_col = next_row[0]["col"]
return updated_positions
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
dashboards = session.query(Dashboard).all()
for i, dashboard in enumerate(dashboards):
print("scanning dashboard ({}/{}) >>>>".format(i + 1, len(dashboards)))
position_json = json.loads(dashboard.position_json or "[]")
if not is_v2_dash(position_json):
print("Converting dashboard... dash_id: {}".format(dashboard.id))
position_dict = {}
positions = []
slices = dashboard.slices
if position_json:
                # scan and fix positions data: extra spaces, duplicated rows, etc.
position_json = scan_dashboard_positions_data(position_json)
position_dict = {
str(position["slice_id"]): position for position in position_json
}
last_row_id = (
max([pos["row"] + pos["size_y"] for pos in position_json])
if position_json
else 0
)
new_slice_counter = 0
for slice in slices:
position = position_dict.get(str(slice.id))
                # some dashboards don't have position_json;
                # place 3 charts in a row
if not position:
position = {
"col": (
new_slice_counter
% NUMBER_OF_CHARTS_PER_ROW
* DEFAULT_CHART_WIDTH
+ 1
),
"row": (
last_row_id
+ int(new_slice_counter / NUMBER_OF_CHARTS_PER_ROW)
* DEFAULT_CHART_WIDTH
),
"size_x": DEFAULT_CHART_WIDTH,
"size_y": DEFAULT_CHART_WIDTH,
"slice_id": str(slice.id),
}
new_slice_counter += 1
                # attach additional parameters to the position dict, preparing to
                # replace the markup and separator viz_types with dashboard UI
                # components
form_data = json.loads(slice.params or "{}")
viz_type = slice.viz_type
if form_data and viz_type in ["markup", "separator"]:
position["code"] = form_data.get("code")
position["slice_name"] = slice.slice_name
positions.append(position)
v2_layout = convert_to_layout(positions)
v2_layout[DASHBOARD_HEADER_ID] = get_header_component(
dashboard.dashboard_title
)
sorted_by_key = collections.OrderedDict(sorted(v2_layout.items()))
dashboard.position_json = json.dumps(sorted_by_key, indent=2)
session.merge(dashboard)
session.commit()
else:
print("Skip converted dash_id: {}".format(dashboard.id))
session.close()
def downgrade():
print("downgrade is done")
|
|
import pytest
import io
from CommonServerPython import *
import OpsGenieV3
from unittest import mock
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_create_alert_wrong_responders():
"""
Given:
- An app client object
When:
- Calling function create_alert with argument responders in the wrong format
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.create_alert(mock_client, {'responders': ['team', 'id']})
def test_create_alert(mocker):
"""
Given:
- An app client object
- Responders "team,id,123"
When:
- Calling function create_alert with argument responders in the right format
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'create_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/create_alert.json'))
res = OpsGenieV3.create_alert(mock_client, {'responders': "team,id,123"})
assert (res.raw_response == util_load_json('test_data/create_alert.json'))
def test_get_alerts(mocker):
"""
Given:
- An app client object
- Limit = 1
When:
- Calling function list_alerts
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_alerts',
return_value=util_load_json('test_data/get_alerts.json'))
res = OpsGenieV3.get_alerts(mock_client, {"limit": 1})
assert (len(res.outputs) == 1)
def test_get_alerts_going_to_right_function():
"""
Given:
- An app client object
When:
- Calling function get_alerts
Case A: "alert-id" = 1234
Case B: No arguments
Then:
- Ensure the right function was called
Case A: Called get_alert
Case B: Called list_alerts
"""
mock_client = OpsGenieV3.Client(base_url="")
mock_client.get_alert = mock.MagicMock()
OpsGenieV3.get_alerts(mock_client, {"alert-id": 1234})
assert mock_client.get_alert.called
OpsGenieV3.list_alerts = mock.MagicMock()
OpsGenieV3.get_alerts(mock_client, {})
assert OpsGenieV3.list_alerts.called
def test_delete_alert(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
When:
- Calling function delete_alert
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'delete_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/delete_alert.json'))
res = OpsGenieV3.delete_alert(mock_client, {"alert-id": 1234})
assert (res.raw_response == util_load_json('test_data/delete_alert.json'))
def test_ack_alert(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
When:
- Calling function ack_alert
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'ack_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/ack_alert.json'))
res = OpsGenieV3.ack_alert(mock_client, {"alert-id": 1234})
assert (res.raw_response == util_load_json('test_data/ack_alert.json'))
def test_close_alert(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
When:
- Calling function close_alert
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'close_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/close_alert.json'))
res = OpsGenieV3.close_alert(mock_client, {"alert-id": 1234})
assert (res.raw_response == util_load_json('test_data/close_alert.json'))
def test_assign_alert_without_args():
"""
Given:
- An app client object
When:
- Calling function assign_alert with no arguments
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.assign_alert(mock_client, {})
def test_assign_alert(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
- Owner_id = 123
When:
- Calling function assign_alert
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'assign_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/assign_alert.json'))
res = OpsGenieV3.assign_alert(mock_client, {"alert-id": 1234, "owner_id": 123})
assert (res.raw_response == util_load_json('test_data/assign_alert.json'))
def test_add_responder_alert_wrong_responders():
"""
Given:
- An app client object
When:
- Calling function add_responder_alert with argument responders in the wrong format
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.add_responder_alert(mock_client, {'responders': ['team', 'id']})
def test_add_responder_alert(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
- owner_id = 123
When:
- Calling function add_responder_alert
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'add_responder_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/add_responder_alert.json'))
res = OpsGenieV3.add_responder_alert(mock_client, {"alert-id": 1234, "owner_id": 123})
assert (res.raw_response == util_load_json('test_data/add_responder_alert.json'))
def test_get_escalations_without_args():
"""
Given:
- An app client object
When:
- Calling function escalate_alert with no arguments
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.escalate_alert(mock_client, {})
def test_get_escalations(mocker):
"""
Given:
- An app client object
When:
- Calling function get_escalations
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'get_escalations',
return_value=util_load_json('test_data/get_escalations.json'))
res = OpsGenieV3.get_escalations(mock_client, {})
assert len(res.outputs) == 2
def test_get_escalation(mocker):
"""
Given:
- An app client object
- escalation_id = 123
When:
- Calling function get_escalations
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'get_escalation',
return_value=util_load_json('test_data/get_escalations.json'))
res = OpsGenieV3.get_escalations(mock_client, {"escalation_id": 123})
assert len(res.outputs) == 2
def test_escalate_alert_without_args():
"""
Given:
- An app client object
When:
- Calling function escalate_alert with no arguments
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.escalate_alert(mock_client, {})
def test_escalate_alert(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
    - escalation_id = 123
When:
- Calling function escalate_alert
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'escalate_alert',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/escalate_alert.json'))
res = OpsGenieV3.escalate_alert(mock_client, {"alert-id": 1234, "escalation_id": 123})
assert (res.raw_response == util_load_json('test_data/escalate_alert.json'))
def test_add_alert_tag(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
- tags = [1,2]
When:
- Calling function add_alert_tag
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'add_alert_tag',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/add_alert_tag.json'))
res = OpsGenieV3.add_alert_tag(mock_client, {"alert-id": 1234, "tags": [1, 2]})
assert (res.raw_response == util_load_json('test_data/add_alert_tag.json'))
def test_remove_alert_tag(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
- tags = [1,2]
When:
- Calling function remove_alert_tag
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'remove_alert_tag',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/remove_alert_tag.json'))
res = OpsGenieV3.remove_alert_tag(mock_client, {"alert-id": 1234, "tags": [1, 2]})
assert (res.raw_response == util_load_json('test_data/remove_alert_tag.json'))
def test_get_alert_attachments(mocker):
"""
Given:
- An app client object
- Alert-id = 1234
When:
- Calling function get_alert_attachments
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'get_alert_attachments',
return_value=util_load_json('test_data/get_alert_attachments.json'))
res = OpsGenieV3.get_alert_attachments(mock_client, {"alert-id": 1234})
assert (res.readable_output == "### OpsGenie Attachment\n**No entries.**\n")
def test_get_schedules():
"""
Given:
- An app client object
When:
- Calling function get_schedules
Case A: "schedule_id" = 1234
Case B: No arguments
Then:
- Ensure the right function was called
Case A: Called get_schedule
Case B: Called list_schedules
"""
mock_client = OpsGenieV3.Client(base_url="")
mock_client.get_schedule = mock.MagicMock()
OpsGenieV3.get_schedules(mock_client, {"schedule_id": 1234})
assert mock_client.get_schedule.called
mock_client.list_schedules = mock.MagicMock()
OpsGenieV3.get_schedules(mock_client, {})
assert mock_client.list_schedules.called
def test_get_schedule_overrides_without_args():
"""
Given:
- An app client object
When:
- Calling function get_schedule_overrides with no arguments
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.get_schedule_overrides(mock_client, {})
def test_get_schedule_without_args():
"""
Given:
- An app client object
When:
- Calling function get_schedule with no arguments
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
mock_client.get_schedule({})
def test_get_schedule_overrides():
"""
Given:
- An app client object
When:
- Calling function get_schedule_overrides
Case A: "schedule_id" = 1234 , override_alias = 123
Case B: No arguments
Then:
- Ensure the right function was called
Case A: Called get_schedule_override
Case B: Called list_schedule_overrides
"""
mock_client = OpsGenieV3.Client(base_url="")
mock_client.get_schedule_override = mock.MagicMock()
OpsGenieV3.get_schedule_overrides(mock_client, {"schedule_id": 1234, "override_alias": 123})
assert mock_client.get_schedule_override.called
mock_client.list_schedule_overrides = mock.MagicMock()
OpsGenieV3.get_schedule_overrides(mock_client, {"schedule_id": 1234})
assert mock_client.list_schedule_overrides.called
def test_get_on_call_without_args():
"""
Given:
- An app client object
When:
- Calling function get_on_call with no arguments
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.get_on_call(mock_client, {})
def test_get_on_call(mocker):
"""
Given:
- An app client object
- schedule_id = 1234
When:
- Calling function get_on_call
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'get_on_call',
return_value=util_load_json('test_data/delete_incident.json'))
res = OpsGenieV3.get_on_call(mock_client, {"schedule_id": 1234})
assert (res.raw_response == util_load_json('test_data/delete_incident.json'))
def test_create_incident_wrong_args():
"""
Given:
- An app client object
When:
- Calling function create_incident with argument responders in the wrong format
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.create_incident(mock_client, {'responders': ['team', 'id']})
def test_create_incident(mocker):
"""
Given:
- An app client object
When:
- Calling function create_incident
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'create_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/create_incident.json'))
res = OpsGenieV3.create_incident(mock_client, {})
assert (res.raw_response == util_load_json('test_data/create_incident.json'))
def test_delete_incident(mocker):
"""
Given:
- incident_id = 1234
When:
- Calling function delete_incident
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'delete_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/delete_incident.json'))
res = OpsGenieV3.delete_incident(mock_client, {"incident_id": 1234})
assert (res.raw_response == util_load_json('test_data/delete_incident.json'))
def test_get_incidents(mocker):
"""
Given:
- An app client object
- limit = 1
When:
- Calling function get_incidents
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_incidents',
return_value=util_load_json('test_data/get_incidents.json'))
res = OpsGenieV3.get_incidents(mock_client, {"limit": 1})
assert (len(res.outputs) == 1)
def test_responders_to_json():
"""
Given:
- An app client object
- responders = ["team", "id", 1, "schedule", "name", "a"]
- responder_key = 'responders'
When:
- Calling function responders_to_json
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
res = mock_client.responders_to_json(responders=["team", "id", 1, "schedule", "name", "a"],
responder_key='responders')
assert (res == {'responders': [{'id': 1, 'type': 'team'}, {'name': 'a', 'type': 'schedule'}]})
def test_get_incidents_going_to_right_function():
"""
Given:
- An app client object
When:
- Calling function get_incidents
Case A: "incident_id" = 1234
Case B: No arguments
Then:
- Ensure the right function was called
Case A: Called get_incident
Case B: Called list_incidents
"""
mock_client = OpsGenieV3.Client(base_url="")
mock_client.get_incident = mock.MagicMock()
OpsGenieV3.get_incidents(mock_client, {"incident_id": 1234})
assert mock_client.get_incident.called
OpsGenieV3.list_incidents = mock.MagicMock()
OpsGenieV3.get_incidents(mock_client, {})
assert OpsGenieV3.list_incidents.called
def test_close_incident(mocker):
"""
Given:
- An app client object
- incident_id = 1234
When:
- Calling function close_incident
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'close_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/close_incident.json'))
res = OpsGenieV3.close_incident(mock_client, {"incident_id": 1234})
assert (res.raw_response == util_load_json('test_data/close_incident.json'))
def test_resolve_incident(mocker):
"""
Given:
- An app client object
- incident_id = 1234
When:
- Calling function resolve_incident
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'resolve_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/resolve_incident.json'))
res = OpsGenieV3.resolve_incident(mock_client, {"incident_id": 1234})
assert (res.raw_response == util_load_json('test_data/resolve_incident.json'))
def test_add_responder_incident_wrong_args():
"""
Given:
- An app client object
When:
- Calling function add_responder_incident with argument responders in the wrong format
Then:
    - Ensure the call raises an exception.
"""
mock_client = OpsGenieV3.Client(base_url="")
with pytest.raises(DemistoException):
OpsGenieV3.add_responder_incident(mock_client, {'responders': ['team', 'id']})
def test_add_responder_incident(mocker):
"""
Given:
- An app client object
- incident_id = 1234
- responders = ["team", "id", "name"]
When:
- Calling function add_responder_incident
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'add_responder_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/add_responder_incident.json'))
res = OpsGenieV3.add_responder_incident(mock_client, {"incident_id": 1234, "responders": ["team", "id", "name"]})
assert (res.raw_response == util_load_json('test_data/add_responder_incident.json'))
def test_add_tag_incident(mocker):
"""
Given:
- An app client object
- incident_id = 1234
- tags = [1, 2]
When:
- Calling function add_tag_incident
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'add_tag_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/add_tag_incident.json'))
res = OpsGenieV3.add_tag_incident(mock_client, {"incident_id": 1234, "tags": [1, 2]})
assert (res.raw_response == util_load_json('test_data/add_tag_incident.json'))
def test_remove_tag_incident(mocker):
"""
Given:
- An app client object
- incident_id = 1234
- tags = [1, 2]
When:
- Calling function remove_tag_incident
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'remove_tag_incident',
return_value=util_load_json('test_data/request.json'))
mocker.patch.object(mock_client, 'get_request',
return_value=util_load_json('test_data/remove_tag_incident.json'))
res = OpsGenieV3.remove_tag_incident(mock_client, {"incident_id": 1234, "tags": [1, 2]})
assert (res.raw_response == util_load_json('test_data/remove_tag_incident.json'))
def test_get_teams(mocker):
"""
Given:
- An app client object
When:
- Calling function get_teams
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_teams',
return_value=util_load_json('test_data/get_teams.json'))
res = OpsGenieV3.get_teams(mock_client, {})
assert len(res.outputs) == 2
def test_get_teams_going_to_right_function():
"""
Given:
- An app client object
When:
- Calling function get_teams
Case A: "team_id" = 1234
Case B: No arguments
Then:
- Ensure the right function was called
Case A: Called get_team
Case B: Called list_teams
"""
mock_client = OpsGenieV3.Client(base_url="")
mock_client.get_team = mock.MagicMock()
OpsGenieV3.get_teams(mock_client, {"team_id": 1234})
assert mock_client.get_team.called
mock_client.list_teams = mock.MagicMock()
OpsGenieV3.get_teams(mock_client, {})
assert mock_client.list_teams.called
def test_fetch_incidents_command(mocker):
"""
Given:
- An app client object
When:
- Calling function fetch_incidents_command
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_alerts',
return_value=util_load_json('test_data/get_alerts.json'))
mocker.patch.object(mock_client, 'list_incidents',
return_value=util_load_json('test_data/get_incidents.json'))
mocker.patch.object(OpsGenieV3, '_get_utc_now', return_value=datetime(2021, 11, 26))
mocker.patch.object(OpsGenieV3, '_parse_fetch_time', return_value='2021-11-23T12:19:48Z')
res, last_run = OpsGenieV3.fetch_incidents_command(mock_client, {"max_fetch": 1})
assert len(res) == 2
assert last_run == {'Alerts': {'lastRun': '2021-11-26T00:00:00Z',
'next_page': 'https://api.opsgenie.com/v2/alerts?limit=1&sort='
'createdAt&offset=1&order=desc'},
'Incidents': {'lastRun': '2021-11-26T00:00:00Z',
'next_page': 'https://api.opsgenie.com/v1/incidents?limit=1&'
'sort=insertedAt&offset=1&order=desc'}}
def test_fetch_incidents_command_no_result(mocker):
"""
Given:
- An app client object
- max_fetch = 1
When:
- Calling function fetch_incidents_command
- The list_alerts and list_incidents functions returns empty response
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_alerts',
return_value=util_load_json('test_data/empty_response.json'))
mocker.patch.object(mock_client, 'list_incidents',
return_value=util_load_json('test_data/empty_response.json'))
mocker.patch.object(OpsGenieV3, '_get_utc_now', return_value=datetime(2021, 11, 26))
mocker.patch.object(OpsGenieV3, '_parse_fetch_time', return_value='2021-11-23T12:19:48Z')
res, last_run = OpsGenieV3.fetch_incidents_command(mock_client, {"max_fetch": 1})
assert len(res) == 0
assert last_run == {'Alerts': {'lastRun': '2021-11-26T00:00:00Z', 'next_page': None},
'Incidents': {'lastRun': '2021-11-26T00:00:00Z', 'next_page': None}}
def test_fetch_with_paging_only_alerts(mocker):
"""
Given:
- An app client object
- max_fetch = 2
- event_types = OpsGenieV3.ALERT_TYPE
When:
- Calling function fetch_incidents_command
- The list_alerts function returns result with paging
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_alerts',
return_value=util_load_json('test_data/get_alerts.json'))
mocker.patch.object(mock_client, 'get_paged',
return_value=util_load_json('test_data/get_alerts_without_next.json'))
mocker.patch.object(OpsGenieV3, '_get_utc_now', return_value=datetime(2021, 11, 26))
mocker.patch.object(OpsGenieV3, '_parse_fetch_time', return_value='2021-11-23T12:19:48Z')
res, last_run = OpsGenieV3.fetch_incidents_command(mock_client, {"max_fetch": 2,
"event_types": OpsGenieV3.ALERT_TYPE})
assert (last_run == {'Alerts': {'lastRun': '2021-11-26T00:00:00Z',
'next_page': 'https://api.opsgenie.com/v2/alerts?limit=1&sort=createdAt&offset=1&order=desc'},
'Incidents': {'lastRun': None, 'next_page': None}})
mocker.patch.object(demisto, 'getLastRun', return_value=last_run)
res, last_run = OpsGenieV3.fetch_incidents_command(mock_client,
{"max_fetch": 2, "event_types": OpsGenieV3.ALERT_TYPE},
last_run)
assert (last_run == {'Alerts': {'lastRun': '2021-11-26T00:00:00Z', 'next_page': None},
'Incidents': {'lastRun': None, 'next_page': None}})
def test_fetch_with_paging_only_incidents(mocker):
"""
Given:
- An app client object
- max_fetch = 2
- event_types = OpsGenieV3.INCIDENT_TYPE
When:
- Calling function fetch_incidents_command
- The list_incidents function returns result with paging
Then:
- Ensure the return data is correct
"""
mocker.patch('CommonServerPython.get_demisto_version', return_value={"version": "6.2.0"})
mock_client = OpsGenieV3.Client(base_url="")
mocker.patch.object(mock_client, 'list_incidents',
return_value=util_load_json('test_data/get_incidents.json'))
mocker.patch.object(mock_client, 'get_paged',
return_value=util_load_json('test_data/get_incidents_without_next.json'))
mocker.patch.object(OpsGenieV3, '_get_utc_now', return_value=datetime(2021, 11, 26))
mocker.patch.object(OpsGenieV3, '_parse_fetch_time', return_value='2021-11-23T12:19:48Z')
res, last_run = OpsGenieV3.fetch_incidents_command(mock_client, {"max_fetch": 2,
"event_types": OpsGenieV3.INCIDENT_TYPE})
assert (last_run == {'Incidents': {'lastRun': '2021-11-26T00:00:00Z',
'next_page': 'https://api.opsgenie.com/v1/incidents?limit='
'1&sort=insertedAt&offset=1&order=desc'},
'Alerts': {'lastRun': None, 'next_page': None}})
mocker.patch.object(demisto, 'getLastRun', return_value=last_run)
res, last_run = OpsGenieV3.fetch_incidents_command(mock_client,
{"max_fetch": 2, "event_types": OpsGenieV3.INCIDENT_TYPE},
last_run)
assert (last_run == {'Incidents': {'lastRun': '2021-11-26T00:00:00Z', 'next_page': None},
'Alerts': {'lastRun': None, 'next_page': None}})
def test_build_query_fetch():
"""
Given:
- An app client object
- args
- is_fetch_query = True
When:
- Calling function build_query
Then:
- Ensure the return data is correct
"""
args = {
"query": "createdAt < 147039484114",
"status": "Open",
"is_fetch_query": True,
"priority": "P1,P3",
"tags": "1,2"
}
mock_client = OpsGenieV3.Client(base_url="")
res = mock_client.build_query(args)
assert (res == "createdAt < 147039484114 AND status=open AND priority: (P1 OR P3) AND tag: (1 OR 2)")
def test_build_query_not_fetch():
"""
Given:
- An app client object
- args
- is_fetch_query = False
When:
- Calling function build_query
Then:
- Ensure the return data is correct
"""
args = {
"query": "createdAt < 147039484114",
"status": "Open",
"is_fetch_query": False,
"priority": "P1,P3",
"tags": "1,2"
}
mock_client = OpsGenieV3.Client(base_url="")
res = mock_client.build_query(args)
assert (res == "createdAt < 147039484114")
def test_build_query_not_fetch_without_query():
"""
Given:
- An app client object
- args
- is_fetch_query = False
When:
- Calling function build_query
Then:
- Ensure the return data is correct
"""
args = {
"status": "Open",
"is_fetch_query": False,
"priority": "P1,P3",
"tags": "1,2"
}
mock_client = OpsGenieV3.Client(base_url="")
res = mock_client.build_query(args)
assert (res == "status=open AND priority: (P1 OR P3) AND tag: (1 OR 2)")
def test_responders_to_json_empty_value():
"""
Given:
- An app client object
- responders = {}
When:
- Calling function responders_to_json
Then:
- Ensure the return data is correct
"""
mock_client = OpsGenieV3.Client(base_url="")
res = mock_client.responders_to_json(responders={},
responder_key="responder")
assert (res == {})
|
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
create_server = {
'status_code': [202],
'response_body': {
'type': 'object',
'properties': {
'server': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'security_groups': {'type': 'array'},
'links': parameter_types.links,
'OS-DCF:diskConfig': {'type': 'string'}
},
'additionalProperties': False,
            # NOTE: OS-DCF:diskConfig & security_groups are API extensions,
            # and some environments return a response without these
            # attributes. So they are not 'required'.
'required': ['id', 'links']
}
},
'additionalProperties': False,
'required': ['server']
}
}
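# A minimal sketch (not part of tempest, assuming the ``jsonschema`` package is
# available) of how a response body can be checked against the schema above;
# 'security_groups' and 'OS-DCF:diskConfig' are optional extensions, so a body
# without them still validates.
if __name__ == '__main__':
    import jsonschema

    sample_body = {
        'server': {
            'id': 'example-server-id',
            'links': [{'href': 'http://compute.example.com/v2/servers/example',
                       'rel': 'self'}],
        }
    }
    jsonschema.validate(sample_body, create_server['response_body'])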
create_server_with_admin_pass = copy.deepcopy(create_server)
create_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
create_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
list_servers = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links,
'name': {'type': 'string'}
},
'additionalProperties': False,
'required': ['id', 'links', 'name']
}
},
'servers_links': parameter_types.links
},
'additionalProperties': False,
        # NOTE(gmann): the servers_links attribute is not always present,
        # so it is not 'required'.
'required': ['servers']
}
}
delete_server = {
'status_code': [204],
}
common_show_server = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
'image': {'oneOf': [
{'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'additionalProperties': False,
'required': ['id', 'links']},
{'type': ['string', 'null']}
]},
'flavor': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'additionalProperties': False,
'required': ['id', 'links']
},
'fault': {
'type': 'object',
'properties': {
'code': {'type': 'integer'},
'created': {'type': 'string'},
'message': {'type': 'string'},
'details': {'type': 'string'},
},
'additionalProperties': False,
            # NOTE(gmann): 'details' is not necessarily present in 'fault',
            # so it is not defined as 'required'.
'required': ['code', 'created', 'message']
},
'user_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
'created': {'type': 'string'},
'updated': {'type': 'string'},
'progress': {'type': 'integer'},
'metadata': {'type': 'object'},
'links': parameter_types.links,
'addresses': parameter_types.addresses,
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
'accessIPv4': parameter_types.access_ip_v4,
'accessIPv6': parameter_types.access_ip_v6
},
'additionalProperties': False,
    # NOTE(gmann): the 'progress' attribute is present in the response
    # only when the server's status is one of the progress statuses
    # ("ACTIVE", "BUILD", "REBUILD", "RESIZE", "VERIFY_RESIZE").
    # The 'fault' attribute is present in the response
    # only when the server's status is "ERROR" or "DELETED".
    # OS-DCF:diskConfig and accessIPv4/v6 are API
    # extensions, and some environments return a response
    # without these attributes. So these are not defined as 'required'.
'required': ['id', 'name', 'status', 'image', 'flavor',
'user_id', 'tenant_id', 'created', 'updated',
'metadata', 'links', 'addresses', 'hostId']
}
update_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': common_show_server
},
'additionalProperties': False,
'required': ['server']
}
}
server_detail = copy.deepcopy(common_show_server)
server_detail['properties'].update({
'key_name': {'type': ['string', 'null']},
'security_groups': {'type': 'array'},
# NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
# attributes.
'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
'OS-EXT-AZ:availability_zone': {'type': 'string'},
# NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
# attributes.
'OS-EXT-STS:task_state': {'type': ['string', 'null']},
'OS-EXT-STS:vm_state': {'type': 'string'},
'OS-EXT-STS:power_state': {'type': 'integer'},
'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
'os-extended-volumes:volumes_attached': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'}
},
'additionalProperties': False,
},
},
'config_drive': {'type': 'string'}
})
server_detail['properties']['addresses']['patternProperties'][
'^[a-zA-Z0-9-_.]+$']['items']['properties'].update({
'OS-EXT-IPS:type': {'type': 'string'},
'OS-EXT-IPS-MAC:mac_addr': parameter_types.mac_address})
# NOTE(gmann): Update OS-EXT-IPS:type and OS-EXT-IPS-MAC:mac_addr
# attributes in server address. Those are API extensions,
# and some environments return a response without
# these attributes. So they are not 'required'.
get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': server_detail
},
'additionalProperties': False,
'required': ['server']
}
}
list_servers_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': server_detail
},
'servers_links': parameter_types.links
},
'additionalProperties': False,
        # NOTE(gmann): the servers_links attribute is not always present,
        # so it is not 'required'.
'required': ['servers']
}
}
rebuild_server = copy.deepcopy(update_server)
rebuild_server['status_code'] = [202]
rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
rescue_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'adminPass': {'type': 'string'}
},
'additionalProperties': False,
'required': ['adminPass']
}
}
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'virtual_interfaces': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'mac_address': parameter_types.mac_address,
'OS-EXT-VIF-NET:net_id': {'type': 'string'}
},
'additionalProperties': False,
                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it is
                    # not defined as 'required'.
'required': ['id', 'mac_address']
}
}
},
'additionalProperties': False,
'required': ['virtual_interfaces']
}
}
common_attach_volume_info = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'device': {'type': ['string', 'null']},
'volumeId': {'type': 'string'},
'serverId': {'type': ['integer', 'string']}
},
'additionalProperties': False,
# 'device' is optional in response.
'required': ['id', 'volumeId', 'serverId']
}
attach_volume = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachment': common_attach_volume_info
},
'additionalProperties': False,
'required': ['volumeAttachment']
}
}
detach_volume = {
'status_code': [202]
}
show_volume_attachment = copy.deepcopy(attach_volume)
show_volume_attachment['response_body']['properties'][
'volumeAttachment']['properties'].update({'serverId': {'type': 'string'}})
list_volume_attachments = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachments': {
'type': 'array',
'items': common_attach_volume_info
}
},
'additionalProperties': False,
'required': ['volumeAttachments']
}
}
list_volume_attachments['response_body']['properties'][
'volumeAttachments']['items']['properties'].update(
{'serverId': {'type': 'string'}})
list_addresses_by_network = {
'status_code': [200],
'response_body': parameter_types.addresses
}
list_addresses = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'addresses': parameter_types.addresses
},
'additionalProperties': False,
'required': ['addresses']
}
}
common_server_group = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'policies': {
'type': 'array',
'items': {'type': 'string'}
},
# 'members' attribute contains the array of instance's UUID of
# instances present in server group
'members': {
'type': 'array',
'items': {'type': 'string'}
},
'metadata': {'type': 'object'}
},
'additionalProperties': False,
'required': ['id', 'name', 'policies', 'members', 'metadata']
}
create_show_server_group = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_group': common_server_group
},
'additionalProperties': False,
'required': ['server_group']
}
}
delete_server_group = {
'status_code': [204]
}
list_server_groups = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_groups': {
'type': 'array',
'items': common_server_group
}
},
'additionalProperties': False,
'required': ['server_groups']
}
}
instance_actions = {
'type': 'object',
'properties': {
'action': {'type': 'string'},
'request_id': {'type': 'string'},
'user_id': {'type': 'string'},
'project_id': {'type': 'string'},
'start_time': {'type': 'string'},
'message': {'type': ['string', 'null']},
'instance_uuid': {'type': 'string'}
},
'additionalProperties': False,
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message', 'instance_uuid']
}
instance_action_events = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'event': {'type': 'string'},
'start_time': {'type': 'string'},
'finish_time': {'type': 'string'},
'result': {'type': 'string'},
'traceback': {'type': ['string', 'null']}
},
'additionalProperties': False,
'required': ['event', 'start_time', 'finish_time', 'result',
'traceback']
}
}
list_instance_actions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceActions': {
'type': 'array',
'items': instance_actions
}
},
'additionalProperties': False,
'required': ['instanceActions']
}
}
instance_actions_with_events = copy.deepcopy(instance_actions)
instance_actions_with_events['properties'].update({
'events': instance_action_events})
# 'events' does not always come in the response body, so it is not
# defined as 'required'.
show_instance_action = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceAction': instance_actions_with_events
},
'additionalProperties': False,
'required': ['instanceAction']
}
}
show_password = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'password': {'type': 'string'}
},
'additionalProperties': False,
'required': ['password']
}
}
get_vnc_console = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'console': {
'type': 'object',
'properties': {
'type': {'type': 'string'},
'url': {
'type': 'string',
'format': 'uri'
}
},
'additionalProperties': False,
'required': ['type', 'url']
}
},
'additionalProperties': False,
'required': ['console']
}
}
get_console_output = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'output': {'type': 'string'}
},
'additionalProperties': False,
'required': ['output']
}
}
set_server_metadata = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'metadata': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'additionalProperties': False,
'required': ['metadata']
}
}
list_server_metadata = copy.deepcopy(set_server_metadata)
update_server_metadata = copy.deepcopy(set_server_metadata)
delete_server_metadata_item = {
'status_code': [204]
}
set_show_server_metadata_item = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'meta': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'additionalProperties': False,
'required': ['meta']
}
}
server_actions_common_schema = {
'status_code': [202]
}
server_actions_delete_password = {
'status_code': [204]
}
server_actions_confirm_resize = copy.deepcopy(
server_actions_delete_password)
update_attached_volume = {
'status_code': [202]
}
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None, "data_path")
FLAGS = flags.FLAGS
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
self._sequence_len = tf.placeholder(tf.int64, [batch_size])
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
if is_training and config.keep_prob < 1:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size])
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# from tensorflow.models.rnn import rnn
# inputs = [tf.squeeze(input_, [1])
# for input_ in tf.split(1, num_steps, inputs)]
# outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(1, outputs), [-1, size])
softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
softmax_b = tf.get_variable("softmax_b", [vocab_size])
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.nn.seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(self._targets, [-1])],
[tf.ones([batch_size * num_steps])])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
def assign_lr(self, session, lr_value):
session.run(tf.assign(self.lr, lr_value))
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def sequence_len(self):
return self._sequence_len
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
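# Illustrative helper, not used by the script: mirrors the learning-rate
# schedule computed in main() below. The decay exponent is
# max(i - max_epoch, 0), so the rate stays at learning_rate while
# i <= max_epoch and is then multiplied by lr_decay for every epoch after
# that (e.g. with SmallConfig: 1.0, ..., 1.0, 0.5, 0.25, ...).
def example_lr_schedule(config):
  """Returns the per-epoch learning rates implied by a config object."""
  return [config.learning_rate * config.lr_decay ** max(i - config.max_epoch, 0.0)
          for i in range(config.max_max_epoch)]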
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = m.initial_state.eval()
for step, (x, y, z) in enumerate(reader.ptb_iterator(data, m.batch_size,
m.num_steps)):
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
{m.input_data: x,
m.targets: y,
m.sequence_len: z,
m.initial_state: state})
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
elif FLAGS.model == "test":
return TestConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config)
mtest = PTBModel(is_training=False, config=eval_config)
tf.initialize_all_variables().run()
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, train_data, m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
print("Test Perplexity: %.3f" % test_perplexity)
if __name__ == "__main__":
tf.app.run()
|
|
"""Contains useful and reusable code for EPP scripts.
Classes, methods and exceptions.
Johannes Alneberg, Science for Life Laboratory, Stockholm, Sweden.
Copyright (C) 2013 Johannes Alneberg
"""
from __future__ import print_function
import logging
import sys
import os
import pkg_resources
from pkg_resources import DistributionNotFound
from shutil import copy
from requests import HTTPError
from genologics.entities import Artifact
from genologics.config import MAIN_LOG
from logging.handlers import RotatingFileHandler
from time import strftime, localtime
import csv
def attach_file(src,resource):
"""Attach file at src to given resource
Copies the file to the current directory, EPP node will upload this file
automatically if the process output is properly set up"""
original_name = os.path.basename(src)
new_name = resource.id + '_' + original_name
dir = os.getcwd()
location = os.path.join(dir,new_name)
copy(src,location)
return location
class EmptyError(ValueError):
"Raised if an iterator is unexpectedly empty."
pass
class NotUniqueError(ValueError):
"Raised if there are unexpectedly more than 1 item in an iterator"
pass
def unique_check(l,msg):
"Check that l is of length 1, otherwise raise error, with msg appended"
if len(l)==0:
raise EmptyError("No item found for {0}".format(msg))
elif len(l)!=1:
raise NotUniqueError("Multiple items found for {0}".format(msg))
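# Minimal usage sketch (illustrative only; names are made up): unique_check is
# typically called on a filtered list to assert that exactly one item matched:
#
#   matches = [a for a in artifacts if a.name == target_name]
#   unique_check(matches, "artifact named {0}".format(target_name))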
def set_field(element):
try:
element.put()
except (TypeError, HTTPError) as e:
logging.warning("Error while updating element: {0}".format(e))
class EppLogger(object):
"""Context manager for logging module useful for EPP script execution.
    This context manager (CM) automatically logs which script is executed,
    with what parameters it was executed, and which version (including commit
    hash) of the genologics package was used. Since EPP scripts are often run
    automatically by the genologics LIMS client, stdout and stderr are
    captured and logged within this CM. Stderr is duplicated so that the
    last line can be shown in the GUI. In order to track multiple runs
    of the same process from the genologics LIMS GUI, the previous log
    files can be prepended. Also, a main log file can be used that is
    supposed to be common for all scripts executed on the server.
    A short usage sketch is included after this class.
"""
PACKAGE = 'genologics'
def __enter__(self):
logging.info('Executing file: {0}'.format(sys.argv[0]))
logging.info('with parameters: {0}'.format(sys.argv[1:]))
try:
logging.info('Version of {0}: '.format(self.PACKAGE) +
pkg_resources.require(self.PACKAGE)[0].version)
except DistributionNotFound as e:
logging.error(e)
logging.error(('Make sure you have the {0} '
'package installed').format(self.PACKAGE))
sys.exit(-1)
return self
def __exit__(self,exc_type,exc_val,exc_tb):
# If no exception has occured in block, turn off logging.
if not exc_type:
logging.shutdown()
sys.stderr = self.saved_stderr
sys.stdout = self.saved_stdout
# Do not repress possible exception
return False
def __init__(self,log_file=None,level=logging.INFO,lims=None,prepend=False):
""" Initialize the logger with custom settings.
Arguments:
log_file -- file to write individual log to
Keyword Arguments:
level -- Logging level, default logging.INFO
lims -- Lims instance, needed for prepend to work
prepend -- If True, prepend old log file to new, requires lims
"""
self.lims = lims
self.log_file = log_file
self.level = level
self.prepend = prepend
if prepend and self.log_file:
self.prepend_old_log()
# Loggers that will capture stdout and stderr respectively
stdout_logger = logging.getLogger('STDOUT')
self.slo = self.StreamToLogger(stdout_logger, logging.INFO)
self.saved_stdout = sys.stdout
sys.stdout = self.slo
stderr_logger = logging.getLogger('STDERR')
self.saved_stderr = sys.stderr
# Duplicate stderr stream to log
self.sle = self.StreamToLogger(stderr_logger, logging.INFO,
self.saved_stderr)
sys.stderr = self.sle
# Root logger with filehandler(s)
self.logger = logging.getLogger()
self.logger.setLevel(self.level)
formatter = logging.Formatter(
'%(asctime)s:%(levelname)s:%(name)s:%(message)s')
if self.log_file:
individual_fh = logging.FileHandler(self.log_file,mode='a')
individual_fh.setFormatter(formatter)
self.logger.addHandler(individual_fh)
if MAIN_LOG:
# Rotating file handler, that will create up to 10 backup logs,
# each no bigger than 100MB.
main_fh = RotatingFileHandler(MAIN_LOG,mode='a',
maxBytes=1e8,backupCount=10)
main_fh.setFormatter(formatter)
self.logger.addHandler(main_fh)
else:
self.logger.warning('No main log file found.')
def prepend_old_log(self, external_log_file = None):
"""Prepend the old log to the new log.
The location of the old log file is retrieved through the REST api.
In order to work, the script should be executed on the LIMS server
since the location on the disk is parsed out from the sftp string
and then used for local copy of file.
        This method does not use logging since that could mess up the
        logging settings; instead, warnings are printed to stderr."""
if external_log_file:
log_file_name = external_log_file
else:
log_file_name = self.log_file
local_log_path = os.path.join(os.getcwd(), log_file_name)
if not os.path.isfile(local_log_path):
try:
log_artifact = Artifact(self.lims,id = log_file_name)
log_artifact.get()
if log_artifact.files:
log_path = log_artifact.files[0].content_location.split(
self.lims.baseuri.split(':')[1])[1]
copy(log_path, local_log_path)
with open(local_log_path,'a') as f:
f.write('='*80+'\n')
except HTTPError: # Probably no artifact found, skip prepending
print(('No log file artifact found '
'for id: {0}').format(log_file_name), file=sys.stderr)
except IOError as e: # Probably some path was wrong in copy
print(('Log could not be prepended, '
'make sure {0} and {1} are '
'proper paths.').format(log_path,
log_file_name), file=sys.stderr)
raise e
class StreamToLogger(object):
"""Fake file-like stream object that redirects writes to a logger
instance.
source:
http://www.electricmonk.nl/log/2011/08/14/
redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO, stream=None):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
self.stream = stream
def write(self, buf):
if self.stream:
self.stream.write(buf)
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
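# Illustrative usage sketch, not part of the original module: EppLogger is
# meant to wrap the body of an EPP script so that stdout/stderr and version
# information end up in the log. The variable names below are hypothetical.
#
#   with EppLogger(log_file=args.log, lims=lims, prepend=True):
#       main(lims, args)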
class ReadResultFiles():
"""Class to read pars different kinds of result files from a process.
The class stores the parsed content of all shared result files in a
dictionary 'shared_files'. The data is parsed as lists of lists. """
def __init__(self, process):
self.process = process
self.shared_files = self._pars_file('SharedResultFile')
self.perinput_files = self._pars_file('ResultFile')
def get_file_path(self, artifact):
if len(artifact.files) > 0:
file = artifact.files[0]
file_path = file.content_location.split('scilifelab.se')[1]
if len(file_path.split('.')) > 1:
return file_path
return None
def _pars_file(self, output_type):
"""Reads a csv or txt into a list of lists, where sub lists are lines
of the csv."""
outs = self.process.all_outputs()
outarts = [a for a in outs if a.output_type == output_type]
parsed_files = {}
for outart in outarts:
file_path = self.get_file_path(outart)
if file_path:
of = open(file_path ,'r')
file_ext = file_path.split('.')[-1]
if file_ext == 'csv':
pf = [row for row in csv.reader(of.read().splitlines())]
parsed_files[outart.name] = pf
elif file_ext == 'txt':
pf = [row.strip().strip('\\').split('\t') for row in of.readlines()]
parsed_files[outart.name] = pf
of.close()
return parsed_files
def format_file(self, parsed_file, name = '', first_header = None,
header_row = None, root_key_col = 0, find_keys = []):
"""Function to format a parsed csv or txt file.
Arguments and Output:
parsed_file A list of lists where sublists are rows of the csv.
name Name of parsed file.
            first_header    First column of the header section in the file.
                            Default value is None.
root_key_col If you want the root keys to be given by some other
column than the first one, set root_key_col to the
column number.
header_row Instead of specifying first_header you can choose
                            from which line to read by setting header_row to the
row number where you want to start reading.
find_keys List of row names to look for. Will exclude all
others.
file_info Dict of dicts. Keys of root dict are the first
column in the csv starting from the line after the
                            header line. Keys of sub dicts are the columns of
                            the header line. (An illustrative example follows
                            this method.)"""
file_info = {}
keys = []
error_message = ''
duplicated_lines = []
        exceptions = ['Sample', 'Fail', '']
if type(first_header) is not list:
if first_header:
first_header=[first_header]
else:
first_header=[]
for row, line in enumerate(parsed_file):
if keys and len(line)==len(keys):
root_key = line[root_key_col]
                cond1 = find_keys == [] and root_key not in exceptions
cond2 = root_key in find_keys
if root_key in file_info:
duplicated_lines.append(root_key)
elif (cond1 or cond2):
file_info[root_key] = {}
if not duplicated_lines:
for col in range(len(keys)):
if keys[col] != '':
file_info[root_key][keys[col]] = line[col]
elif keys[col-1] != '':
tupl = (file_info[root_key][keys[col-1]], line[col])
file_info[root_key][keys[col-1]] = tupl
head = line[root_key_col] if len(line) > root_key_col else None
if first_header and head in first_header:
keys = line
elif header_row and row == header_row:
keys = line
if duplicated_lines:
error_message = ("Row names {0} occurs more than once in file {1}. "
"Fix the file to continue. ").format(','.join(duplicated_lines), name)
if not file_info:
error_message = error_message + "Could not format parsed file {0}.".format(name)
if error_message:
print(error_message, file=sys.stderr)
sys.exit(-1)
return file_info
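# Illustrative example, not part of the original module: for a parsed csv such
# as [['Sample', 'Conc'], ['S1', '10'], ['S2', '12']] (column names made up),
# format_file(parsed, name='x.csv', first_header=['Sample']) returns a dict of
# dicts keyed on the first column:
#   {'S1': {'Sample': 'S1', 'Conc': '10'},
#    'S2': {'Sample': 'S2', 'Conc': '12'}}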
class CopyField(object):
"""Class to copy any filed (or udf) from any lims element to any
udf on any other lims element
arguments:
s_elt source element - instance of a type
d_elt destination element - instance of a type
s_field_name name of source field (or udf) to be copied
        d_udf_name      name of the destination udf. If not specified,
s_field_name will be used.
The copy_udf() function takes a log file as optional argument.
    If this is given, the changes will be logged there.
    (A usage sketch follows this class.)
    Written by Maya Brandi and Johannes Alneberg
"""
def __init__(self, s_elt, d_elt, s_field_name, d_udf_name = None):
if not d_udf_name:
d_udf_name = s_field_name
self.s_elt = s_elt
self.s_field_name = s_field_name
self.s_field = self._get_field(s_elt, s_field_name)
self.d_elt = d_elt
self.d_type = d_elt._URI
self.d_udf_name = d_udf_name
self.old_dest_udf = self._get_field(d_elt, d_udf_name)
def _current_time(self):
return strftime("%Y-%m-%d %H:%M:%S", localtime())
def _get_field(self, elt, field):
if field in elt.udf:
return elt.udf[field]
        else:
            try:
                # 'field' is a string name, so look it up with getattr rather
                # than accessing a literal attribute named 'field'.
                return getattr(elt, field)
            except AttributeError:
                return None
def _set_udf(self, elt, udf_name, val):
try:
elt.udf[udf_name] = val
elt.put()
return True
except (TypeError, HTTPError) as e:
print("Error while updating element: {0}".format(e), file=sys.stderr)
sys.exit(-1)
return False
def _log_before_change(self, changelog_f=None):
if changelog_f:
d = {'ct' : self._current_time(),
's_udf' : self.s_field_name,
'sn' : self.d_elt.name,
'si' : self.d_elt.id,
'su' : self.old_dest_udf,
'nv' : self.s_field,
'd_elt_type': self.d_type}
changelog_f.write(("{ct}: udf: '{s_udf}' on {d_elt_type}: '{sn}' ("
"id: {si}) is changed from '{su}' to '{nv}'.\n").format(**d))
logging.info(("Copying from element with id: {0} to element with "
" id: {1}").format(self.s_elt.id, self.d_elt.id))
def _log_after_change(self):
d = {'s_udf': self.s_field_name,
'd_udf': self.d_udf_name,
'su': self.old_dest_udf,
'nv': self.s_field,
'd_elt_type': self.d_type}
logging.info("Updated {d_elt_type} udf: {d_udf}, from {su} to "
"{nv}.".format(**d))
def copy_udf(self, changelog_f = None):
if self.s_field != self.old_dest_udf:
self._log_before_change(changelog_f)
log = self._set_udf(self.d_elt, self.d_udf_name, self.s_field)
self._log_after_change()
return log
else:
return False
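# Illustrative usage sketch, not part of the original module: copy a UDF from
# one artifact to another and record the change in a changelog file. The
# element and field names below are hypothetical.
#
#   copier = CopyField(source_artifact, dest_artifact, 'Concentration')
#   with open(changelog_path, 'a') as changelog_f:
#       copier.copy_udf(changelog_f)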
|
|
#!/usr/bin/python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utility binary to manage database."""
import os
import os.path
import sys
from flask.ext.script import Manager
from compass.actions import clean_deployment
from compass.actions import clean_installing_progress
from compass.actions import deploy
from compass.actions import reinstall
from compass.actions import search
from compass.api import app
from compass.config_management.utils import config_manager
from compass.db.api import database
from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
from compass.utils import util
flags.add('table_name',
help='table name',
default='')
flags.add('clusters',
help=(
'clusters and hosts of each cluster, the format is as '
'clusterid:hostname1,hostname2,...;...'),
default='')
flags.add_bool('async',
               help='run in async mode',
default=True)
flags.add('switch_machines_file',
help=(
'files for switches and machines '
'connected to each switch. each line in the file '
'is machine,<switch ip>,<switch port>,<vlan>,<mac> '
'or switch,<switch_ip>,<switch_vendor>,'
'<switch_version>,<switch_community>,<switch_state>'),
default='')
flags.add('search_cluster_properties',
help='comma separated properties to search in cluster config',
default='')
flags.add('print_cluster_properties',
help='comma separated cluster config properties to print',
default='')
flags.add('search_host_properties',
help='comma separated properties to search in host config',
default='')
flags.add('print_host_properties',
help='comma separated host config properties to print',
default='')
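# Illustrative example of the --clusters format used by several commands below
# (cluster ids and host names are made up):
#   --clusters="1:host1,host2;2:host3"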
app_manager = Manager(app, usage="Perform database operations")
TABLE_MAPPING = {
}
@app_manager.command
def list_config():
"List the commands."
for key, value in app.config.items():
print key, value
@app_manager.command
def checkdb():
"""check if db exists."""
if setting.DATABASE_TYPE == 'file':
if os.path.exists(setting.DATABASE_FILE):
sys.exit(0)
else:
sys.exit(1)
sys.exit(0)
@app_manager.command
def createdb():
"""Creates database from sqlalchemy models."""
try:
dropdb()
except Exception:
pass
if setting.DATABASE_TYPE == 'file':
if os.path.exists(setting.DATABASE_FILE):
os.remove(setting.DATABASE_FILE)
database.create_db()
if setting.DATABASE_TYPE == 'file':
os.chmod(setting.DATABASE_FILE, 0o777)
@app_manager.command
def dropdb():
"""Drops database from sqlalchemy models."""
database.drop_db()
@app_manager.command
def createtable():
"""Create database table."""
if not flags.OPTIONS.table_name:
print 'flag --table_name is missing'
return
table_name = flags.OPTIONS.table_name
if table_name not in TABLE_MAPPING:
print '--table_name should be in %s' % TABLE_MAPPING.keys()
return
database.create_table(TABLE_MAPPING[table_name])
@app_manager.command
def droptable():
"""Drop database table."""
if not flags.OPTIONS.table_name:
print 'flag --table_name is missing'
return
table_name = flags.OPTIONS.table_name
if table_name not in TABLE_MAPPING:
print '--table_name should be in %s' % TABLE_MAPPING.keys()
return
database.drop_table(TABLE_MAPPING[table_name])
@app_manager.command
def sync_from_installers():
"""set adapters in Adapter table from installers."""
with database.session():
manager = config_manager.ConfigManager()
manager.update_adapters_from_installers()
@app_manager.command
def sync_switch_configs():
"""Set switch configs in SwitchConfig table from setting.
.. note::
the switch config is stored in SWITCHES list in setting config.
for each entry in the SWITCHES, its type is dict and must contain
fields 'switch_ips' and 'filter_ports'.
The format of switch_ips is
<ip_blocks>.<ip_blocks>.<ip_blocks>.<ip_blocks>.
        ip_blocks consists of ip_block entries separated by commas.
        ip_block can be an integer or a range of integers like xx-xx.
        An example of switch_ips is: xxx.xxx.xxx-yyy,xxx-yyy.xxx,yyy
        The format of filter_ports consists of a list of
        <port_prefix><port_range> entries separated by commas. port_range can be
        an integer or a range of integers like xx-xx.
        An example of filter_ports is: ae1-5,20-40.
        (An illustrative SWITCHES entry follows this function.)
"""
with database.session():
manager = config_manager.ConfigManager()
manager.update_switch_filters()
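# Illustrative SWITCHES entry matching the format described in
# sync_switch_configs above (addresses and ports are made up):
#   {'switch_ips': '192.168.1-5,10.1-254', 'filter_ports': 'ae1-5,20-40'}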
@app_manager.command
def clean_clusters():
"""Delete clusters and hosts.
.. note::
The clusters and hosts are defined in --clusters.
        The clusters flag format is clusterid:hostname1,hostname2,...;...
"""
cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
if flags.OPTIONS.async:
celery.send_task('compass.tasks.clean_deployment', (cluster_hosts,))
else:
clean_deployment.clean_deployment(cluster_hosts)
@app_manager.command
def clean_installation_progress():
"""Clean clusters and hosts installation progress.
.. note::
        The clusters and hosts are defined in --clusters.
        The clusters flag format is clusterid:hostname1,hostname2,...;...
"""
cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
if flags.OPTIONS.async:
celery.send_task('compass.tasks.clean_installing_progress',
(cluster_hosts,))
else:
clean_installing_progress.clean_installing_progress(cluster_hosts)
@app_manager.command
def reinstall_clusters():
"""Reinstall hosts in clusters.
.. note::
The hosts are defined in --clusters.
        The clusters flag format is clusterid:hostname1,hostname2,...;...
"""
cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
if flags.OPTIONS.async:
celery.send_task('compass.tasks.reinstall', (cluster_hosts,))
else:
reinstall.reinstall(cluster_hosts)
@app_manager.command
def deploy_clusters():
"""Deploy hosts in clusters.
.. note::
The hosts are defined in --clusters.
        The clusters flag format is clusterid:hostname1,hostname2,...;...
"""
cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
if flags.OPTIONS.async:
celery.send_task('compass.tasks.deploy', (cluster_hosts,))
else:
deploy.deploy(cluster_hosts)
@app_manager.command
def set_switch_machines():
"""Set switches and machines.
.. note::
--switch_machines_file is the filename which stores all switches
and machines information.
        Each line in the file represents one machine or one switch.
        The format of each line is machine,<switch_ip>,<switch_port>,<vlan>,<mac>
or switch,<switch_ip>,<switch_vendor>,<switch_version>,
<switch_community>,<switch_state>
"""
if not flags.OPTIONS.switch_machines_file:
print 'flag --switch_machines_file is missing'
return
switches, switch_machines = util.get_switch_machines_from_file(
flags.OPTIONS.switch_machines_file)
with database.session():
manager = config_manager.ConfigManager()
manager.update_switch_and_machines(switches, switch_machines)
@app_manager.command
def search_cluster_hosts():
"""Search cluster hosts by properties.
.. note::
--search_cluster_properties defines what properties are used to search.
the format of search_cluster_properties is as
<property_name>=<property_value>;... If no search properties are set,
        it will return the properties of all clusters.
--print_cluster_properties defines what properties to print.
the format of print_cluster_properties is as
<property_name>;...
--search_host_properties defines what properties are used to search.
the format of search_host_properties is as
<property_name>=<property_value>;... If no search properties are set,
        it will return the properties of all hosts.
--print_host_properties defines what properties to print.
the format of print_host_properties is as
<property_name>;...
"""
cluster_properties = util.get_properties_from_str(
flags.OPTIONS.search_cluster_properties)
cluster_properties_name = util.get_properties_name_from_str(
flags.OPTIONS.print_cluster_properties)
host_properties = util.get_properties_from_str(
flags.OPTIONS.search_host_properties)
host_properties_name = util.get_properties_name_from_str(
flags.OPTIONS.print_host_properties)
cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
cluster_properties, cluster_host_properties = search.search(
cluster_hosts, cluster_properties,
cluster_properties_name, host_properties,
host_properties_name)
print 'clusters properties:'
util.print_properties(cluster_properties)
for clusterid, host_properties in cluster_host_properties.items():
print 'hosts properties under cluster %s' % clusterid
util.print_properties(host_properties)
if __name__ == "__main__":
flags.init()
logsetting.init()
app_manager.run()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The compatible tensorflow library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf1
from tensorflow.compat.v2 import * # pylint:disable=wildcard-import, g-bad-import-order
# Import absl.flags and absl.logging to overwrite the Tensorflow ones.
# This is the intended behavior in TF 2.0.
# pylint:disable=g-bad-import-order, unused-import, g-import-not-at-top
from absl import flags
from absl import logging
# pylint: disable=g-direct-tensorflow-import
from REDACTED import config_pb2
from REDACTED.tensorflow.python.data.ops import dataset_ops
from REDACTED.tensorflow.python.framework import function as _function_lib
from REDACTED.tensorflow.python.framework import ops
from REDACTED.tensorflow.python.ops import array_ops
from REDACTED.tensorflow.python.ops import check_ops
from REDACTED.tensorflow.python.ops import embedding_ops
from REDACTED.tensorflow.python.ops import functional_ops
from REDACTED.tensorflow.python.ops import inplace_ops
from REDACTED.tensorflow.python.ops import math_ops
from REDACTED.tensorflow.python.util import module_wrapper as _module_wrapper
from REDACTED.tensorflow.python.platform import app
# pylint: enable=g-direct-tensorflow-import
# pylint: enable=unused-import, g-bad-import-order, g-import-not-at-top
if tf1.executing_eagerly():
logging.warning("Lingvo does not support eager execution yet. Please disable "
"eager execution with tf.compat.v1.disable_eager_execution() "
"or proceed at your own risk.")
def _clone_module(m):
"""Shallow clone of module `m`."""
if isinstance(m, _module_wrapper.TFModuleWrapper):
# pylint: disable=protected-access
return _module_wrapper.TFModuleWrapper(
wrapped=_clone_module(m._tfmw_wrapped_module),
module_name=m._tfmw_module_name,
public_apis=m._tfmw_public_apis,
deprecation=m._tfmw_print_deprecation_warnings,
has_lite=m._tfmw_has_lite)
# pylint: enable=protected-access
out = type(m)(m.__name__, m.__doc__)
out.__dict__.update(m.__dict__)
return out
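# Note (added for clarity): the assignments further down, such as
# `data.Dataset = tf1.data.Dataset`, would otherwise mutate the shared TF
# modules for every importer; cloning first keeps those V1 overrides local to
# this compatibility module.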
# Aliases to a few routines lingvo libraries uses often.
Defun = _function_lib.Defun
While = functional_ops.While
If = functional_ops.If
InplaceUpdate = inplace_ops.alias_inplace_update
Empty = inplace_ops.empty
EmptyLike = inplace_ops.empty_like
# pylint: disable=undefined-variable, used-before-assignment
# Move this V2 symbol here to avoid being overwritten by its following V1
# version.
where_v2 = where
while_loop_v2 = while_loop
# Clone the local V2 modules to make sure the following V1 overwriting never
# applies to the global modules and symbols.
data = _clone_module(data)
graph_util = _clone_module(graph_util)
image = _clone_module(image)
io = _clone_module(io)
losses = _clone_module(keras.losses)
metrics = _clone_module(keras.metrics)
nn = _clone_module(nn)
saved_model = _clone_module(saved_model)
strings = _clone_module(strings)
summary = _clone_module(summary)
test = _clone_module(test)
train = _clone_module(train)
# pylint: enable=undefined-variable, used-before-assignment
# TF 1.x symbols used in the codebase.
# To keep this list short, please use TF 2.x API whenever applicable.
# Only use TF 1.x API if it has no 2.x equivalent.
# pylint: disable=undefined-variable
add_to_collection = tf1.add_to_collection
all_variables = tf1.global_variables
# The following asserts can be directly replaced with TF2 `tf.debugging.*`
# after TF2/eager is enabled.
assert_integer = tf1.assert_integer
assert_positive = tf1.assert_positive
assert_type = tf1.assert_type
assert_scalar = tf1.assert_scalar
assign = tf1.assign
assign_add = tf1.assign_add
assign_sub = tf1.assign_sub
AUTO_REUSE = tf1.AUTO_REUSE
container = tf1.container
data.Dataset = tf1.data.Dataset
data.TFRecordDataset = tf1.data.TFRecordDataset
device = tf1.device
Dimension = tf1.Dimension
disable_eager_execution = tf1.disable_eager_execution
div = tf1.div
enable_eager_execution = tf1.enable_eager_execution
floor_div = tf1.floor_div
get_collection = tf1.get_collection
get_collection_ref = tf1.get_collection_ref
get_default_graph = tf1.get_default_graph
get_local_variable = tf1.get_local_variable
get_seed = tf1.get_seed
get_variable = tf1.get_variable
get_variable_scope = tf1.get_variable_scope
global_variables = tf1.global_variables
global_variables_initializer = tf1.global_variables_initializer
gradients = tf1.gradients
graph_util.convert_variables_to_constants = (
tf1.graph_util.convert_variables_to_constants)
graph_util.extract_sub_graph = tf1.graph_util.extract_sub_graph
GraphDef = tf1.GraphDef
GraphKeys = tf1.GraphKeys
GraphOptions = tf1.GraphOptions
group = tf1.group
image.resize_bilinear = tf1.image.resize_bilinear
image.resize_images = tf1.image.resize_images
image.resize_nearest_neighbor = tf1.image.resize_nearest_neighbor
initialize_all_tables = tf1.initialize_all_tables
InteractiveSession = tf1.InteractiveSession
io.tf_record_iterator = tf1.io.tf_record_iterator
layers = tf1.layers
local_variables_initializer = tf1.local_variables_initializer
losses.absolute_difference = tf1.losses.absolute_difference
losses.add_loss = tf1.losses.add_loss
losses.compute_weighted_loss = tf1.losses.compute_weighted_loss
losses.get_regularization_loss = tf1.losses.get_regularization_loss
losses.huber_loss = tf1.losses.huber_loss
losses.mean_squared_error = tf1.losses.mean_squared_error
losses.Reduction.MEAN = tf1.losses.Reduction.MEAN
losses.Reduction.SUM = tf1.losses.Reduction.SUM
losses.sigmoid_cross_entropy = tf1.losses.sigmoid_cross_entropy
losses.softmax_cross_entropy = tf1.losses.softmax_cross_entropy
losses.sparse_softmax_cross_entropy = (tf1.losses.sparse_softmax_cross_entropy)
make_template = tf1.make_template
metrics.accuracy = tf1.metrics.accuracy
metrics.auc = tf1.metrics.auc
metrics.precision = tf1.metrics.precision
metrics.recall = tf1.metrics.recall
moving_average_variables = tf1.moving_average_variables
multinomial = tf1.multinomial
name_scope = tf1.name_scope
OptimizerOptions = tf1.OptimizerOptions
placeholder = tf1.placeholder
placeholder_with_default = tf1.placeholder_with_default
Print = tf1.Print
py_func = tf1.py_func
python_io = tf1.python_io
report_uninitialized_variables = tf1.report_uninitialized_variables
reset_default_graph = tf1.reset_default_graph
resource_loader = tf1.resource_loader
RunMetadata = tf1.RunMetadata
RunOptions = tf1.RunOptions
saved_model.build_signature_def = tf1.saved_model.build_signature_def
saved_model.Builder = tf1.saved_model.Builder
saved_model.load = tf1.saved_model.load
saved_model.loader = tf1.saved_model.loader
saved_model.signature_constants = tf1.saved_model.signature_constants
saved_model.simple_save = tf1.saved_model.simple_save
saved_model.tag_constants = tf1.saved_model.tag_constants
saved_model.utils = tf1.saved_model.utils
Session = tf1.Session
sparse_to_dense = tf1.sparse_to_dense
string_split = tf1.string_split
strings.reduce_join = tf1.reduce_join
strings.split = tf1.strings.split
Summary = tf1.Summary
if tf1.summary is not None:
# tf.summary are not supported on TPU so we sometimes set tf.summary to None
# to prohibit the direct use of it.
# It is safe to skip copying tf.summary members in such cases.
summary.audio = tf1.summary.audio
summary.FileWriter = tf1.summary.FileWriter
summary.histogram = tf1.summary.histogram
summary.image = tf1.summary.image
summary.merge = tf1.summary.merge
summary.merge_all = tf1.summary.merge_all
summary.scalar = tf1.summary.scalar
summary.Summary = tf1.summary.Summary
summary.Summary.FromString = tf1.summary.Summary.FromString
tables_initializer = tf1.tables_initializer
test.compute_gradient_error = tf1.test.compute_gradient_error
test.get_temp_dir = tf1.test.get_temp_dir
test.mock = tf1.test.mock
tpu = tf1.tpu
train.AdadeltaOptimizer = tf1.train.AdadeltaOptimizer
train.AdagradOptimizer = tf1.train.AdagradOptimizer
train.AdamOptimizer = tf1.train.AdamOptimizer
train.export_meta_graph = tf1.train.export_meta_graph
train.get_or_create_global_step = tf1.train.get_or_create_global_step
train.get_global_step = tf1.train.get_global_step
train.GradientDescentOptimizer = tf1.train.GradientDescentOptimizer
train.MomentumOptimizer = tf1.train.MomentumOptimizer
train.MonitoredTrainingSession = tf1.train.MonitoredTrainingSession
train.NewCheckpointReader = tf1.train.NewCheckpointReader
train.Optimizer = tf1.train.Optimizer
train.RMSPropOptimizer = tf1.train.RMSPropOptimizer
train.Saver = tf1.train.Saver
train.SaverDef = tf1.train.SaverDef
train.summary_iterator = tf1.train.summary_iterator
trainable_variables = tf1.trainable_variables
Variable = tf1.Variable
variables_initializer = tf1.variables_initializer
VariableScope = tf1.VariableScope
variable_scope = tf1.variable_scope
where = tf1.where
while_loop = tf1.while_loop
wrap_function = tf1.wrap_function
# Explicit 1.x symbol import.
data.make_initializable_iterator = dataset_ops.make_initializable_iterator
data.make_one_shot_iterator = dataset_ops.make_one_shot_iterator
# For `nn.embedding_lookup`, v2 doesn't have the arg 'partition_strategy' in
# the API, and uses 'partition_strategy="div"' by default;
# while v1 uses 'partition_strategy="mod"' by default. Keep this for now.
nn.embedding_lookup = embedding_ops.embedding_lookup
# pylint: enable=undefined-variable
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1beta1.types import (
training_pipeline as gca_training_pipeline,
)
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"CreateTrainingPipelineRequest",
"GetTrainingPipelineRequest",
"ListTrainingPipelinesRequest",
"ListTrainingPipelinesResponse",
"DeleteTrainingPipelineRequest",
"CancelTrainingPipelineRequest",
"CreatePipelineJobRequest",
"GetPipelineJobRequest",
"ListPipelineJobsRequest",
"ListPipelineJobsResponse",
"DeletePipelineJobRequest",
"CancelPipelineJobRequest",
},
)
class CreateTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline].
Attributes:
parent (str):
Required. The resource name of the Location to create the
TrainingPipeline in. Format:
``projects/{project}/locations/{location}``
training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline):
Required. The TrainingPipeline to create.
"""
parent = proto.Field(proto.STRING, number=1,)
training_pipeline = proto.Field(
proto.MESSAGE, number=2, message=gca_training_pipeline.TrainingPipeline,
)
class GetTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline].
Attributes:
name (str):
Required. The name of the TrainingPipeline resource. Format:
``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListTrainingPipelinesRequest(proto.Message):
r"""Request message for
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines].
Attributes:
parent (str):
Required. The resource name of the Location to list the
TrainingPipelines from. Format:
``projects/{project}/locations/{location}``
filter (str):
Lists the PipelineJobs that match the filter expression. The
following fields are supported:
- ``pipeline_name``: Supports ``=`` and ``!=`` comparisons.
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``labels``: Supports key-value equality and key presence.
Filter expressions can be combined together using logical
operators (``AND`` & ``OR``). For example:
``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``.
The syntax to define filter expression is based on
https://google.aip.dev/160.
Examples:
- ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"``
PipelineJobs created or updated after 2020-05-18 00:00:00
UTC.
- ``labels.env = "prod"`` PipelineJobs with label "env" set
to "prod".
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]
call.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
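# Illustrative sketch, not part of the generated code: proto-plus messages
# accept their fields as keyword arguments, so a request could be built as
# below (the parent value is a made-up example).
#
#   request = ListTrainingPipelinesRequest(
#       parent="projects/my-project/locations/us-central1",
#       filter='labels.env = "prod"',
#       page_size=50,
#   )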
class ListTrainingPipelinesResponse(proto.Message):
r"""Response message for
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]
Attributes:
training_pipelines (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline]):
List of TrainingPipelines in the requested
page.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
training_pipelines = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_training_pipeline.TrainingPipeline,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline].
Attributes:
name (str):
Required. The name of the TrainingPipeline resource to be
deleted. Format:
``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
"""
name = proto.Field(proto.STRING, number=1,)
class CancelTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline].
Attributes:
name (str):
Required. The name of the TrainingPipeline to cancel.
Format:
``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
"""
name = proto.Field(proto.STRING, number=1,)
class CreatePipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob].
Attributes:
parent (str):
Required. The resource name of the Location to create the
PipelineJob in. Format:
``projects/{project}/locations/{location}``
pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob):
Required. The PipelineJob to create.
pipeline_job_id (str):
The ID to use for the PipelineJob, which will become the
final component of the PipelineJob name. If not provided, an
ID will be automatically generated.
This value should be less than 128 characters, and valid
characters are /[a-z][0-9]-/.
"""
parent = proto.Field(proto.STRING, number=1,)
pipeline_job = proto.Field(
proto.MESSAGE, number=2, message=gca_pipeline_job.PipelineJob,
)
pipeline_job_id = proto.Field(proto.STRING, number=3,)
class GetPipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob].
Attributes:
name (str):
Required. The name of the PipelineJob resource. Format:
``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListPipelineJobsRequest(proto.Message):
r"""Request message for
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs].
Attributes:
parent (str):
Required. The resource name of the Location to list the
PipelineJobs from. Format:
``projects/{project}/locations/{location}``
filter (str):
The standard list filter. Supported fields:
- ``display_name`` supports ``=`` and ``!=``.
- ``state`` supports ``=`` and ``!=``.
The following examples demonstrate how to filter the list of
PipelineJobs:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token]
of the previous
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]
call.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListPipelineJobsResponse(proto.Message):
r"""Response message for
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]
Attributes:
pipeline_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob]):
List of PipelineJobs in the requested page.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
pipeline_jobs = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_pipeline_job.PipelineJob,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeletePipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob].
Attributes:
name (str):
Required. The name of the PipelineJob resource to be
deleted. Format:
``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
"""
name = proto.Field(proto.STRING, number=1,)
class CancelPipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob].
Attributes:
name (str):
Required. The name of the PipelineJob to cancel. Format:
``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
"""
name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"Bijector",
]
class _Mapping(collections.namedtuple(
"_Mapping", ["x", "y", "ildj", "kwargs"])):
"""Helper class to make it easier to manage caching in `Bijector`."""
def __new__(cls, x=None, y=None, ildj=None, kwargs=None):
"""Custom __new__ so namedtuple items have defaults.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj: `Tensor`. Inverse log det Jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
Returns:
mapping: New instance of _Mapping.
"""
return super(_Mapping, cls).__new__(cls, x, y, ildj, kwargs)
@property
def x_key(self):
"""Returns key used for caching Y=g(X)."""
return (self.x,) + self._deep_tuple(tuple(sorted(self.kwargs.items())))
@property
def y_key(self):
"""Returns key used for caching X=g^{-1}(Y)."""
return (self.y,) + self._deep_tuple(tuple(sorted(self.kwargs.items())))
def merge(self, x=None, y=None, ildj=None, kwargs=None, mapping=None):
"""Returns new _Mapping with args merged with self.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj: `Tensor`. Inverse log det Jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
arg is specified.
Returns:
mapping: New instance of `_Mapping` which has inputs merged with self.
Raises:
ValueError: if mapping and any other arg is not `None`.
"""
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj=ildj, kwargs=kwargs)
elif not all(arg is None for arg in [x, y, ildj, kwargs]):
raise ValueError("Cannot specify mapping and individual args.")
return _Mapping(
x=self._merge(self.x, mapping.x),
y=self._merge(self.y, mapping.y),
ildj=self._merge(self.ildj, mapping.ildj),
kwargs=self._merge(self.kwargs, mapping.kwargs))
def _merge(self, old, new):
"""Helper to merge which handles merging one value."""
if old is None:
return new
elif new is not None and old != new:
raise ValueError("Incompatible values: %s != %s" % (old, new))
return old
def _deep_tuple(self, x):
"""Converts lists of lists to tuples of tuples."""
return (tuple(map(self._deep_tuple, x))
if isinstance(x, (list, tuple)) else x)
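# Illustrative note, not part of the original source: merge() fills in missing
# entries without overwriting known ones, so a cache entry can be completed
# incrementally, e.g.
#   m = _Mapping(x=x, kwargs={})        # only the forward input is known
#   m = m.merge(y=y, ildj=ildj)         # later add the inverse and its log-det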
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
"""Interface for invertible transformations of a `Distribution` sample.
#### Mathematical Details
A `Bijector` implements a
[diffeomorphism](https://en.wikipedia.org/wiki/Diffeomorphism), i.e., a
bijective, differentiable function. A `Bijector` is used by
`TransformedDistribution` but can be generally used for transforming a
`Distribution` generated `Tensor`. A `Bijector` is characterized by three
operations:
1. Forward Evaluation
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse Evaluation
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. (log o det o Jacobian o inverse)(x)
"The log of the determinant of the matrix of all first-order partial
derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the det(Jacobian) is the volume of the
transformation and is used to scale the probability.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples, the
inverse is useful for computing probabilities.
#### Example Uses
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_prob(bijector, log_prob, x):
return (bijector.inverse_log_det_jacobian(x) +
log_prob(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
#### Example Bijectors
- "Exponential"
```none
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```none
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
Here is an example of how one might implement the `Exp` bijector:
```python
class Exp(Bijector):
def __init__(self, event_ndims=0, validate_args=False, name="exp"):
super(Exp, self).__init__(
event_ndims=event_ndims, validate_args=validate_args, name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse(self, y):
return math_ops.log(y)
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(self._inverse(y))
def _forward_log_det_jacobian(self, x):
if self.event_ndims is None:
raise ValueError("Jacobian requires known event_ndims.")
event_dims = array_ops.shape(x)[-self.event_ndims:]
return math_ops.reduce_sum(x, axis=event_dims)
```
- "Affine"
```none
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```none
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
#### Jacobian
The Jacobian is a reduction over event dims. To see this, consider the `Exp`
`Bijector` applied to a `Tensor` which has sample, batch, and event (S, B, E)
shape semantics. Suppose the `Tensor`'s partitioned-shape is `(S=[4], B=[2],
E=[3, 3])`. The shape of the `Tensor` returned by `forward` and `inverse` is
unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
`inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
over the event dimensions.
It is sometimes useful to implement the inverse Jacobian as the negative
forward Jacobian. For example,
```python
def _inverse_log_det_jacobian(self, y):
    return -self._forward_log_det_jacobian(self._inverse(y))  # Note negation.
```
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
  The same theorem also implies `g^{-1}'` is non-singular, therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
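A quick numerical check of the claim for the scalar `exp` bijection (NumPy
only; illustrative):
```python
import numpy as np
x = np.linspace(-2., 2., 5)
y = np.exp(x)               # Y = g(X) = exp(X)
forward_ldj = x             # log|g'(x)|      = log(exp(x)) = x
inverse_ldj = -np.log(y)    # log|g^{-1}'(y)| = log(1 / y)  = -log(y)
assert np.allclose(inverse_ldj, -forward_ldj)
```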
Generally it's preferable to directly implement the inverse Jacobian. This
should have superior numerical stability and will often share subgraphs with
the `_inverse` implementation.
#### Subclass Requirements
- Subclasses typically implement:
- `_forward`,
- `_inverse`,
- `_inverse_log_det_jacobian`,
- `_forward_log_det_jacobian` (optional).
The `_forward_log_det_jacobian` is called when the bijector is inverted via
the `Invert` bijector. If undefined, a slightly less efficient
calculation, `-1 * _inverse_log_det_jacobian`, is used.
If the bijector changes the shape of the input, you must also implement:
- _forward_event_shape_tensor,
- _forward_event_shape (optional),
- _inverse_event_shape_tensor,
- _inverse_event_shape (optional).
By default the event-shape is assumed unchanged from input.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
to implement both `_forward` and `_inverse`.
Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See "Example Uses" [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
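For instance, Example 1 above can be satisfied by a forward-only bijector.
A minimal sketch (the `ShiftOnly` name is hypothetical, not part of this
library; `inverse` and the Jacobian methods would raise `NotImplementedError`):
```python
class ShiftOnly(Bijector):
  # Sampling-only sketch: implements `_forward` and nothing else.
  def __init__(self, shift, validate_args=False, name="shift_only"):
    self._shift = shift
    super(ShiftOnly, self).__init__(
        event_ndims=0, validate_args=validate_args, name=name)
  def _forward(self, x):
    return x + self._shift
```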
"""
@abc.abstractmethod
def __init__(self,
event_ndims=None,
graph_parents=None,
is_constant_jacobian=False,
validate_args=False,
dtype=None,
name=None):
"""Constructs Bijector.
A `Bijector` transforms random variables into new random variables.
Examples:
```python
# Create the Y = g(X) = X transform which operates on vector events.
identity = Identity(event_ndims=1)
# Create the Y = g(X) = exp(X) transform which operates on matrices.
exp = Exp(event_ndims=2)
```
See `Bijector` subclass docstring for more details and specific examples.
Args:
event_ndims: number of dimensions associated with event coordinates.
graph_parents: Python list of graph prerequisites of this `Bijector`.
is_constant_jacobian: Python `bool` indicating that the Jacobian is not a
function of the input.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
enforced.
name: The name to give Ops created by the initializer.
"""
self._event_ndims = (
ops.convert_to_tensor(event_ndims, dtype=dtypes.int32)
if event_ndims is not None else None)
self._graph_parents = graph_parents or []
self._is_constant_jacobian = is_constant_jacobian
self._validate_args = validate_args
self._dtype = dtype
self._from_y = {}
self._from_x = {}
# Using abbreviation ildj for "inverse log det Jacobian."
# This variable is not `None` iff is_constant_jacobian is `True`.
self._constant_ildj = None
if name:
self._name = name
else:
# We want the default convention to be snake_case rather than CamelCase
# since `Chain` uses bijector.name as the kwargs dictionary key.
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
self._name = camel_to_snake(type(self).__name__.lstrip("_"))
@property
def event_ndims(self):
"""Returns then number of event dimensions this bijector operates on."""
return self._event_ndims
@property
def graph_parents(self):
"""Returns this `Bijector`'s graph_parents as a Python list."""
return self._graph_parents
@property
def is_constant_jacobian(self):
"""Returns true iff the Jacobian is not a function of x.
Note: Jacobian is either constant for both forward and inverse or neither.
Returns:
is_constant_jacobian: Python `bool`.
"""
return self._is_constant_jacobian
@property
def validate_args(self):
"""Returns True if Tensor arguments will be validated."""
return self._validate_args
@property
def dtype(self):
"""dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
def name(self):
"""Returns the string name of this `Bijector`."""
return self._name
def _forward_event_shape_tensor(self, input_shape):
"""Subclass implementation for `forward_event_shape_tensor` function."""
# By default, we assume event_shape is unchanged.
return input_shape
def forward_event_shape_tensor(self,
input_shape,
name="forward_event_shape_tensor"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
forward_event_shape_tensor: `Tensor`, `int32` vector indicating
event-portion shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
name="input_shape")
return self._forward_event_shape_tensor(input_shape)
def _forward_event_shape(self, input_shape):
"""Subclass implementation for `forward_event_shape` public function."""
# By default, we assume event_shape is unchanged.
return input_shape
def forward_event_shape(self, input_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `forward_event_shape_tensor`. May be only partially defined.
Args:
input_shape: `TensorShape` indicating event-portion shape passed into
`forward` function.
Returns:
forward_event_shape: `TensorShape` indicating event-portion shape
after applying `forward`. Possibly unknown.
"""
return self._forward_event_shape(tensor_shape.TensorShape(input_shape))
def _inverse_event_shape_tensor(self, output_shape):
"""Subclass implementation for `inverse_event_shape_tensor` function."""
# By default, we assume event_shape is unchanged.
return output_shape
def inverse_event_shape_tensor(self,
output_shape,
name="inverse_event_shape_tensor"):
"""Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
inverse_event_shape_tensor: `Tensor`, `int32` vector indicating
event-portion shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32,
name="output_shape")
return self._inverse_event_shape_tensor(output_shape)
def _inverse_event_shape(self, output_shape):
"""Subclass implementation for `inverse_event_shape` public function."""
# By default, we assume event_shape is unchanged.
return tensor_shape.TensorShape(output_shape)
def inverse_event_shape(self, output_shape):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `inverse_event_shape_tensor`. May be only partially defined.
Args:
output_shape: `TensorShape` indicating event-portion shape passed into
`inverse` function.
Returns:
inverse_event_shape: `TensorShape` indicating event-portion shape
after applying `inverse`. Possibly unknown.
"""
return self._inverse_event_shape(output_shape)
def _forward(self, x):
"""Subclass implementation for `forward` public function."""
raise NotImplementedError("forward not implemented.")
def _call_forward(self, x, name, **kwargs):
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, kwargs=kwargs)
if mapping.y is not None:
return mapping.y
mapping = mapping.merge(y=self._forward(x, **kwargs))
self._cache(mapping)
return mapping.y
def forward(self, x, name="forward"):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
return self._call_forward(x, name)
def _inverse(self, y):
"""Subclass implementation for `inverse` public function."""
raise NotImplementedError("inverse not implemented")
def _call_inverse(self, y, name, **kwargs):
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, kwargs=kwargs)
if mapping.x is not None:
return mapping.x
mapping = mapping.merge(x=self._inverse(y, **kwargs))
self._cache(mapping)
return mapping.x
def inverse(self, y, name="inverse"):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse` is not implemented.
"""
return self._call_inverse(y, name)
def _inverse_log_det_jacobian(self, y):
"""Subclass implementation of `inverse_log_det_jacobian` public function."""
raise NotImplementedError("inverse_log_det_jacobian not implemented.")
def _call_inverse_log_det_jacobian(self, y, name, **kwargs):
with self._name_scope(name, [y]):
if self._constant_ildj is not None:
return self._constant_ildj
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
mapping = self._lookup(y=y, kwargs=kwargs)
if mapping.ildj is not None:
return mapping.ildj
try:
x = None # Not needed; leave cache as is.
ildj = self._inverse_log_det_jacobian(y, **kwargs)
except NotImplementedError as original_exception:
try:
x = mapping.x if mapping.x is not None else self._inverse(y, **kwargs)
# Fall back to the negated forward log det Jacobian (see class docstring).
ildj = -self._forward_log_det_jacobian(x, **kwargs)
except NotImplementedError:
raise original_exception
mapping = mapping.merge(x=x, ildj=ildj)
self._cache(mapping)
if self.is_constant_jacobian:
self._constant_ildj = mapping.ildj
return mapping.ildj
def inverse_log_det_jacobian(self, y, name="inverse_log_det_jacobian"):
"""Returns the (log o det o Jacobian o inverse)(y).
Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)
Note that `forward_log_det_jacobian` is the negative of this function,
evaluated at `g^{-1}(y)`.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if `_inverse_log_det_jacobian` is not implemented.
"""
return self._call_inverse_log_det_jacobian(y, name)
def _forward_log_det_jacobian(self, x):
"""Subclass implementation of `forward_log_det_jacobian`."""
raise NotImplementedError(
"forward_log_det_jacobian not implemented.")
def _call_forward_log_det_jacobian(self, x, name, **kwargs):
with self._name_scope(name, [x]):
if self._constant_ildj is not None:
# Need "-1. *" to avoid invalid-unary-operand-type linter warning.
return -1. * self._constant_ildj
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
mapping = self._lookup(x=x, kwargs=kwargs)
if mapping.ildj is not None:
return -mapping.ildj
try:
y = None # Not needed; leave cache as is.
ildj = -self._forward_log_det_jacobian(x, **kwargs)
except NotImplementedError as original_exception:
try:
y = mapping.y if mapping.y is not None else self._forward(x, **kwargs)
ildj = self._inverse_log_det_jacobian(y, **kwargs)
except NotImplementedError:
raise original_exception
mapping = mapping.merge(y=y, ildj=ildj)
self._cache(mapping)
if self.is_constant_jacobian:
self._constant_ildj = mapping.ildj
return -mapping.ildj
def forward_log_det_jacobian(self, x, name="forward_log_det_jacobian"):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
return self._call_forward_log_det_jacobian(x, name)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=(values or []) + self.graph_parents) as scope:
yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
def _cache(self, mapping):
"""Helper which stores mapping info in forward/inverse dicts."""
if self._constant_ildj is not None:
# Fold in ildj if known constant Jacobian.
mapping = mapping.merge(ildj=self._constant_ildj)
# Merging from lookup is an added check that we're not overwriting anything
# which is not None.
mapping = mapping.merge(mapping=self._lookup(
mapping.x, mapping.y, mapping.kwargs))
if mapping.x is None and mapping.y is None:
raise ValueError("Caching expects at least one of (x,y) to be known, "
"i.e., not None.")
self._from_x[mapping.x_key] = mapping
self._from_y[mapping.y_key] = mapping
def _lookup(self, x=None, y=None, kwargs=None):
"""Helper which retrieves mapping info from forward/inverse dicts."""
mapping = _Mapping(x=x, y=y, kwargs=kwargs)
# Since _cache requires both x,y to be set, we only need to do one cache
# lookup since the mapping is always in both or neither.
if mapping.x is not None:
return self._from_x.get(mapping.x_key, mapping)
if mapping.y is not None:
return self._from_y.get(mapping.y_key, mapping)
return mapping
def _event_dims_tensor(self, sample):
"""Return a 1D `int32` tensor: `range(rank(sample))[-event_ndims:]`."""
if self.event_ndims is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
static_event_ndims = tensor_util.constant_value(self.event_ndims)
static_rank = sample.get_shape().ndims
if static_event_ndims is not None and static_rank is not None:
return ops.convert_to_tensor(
static_rank + np.arange(-static_event_ndims, 0).astype(np.int32))
if static_event_ndims is not None:
event_range = np.arange(-static_event_ndims, 0).astype(np.int32)
else:
event_range = math_ops.range(-self.event_ndims, 0, dtype=dtypes.int32)
if static_rank is not None:
return event_range + static_rank
else:
return event_range + array_ops.rank(sample)
|
|
"""
DocBlockr v2.12.1
by Nick Fisher, and all the great people listed in CONTRIBUTORS.md
https://github.com/spadgos/sublime-jsdocs
*** Please read CONTRIBUTING.md before sending pull requests. Thanks! ***
"""
import sublime
import sublime_plugin
import re
import datetime
import time
import imp
from functools import reduce
def read_line(view, point):
if (point >= view.size()):
return
next_line = view.line(point)
return view.substr(next_line)
def write(view, str):
view.run_command(
'insert_snippet', {
'contents': str
}
)
def counter():
count = 0
while True:
count += 1
yield(count)
def escape(str):
return str.replace('$', '\$').replace('{', '\{').replace('}', '\}')
def is_numeric(val):
try:
float(val)
return True
except ValueError:
return False
def getParser(view):
scope = view.scope_name(view.sel()[0].end())
res = re.search('\\bsource\\.([a-z+\-]+)', scope)
sourceLang = res.group(1) if res else 'js'
viewSettings = view.settings()
if sourceLang == "php":
return JsdocsPHP(viewSettings)
elif sourceLang == "coffee":
return JsdocsCoffee(viewSettings)
elif sourceLang == "actionscript" or sourceLang == 'haxe':
return JsdocsActionscript(viewSettings)
elif sourceLang == "c++" or sourceLang == 'c' or sourceLang == 'cuda-c++':
return JsdocsCPP(viewSettings)
elif sourceLang == 'objc' or sourceLang == 'objc++':
return JsdocsObjC(viewSettings)
elif sourceLang == 'java' or sourceLang == 'groovy':
return JsdocsJava(viewSettings)
elif sourceLang == 'rust':
return JsdocsRust(viewSettings)
elif sourceLang == 'ts':
return JsdocsTypescript(viewSettings)
return JsdocsJavascript(viewSettings)
class JsdocsCommand(sublime_plugin.TextCommand):
def run(self, edit, inline=False):
self.initialize(self.view, inline)
if self.parser.isExistingComment(self.line):
write(self.view, "\n *" + self.indentSpaces)
return
# erase characters in the view (will be added to the output later)
self.view.erase(edit, self.trailingRgn)
# match against a function declaration.
out = self.parser.parse(self.line)
snippet = self.generateSnippet(out, inline)
write(self.view, snippet)
def initialize(self, v, inline=False):
point = v.sel()[0].end()
self.settings = v.settings()
# trailing characters are put inside the body of the comment
self.trailingRgn = sublime.Region(point, v.line(point).end())
self.trailingString = v.substr(self.trailingRgn).strip()
# drop trailing '*/'
self.trailingString = escape(re.sub('\\s*\\*\\/\\s*$', '', self.trailingString))
self.indentSpaces = " " * max(0, self.settings.get("jsdocs_indentation_spaces", 1))
self.prefix = "*"
settingsAlignTags = self.settings.get("jsdocs_align_tags", 'deep')
self.deepAlignTags = settingsAlignTags == 'deep'
self.shallowAlignTags = settingsAlignTags in ('shallow', True)
self.parser = parser = getParser(v)
parser.inline = inline
# use trailing string as a description of the function
if self.trailingString:
parser.setNameOverride(self.trailingString)
# read the next line
self.line = parser.getDefinition(v, v.line(point).end() + 1)
def generateSnippet(self, out, inline=False):
# substitute any variables in the tags
if out:
out = self.substituteVariables(out)
# align the tags
if out and (self.shallowAlignTags or self.deepAlignTags) and not inline:
out = self.alignTags(out)
# fix all the tab stops so they're consecutive
if out:
out = self.fixTabStops(out)
if inline:
if out:
return " " + out[0] + " */"
else:
return " $0 */"
else:
return self.createSnippet(out) + ('\n' if self.settings.get('jsdocs_newline_after_block') else '')
def alignTags(self, out):
def outputWidth(str):
# get the length of a string, after it is output as a snippet,
# "${1:foo}" --> 3
return len(re.sub("[$][{]\\d+:([^}]+)[}]", "\\1", str).replace('\$', '$'))
# count how many columns we have
maxCols = 0
# this is a 2d list of the widths per column per line
widths = []
# Grab the return tag if required.
if self.settings.get('jsdocs_per_section_indent'):
returnTag = self.settings.get('jsdocs_return_tag') or '@return'
else:
returnTag = False
for line in out:
if line.startswith('@'):
# Ignore the return tag if we're doing per-section indenting.
if returnTag and line.startswith(returnTag):
continue
# ignore all the words after `@author`
columns = line.split(" ") if not line.startswith('@author') else ['@author']
widths.append(list(map(outputWidth, columns)))
maxCols = max(maxCols, len(widths[-1]))
# initialise a list to 0
maxWidths = [0] * maxCols
if (self.shallowAlignTags):
maxCols = 1
for i in range(0, maxCols):
for width in widths:
if (i < len(width)):
maxWidths[i] = max(maxWidths[i], width[i])
# Convert to a dict so we can use .get()
maxWidths = dict(enumerate(maxWidths))
# Minimum spaces between line columns
minColSpaces = self.settings.get('jsdocs_min_spaces_between_columns', 1)
for index, line in enumerate(out):
# format the spacing of columns, but ignore the author tag. (See #197)
if line.startswith('@') and not line.startswith('@author'):
newOut = []
for partIndex, part in enumerate(line.split(" ")):
newOut.append(part)
newOut.append(" " * minColSpaces + (" " * (maxWidths.get(partIndex, 0) - outputWidth(part))))
out[index] = "".join(newOut).strip()
return out
def substituteVariables(self, out):
def getVar(match):
varName = match.group(1)
if varName == 'datetime':
date = datetime.datetime.now().replace(microsecond=0)
offset = time.timezone / -3600.0
return "%s%s%02d%02d" % (
date.isoformat(),
'+' if offset >= 0 else "-",
abs(offset),
(offset % 1) * 60
)
elif varName == 'date':
return datetime.date.today().isoformat()
else:
return match.group(0)
def subLine(line):
return re.sub(r'\{\{([^}]+)\}\}', getVar, line)
return list(map(subLine, out))
def fixTabStops(self, out):
tabIndex = counter()
def swapTabs(m):
return "%s%d%s" % (m.group(1), next(tabIndex), m.group(2))
for index, outputLine in enumerate(out):
out[index] = re.sub("(\\$\\{)\\d+(:[^}]+\\})", swapTabs, outputLine)
return out
def createSnippet(self, out):
snippet = ""
closer = self.parser.settings['commentCloser']
if out:
if self.settings.get('jsdocs_spacer_between_sections') == True:
lastTag = None
for idx, line in enumerate(out):
res = re.match("^\\s*@([a-zA-Z]+)", line)
if res and (lastTag != res.group(1)):
lastTag = res.group(1)
out.insert(idx, "")
elif self.settings.get('jsdocs_spacer_between_sections') == 'after_description':
lastLineIsTag = False
for idx, line in enumerate(out):
res = re.match("^\\s*@([a-zA-Z]+)", line)
if res:
if not lastLineIsTag:
out.insert(idx, "")
lastLineIsTag = True
for line in out:
snippet += "\n " + self.prefix + (self.indentSpaces + line if line else "")
else:
snippet += "\n " + self.prefix + self.indentSpaces + "${0:" + self.trailingString + '}'
snippet += "\n" + closer
return snippet
class JsdocsParser(object):
def __init__(self, viewSettings):
self.viewSettings = viewSettings
self.setupSettings()
self.nameOverride = None
def isExistingComment(self, line):
return re.search('^\\s*\\*', line)
def setNameOverride(self, name):
""" overrides the description of the function - used instead of parsed description """
self.nameOverride = name
def getNameOverride(self):
return self.nameOverride
def parse(self, line):
if self.viewSettings.get('jsdocs_simple_mode'):
return None
out = self.parseFunction(line) # (name, args, retval, options)
if (out):
return self.formatFunction(*out)
out = self.parseVar(line)
if out:
return self.formatVar(*out)
return None
def formatVar(self, name, val, valType=None):
out = []
if not valType:
if not val or val == '': # quick short circuit
valType = "[type]"
else:
valType = self.guessTypeFromValue(val) or self.guessTypeFromName(name) or "[type]"
if self.inline:
out.append("@%s %s${1:%s}%s ${1:[description]}" % (
self.settings['typeTag'],
"{" if self.settings['curlyTypes'] else "",
valType,
"}" if self.settings['curlyTypes'] else ""
))
else:
out.append("${1:[%s description]}" % (escape(name)))
out.append("@%s %s${1:%s}%s" % (
self.settings['typeTag'],
"{" if self.settings['curlyTypes'] else "",
valType,
"}" if self.settings['curlyTypes'] else ""
))
return out
def getTypeInfo(self, argType, argName):
typeInfo = ''
if self.settings['typeInfo']:
typeInfo = '%s${1:%s}%s ' % (
"{" if self.settings['curlyTypes'] else "",
escape(argType or self.guessTypeFromName(argName) or "[type]"),
"}" if self.settings['curlyTypes'] else "",
)
return typeInfo
def formatFunction(self, name, args, retval, options={}):
out = []
if 'as_setter' in options:
out.append('@private')
return out
extraTagAfter = self.viewSettings.get("jsdocs_extra_tags_go_after") or False
description = self.getNameOverride() or ('[%s%sdescription]' % (escape(name), ' ' if name else ''))
out.append("${1:%s}" % description)
if (self.viewSettings.get("jsdocs_autoadd_method_tag") is True):
out.append("@%s %s" % (
"method",
escape(name)
))
if not extraTagAfter:
self.addExtraTags(out)
# if there are arguments, add a @param for each
if (args):
# remove comments inside the argument list.
args = re.sub("/\*.*?\*/", '', args)
for argType, argName in self.parseArgs(args):
typeInfo = self.getTypeInfo(argType, argName)
format_str = "@param %s%s"
if (self.viewSettings.get('jsdocs_param_description')):
format_str += " ${1:[description]}"
out.append(format_str % (
typeInfo,
escape(argName)
))
# return value type might be already available in some languages but
# even then ask language specific parser if it wants it listed
retType = self.getFunctionReturnType(name, retval)
if retType is not None:
typeInfo = ''
if self.settings['typeInfo']:
typeInfo = ' %s${1:%s}%s' % (
"{" if self.settings['curlyTypes'] else "",
retType or "[type]",
"}" if self.settings['curlyTypes'] else ""
)
format_args = [
self.viewSettings.get('jsdocs_return_tag') or '@return',
typeInfo
]
if (self.viewSettings.get('jsdocs_return_description')):
format_str = "%s%s %s${1:[description]}"
third_arg = ""
# the extra space here is so that the description will align with the param description
if args and self.viewSettings.get('jsdocs_align_tags') == 'deep':
if not self.viewSettings.get('jsdocs_per_section_indent'):
third_arg = " "
format_args.append(third_arg)
else:
format_str = "%s%s"
out.append(format_str % tuple(format_args))
for notation in self.getMatchingNotations(name):
if 'tags' in notation:
out.extend(notation['tags'])
if extraTagAfter:
self.addExtraTags(out)
return out
def getFunctionReturnType(self, name, retval):
""" returns None for no return type. False meaning unknown, or a string """
if re.match("[A-Z]", name):
# no return, but should add a class
return None
if re.match('[$_]?(?:set|add)($|[A-Z_])', name):
# setter/mutator, no return
return None
if re.match('[$_]?(?:is|has)($|[A-Z_])', name): # functions starting with 'is' or 'has'
return self.settings['bool']
return self.guessTypeFromName(name) or False
def parseArgs(self, args):
"""
Returns an array of (type, name) tuples: the first element is the best guess
at the argument type, the second is the argument name.
"""
out = []
if not args:
return out
# the current token
current = ''
# characters which open a section inside which commas are not separators between different arguments
openQuotes = '"\'<('
# characters which close the section. The position of the character here should match the opening
# indicator in `openQuotes`
closeQuotes = '"\'>)'
matchingQuote = ''
insideQuotes = False
nextIsLiteral = False
blocks = []
for char in args:
if nextIsLiteral: # previous char was a \
current += char
nextIsLiteral = False
elif char == '\\':
nextIsLiteral = True
elif insideQuotes:
current += char
if char == matchingQuote:
insideQuotes = False
else:
if char == ',':
blocks.append(current.strip())
current = ''
else:
current += char
quoteIndex = openQuotes.find(char)
if quoteIndex > -1:
matchingQuote = closeQuotes[quoteIndex]
insideQuotes = True
blocks.append(current.strip())
for arg in blocks:
out.append((self.getArgType(arg), self.getArgName(arg)))
return out
def getArgType(self, arg):
return None
def getArgName(self, arg):
return arg
def addExtraTags(self, out):
extraTags = self.viewSettings.get('jsdocs_extra_tags', [])
if (len(extraTags) > 0):
out.extend(extraTags)
def guessTypeFromName(self, name):
matches = self.getMatchingNotations(name)
if len(matches):
rule = matches[0]
if ('type' in rule):
return self.settings[rule['type']] if rule['type'] in self.settings else rule['type']
if (re.match("(?:is|has)[A-Z_]", name)):
return self.settings['bool']
if (re.match("^(?:cb|callback|done|next|fn)$", name)):
return self.settings['function']
return False
def getMatchingNotations(self, name):
def checkMatch(rule):
if 'prefix' in rule:
regex = re.escape(rule['prefix'])
if re.match('.*[a-z]', rule['prefix']):
regex += '(?:[A-Z_]|$)'
return re.match(regex, name)
elif 'regex' in rule:
return re.search(rule['regex'], name)
return list(filter(checkMatch, self.viewSettings.get('jsdocs_notation_map', [])))
def getDefinition(self, view, pos):
"""
Get a relevant definition (e.g. a function declaration) starting at the given point.
Returns a string.
"""
maxLines = 25 # don't go further than this
openBrackets = 0
definition = ''
# count the number of open parentheses
def countBrackets(total, bracket):
return total + (1 if bracket == '(' else -1)
for i in range(0, maxLines):
line = read_line(view, pos)
if line is None:
break
pos += len(line) + 1
# strip comments
line = re.sub(r"//.*", "", line)
line = re.sub(r"/\*.*\*/", "", line)
searchForBrackets = line
# on the first line, only start looking from *after* the actual function starts. This is
# needed for cases like this:
# (function (foo, bar) { ... })
if definition == '':
opener = re.search(self.settings['fnOpener'], line) if self.settings['fnOpener'] else False
if opener:
# ignore everything before the function opener
searchForBrackets = line[opener.start():]
openBrackets = reduce(countBrackets, re.findall('[()]', searchForBrackets), openBrackets)
definition += line
if openBrackets == 0:
break
return definition
class JsdocsJavascript(JsdocsParser):
def setupSettings(self):
identifier = '[a-zA-Z_$][a-zA-Z_$0-9]*'
self.settings = {
# curly brackets around the type information
"curlyTypes": True,
'typeInfo': True,
"typeTag": self.viewSettings.get('jsdocs_override_js_var') or "type",
# technically, they can contain all sorts of unicode, but w/e
"varIdentifier": identifier,
"fnIdentifier": identifier,
"fnOpener": r'function(?:\s+' + identifier + r')?\s*\(',
"commentCloser": " */",
"bool": "Boolean",
"function": "Function"
}
def parseFunction(self, line):
res = re.search(
# fnName = function, fnName : function
r'(?:(?P<name1>' + self.settings['varIdentifier'] + r')\s*[:=]\s*)?'
+ 'function'
# function fnName
+ r'(?:\s+(?P<name2>' + self.settings['fnIdentifier'] + '))?'
# (arg1, arg2)
+ r'\s*\(\s*(?P<args>.*)\)',
line
)
if not res:
return None
# grab the name out of "name1 = function name2(foo)" preferring name1
name = res.group('name1') or res.group('name2') or ''
args = res.group('args')
return (name, args, None)
def parseVar(self, line):
res = re.search(
# var foo = blah,
# foo = blah;
# baz.foo = blah;
# baz = {
# foo : blah
# }
'(?P<name>' + self.settings['varIdentifier'] + ')\s*[=:]\s*(?P<val>.*?)(?:[;,]|$)',
line
)
if not res:
return None
return (res.group('name'), res.group('val').strip())
def guessTypeFromValue(self, val):
lowerPrimitives = self.viewSettings.get('jsdocs_lower_case_primitives') or False
shortPrimitives = self.viewSettings.get('jsdocs_short_primitives') or False
if is_numeric(val):
return "number" if lowerPrimitives else "Number"
if val[0] == '"' or val[0] == "'":
return "string" if lowerPrimitives else "String"
if val[0] == '[':
return "Array"
if val[0] == '{':
return "Object"
if val == 'true' or val == 'false':
returnVal = 'Bool' if shortPrimitives else 'Boolean'
return returnVal.lower() if lowerPrimitives else returnVal
if re.match('RegExp\\b|\\/[^\\/]', val):
return 'RegExp'
if val[:4] == 'new ':
res = re.search('new (' + self.settings['fnIdentifier'] + ')', val)
return res and res.group(1) or None
return None
class JsdocsPHP(JsdocsParser):
def setupSettings(self):
shortPrimitives = self.viewSettings.get('jsdocs_short_primitives') or False
nameToken = '[a-zA-Z_\\x7f-\\xff][a-zA-Z0-9_\\x7f-\\xff]*'
self.settings = {
# curly brackets around the type information
'curlyTypes': False,
'typeInfo': True,
'typeTag': "var",
'varIdentifier': '[$]' + nameToken + '(?:->' + nameToken + ')*',
'fnIdentifier': nameToken,
'fnOpener': 'function(?:\\s+' + nameToken + ')?\\s*\\(',
'commentCloser': ' */',
'bool': 'bool' if shortPrimitives else 'boolean',
'function': "function"
}
def parseFunction(self, line):
res = re.search(
'function\\s+&?(?:\\s+)?'
+ '(?P<name>' + self.settings['fnIdentifier'] + ')'
# function fnName
# (arg1, arg2)
+ '\\s*\\(\\s*(?P<args>.*)\)',
line
)
if not res:
return None
return (res.group('name'), res.group('args'), None)
def getArgType(self, arg):
# function add($x, $y = 1)
res = re.search(
'(?P<name>' + self.settings['varIdentifier'] + ")\\s*=\\s*(?P<val>.*)",
arg
)
if res:
return self.guessTypeFromValue(res.group('val'))
# function sum(Array $x)
if re.search('\\S\\s', arg):
return re.search("^(\\S+)", arg).group(1)
else:
return None
def getArgName(self, arg):
return re.search("(" + self.settings['varIdentifier'] + ")(?:\\s*=.*)?$", arg).group(1)
def parseVar(self, line):
res = re.search(
# var $foo = blah,
# $foo = blah;
# $baz->foo = blah;
# $baz = array(
# 'foo' => blah
# )
'(?P<name>' + self.settings['varIdentifier'] + ')\\s*=>?\\s*(?P<val>.*?)(?:[;,]|$)',
line
)
if res:
return (res.group('name'), res.group('val').strip())
res = re.search(
'\\b(?:var|public|private|protected|static)\\s+(?P<name>' + self.settings['varIdentifier'] + ')',
line
)
if res:
return (res.group('name'), None)
return None
def guessTypeFromValue(self, val):
shortPrimitives = self.viewSettings.get('jsdocs_short_primitives') or False
if is_numeric(val):
return "float" if '.' in val else 'int' if shortPrimitives else 'integer'
if val[0] == '"' or val[0] == "'":
return "string"
if val[:5] == 'array':
return "array"
if val.lower() in ('true', 'false', 'filenotfound'):
return 'bool' if shortPrimitives else 'boolean'
if val[:4] == 'new ':
res = re.search('new (' + self.settings['fnIdentifier'] + ')', val)
return res and res.group(1) or None
return None
def getFunctionReturnType(self, name, retval):
shortPrimitives = self.viewSettings.get('jsdocs_short_primitives') or False
if (name[:2] == '__'):
if name in ('__construct', '__destruct', '__set', '__unset', '__wakeup'):
return None
if name == '__sleep':
return 'array'
if name == '__toString':
return 'string'
if name == '__isset':
return 'bool' if shortPrimitives else 'boolean'
return JsdocsParser.getFunctionReturnType(self, name, retval)
class JsdocsCPP(JsdocsParser):
def setupSettings(self):
nameToken = '[a-zA-Z_][a-zA-Z0-9_]*'
identifier = '(%s)(::%s)?' % (nameToken, nameToken)
self.settings = {
'typeInfo': False,
'curlyTypes': False,
'typeTag': 'param',
'commentCloser': ' */',
'fnIdentifier': identifier,
'varIdentifier': '(' + identifier + ')\\s*(?:\\[(?:' + identifier + ')?\\]|\\((?:(?:\\s*,\\s*)?[a-z]+)+\\s*\\))?',
'fnOpener': identifier + '\\s+' + identifier + '\\s*\\(',
'bool': 'bool',
'function': 'function'
}
def parseFunction(self, line):
res = re.search(
'(?P<retval>' + self.settings['varIdentifier'] + ')[&*\\s]+'
+ '(?P<name>' + self.settings['varIdentifier'] + ');?'
# void fnName
# (arg1, arg2)
+ '\\s*\\(\\s*(?P<args>.*)\)',
line
)
if not res:
return None
return (res.group('name'), res.group('args'), res.group('retval'))
def parseArgs(self, args):
if args.strip() == 'void':
return []
return super(JsdocsCPP, self).parseArgs(args)
def getArgType(self, arg):
return None
def getArgName(self, arg):
return re.search(self.settings['varIdentifier'] + r"(?:\s*=.*)?$", arg).group(1)
def parseVar(self, line):
return None
def guessTypeFromValue(self, val):
return None
def getFunctionReturnType(self, name, retval):
return retval if retval != 'void' else None
class JsdocsCoffee(JsdocsParser):
def setupSettings(self):
identifier = '[a-zA-Z_$][a-zA-Z_$0-9]*'
self.settings = {
# curly brackets around the type information
'curlyTypes': True,
'typeTag': self.viewSettings.get('jsdocs_override_js_var') or "type",
'typeInfo': True,
# technically, they can contain all sorts of unicode, but w/e
'varIdentifier': identifier,
'fnIdentifier': identifier,
'fnOpener': None, # no multi-line function definitions for you, hipsters!
'commentCloser': '###',
'bool': 'Boolean',
'function': 'Function'
}
def parseFunction(self, line):
res = re.search(
# fnName = function, fnName : function
'(?:(?P<name>' + self.settings['varIdentifier'] + ')\s*[:=]\s*)?'
+ '(?:\\((?P<args>[^()]*?)\\))?\\s*([=-]>)',
line
)
if not res:
return None
# grab the name out of "name1 = function name2(foo)" preferring name1
name = res.group('name') or ''
args = res.group('args')
return (name, args, None)
def parseVar(self, line):
res = re.search(
# var foo = blah,
# foo = blah;
# baz.foo = blah;
# baz = {
# foo : blah
# }
'(?P<name>' + self.settings['varIdentifier'] + ')\s*[=:]\s*(?P<val>.*?)(?:[;,]|$)',
line
)
if not res:
return None
return (res.group('name'), res.group('val').strip())
def guessTypeFromValue(self, val):
lowerPrimitives = self.viewSettings.get('jsdocs_lower_case_primitives') or False
if is_numeric(val):
return "number" if lowerPrimitives else "Number"
if val[0] == '"' or val[0] == "'":
return "string" if lowerPrimitives else "String"
if val[0] == '[':
return "Array"
if val[0] == '{':
return "Object"
if val == 'true' or val == 'false':
return "boolean" if lowerPrimitives else "Boolean"
if re.match('RegExp\\b|\\/[^\\/]', val):
return 'RegExp'
if val[:4] == 'new ':
res = re.search('new (' + self.settings['fnIdentifier'] + ')', val)
return res and res.group(1) or None
return None
class JsdocsActionscript(JsdocsParser):
def setupSettings(self):
nameToken = '[a-zA-Z_][a-zA-Z0-9_]*'
self.settings = {
'typeInfo': False,
'curlyTypes': False,
'typeTag': '',
'commentCloser': ' */',
'fnIdentifier': nameToken,
'varIdentifier': '(%s)(?::%s)?' % (nameToken, nameToken),
'fnOpener': 'function(?:\\s+[gs]et)?(?:\\s+' + nameToken + ')?\\s*\\(',
'bool': 'bool',
'function': 'function'
}
def parseFunction(self, line):
res = re.search(
# fnName = function, fnName : function
'(?:(?P<name1>' + self.settings['varIdentifier'] + ')\s*[:=]\s*)?'
+ 'function(?:\s+(?P<getset>[gs]et))?'
# function fnName
+ '(?:\s+(?P<name2>' + self.settings['fnIdentifier'] + '))?'
# (arg1, arg2)
+ '\s*\(\s*(?P<args>.*)\)',
line
)
if not res:
return None
name = res.group('name1') and re.sub(self.settings['varIdentifier'], r'\1', res.group('name1')) \
or res.group('name2') \
or ''
args = res.group('args')
options = {}
if res.group('getset') == 'set':
options['as_setter'] = True
return (name, args, None, options)
def parseVar(self, line):
return None
def getArgName(self, arg):
return re.sub(self.settings['varIdentifier'] + r'(\s*=.*)?', r'\1', arg)
def getArgType(self, arg):
# could actually figure it out easily, but it's not important for the documentation
return None
class JsdocsObjC(JsdocsParser):
def setupSettings(self):
identifier = '[a-zA-Z_$][a-zA-Z_$0-9]*'
self.settings = {
# curly brackets around the type information
"curlyTypes": True,
'typeInfo': True,
"typeTag": "type",
# technically, they can contain all sorts of unicode, but w/e
"varIdentifier": identifier,
"fnIdentifier": identifier,
"fnOpener": '^\s*[-+]',
"commentCloser": " */",
"bool": "Boolean",
"function": "Function"
}
def getDefinition(self, view, pos):
maxLines = 25 # don't go further than this
definition = ''
for i in range(0, maxLines):
line = read_line(view, pos)
if line is None:
break
pos += len(line) + 1
# strip comments
line = re.sub("//.*", "", line)
if definition == '':
if not self.settings['fnOpener'] or not re.search(self.settings['fnOpener'], line):
definition = line
break
definition += line
if line.find(';') > -1 or line.find('{') > -1:
definition = re.sub(r'\s*[;{]\s*$', '', definition)
break
return definition
def parseFunction(self, line):
# this is terrible, don't judge me
typeRE = r'[a-zA-Z_$][a-zA-Z0-9_$]*\s*\**'
res = re.search(
'[-+]\s+\\(\\s*(?P<retval>' + typeRE + ')\\s*\\)\\s*'
+ '(?P<name>[a-zA-Z_$][a-zA-Z0-9_$]*)'
# void fnName
# (arg1, arg2)
+ '\\s*(?::(?P<args>.*))?',
line
)
if not res:
return
name = res.group('name')
argStr = res.group('args')
args = []
if argStr:
groups = re.split('\\s*:\\s*', argStr)
numGroups = len(groups)
for i in range(0, numGroups):
group = groups[i]
if i < numGroups - 1:
result = re.search(r'\s+(\S*)$', group)
name += ':' + result.group(1)
group = group[:result.start()]
args.append(group)
if (numGroups):
name += ':'
return (name, '|||'.join(args), res.group('retval'))
def parseArgs(self, args):
out = []
for arg in args.split('|||'): # lol
lastParen = arg.rfind(')')
out.append((arg[1:lastParen], arg[lastParen + 1:]))
return out
def getFunctionReturnType(self, name, retval):
return retval if retval != 'void' and retval != 'IBAction' else None
def parseVar(self, line):
return None
class JsdocsJava(JsdocsParser):
def setupSettings(self):
identifier = '[a-zA-Z_$][a-zA-Z_$0-9]*'
self.settings = {
"curlyTypes": False,
'typeInfo': False,
"typeTag": "type",
"varIdentifier": identifier,
"fnIdentifier": identifier,
"fnOpener": identifier + '(?:\\s+' + identifier + ')?\\s*\\(',
"commentCloser": " */",
"bool": "Boolean",
"function": "Function"
}
def parseFunction(self, line):
line = line.strip()
res = re.search(
# Modifiers
'(?:(public|protected|private|static|abstract|final|transient|synchronized|native|strictfp)\s+)*'
# Return value
+ '(?P<retval>[a-zA-Z_$][\<\>\., a-zA-Z_$0-9]+)\s+'
# Method name
+ '(?P<name>' + self.settings['fnIdentifier'] + ')\s*'
# Params
+ '\((?P<args>.*)\)\s*'
# # Throws ,
+ '(?:throws){0,1}\s*(?P<throws>[a-zA-Z_$0-9\.,\s]*)',
line
)
if not res:
return None
group_dict = res.groupdict()
name = group_dict["name"]
retval = group_dict["retval"]
full_args = group_dict["args"]
throws = group_dict["throws"] or ""
arg_list = []
for arg in full_args.split(","):
arg_list.append(arg.strip().split(" ")[-1])
args = ",".join(arg_list)
throws_list = []
for arg in throws.split(","):
throws_list.append(arg.strip().split(" ")[-1])
throws = ",".join(throws_list)
return (name, args, retval, throws)
def parseVar(self, line):
return None
def guessTypeFromValue(self, val):
return None
def formatFunction(self, name, args, retval, throws_args, options={}):
out = JsdocsParser.formatFunction(self, name, args, retval, options)
if throws_args != "":
for unused, exceptionName in self.parseArgs(throws_args):
typeInfo = self.getTypeInfo(unused, exceptionName)
out.append("@throws %s%s ${1:[description]}" % (
typeInfo,
escape(exceptionName)
))
return out
def getFunctionReturnType(self, name, retval):
if retval == "void":
return None
return retval
def getDefinition(self, view, pos):
maxLines = 25 # don't go further than this
definition = ''
open_curly_annotation = False
open_paren_annotation = False
for i in range(0, maxLines):
line = read_line(view, pos)
if line is None:
break
pos += len(line) + 1
# Move past empty lines
if re.search("^\s*$", line):
continue
# strip comments
line = re.sub("//.*", "", line)
line = re.sub(r"/\*.*\*/", "", line)
if definition == '':
# Must check here for function opener on same line as annotation
if self.settings['fnOpener'] and re.search(self.settings['fnOpener'], line):
pass
# Handle Annotations
elif re.search("^\s*@", line):
if re.search("{", line) and not re.search("}", line):
open_curly_annotation = True
if re.search("\(", line) and not re.search("\)", line):
open_paren_annotation = True
continue
elif open_curly_annotation:
if re.search("}", line):
open_curly_annotation = False
continue
elif open_paren_annotation:
if re.search("\)", line):
open_paren_annotation = False
elif re.search("^\s*$", line):
continue
# Check for function
elif not self.settings['fnOpener'] or not re.search(self.settings['fnOpener'], line):
definition = line
break
definition += line
if line.find(';') > -1 or line.find('{') > -1:
definition = re.sub(r'\s*[;{]\s*$', '', definition)
break
return definition
class JsdocsRust(JsdocsParser):
def setupSettings(self):
self.settings = {
"curlyTypes": False,
'typeInfo': False,
"typeTag": False,
"varIdentifier": ".*",
"fnIdentifier": ".*",
"fnOpener": "^\s*fn",
"commentCloser": " */",
"bool": "Boolean",
"function": "Function"
}
def parseFunction(self, line):
res = re.search('\s*fn\s+(?P<name>\S+)', line)
if not res:
return None
name = res.group('name')
return (name, [])
def formatFunction(self, name, args):
# Return a list of doc lines, as expected by generateSnippet().
return ["${1:%s}" % escape(name)]
############################################################
class JsdocsIndentCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
currPos = v.sel()[0].begin()
currLineRegion = v.line(currPos)
currCol = currPos - currLineRegion.begin() # which column we're currently in
prevLine = v.substr(v.line(v.line(currPos).begin() - 1))
spaces = self.getIndentSpaces(prevLine)
if spaces:
toStar = len(re.search("^(\\s*\\*)", prevLine).group(1))
toInsert = spaces - currCol + toStar
if spaces is None or toInsert <= 0:
v.run_command(
'insert_snippet', {
'contents': "\t"
}
)
return
v.insert(edit, currPos, " " * toInsert)
else:
v.insert(edit, currPos, "\t")
def getIndentSpaces(self, line):
hasTypes = getParser(self.view).settings['typeInfo']
extraIndent = '\\s+\\S+' if hasTypes else ''
res = re.search("^\\s*\\*(?P<fromStar>\\s*@(?:param|property)%s\\s+\\S+\\s+)\\S" % extraIndent, line) \
or re.search("^\\s*\\*(?P<fromStar>\\s*@(?:returns?|define)%s\\s+\\S+\\s+)\\S" % extraIndent, line) \
or re.search("^\\s*\\*(?P<fromStar>\\s*@[a-z]+\\s+)\\S", line) \
or re.search("^\\s*\\*(?P<fromStar>\\s*)", line)
if res:
return len(res.group('fromStar'))
return None
class JsdocsJoinCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
for sel in v.sel():
for lineRegion in reversed(v.lines(sel)):
v.replace(edit, v.find("[ \\t]*\\n[ \\t]*((?:\\*|//[!/]?|#)[ \\t]*)?", lineRegion.begin()), ' ')
class JsdocsDecorateCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
re_whitespace = re.compile("^(\\s*)//")
v.run_command('expand_selection', {'to': 'scope'})
for sel in v.sel():
maxLength = 0
lines = v.lines(sel)
for lineRegion in lines:
lineText = v.substr(lineRegion)
tabCount = lineText.count("\t")
leadingWS = len(re_whitespace.match(lineText).group(1))
leadingWS = leadingWS - tabCount
maxLength = max(maxLength, lineRegion.size())
lineLength = maxLength - leadingWS
leadingWS = tabCount * "\t" + " " * leadingWS
v.insert(edit, sel.end(), leadingWS + "/" * (lineLength + 3) + "\n")
for lineRegion in reversed(lines):
line = v.substr(lineRegion)
rPadding = 1 + (maxLength - lineRegion.size())
v.replace(edit, lineRegion, leadingWS + line + (" " * rPadding) + "//")
# break
v.insert(edit, sel.begin(), "/" * (lineLength + 3) + "\n")
class JsdocsDeindent(sublime_plugin.TextCommand):
"""
When pressing enter at the end of a docblock, this takes the cursor back one space.
/**
*
*/| <-- from here
| <-- to here
"""
def run(self, edit):
v = self.view
lineRegion = v.line(v.sel()[0])
line = v.substr(lineRegion)
v.insert(edit, v.sel()[0].begin(), re.sub("^(\\s*)\\s\\*/.*", "\n\\1", line))
class JsdocsReparse(sublime_plugin.TextCommand):
"""
Reparse a docblock to make the fields 'active' again, so that pressing tab will jump to the next one
"""
def run(self, edit):
tabIndex = counter()
def tabStop(m):
return "${%d:%s}" % (next(tabIndex), m.group(1))
v = self.view
v.run_command('clear_fields')
v.run_command('expand_selection', {'to': 'scope'})
sel = v.sel()[0]
# escape string, so variables starting with $ won't be removed
text = escape(v.substr(sel))
# strip out leading spaces, since inserting a snippet keeps the indentation
text = re.sub("\\n\\s+\\*", "\n *", text)
# replace [bracketed] [text] with a tabstop
text = re.sub("(\\[.+?\\])", tabStop, text)
v.erase(edit, sel)
write(v, text)
class JsdocsTrimAutoWhitespace(sublime_plugin.TextCommand):
"""
Trim the automatic whitespace added when creating a new line in a docblock.
"""
def run(self, edit):
v = self.view
lineRegion = v.line(v.sel()[0])
line = v.substr(lineRegion)
spaces = max(0, v.settings().get("jsdocs_indentation_spaces", 1))
v.replace(edit, lineRegion, re.sub("^(\\s*\\*)\\s*$", "\\1\n\\1" + (" " * spaces), line))
class JsdocsWrapLines(sublime_plugin.TextCommand):
"""
Reformat description text inside a comment block to wrap at the correct length.
Wrap column is set by the first ruler (set in Default.sublime-settings), or 80 by default.
Shortcut Key: alt+q
"""
def run(self, edit):
v = self.view
settings = v.settings()
rulers = settings.get('rulers')
tabSize = settings.get('tab_size')
wrapLength = rulers[0] if (len(rulers) > 0) else 80
numIndentSpaces = max(0, settings.get("jsdocs_indentation_spaces", 1))
indentSpaces = " " * numIndentSpaces
indentSpacesSamePara = " " * max(0, settings.get("jsdocs_indentation_spaces_same_para", numIndentSpaces))
spacerBetweenSections = settings.get("jsdocs_spacer_between_sections") == True
spacerBetweenDescriptionAndTags = settings.get("jsdocs_spacer_between_sections") == "after_description"
v.run_command('expand_selection', {'to': 'scope'})
# find the first word
startPoint = v.find("\n\\s*\\* ", v.sel()[0].begin()).begin()
# find the first tag, or the end of the comment
endPoint = v.find("\\s*\n\\s*\\*(/)", v.sel()[0].begin()).begin()
# replace the selection with this ^ new selection
v.sel().clear()
v.sel().add(sublime.Region(startPoint, endPoint))
# get the description text
text = v.substr(v.sel()[0])
# find the indentation level
indentation = len(re.sub('\t', ' ' * tabSize, re.search("\n(\\s*\\*)", text).group(1)))
wrapLength -= indentation - tabSize
# join all the lines, collapsing "empty" lines
text = re.sub("\n(\\s*\\*\\s*\n)+", "\n\n", text)
def wrapPara(para):
para = re.sub("(\n|^)\\s*\\*\\s*", " ", para)
# split the paragraph into words
words = para.strip().split(' ')
text = '\n'
line = ' *' + indentSpaces
lineTagged = False # indicates if the line contains a doc tag
paraTagged = False # indicates if this paragraph contains a doc tag
lineIsNew = True
tag = ''
# join all words to create lines, no longer than wrapLength
for i, word in enumerate(words):
if not word and not lineTagged:
continue
if lineIsNew and word[0] == '@':
lineTagged = True
paraTagged = True
tag = word
if len(line) + len(word) >= wrapLength - 1:
# appending the word to the current line would exceed its
# length requirements
text += line.rstrip() + '\n'
line = ' *' + indentSpacesSamePara + word + ' '
lineTagged = False
lineIsNew = True
else:
line += word + ' '
lineIsNew = False
text += line.rstrip()
return {'text': text,
'lineTagged': lineTagged,
'tagged': paraTagged,
'tag': tag}
# split the text into paragraphs, where each paragraph is either
# defined by an empty line or the start of a doc parameter
paragraphs = re.split('\n{2,}|\n\\s*\\*\\s*(?=@)', text)
wrappedParas = []
text = ''
for p, para in enumerate(paragraphs):
# wrap the lines in the current paragraph
wrappedParas.append(wrapPara(para))
# combine all the paragraphs into a single piece of text
for i in range(0, len(wrappedParas)):
para = wrappedParas[i]
last = i == len(wrappedParas) - 1
nextIsTagged = not last and wrappedParas[i + 1]['tagged']
nextIsSameTag = nextIsTagged and para['tag'] == wrappedParas[i + 1]['tag']
if last or (para['lineTagged'] or nextIsTagged) and \
not (spacerBetweenSections and not nextIsSameTag) and \
not (not para['lineTagged'] and nextIsTagged and spacerBetweenDescriptionAndTags):
text += para['text']
else:
text += para['text'] + '\n *'
text = escape(text)
write(v, text)
class JsdocsTypescript(JsdocsParser):
def setupSettings(self):
identifier = '[a-zA-Z_$][a-zA-Z_$0-9]*'
base_type_identifier = r'%s(\.%s)*(\[\])?' % ((identifier, ) * 2)
parametric_type_identifier = r'%s(\s*<\s*%s(\s*,\s*%s\s*)*>)?' % ((base_type_identifier, ) * 3)
self.settings = {
# curly brackets around the type information
"curlyTypes": True,
'typeInfo': True,
"typeTag": "type",
# technically, they can contain all sorts of unicode, but w/e
"varIdentifier": identifier,
"fnIdentifier": identifier,
"fnOpener": 'function(?:\\s+' + identifier + ')?\\s*\\(',
"commentCloser": " */",
"bool": "Boolean",
"function": "Function",
"functionRE":
# Modifiers
r'(?:public|private|static)?\s*'
# Method name
+ r'(?P<name>' + identifier + r')\s*'
# Params
+ r'\((?P<args>.*)\)\s*'
# Return value
+ r'(:\s*(?P<retval>' + parametric_type_identifier + r'))?',
"varRE":
r'((public|private|static|var)\s+)?(?P<name>' + identifier
+ r')\s*(:\s*(?P<type>' + parametric_type_identifier
+ r'))?(\s*=\s*(?P<val>.*?))?([;,]|$)'
}
self.functionRE = re.compile(self.settings['functionRE'])
self.varRE = re.compile(self.settings['varRE'])
def parseFunction(self, line):
line = line.strip()
res = self.functionRE.search(line)
if not res:
return None
group_dict = res.groupdict()
return (group_dict["name"], group_dict["args"], group_dict["retval"])
def getArgType(self, arg):
if ':' in arg:
return arg.split(':')[-1].strip()
return None
def getArgName(self, arg):
if ':' in arg:
arg = arg.split(':')[0]
return arg.strip('[ \?]')
def parseVar(self, line):
res = self.varRE.search(line)
if not res:
return None
val = res.group('val')
if val: val = val.strip()
return (res.group('name'), val, res.group('type'))
def getFunctionReturnType(self, name, retval):
return retval if retval != 'void' else None
def guessTypeFromValue(self, val):
lowerPrimitives = self.viewSettings.get('jsdocs_lower_case_primitives') or False
if is_numeric(val):
return "number" if lowerPrimitives else "Number"
if val[0] == '"' or val[0] == "'":
return "string" if lowerPrimitives else "String"
if val[0] == '[':
return "Array"
if val[0] == '{':
return "Object"
if val == 'true' or val == 'false':
return "boolean" if lowerPrimitives else "Boolean"
if re.match('RegExp\\b|\\/[^\\/]', val):
return 'RegExp'
if val[:4] == 'new ':
res = re.search('new (' + self.settings['fnIdentifier'] + ')', val)
return res and res.group(1) or None
return None
# to run, enable jsdocs_development_mode and press Ctrl+K, Ctrl+T
class JsdocsTests(sublime_plugin.WindowCommand):
def run(self):
import tests.javascript
# sublime.active_window().run_command('show_panel', panel='output.console')
self.window.run_command("show_panel", {"panel": "console"})
print ('\nDocBlockr tests')
print ('---------------')
for modName in tests.__dict__:
if not modName.startswith('__'):
mod = getattr(tests, modName)
mod = imp.reload(mod)
self.runTests(mod, modName)
def runTests(self, mod, modName):
successes = 0
failures = 0
helper = TestHelper(self.window)
helper.set_syntax(mod.syntax)
for member in mod.__dict__:
if member.startswith('test_'):
helper.startTest()
try:
testFn = getattr(mod, member)
ret = testFn(helper)
expected = "\n".join(ret) if isinstance(ret, list) else ret
assert isinstance(expected, str), 'Did not return a string to check'
self.compare(helper.view, expected)
self.report(member, modName, True)
successes += 1
except AssertionError as e:
self.report(member, modName, False, testFn, e.args[0])
failures += 1
finally:
helper.endTest()
helper.dispose()
print ('%s/%s passed.' % ( successes, successes + failures ))
def compare(self, view, expected):
delim = '|'
expectedRegion = None
checkRegions = delim in expected
if checkRegions:
# compare selections
(beforeSelection, d, tempAfter) = expected.partition(delim)
(selected, d, afterSelection) = tempAfter.partition(delim)
expectedRegion = sublime.Region(
len(beforeSelection),
len(beforeSelection) + (len(selected) if afterSelection else 0)
)
expected = beforeSelection + selected + afterSelection
actual = view.substr(sublime.Region(0, view.size()))
assert actual == expected, "Actual:\n%s\nExpected:\n%s" % (actual, expected)
if checkRegions:
actualRegion = view.sel()[0]
assert actualRegion == expectedRegion, \
"Selection doesn't match. Actual %s, expected %s" % (actualRegion, expectedRegion)
def report(self, testName, modName, success, testFn=None, errorMessage=''):
print ("[%s] %s: %s %s%s" % (
" OK " if success else "FAIL",
modName,
testName[5:].replace('_', ' ').title(),
"" if success else "-- " + testFn.func_doc + '.\n',
errorMessage
))
class TestHelper():
def __init__(self, window):
self.window = window
self.view = self.window.new_file()
self.cursorPos = 0
self.savedPos = 0
def dispose(self):
self.window.run_command('close')
def set_syntax(self, file):
self.view.set_syntax_file(file)
def startTest(self):
self.cursorPos = 0
self.edit = self.view.begin_edit()
def endTest(self):
self.view.end_edit(self.edit)
self.view.run_command('undo')
self.edit = None
def insert(self, text, pos = -1):
if pos == -1:
pos = self.cursorPos
if isinstance(text, list):
text = '\n'.join(text)
if '|' in text:
(before, __, after) = text.partition('|')
adjustCursor = len(before)
text = before + after
else:
adjustCursor = len(text)
self.view.insert(self.edit, pos, text)
self.cursorPos = pos + adjustCursor
self.setCursor(self.cursorPos)
def run(self, cmdName = 'jsdocs'):
self.view.run_command(cmdName)
def saveCursor(self):
self.savedPos = self.cursorPos
def restoreCursor(self):
self.setCursor(self.savedPos)
def setCursor(self, pos):
self.view.sel().clear()
self.view.sel().add(pos)
|
|
r"""
Arithmatex.
pymdownx.arithmatex
Extension that preserves the following for MathJax use:
$Equation$, \(Equation\)
$$
Display Equations
$$
\[
Display Equations
\]
\begin{align}
Display Equations
\end{align}
and $Inline MathJax Equations$
Inline equations are converted to the following form for HTML output by default:
\(Inline MathJax Equations\)
While block/display equations are converted to the following form for HTML output by default:
\[
Display Equations
\]
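For example (illustrative), with the default wrappers the inline source
$e = mc^2$
is emitted for HTML as
\(e = mc^2\)
placed in a <span> element (or in a <script type="math/tex"> tag when the
insert_as_script option is enabled).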
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import Pattern
from markdown.blockprocessors import BlockProcessor
from markdown import util as md_util
from . import util
import re
RE_DOLLAR_INLINE = r'(?:(?<!\\)((?:\\{2})+)(?=\$)|(?<!\\)(\$)(?!\s)((?:\\.|[^\$])+?)(?<!\s)(?:\$))'
RE_BRACKET_INLINE = r'(?:(?<!\\)((?:\\{2})+?)(?=\\\()|(?<!\\)(\\\()((?:\\[^)]|[^\\])+?)(?:\\\)))'
class InlineArithmatexPattern(Pattern):
"""Arithmatex inline pattern handler."""
ESCAPED_BSLASH = '%s%s%s' % (md_util.STX, ord('\\'), md_util.ETX)
def __init__(self, pattern, wrap, script, md):
"""Initialize."""
self.script = script
self.wrap = wrap[0] + '%s' + wrap[1]
Pattern.__init__(self, pattern)
self.markdown = md
def handleMatch(self, m):
"""Handle notations and switch them to something that will be more detectable in HTML."""
# Handle escapes
escapes = m.group(2)
if not escapes:
escapes = m.group(5)
if escapes:
return escapes.replace('\\\\', self.ESCAPED_BSLASH)
# Handle Tex
math = m.group(4)
if not math:
math = m.group(7)
if self.script:
el = md_util.etree.Element('script', {'type': 'math/tex'})
el.text = md_util.AtomicString(math)
else:
el = md_util.etree.Element('span')
el.text = md_util.AtomicString(self.wrap % math)
return el
class BlockArithmatexProcessor(BlockProcessor):
"""Mathjax block processor to find $$mathjax$$ content."""
RE_DOLLAR_BLOCK = r'(?P<dollar>[$]{2})(?P<math>.+?)(?P=dollar)'
RE_TEX_BLOCK = r'(?P<math2>\\begin\{(?P<env>[a-z]+\*?)\}.+?\\end\{(?P=env)\})'
RE_BRACKET_BLOCK = r'\\\[(?P<math3>(?:\\[^\]]|[^\\])+?)\\\]'
def __init__(self, config, md):
"""Initialize."""
self.script = config.get('insert_as_script', False)
wrap = config.get('tex_block_wrap', ['\\[', '\\]'])
self.wrap = wrap[0] + '%s' + wrap[1]
allowed_patterns = set(config.get('block_syntax', ['dollar', 'square', 'begin']))
pattern = []
if 'dollar' in allowed_patterns:
pattern.append(self.RE_DOLLAR_BLOCK)
if 'square' in allowed_patterns:
pattern.append(self.RE_BRACKET_BLOCK)
if 'begin' in allowed_patterns:
pattern.append(self.RE_TEX_BLOCK)
self.match = None
if pattern:
self.pattern = re.compile(r'(?s)^(?:%s)[ ]*$' % '|'.join(pattern))
else:
self.pattern = None
self.markdown = md
BlockProcessor.__init__(self, md.parser)
def test(self, parent, block):
"""Return 'True' for future Python Markdown block compatibility."""
self.match = self.pattern.match(block) if self.pattern is not None else None
return self.match is not None
def run(self, parent, blocks):
"""Find and handle block content."""
blocks.pop(0)
math = self.match.group('math')
if not math:
math = self.match.group('math2')
if not math:
math = self.match.group('math3')
if self.script:
el = md_util.etree.SubElement(parent, 'script', {'type': 'math/tex; mode=display'})
el.text = md_util.AtomicString(math)
else:
el = md_util.etree.SubElement(parent, 'span')
el.text = md_util.AtomicString(self.wrap % math)
return True
class ArithmatexExtension(Extension):
"""Adds delete extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'tex_inline_wrap': [
["\\(", "\\)"],
"Wrap inline content with the provided text ['open', 'close'] - Default: ['', '']"
],
'tex_block_wrap': [
["\\[", "\\]"],
"Wrap blick content with the provided text ['open', 'close'] - Default: ['', '']"
],
"block_syntax": [
['dollar', 'square', 'begin'],
'Enable block syntax: "dollar" ($$...$$), "square" (\\[...\\]), and '
'"begin" (\\begin{env}...\\end{env}). - Default: ["dollar", "square", "begin"]'
],
"inline_syntax": [
['dollar', 'round'],
'Enable inline syntax: "dollar" ($...$) and "round" (\\(...\\)) '
'- Default: ["dollar", "round"]'
],
'insert_as_script': [
False,
"Insert the math Tex notation into a script tag. Overrides wrapping. - Default: False"
]
}
super(ArithmatexExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
"""Extend the inline and block processor objects."""
md.registerExtension(self)
util.escape_chars(md, ['$'])
config = self.getConfigs()
allowed_inline = set(config.get('inline_syntax', ['dollar', 'round']))
inline_patterns = []
if 'dollar' in allowed_inline:
inline_patterns.append(RE_DOLLAR_INLINE)
if 'round' in allowed_inline:
inline_patterns.append(RE_BRACKET_INLINE)
if inline_patterns:
inline = InlineArithmatexPattern(
'(?:%s)' % '|'.join(inline_patterns),
config.get('tex_inline_wrap', ["\\(", "\\)"]),
config.get('insert_as_script', False),
md
)
md.inlinePatterns.add("arithmatex-inline", inline, ">backtick")
md.parser.blockprocessors.add('arithmatex-block', BlockArithmatexProcessor(config, md), "<code")
def makeExtension(*args, **kwargs):
"""Return extension."""
return ArithmatexExtension(*args, **kwargs)
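# --- Usage sketch (illustrative, not part of the original module). Assumes ---
# --- this file is importable as `pymdownx.arithmatex` and that the         ---
# --- `markdown` package (2.x API targeted above) is installed.             ---
if __name__ == '__main__':
    import markdown
    html = markdown.markdown(
        r"Euler's identity: $e^{i\pi} + 1 = 0$",
        extensions=['pymdownx.arithmatex'],
        extension_configs={'pymdownx.arithmatex': {'insert_as_script': True}})
    # Inline math comes back as <script type="math/tex">...</script>.
    print(html)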
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
from heat.openstack.common.rpc import amqp as rpc_amqp
from heat.openstack.common.rpc import common as rpc_common
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"name": link_name,
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
addr_opts["link"]["x-declare"].update(link_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect"""
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object"""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'"""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
super(DirectConsumer, self).__init__(session, callback,
"%s/%s" % (msg_id, msg_id),
{"type": "direct"},
msg_id,
{"exclusive": True})
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
super(TopicConsumer, self).__init__(session, callback,
"%s/%s" % (exchange_name, topic),
{}, name or topic, {})
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'"""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
super(FanoutConsumer, self).__init__(
session, callback,
"%s_fanout" % topic,
{"durable": False, "type": "fanout"},
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
{"exclusive": True})
class Publisher(object):
"""Base Publisher class"""
def __init__(self, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection"""
self.sender = session.sender(self.address)
def send(self, msg):
"""Send a message"""
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'"""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
super(DirectPublisher, self).__init__(session, msg_id,
{"type": "Direct"})
class TopicPublisher(Publisher):
"""Publisher class for 'topic'"""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic))
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'"""
def __init__(self, conf, session, topic):
"""init a 'fanout' publisher.
"""
super(FanoutPublisher, self).__init__(
session,
"%s_fanout" % topic, {"type": "fanout"})
class NotifyPublisher(Publisher):
"""Publisher class for notifications"""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(NotifyPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic),
{"durable": True})
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.protocol = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues"""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError, e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in consumers.itervalues():
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError), e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.close()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread"""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer"""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread"""
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection"""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
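# --- Illustrative sketch (not part of the original module). Shows the shape ---
# --- of the module-level API above; the topic/method names and bare dict    ---
# --- context are assumptions, and a reachable Qpid broker is required.      ---
# conf = cfg.CONF
# ctxt = {}  # hypothetical request context
# cast(conf, ctxt, 'engine', {'method': 'ping', 'args': {}})
# reply = call(conf, ctxt, 'engine', {'method': 'ping', 'args': {}}, timeout=30)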
|
|
""" Tools for API access. Currently, only Bitfinex supported
(includes data from Bitstamp). Based on sample code by Raphael Nicolle
(https://community.bitfinex.com/showwiki.php?title=Sample+API+Code). """
from decimal import Decimal, getcontext
getcontext().prec = 8
import requests
import json
import simplejson
import base64
import hmac
import ssl
import hashlib
import time
import types
import httplib
import traceback
TIMEOUT = 5
def decimalize(obj, keys):
if isinstance(obj, types.ListType):
return [decimalize(xs, keys) for xs in obj]
if not isinstance(obj, types.DictType):
return obj
def to_decimal(k, val):
if val is None:
return None
if isinstance(val, types.ListType):
return [decimalize(ys, keys) for ys in val]
if k in keys:
return Decimal(val)
return val
return {k: to_decimal(k, obj[k]) for k in obj}
def undecimalize(obj):
if isinstance(obj, types.ListType):
return map(undecimalize, obj)
if not isinstance(obj, types.DictType):
return obj
def from_decimal(val):
if isinstance(val, Decimal):
return str(val)
return val
return {k: from_decimal(obj[k]) for k in obj}
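# Illustrative note (not part of the original module): decimalize() walks
# nested lists/dicts and converts values whose keys are in `keys` to Decimal;
# undecimalize() turns Decimals back into strings for JSON encoding, e.g.
#   decimalize({'price': u'830.0', 'id': 5}, {'price'})
#       -> {'price': Decimal('830.0'), 'id': 5}
#   undecimalize({'price': Decimal('830.0'), 'id': 5})
#       -> {'price': '830.0', 'id': 5}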
class BitfinexAPI(object):
def __init__(self):
self.BITFINEX = 'api.bitfinex.com/'
self.EXCHANGES = ['bitfinex', 'bitstamp']
self.DECIMAL_KEYS = set([
'amount', 'ask', 'available', 'bid', 'close', 'executed_amount',
'high', 'highest', 'last_price', 'low', 'lowest', 'mid', 'open',
'original_amount', 'price', 'remaining_amount', 'timestamp',
'volume'])
def tryAPIcall(self, func):
try:
r = func()
return decimalize(r.json(), self.DECIMAL_KEYS)
except requests.ConnectionError:
print 'Connection error'
return
except requests.Timeout:
print 'Request timed out'
return
except simplejson.decoder.JSONDecodeError:
print 'JSON decode error'
return
except ssl.SSLError:
print 'SSL error'
return
except httplib.IncompleteRead:
print 'Incomplete read error'
return
except:
traceback.print_exc()
return
def ticker(self, symbol="btcusd"):
"""
Gives innermost bid and asks and information on the most recent trade.
Response:
mid (price): (bid + ask) / 2
bid (price): Innermost bid.
ask (price): Innermost ask.
last_price (price) The price at which the last order executed.
timestamp (time) The timestamp at which this
information was valid.
"""
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/ticker/"+symbol,
verify=False, timeout=TIMEOUT))
def today(self, symbol="btcusd"):
"""
Today's low, high and volume.
Response:
low (price)
high (price)
volume (price)
"""
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/today/"+symbol,
verify=False, timeout=TIMEOUT))
def candles(self, payload, symbol="btcusd"):
"""
Get a list of the most recent candlesticks
(trading data) for the given symbol.
Request:
timestamp (time): Optional. Only show trades at or
after this timestamp.
Response:
An array of dictionaries
start_at (timestamp)
period (integer, period in seconds)
open (price)
close (price)
highest (price)
lowest (price)
volume (decimal)
"""
headers = self._prepare_payload(False, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/candles/" +
symbol, headers=headers, verify=False,
timeout=TIMEOUT))
def lendbook(self, payload, symbol="btcusd"):
"""
Get the full lend book.
Request:
limit_bids (int): Optional. Limit the number of bids (loan
demands) returned. May be 0 in which case the array of
bids is empty. Default is 50.
limit_asks (int): Optional. Limit the number of asks (loan
offers) returned. May be 0 in which case the array of
asks is empty. Default is 50.
Response:
bids (array of loan demands):
rate (rate in % per 365 days)
amount (decimal)
period (days): minimum period for the loan
timestamp (time)
asks (array of loan offers)
rate (rate in % per 365 days)
amount (decimal)
period (days): maximum period for the loan
timestamp (time)
"""
headers = self._prepare_payload(False, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/lendbook/" +
symbol, headers=headers, verify=False,
timeout=TIMEOUT))
def book(self, payload, symbol="btcusd"):
"""
Get the full order book.
Request:
limit_bids (int): Optional. Limit the number of bids returned. May
be 0 in which case the array of bids is empty. Default is 50.
limit_asks (int): Optional. Limit the number of asks returned. May
be 0 in which case the array of asks is empty. Default is 50.
Response:
bids (array)
price (price)
amount (decimal)
timestamp (time)
asks (array)
price (price)
amount (decimal)
timestamp (time)
Example:
{u'bids':
[{u'timestamp': Decimal('1389375876.0'),
u'price': Decimal('830.0'),
u'amount': Decimal('0.71413304')},
{u'timestamp': Decimal('1389375863.0'),
u'price': Decimal('829.0'), u'amount': Decimal('1.0')},
{u'timestamp': Decimal('1389376067.0'),
u'price': Decimal('829.0'), u'amount': Decimal('2.0')},
{u'timestamp': Decimal('1389376072.0'),
u'price': Decimal('828.01'),
u'amount': Decimal('0.81391621')},
{u'timestamp': Decimal('1389375637.0'),
u'price': Decimal('828.0'), u'amount': Decimal('1.0')}],
u'asks':
[{u'timestamp': Decimal('1389376082.0'),
u'price': Decimal('831.0'),
u'amount': Decimal('0.74827024')},
{u'timestamp': Decimal('1389376064.0'),
u'price': Decimal('831.01'),
u'amount': Decimal('4.08318334')},
{u'timestamp': Decimal('1389376090.0'),
u'price': Decimal('831.01'), u'amount': Decimal('0.4')},
{u'timestamp': Decimal('1389376089.0'),
u'price': Decimal('832.8799'), u'amount': Decimal('1.35')},
{u'timestamp': Decimal('1389376082.0'),
u'price': Decimal('832.88'),
u'amount': Decimal('0.83139194')}]
}
"""
headers = self._prepare_payload(False, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/book/"+symbol,
headers=headers, verify=False,
timeout=TIMEOUT))
def trades(self, payload, symbol="btcusd"):
"""
Get a list of the most recent trades for the given symbol.
Request:
timestamp (time): Optional. Only show trades at
or after this timestamp.
limit_trades (int): Optional. Limit the number of trades
returned. Must be >= 1. Default is 50.
Response:
An array of dictionaries
price (price)
amount (decimal)
timestamp (time)
exchange (string)
"""
headers = self._prepare_payload(False, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/trades/"+symbol,
headers=headers, verify=False,
timeout=TIMEOUT))
def lends(self, payload, symbol="btcusd"):
"""
Get a list of the most recent lending data for the given currency:
total amount lent and rate (in % by 365 days).
Request:
timestamp (time): Optional. Only show trades at or
after this timestamp.
limit_lends (int): Optional. Limit the number of lends returned.
Must be >= 1. Default is 50.
Response:
An array of dictionaries
rate (decimal, % by 365 days): Average rate of total
loans opened at fixed rates
amount_lent (decimal): Total amount of open loans
in the given currency
timestamp (time)
"""
headers = self._prepare_payload(False, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/lends/"+symbol,
headers=headers, verify=False,
timeout=TIMEOUT))
def symbols(self):
"""
Get a list of valid symbol IDs.
Response:
A list of symbol names. Currently just "btcusd".
"""
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/symbols",
verify=False))
### AUTHENTICATED ###
def order_new(self, payload):
"""
Submit a new order.
Request:
symbol (string): The name of the symbol (see `/symbols`).
amount (decimal): Order size: how much to buy or sell.
price (price): Price to buy or sell at. May omit if a market order.
exchange (string): "bitfinex", "bitstamp", "all" (for no routing).
side (string): Either "buy" or "sell".
type (string): "market" / "limit" / "stop" / "trailing-stop" /
"exchange market" / "exchange limit" / "exchange stop" /
"exchange trailing-stop". (type starting by "exchange " are
exchange orders, others are margin trading orders)
is_hidden (bool): true if the order should be hidden.
Default is false.
Response:
order_id (int): A randomly generated ID for the order.
and the information given by /order/status
Order types:
Margin trading type Exchange type
LIMIT EXCHANGE LIMIT
MARKET EXCHANGE MARKET
STOP EXCHANGE STOP
TRAILING STOP EXCHANGE TRAILING STOP
Example Response:
{u'avg_execution_price': u'0.0',
u'remaining_amount': Decimal('0.1'),
u'order_id': 5480291,
u'timestamp': Decimal('1389414906.469904775'),
u'price': Decimal('864.01'),
u'exchange': u'bitfinex',
u'executed_amount': Decimal('0.0'),
u'symbol': u'btcusd',
u'is_live': True,
u'was_forced': False,
u'id': 5480291,
u'is_cancelled': False,
u'original_amount': Decimal('0.1'),
u'type': u'exchange market', u'side': u'sell'}
"""
payload["request"] = "/v1/order/new"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.post("https://"+self.BITFINEX+"/v1/order/new",
headers=headers, verify=False,
timeout=TIMEOUT))
def order_cancel(self, payload):
"""
Cancel an order.
Request:
order_id (int): The order ID given by `/order/new`.
Response:
Result of /order/status for the cancelled order."""
payload["request"] = "/v1/order/cancel"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.post("https://"+self.BITFINEX+"/v1/order/cancel",
headers=headers, verify=False,
timeout=TIMEOUT))
def order_status(self, payload):
"""
Get the status of an order. Is it active? Was it cancelled?
To what extent has it been executed? etc.
Request:
order_id (int): The order ID given by `/order/new`.
Response:
symbol (string): The symbol name the order belongs to.
exchange (string): "bitfinex", "mtgox", "bitstamp".
price (decimal): The price the order was issued at (can be
null for market orders).
avg_execution_price (decimal): The average price at which
this order has been executed so far.
0 if the order has not been executed at all.
side (string): Either "buy" or "sell".
type (string): "market" / "limit" / "stop" / "trailing-stop".
timestamp (time): The timestamp the order was submitted.
is_live (bool): Could the order still be filled?
is_cancelled (bool): Has the order been cancelled?
was_forced (bool): For margin only: true if it was
forced by the system.
executed_amount (decimal): How much of the order has
been executed so far in its history?
remaining_amount (decimal): How much is still
remaining to be submitted?
original_amount (decimal): What was the order
originally submitted for?
"""
payload["request"] = "/v1/order/status"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/order/status",
headers=headers, verify=False,
timeout=TIMEOUT))
def orders(self):
"""
An array of the results of `/order/status` for all your live orders.
Example Response:
[{ u'avg_execution_price': u'0.0',
u'remaining_amount': Decimal('0.27958'),
u'timestamp': Decimal('1389409705.0'),
u'price': Decimal('850.0'),
u'exchange': None,
u'executed_amount': Decimal('0.0'),
u'symbol': u'btcusd',
u'is_live': True,
u'was_forced': False,
u'id': 5475379,
u'is_cancelled': False,
u'original_amount': Decimal('0.27958'),
u'type': u'exchange stop',
u'side': u'sell' }]
"""
payload = {}
payload["request"] = "/v1/orders"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/orders",
headers=headers, verify=False,
timeout=TIMEOUT))
def positions(self):
""" View your active positions. """
payload = {}
payload["request"] = "/v1/positions"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/positions",
headers=headers, verify=False,
timeout=TIMEOUT))
def balances(self):
"""
A list of wallet balances:
type (string): "trading", "deposit" or "exchange"
currency (string): Currency
amount (decimal): How much balance of this currency in this wallet
available (decimal): How much X there is in this wallet that
is available to trade.
Example response:
[{u'available': Decimal('0.0'), u'currency': u'btc',
u'amount': Decimal('0.0'), u'type': u'trading'},
{u'available': Decimal('0.0'), u'currency': u'usd',
u'amount': Decimal('0.0'), u'type': u'trading'},
{u'available': Decimal('0.0'), u'currency': u'btc',
u'amount': Decimal('0.0'), u'type': u'deposit'},
{u'available': Decimal('0.0'), u'currency': u'usd',
u'amount': Decimal('0.0'), u'type': u'deposit'},
{u'available': Decimal('0.0'), u'currency': u'btc',
u'amount': Decimal('0.0'), u'type': u'exchange'},
{u'available': Decimal('481.24270344'), u'currency': u'usd',
u'amount': Decimal('481.24270344'), u'type': u'exchange'}]
"""
payload = {}
payload["request"] = "/v1/balances"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.get("https://"+self.BITFINEX+"/v1/balances",
headers=headers, verify=False,
timeout=TIMEOUT))
def past_trades(self, payload):
"""
View your past trades.
Request:
symbol (string): The pair traded (BTCUSD, LTCUSD, LTCBTC).
timestamp (time): Trades made before this timestamp won't
be returned.
limit_trades (int): Optional. Limit the number of trades
returned. Default is 50.
Response:
A list of dictionaries:
price (price)
amount (decimal)
timestamp (time)
exchange (string)
type (string) Sell or Buy
"""
payload["request"] = "/v1/mytrades"
payload["nonce"] = str(long(time.time() * 100000))
headers = self._prepare_payload(True, payload)
return self.tryAPIcall(
lambda: requests.post("https://"+self.BITFINEX+"/v1/mytrades",
headers=headers, verify=False,
timeout=TIMEOUT))
# Private
def _prepare_payload(self, should_sign, d):
j = json.dumps(undecimalize(d))
data = base64.standard_b64encode(j)
if should_sign:
h = hmac.new(self.secret, data, hashlib.sha384)
signature = h.hexdigest()
return {
"X-BFX-APIKEY": self.key,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data,
}
else:
return {
"X-BFX-PAYLOAD": data,
}
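# --- Usage sketch (illustrative, not part of the original module). Public  ---
# --- endpoints need no credentials; authenticated calls expect `key` and   ---
# --- `secret` attributes to be set on the instance first (they are never   ---
# --- set in __init__). Requires network access to api.bitfinex.com.        ---
if __name__ == '__main__':
    api = BitfinexAPI()
    ticker = api.ticker('btcusd')
    if ticker:
        print 'last price:', ticker['last_price']
    # For authenticated calls, supply your own placeholder credentials:
    # api.key = 'YOUR_API_KEY'
    # api.secret = 'YOUR_API_SECRET'
    # print api.balances()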
|
|
# bootstrapping setuptools
import ez_setup
ez_setup.use_setuptools()
import os
import sys
import textwrap
from distutils.errors import *
from distutils.command.clean import clean as _clean
from distutils.cmd import Command
from setuptools import setup
from distutils import log
from distutils.core import setup
class clean(_clean):
"""Also cleanup local temp files."""
def run(self):
_clean.run(self)
import fnmatch
# kill temporary files
patterns = [
# generic tempfiles
'*~', '*.bak', '*.pyc',
# tempfiles generated by ANTLR runs
't[0-9]*Lexer.py', 't[0-9]*Parser.py',
'*.tokens', '*__.g',
]
for path in ('antlr3', 'unittests', 'tests'):
path = os.path.join(os.path.dirname(__file__), path)
if os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=True):
graveyard = []
for pat in patterns:
graveyard.extend(fnmatch.filter(files, pat))
for name in graveyard:
filePath = os.path.join(root, name)
try:
log.info("removing '%s'", filePath)
os.unlink(filePath)
except OSError, exc:
log.warn(
"Failed to delete '%s': %s",
filePath, exc
)
class TestError(DistutilsError):
pass
# grml.. the class name appears in the --help output:
# ...
# Options for 'CmdUnitTest' command
# ...
# so I have to use a rather ugly name...
class unittest(Command):
"""Run unit tests for package"""
description = "run unit tests for package"
user_options = [
('xml-output=', None,
"Directory for JUnit compatible XML files."),
]
boolean_options = []
def initialize_options(self):
self.xml_output = None
def finalize_options(self):
pass
def run(self):
testDir = os.path.join(os.path.dirname(__file__), 'unittests')
if not os.path.isdir(testDir):
raise DistutilsFileError(
"There is not 'unittests' directory. Did you fetch the "
"development version?",
)
import glob
import imp
import unittest
import traceback
import StringIO
suite = unittest.TestSuite()
loadFailures = []
# collect tests from all unittests/test*.py files
testFiles = []
for testPath in glob.glob(os.path.join(testDir, 'test*.py')):
testFiles.append(testPath)
testFiles.sort()
for testPath in testFiles:
testID = os.path.basename(testPath)[:-3]
try:
modFile, modPathname, modDescription \
= imp.find_module(testID, [testDir])
testMod = imp.load_module(
testID, modFile, modPathname, modDescription
)
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(testMod)
)
except Exception:
buf = StringIO.StringIO()
traceback.print_exc(file=buf)
loadFailures.append(
(os.path.basename(testPath), buf.getvalue())
)
if self.xml_output:
import xmlrunner
runner = xmlrunner.XMLTestRunner(
stream=open(os.path.join(self.xml_output, 'unittest.xml'), 'w'))
else:
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
for testName, error in loadFailures:
sys.stderr.write('\n' + '='*70 + '\n')
sys.stderr.write(
"Failed to load test module %s\n" % testName
)
sys.stderr.write(error)
sys.stderr.write('\n')
if not result.wasSuccessful() or loadFailures:
raise TestError(
"Unit test suite failed!",
)
class functest(Command):
"""Run functional tests for package"""
description = "run functional tests for package"
user_options = [
('testcase=', None,
"testcase to run [default: run all]"),
('antlr-version=', None,
"ANTLR version to use [default: HEAD (in ../../build)]"),
('antlr-jar=', None,
"Explicit path to an antlr jar (overrides --antlr-version)"),
('xml-output=', None,
"Directory for JUnit compatible XML files."),
]
boolean_options = []
def initialize_options(self):
self.testcase = None
self.antlr_version = 'HEAD'
self.antlr_jar = None
self.xml_output = None
def finalize_options(self):
pass
def run(self):
import glob
import imp
import unittest
import traceback
import StringIO
testDir = os.path.join(os.path.dirname(__file__), 'tests')
if not os.path.isdir(testDir):
raise DistutilsFileError(
"There is not 'tests' directory. Did you fetch the "
"development version?",
)
# make sure, relative imports from testcases work
sys.path.insert(0, testDir)
rootDir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
if self.antlr_jar is not None:
classpath = [self.antlr_jar]
elif self.antlr_version == 'HEAD':
classpath = [
os.path.join(rootDir, 'tool', 'target', 'classes'),
os.path.join(rootDir, 'runtime', 'Java', 'target', 'classes')
]
else:
classpath = [
os.path.join(rootDir, 'archive',
'antlr-%s.jar' % self.antlr_version)
]
classpath.extend([
os.path.join(rootDir, 'lib', 'antlr-2.7.7.jar'),
os.path.join(rootDir, 'lib', 'stringtemplate-3.2.jar'),
os.path.join(rootDir, 'lib', 'junit-4.2.jar')
])
os.environ['CLASSPATH'] = ':'.join(classpath)
os.environ['ANTLRVERSION'] = self.antlr_version
suite = unittest.TestSuite()
loadFailures = []
# collect tests from all tests/t*.py files
testFiles = []
for testPath in glob.glob(os.path.join(testDir, 't*.py')):
if (testPath.endswith('Lexer.py')
or testPath.endswith('Parser.py')
):
continue
# if a single testcase has been selected, filter out all other
# tests
if (self.testcase is not None
and os.path.basename(testPath)[:-3] != self.testcase
):
continue
testFiles.append(testPath)
testFiles.sort()
for testPath in testFiles:
testID = os.path.basename(testPath)[:-3]
try:
modFile, modPathname, modDescription \
= imp.find_module(testID, [testDir])
testMod = imp.load_module(
testID, modFile, modPathname, modDescription
)
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(testMod)
)
except Exception:
buf = StringIO.StringIO()
traceback.print_exc(file=buf)
loadFailures.append(
(os.path.basename(testPath), buf.getvalue())
)
if self.xml_output:
import xmlrunner
runner = xmlrunner.XMLTestRunner(
stream=open(os.path.join(self.xml_output, 'functest.xml'), 'w'))
else:
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
for testName, error in loadFailures:
sys.stderr.write('\n' + '='*70 + '\n')
sys.stderr.write(
"Failed to load test module %s\n" % testName
)
sys.stderr.write(error)
sys.stderr.write('\n')
if not result.wasSuccessful() or loadFailures:
raise TestError(
"Functional test suite failed!",
)
setup(name='antlr_python_runtime',
version='3.1.3',
packages=['antlr3'],
author="Benjamin Niemann",
author_email="pink@odahoda.de",
url="http://www.antlr.org/",
download_url="http://www.antlr.org/download.html",
license="BSD",
description="Runtime package for ANTLR3",
long_description=textwrap.dedent('''\
This is the runtime package for ANTLR3, which is required to use parsers
generated by ANTLR3.
'''),
cmdclass={'unittest': unittest,
'functest': functest,
'clean': clean
},
)
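# --- Illustrative invocations (not part of the original script); paths and ---
# --- versions below are examples. The custom commands registered in        ---
# --- `cmdclass` above are driven from the command line:                    ---
#   python setup.py unittest
#   python setup.py unittest --xml-output=build/reports
#   python setup.py functest --antlr-version=3.1.3
#   python setup.py clean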
|
|
import json
import subprocess
from pathlib import Path
import pytest
from sanic_routing import __version__ as __routing_version__
from sanic import __version__
def capture(command):
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=Path(__file__).parent,
)
try:
out, err = proc.communicate(timeout=1)
except subprocess.TimeoutExpired:
proc.kill()
out, err = proc.communicate()
return out, err, proc.returncode
def starting_line(lines):
for idx, line in enumerate(lines):
if line.strip().startswith(b"Sanic v"):
return idx
return 0
def read_app_info(lines):
for line in lines:
if line.startswith(b"{") and line.endswith(b"}"):
return json.loads(line)
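# Illustrative note (not part of the original tests): capture() shells out to
# the Sanic CLI from this tests directory and returns (stdout, stderr, exit
# code); starting_line() finds the "Sanic v..." banner in stdout, and
# read_app_info() parses the single JSON line that the fake.server app is
# assumed to print with its runtime settings.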
@pytest.mark.parametrize(
"appname",
(
"fake.server.app",
"fake.server:app",
"fake.server:create_app()",
"fake.server.create_app()",
),
)
def test_server_run(appname):
command = ["sanic", appname]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
firstline = lines[starting_line(lines) + 1]
assert exitcode != 1
assert firstline == b"Goin' Fast @ http://127.0.0.1:8000"
@pytest.mark.parametrize(
"cmd",
(
(
"--cert=certs/sanic.example/fullchain.pem",
"--key=certs/sanic.example/privkey.pem",
),
(
"--tls=certs/sanic.example/",
"--tls=certs/localhost/",
),
(
"--tls=certs/sanic.example/",
"--tls=certs/localhost/",
"--tls-strict-host",
),
),
)
def test_tls_options(cmd):
command = ["sanic", "fake.server.app", *cmd, "-p=9999", "--debug"]
out, err, exitcode = capture(command)
assert exitcode != 1
lines = out.split(b"\n")
firstline = lines[starting_line(lines) + 1]
assert firstline == b"Goin' Fast @ https://127.0.0.1:9999"
@pytest.mark.parametrize(
"cmd",
(
("--cert=certs/sanic.example/fullchain.pem",),
(
"--cert=certs/sanic.example/fullchain.pem",
"--key=certs/sanic.example/privkey.pem",
"--tls=certs/localhost/",
),
("--tls-strict-host",),
),
)
def test_tls_wrong_options(cmd):
command = ["sanic", "fake.server.app", *cmd, "-p=9999", "--debug"]
out, err, exitcode = capture(command)
assert exitcode == 1
assert not out
lines = err.decode().split("\n")
errmsg = lines[8]
assert errmsg == "TLS certificates must be specified by either of:"
@pytest.mark.parametrize(
"cmd",
(
("--host=localhost", "--port=9999"),
("-H", "localhost", "-p", "9999"),
),
)
def test_host_port_localhost(cmd):
command = ["sanic", "fake.server.app", *cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
firstline = lines[starting_line(lines) + 1]
assert exitcode != 1
assert firstline == b"Goin' Fast @ http://localhost:9999"
@pytest.mark.parametrize(
"cmd",
(
("--host=127.0.0.127", "--port=9999"),
("-H", "127.0.0.127", "-p", "9999"),
),
)
def test_host_port_ipv4(cmd):
command = ["sanic", "fake.server.app", *cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
firstline = lines[starting_line(lines) + 1]
assert exitcode != 1
assert firstline == b"Goin' Fast @ http://127.0.0.127:9999"
@pytest.mark.parametrize(
"cmd",
(
("--host=::", "--port=9999"),
("-H", "::", "-p", "9999"),
),
)
def test_host_port_ipv6_any(cmd):
command = ["sanic", "fake.server.app", *cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
firstline = lines[starting_line(lines) + 1]
assert exitcode != 1
assert firstline == b"Goin' Fast @ http://[::]:9999"
@pytest.mark.parametrize(
"cmd",
(
("--host=::1", "--port=9999"),
("-H", "::1", "-p", "9999"),
),
)
def test_host_port_ipv6_loopback(cmd):
command = ["sanic", "fake.server.app", *cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
firstline = lines[starting_line(lines) + 1]
assert exitcode != 1
assert firstline == b"Goin' Fast @ http://[::1]:9999"
@pytest.mark.parametrize(
"num,cmd",
(
(1, (f"--workers={1}",)),
(2, (f"--workers={2}",)),
(4, (f"--workers={4}",)),
(1, ("-w", "1")),
(2, ("-w", "2")),
(4, ("-w", "4")),
),
)
def test_num_workers(num, cmd):
command = ["sanic", "fake.server.app", *cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
worker_lines = [
line
for line in lines
if b"Starting worker" in line or b"Stopping worker" in line
]
assert exitcode != 1
assert len(worker_lines) == num * 2, f"Lines found: {lines}"
@pytest.mark.parametrize("cmd", ("--debug", "-d"))
def test_debug(cmd):
command = ["sanic", "fake.server.app", cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
info = read_app_info(lines)
assert info["debug"] is True
assert info["auto_reload"] is True
@pytest.mark.parametrize("cmd", ("--auto-reload", "-r"))
def test_auto_reload(cmd):
command = ["sanic", "fake.server.app", cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
info = read_app_info(lines)
assert info["debug"] is False
assert info["auto_reload"] is True
@pytest.mark.parametrize(
"cmd,expected", (("--access-log", True), ("--no-access-log", False))
)
def test_access_logs(cmd, expected):
command = ["sanic", "fake.server.app", cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
info = read_app_info(lines)
assert info["access_log"] is expected
@pytest.mark.parametrize("cmd", ("--version", "-v"))
def test_version(cmd):
command = ["sanic", cmd]
out, err, exitcode = capture(command)
version_string = f"Sanic {__version__}; Routing {__routing_version__}\n"
assert out == version_string.encode("utf-8")
@pytest.mark.parametrize(
"cmd,expected",
(
("--noisy-exceptions", True),
("--no-noisy-exceptions", False),
),
)
def test_noisy_exceptions(cmd, expected):
command = ["sanic", "fake.server.app", cmd]
out, err, exitcode = capture(command)
lines = out.split(b"\n")
info = read_app_info(lines)
assert info["noisy_exceptions"] is expected
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines for eager execution.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import numpy as np
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import losses as losses_module
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils.losses_utils import squeeze_or_expand_dimensions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
def _eager_loss_fn(outputs, targets, loss_fn, output_name):
with backend.name_scope(output_name + '_loss'):
loss = loss_fn(targets, outputs)
return loss
def _eager_metrics_fn(model,
outputs,
targets,
sample_weights=None,
masks=None,
return_stateful_result=True):
"""Calculates the metrics for each output of the given model.
Arguments:
model: The model on which metrics are being calculated.
outputs: The outputs of the given model.
targets: The predictions or targets of the given model.
sample_weights: Optional list of sample weights for each output.
masks: Optional list of masks for each output.
return_stateful_result: Boolean, indicates whether the stateful
(aggregated)/stateless metric result should be returned.
Returns:
Returns the metric results for each output of the model.
"""
outputs = generic_utils.to_list(outputs)
targets = generic_utils.to_list(targets)
# TODO(psv): Consider supporting skip target indices in eager mode?
metric_results = model._handle_metrics(
outputs,
targets=targets,
sample_weights=sample_weights,
masks=masks,
return_stateful_result=return_stateful_result)
return [backend.mean(t) for t in metric_results]
def _model_loss(model,
inputs,
targets,
output_loss_metrics=None,
sample_weights=None,
training=False):
"""Calculates the loss for a given model.
Arguments:
model: The model on which metrics are being calculated.
inputs: Either a dictionary of inputs to the model or a list of input
arrays.
targets: List of target arrays.
output_loss_metrics: List of metrics that are used to aggregate output
loss values.
sample_weights: Optional list of sample weight arrays.
training: Whether the model should be run in inference or training mode.
Returns:
Returns the model output, total loss, loss value calculated using the
specified loss function and masks for each output. The total loss includes
regularization losses and applies masking and sample weighting
to the loss value.
"""
total_loss = 0
kwargs = {}
if model._expects_training_arg:
kwargs['training'] = training
if len(inputs) == 1 and not isinstance(inputs, dict):
inputs = inputs[0]
if model._compute_output_and_mask_jointly:
outs, masks = model._call_and_compute_mask(inputs, **kwargs)
masks = generic_utils.to_list(masks)
else:
outs = model.call(inputs, **kwargs)
masks = None
outs = generic_utils.to_list(outs)
if masks is None:
masks = [None for _ in outs]
targets = generic_utils.to_list(targets)
loss_metrics = []
aggregated_loss_metrics = []
with backend.name_scope('loss'):
for i, loss_fn in enumerate(model.loss_functions):
if sample_weights:
weights = sample_weights[i]
else:
weights = None
mask = masks[i]
with backend.name_scope(model.output_names[i] + '_loss'):
if isinstance(loss_fn, losses_module.Loss):
if mask is not None:
mask = math_ops.cast(mask, outs[i].dtype)
# Update weights with mask.
if weights is None:
weights = mask
else:
# Update dimensions of weights to match with mask if possible.
mask, _, weights = squeeze_or_expand_dimensions(
mask, None, weights)
weights *= mask
output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)
else:
weighted_masked_fn = training_utils.weighted_masked_objective(loss_fn)
output_loss = weighted_masked_fn(
targets[i], outs[i], weights, mask=mask)
# If the number of outputs is 1 then we don't append the loss metric
# associated with each model output. When there are multiple outputs
# associated with a model, each output's loss is calculated and returned
# as part of the loss_metrics.
if len(model.outputs) > 1:
loss_metrics.append(backend.mean(output_loss))
if output_loss_metrics is not None:
# Keep track of the stateful loss result.
aggregated_loss_metrics.append(
training_utils.call_metric_function(
output_loss_metrics[i],
targets[i],
outs[i],
weights=weights,
mask=mask))
loss_weight = model.loss_weights_list[i]
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
total_loss = backend.mean(total_loss)
# Add regularization losses
custom_losses = model.losses
if custom_losses:
total_loss += math_ops.add_n(custom_losses)
model._clear_losses()
return outs, total_loss, loss_metrics, aggregated_loss_metrics, masks
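# Illustrative note (not part of the original module): the loop above forms a
# loss-weighted sum over outputs plus any regularization losses. For example,
# with two outputs, loss_weights [1.0, 0.5] and per-output losses [0.8, 0.4],
# the unregularized total is 1.0 * 0.8 + 0.5 * 0.4 = 1.0 before backend.mean().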
def iterator_fit_loop(model,
inputs,
class_weight,
steps_per_epoch,
epoch_logs,
val_inputs=None,
val_targets=None,
val_sample_weights=None,
epochs=1,
verbose=1,
callbacks=None,
validation_steps=None,
do_validation=False,
batch_size=None,
output_loss_metrics=None):
"""Fit function for eager execution when input is given as dataset iterator.
Updates the given epoch logs.
Arguments:
model: Instance of the `Model`.
inputs: Input dataset iterator.
class_weight: Optional class-weight array to weight the importance of
samples in `inputs` based on the class they belong to, as conveyed by
the targets from the `inputs` iterator.
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch.
epoch_logs: Dictionary of logs from every epoch.
val_inputs: Input data for validation.
val_targets: Target data for validation.
val_sample_weights: Sample weight data for validation.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: CallbackList instance. Controls callbacks during training.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
do_validation: Boolean value indicating whether we should do validation.
batch_size: int, val_inputs and val_targets will be evaluated batch by
batch with size batch_size if they are arrays.
output_loss_metrics: List of metrics that are used to aggregate output
loss values.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
# make sure either x,y or x,y,sample_weights is provided
if (not isinstance(inputs.output_shapes, collections.Sequence) or
len(inputs.output_shapes) not in (2, 3)):
raise ValueError('Please provide either inputs and targets '
'or inputs, targets, and sample_weights')
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
# Get data from the iterator.
try:
next_element = inputs.get_next()
except errors.OutOfRangeError:
logging.warning(
'Your dataset iterator ran out of data; interrupting training. Make '
'sure that your dataset can generate at least '
'`steps_per_epoch * epochs` batches (in this case, %d batches). You '
'may need to use the repeat() function when building your '
          'dataset.' % (steps_per_epoch * epochs))
break
if len(inputs.output_shapes) == 2:
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate and standardize data.
x, y, sample_weights = model._standardize_user_data(
x, y, sample_weight=sample_weights, class_weight=class_weight)
x = training_utils.cast_if_floating_dtype(x)
y = training_utils.cast_if_floating_dtype(y)
if sample_weights:
sample_weights = [
training_utils.cast_if_floating_dtype(
ops.convert_to_tensor(val, dtype=backend.floatx()))
if val is not None else None for val in sample_weights
]
# Train model.
outs, loss, _, aggregated_loss_metrics, masks = _process_single_batch(
model,
x,
y,
output_loss_metrics=output_loss_metrics,
sample_weights=sample_weights,
training=True)
outs = generic_utils.to_list(outs)
if step_index == 0:
# Set stateful_metrics in callbacks. We do not do this before the
# `steps_per_epoch` loop because model will be compiled only in the first
# iteration of this loop in the deferred build scenario.
for cbk in callbacks:
if (isinstance(cbk, cbks.BaseLogger) or
isinstance(cbk, cbks.ProgbarLogger)):
cbk.stateful_metrics = model.metrics_names[1:] # Exclude `loss`
callback_metrics = copy.copy(model.metrics_names)
if do_validation:
callback_metrics += ['val_' + n for n in model.metrics_names]
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
'validation_steps': validation_steps
})
# Calculate metrics.
for l, o in zip(model.metrics_names, outs):
batch_logs[l] = o
metrics_results = _eager_metrics_fn(
model, outs, y, sample_weights=sample_weights, masks=masks)
batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))
for k, v in zip(
model.metrics_names,
[backend.mean(loss)] + aggregated_loss_metrics + metrics_results):
batch_logs[k] = tensor_util.constant_value(v)
callbacks.on_batch_end(step_index, batch_logs)
if callbacks.model.stop_training:
break
if step_index == steps_per_epoch - 1:
if do_validation:
val_outs = test_loop(
model,
val_inputs,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
verbose=0,
batch_size=batch_size)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(model.metrics_names, val_outs):
epoch_logs['val_' + l] = o
def iterator_test_loop(model, inputs, steps, verbose=0):
"""Test function for eager execution when input is given as dataset iterator.
Arguments:
model: Model instance that is being evaluated in Eager mode.
inputs: Input dataset iterator.
steps: Total number of steps (batches of samples) before declaring
predictions finished.
verbose: Verbosity mode.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
# make sure either x,y or x,y,sample_weights is provided
if (not isinstance(inputs.output_shapes, collections.Sequence) or
len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
    raise ValueError('Please provide either inputs and targets '
'or inputs, targets, and sample_weights')
outs = []
# Create metric wrapper for the losses.
output_loss_metrics = []
for i in range(len(model.outputs)):
loss_fn = model.loss_functions[i]
loss_name = loss_fn.name if isinstance(
loss_fn, losses_module.Loss) else loss_fn.__name__
mean_wrapped_loss = metrics_module.MeanMetricWrapper(
loss_fn, name=loss_name)
output_loss_metrics.append(mean_wrapped_loss)
num_samples = 0
if verbose == 1:
progbar = generic_utils.Progbar(target=steps)
for step_index in range(steps):
# Get data from the iterator.
try:
next_element = inputs.get_next()
except errors.OutOfRangeError:
logging.warning(
          'Your dataset iterator ran out of data; interrupting testing. '
'Make sure that your dataset can generate at least `steps` batches '
'(in this case, %d batches). You may need to use the repeat() '
'function when building your dataset.', steps)
break
if len(inputs.output_shapes) == 2:
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate and standardize data.
x, y, sample_weights = model._standardize_user_data(
x, y, sample_weight=sample_weights)
x = training_utils.cast_if_floating_dtype(x)
y = training_utils.cast_if_floating_dtype(y)
if sample_weights:
sample_weights = [
training_utils.cast_if_floating_dtype(
ops.convert_to_tensor(val, dtype=backend.floatx()))
if val is not None else None for val in sample_weights
]
if step_index == 0:
# Get stateful metrics indices. We do not do this before the `steps` loop
# because model will be compiled only in the first iteration of this loop
# in the deferred build scenario.
if hasattr(model, '_compile_metrics'):
for m in model.metrics:
m.reset_states()
for m in output_loss_metrics:
m.reset_states()
# Calculate model output, loss values.
loss_outs, loss, _, aggregated_loss_metrics, masks = _model_loss(
model,
x,
y,
output_loss_metrics=output_loss_metrics,
sample_weights=sample_weights,
training=False)
metrics_results = _eager_metrics_fn(
model, loss_outs, y, sample_weights=sample_weights, masks=masks)
batch_outs = []
for _, v in zip(
model.metrics_names,
[backend.mean(loss)] + aggregated_loss_metrics + metrics_results):
batch_outs.append(tensor_util.constant_value(v))
# Get current step size.
if isinstance(x, list):
step_size = x[0].get_shape().as_list()[0]
elif isinstance(x, dict):
step_size = list(x.values())[0].get_shape().as_list()[0]
else:
step_size = x.get_shape().as_list()[0]
# Accumulate results in output array.
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step_index == 0:
      for _ in batch_outs:
outs.append(0.)
outs[0] += batch_outs[0] * step_size # index 0 = 'loss'
outs[1:] = batch_outs[1:]
# Calculate sample size.
num_samples += step_size
if verbose == 1:
progbar.update(step_index + 1)
outs[0] /= num_samples # index 0 = 'loss'
if len(outs) == 1:
return outs[0]
return outs
def iterator_predict_loop(model, inputs, steps, verbose=0):
"""Predict function for eager execution when input is dataset iterator.
Arguments:
model: Instance of `Model`.
inputs: Input dataset iterator.
steps: Total number of steps (batches of samples) before declaring
`_predict_loop` finished.
verbose: Verbosity mode.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions (if the model has multiple outputs).
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
if not isinstance(inputs.output_shapes,
collections.Sequence) or len(inputs.output_shapes) > 3:
raise ValueError(
'Please provide data as a list or tuple of 1, 2, or 3 elements '
        ' - `(input)`, or `(input, target)`, or `(input, target, '
        'sample_weights)`. Received %s. We do not use the `target` or '
'`sample_weights` value here.' % inputs.output_shapes)
outs = []
if verbose == 1:
progbar = generic_utils.Progbar(target=steps)
for step_index in range(steps):
# Get data from the iterator.
try:
next_element = inputs.get_next()
except errors.OutOfRangeError:
logging.warning(
'Your dataset iterator ran out of data; interrupting prediction. '
'Make sure that your dataset can generate at least `steps` batches '
'(in this case, %d batches). You may need to use the repeat() '
'function when building your dataset.', steps)
break
# expects a tuple, where first element of tuple represents inputs
x = next_element[0]
# Validate and standardize data.
x, _, _ = model._standardize_user_data(x)
x = training_utils.cast_if_floating_dtype(x)
if isinstance(x, list) and len(x) == 1:
x = x[0]
if model._expects_training_arg:
batch_outs = model.call(x, training=False)
else:
batch_outs = model.call(x)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
# We collect the results from every step and then concatenate them once
# in the end. This is an expensive process. We are doing this because we
# do not know the number of samples beforehand.
if step_index == 0:
for _ in batch_outs:
outs.append([])
for i, batch_out in enumerate(batch_outs):
outs[i].append(backend.get_value(batch_out))
if verbose == 1:
progbar.update(step_index + 1)
for i, out in enumerate(outs):
outs[i] = np.concatenate(tuple(out), axis=0)
if len(outs) == 1:
return outs[0]
return outs
def _process_single_batch(model,
inputs,
targets,
output_loss_metrics=None,
sample_weights=None,
training=False):
"""Calculate the loss and gradient for one input batch.
The model weights are updated if training is set to True.
Arguments:
model: Model whose loss has to be calculated.
inputs: List of input arrays.
targets: List of target arrays.
    output_loss_metrics: List of metrics that are used to aggregate output
loss values.
sample_weights: Optional list of sample weight arrays.
training: The boolean represents if the weights of the model are updated.
'fit' methods will set this to True while 'evaluate' methods will
set this to False.
Returns:
output of the model, total loss, the loss and the mask
associated with each output.
Raises:
ValueError: If the model has no loss to optimize.
"""
with backend.learning_phase_scope(1 if training else 0):
with GradientTape() as tape:
outs, loss, loss_metrics, aggregated_loss_metrics, masks\
= _model_loss(
model,
inputs,
targets,
output_loss_metrics=output_loss_metrics,
sample_weights=sample_weights,
training=training)
if loss is None:
raise ValueError('The model cannot be run '
'because it has no loss to optimize.')
if training:
if not model._collected_trainable_weights:
logging.warning('The list of trainable weights is empty. Make sure that'
' you are not setting model.trainable to False before '
'compiling the model.')
else:
grads = tape.gradient(loss, model._collected_trainable_weights)
model.optimizer.apply_gradients(zip(grads,
model._collected_trainable_weights))
return outs, loss, loss_metrics, aggregated_loss_metrics, masks
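# Illustrative sketch (not part of the original module): the same
# forward-pass / GradientTape / apply_gradients pattern that
# _process_single_batch performs above, written against the public tf.keras
# API. The model, optimizer, and random data are assumptions for the example.
def _example_single_batch_gradient_step():
  import numpy as np
  import tensorflow as tf
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  optimizer = tf.keras.optimizers.SGD(0.01)
  loss_fn = tf.keras.losses.MeanSquaredError()
  x = np.random.rand(8, 4).astype('float32')
  y = np.random.rand(8, 1).astype('float32')
  with tf.GradientTape() as tape:
    predictions = model(x, training=True)
    loss = loss_fn(y, predictions)
  grads = tape.gradient(loss, model.trainable_variables)
  optimizer.apply_gradients(zip(grads, model.trainable_variables))
  return float(loss)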
def train_on_batch(model, inputs, targets, sample_weights=None):
"""Calculates the loss and gradient updates for one input batch.
Arguments:
model: Model whose loss has to be calculated.
inputs: Input batch data.
targets: Target batch data.
sample_weights: Sample weight batch data.
Returns:
total loss and the loss associated with each output.
"""
if isinstance(inputs, collections.Sequence):
if len(inputs) and tensor_util.is_tensor(inputs[0]):
inputs = training_utils.cast_if_floating_dtype(inputs)
targets = training_utils.cast_if_floating_dtype(targets)
else:
inputs = [
ops.convert_to_tensor(val, dtype=backend.floatx()) for val in inputs
]
targets = [
ops.convert_to_tensor(val, dtype=backend.floatx()) for val in targets
]
if sample_weights:
sample_weights = [
ops.convert_to_tensor(val, dtype=backend.floatx())
if val is not None else None for val in sample_weights
]
outs, loss, loss_metrics, _, masks = _process_single_batch(
model, inputs, targets, sample_weights=sample_weights, training=True)
if not isinstance(outs, list):
outs = [outs]
metrics_results = _eager_metrics_fn(
model,
outs,
targets,
sample_weights=sample_weights,
masks=masks,
return_stateful_result=False)
loss = generic_utils.to_list(loss)
return [
tensor_util.constant_value(v)
for v in loss + loss_metrics + metrics_results
]
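# Illustrative sketch (not part of the original module): in user code one
# normally calls the public Keras Model.train_on_batch, which dispatches to
# the eager train_on_batch above when executing eagerly. The model, compile
# arguments, and random data are assumptions for the example.
def _example_public_train_on_batch():
  import numpy as np
  import tensorflow as tf
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  model.compile(optimizer='sgd', loss='mse')
  x = np.random.rand(16, 3).astype('float32')
  y = np.random.rand(16, 1).astype('float32')
  return model.train_on_batch(x, y)  # scalar loss for this one batch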
def test_on_batch(model, inputs, targets, sample_weights=None):
"""Calculates the loss for one input batch.
Arguments:
model: Model whose loss has to be calculated.
inputs: Input batch data.
targets: Target batch data.
sample_weights: Sample weight batch data.
Returns:
total loss, loss and metrics associated with each output.
"""
if isinstance(inputs, collections.Sequence):
if len(inputs) and tensor_util.is_tensor(inputs[0]):
inputs = training_utils.cast_if_floating_dtype(inputs)
targets = training_utils.cast_if_floating_dtype(targets)
else:
inputs = [
ops.convert_to_tensor(val, dtype=backend.floatx()) for val in inputs
]
targets = [
ops.convert_to_tensor(val, dtype=backend.floatx()) for val in targets
]
if sample_weights:
sample_weights = [
ops.convert_to_tensor(val, dtype=backend.floatx())
if val is not None else None for val in sample_weights
]
outs, loss, loss_metrics, _, masks = _model_loss(
model, inputs, targets, sample_weights=sample_weights, training=False)
if not isinstance(outs, list):
outs = [outs]
metrics_results = _eager_metrics_fn(
model,
outs,
targets,
sample_weights=sample_weights,
masks=masks,
return_stateful_result=False)
loss = generic_utils.to_list(loss)
return [
tensor_util.constant_value(v)
for v in loss + loss_metrics + metrics_results
]
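# Illustrative sketch (not part of the original module): the evaluation
# counterpart; Model.test_on_batch computes loss and metrics without applying
# any weight updates. The model, metrics, and random data are assumptions.
def _example_public_test_on_batch():
  import numpy as np
  import tensorflow as tf
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  model.compile(optimizer='sgd', loss='mse', metrics=['mae'])
  x = np.random.rand(16, 3).astype('float32')
  y = np.random.rand(16, 1).astype('float32')
  loss, mae = model.test_on_batch(x, y)  # weights are left unchanged
  return loss, mae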
def fit_loop(model,
inputs,
targets,
sample_weights=None,
class_weight=None,
val_inputs=None,
val_targets=None,
val_sample_weights=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
shuffle=True,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Fit function for eager execution.
Arguments:
model: Instance of the model that is being executed in Eager mode.
inputs: List of input arrays.
targets: List of target arrays.
sample_weights: Optional list of sample weight arrays.
class_weight: Optional class-weight array to weight the importance of
samples in `inputs` based on the class they belong to, as conveyed by
`targets`.
val_inputs: Input data for validation.
val_targets: Target data for validation.
val_sample_weights: Sample weight data for validation.
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
shuffle: Whether to shuffle the data at the beginning of each epoch
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
Returns:
`History` object.
Raises:
ValueError: In case of invalid argument values.
"""
# Convert training inputs to an EagerIterator
inputs, steps_per_epoch = training_utils.convert_to_iterator(
x=inputs,
y=targets,
sample_weights=sample_weights,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
shuffle=shuffle)
# Required for eager execution
with backend.learning_phase_scope(1):
do_validation = val_inputs is not None
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
val_inputs=val_inputs,
val_targets=val_targets,
val_sample_weights=val_sample_weights,
validation_steps=validation_steps,
verbose=verbose)
# Create metric wrapper for the losses.
output_loss_metrics = []
for i in range(len(model.outputs)):
loss_fn = model.loss_functions[i]
loss_name = loss_fn.name if isinstance(
loss_fn, losses_module.Loss) else loss_fn.__name__
mean_wrapped_loss = metrics_module.MeanMetricWrapper(
loss_fn, name=loss_name)
output_loss_metrics.append(mean_wrapped_loss)
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
if model._is_compiled: # Model may not be compiled the first time.
# Reset stateful metrics
for m in model.metrics:
m.reset_states()
for m in output_loss_metrics:
m.reset_states()
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
iterator_fit_loop(
model,
inputs,
class_weight,
steps_per_epoch=steps_per_epoch,
epoch_logs=epoch_logs,
val_inputs=val_inputs,
val_targets=val_targets,
val_sample_weights=val_sample_weights,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_steps=validation_steps,
do_validation=do_validation,
batch_size=batch_size,
output_loss_metrics=output_loss_metrics)
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks.on_train_end()
return model.history
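# Illustrative sketch (not part of the original module): fit_loop above is
# what Model.fit runs under eager execution, so the usual public call looks
# like this. The model, random data, and argument values are assumptions.
def _example_public_fit():
  import numpy as np
  import tensorflow as tf
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  model.compile(optimizer='sgd', loss='mse')
  x = np.random.rand(64, 3).astype('float32')
  y = np.random.rand(64, 1).astype('float32')
  history = model.fit(x, y, batch_size=16, epochs=2,
                      validation_split=0.25, verbose=0)
  return history.history  # per-epoch 'loss' and 'val_loss' lists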
def test_loop(model, inputs, targets,
sample_weights=None,
batch_size=None,
verbose=0,
steps=None):
"""Test function for eager execution.
Arguments:
model: Model instance that is being evaluated in Eager mode.
inputs: List of input arrays.
targets: List of target arrays.
sample_weights: Optional list of sample weight arrays.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
inputs, steps = training_utils.convert_to_iterator(
x=inputs,
y=targets,
sample_weights=sample_weights,
batch_size=batch_size,
steps_per_epoch=steps,
is_validation=True)
with backend.learning_phase_scope(0):
return iterator_test_loop(model, inputs, steps, verbose=verbose)
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
"""Predict function for eager execution.
Arguments:
model: Instance of `Model`.
inputs: List of input arrays.
batch_size: integer batch size.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
with backend.learning_phase_scope(0):
inputs, steps = training_utils.convert_to_iterator(
x=inputs, batch_size=batch_size, steps_per_epoch=steps)
return iterator_predict_loop(model, inputs, steps, verbose=verbose)
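# Illustrative sketch (not part of the original module): test_loop and
# predict_loop above back the public Model.evaluate and Model.predict calls
# under eager execution. The model and random data are assumptions.
def _example_public_evaluate_and_predict():
  import numpy as np
  import tensorflow as tf
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  model.compile(optimizer='sgd', loss='mse')
  x = np.random.rand(32, 3).astype('float32')
  y = np.random.rand(32, 1).astype('float32')
  loss = model.evaluate(x, y, batch_size=8, verbose=0)  # scalar loss
  preds = model.predict(x, batch_size=8, verbose=0)     # shape (32, 1)
  return loss, preds.shape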
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
return np.array(scores)[:, 0]
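# Illustrative sketch (not part of the original module): cross_val_score, as
# defined above, with an explicit fold count and scorer, complementing the
# docstring example. Dataset and estimator choices are assumptions only.
def _example_cross_val_score_usage():
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    clf = svm.SVC(kernel='linear', C=1)
    # 5-fold (stratified, since clf is a classifier) CV scored by accuracy.
    return cross_val_score(clf, iris.data, iris.target, cv=5,
                           scoring='accuracy')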
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
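# Illustrative sketch (not part of the original module): cross_val_predict, as
# defined above, with method='predict_proba' to collect out-of-fold class
# probabilities. Dataset and estimator choices are assumptions only.
def _example_cross_val_predict_proba():
    from sklearn import datasets
    from sklearn.linear_model import LogisticRegression
    iris = datasets.load_iris()
    proba = cross_val_predict(LogisticRegression(), iris.data, iris.target,
                              cv=3, method='predict_proba')
    return proba.shape  # (n_samples, n_classes), one row per held-out sample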
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
        True iff sorted(indices) is np.arange(n_samples)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
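# Illustrative sketch (not part of the original module): permutation_test_score,
# as defined above, comparing a classifier's cross-validated score against
# label-shuffled baselines. Dataset, estimator, and n_permutations are
# assumptions for the example.
def _example_permutation_test_score_usage():
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    clf = svm.SVC(kernel='linear', C=1)
    score, permutation_scores, pvalue = permutation_test_score(
        clf, iris.data, iris.target, cv=5, n_permutations=100, n_jobs=1)
    return score, pvalue  # a small pvalue suggests the score is not by chance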
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = cv.split(X, y, groups)
# Make a list since we will be iterating multiple times over the folds
cv_iter = list(cv_iter)
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv.split(X, y, groups))
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv_iter
for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
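# Illustrative sketch (not part of the original module): learning_curve, as
# defined above, evaluated at five training-set fractions and averaged over
# folds for plotting. Dataset and estimator choices are assumptions only.
def _example_learning_curve_usage():
    from sklearn import datasets
    from sklearn.naive_bayes import GaussianNB
    digits = datasets.load_digits()
    train_sizes_abs, train_scores, test_scores = learning_curve(
        GaussianNB(), digits.data, digits.target, cv=5,
        train_sizes=np.linspace(0.1, 1.0, 5))
    # Average over the CV folds (axis 1) to get one curve per training size.
    return train_sizes_abs, train_scores.mean(axis=1), test_scores.mean(axis=1)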
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
    if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
        train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
                                 dtype=int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
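# Illustrative sketch (not part of the original module): validation_curve, as
# defined above, sweeping the SVM regularisation parameter C. Dataset,
# estimator, and param_range values are assumptions for the example.
def _example_validation_curve_usage():
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    param_range = np.logspace(-3, 2, 6)
    train_scores, test_scores = validation_curve(
        svm.SVC(kernel='linear'), iris.data, iris.target,
        param_name='C', param_range=param_range, cv=5)
    # Rows correspond to values of C, columns to CV folds.
    return param_range, train_scores.mean(axis=1), test_scores.mean(axis=1)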
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.api.croninfo."""
import datetime
import re
from google.appengine.api import croninfo
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from absl.testing import absltest
class CronEntryTest(absltest.TestCase):
"""Tests for the croninfo.CronEntry class."""
def testCronEntryConstructor(self):
"""Tests a normal cron entry can be constructed."""
cron = croninfo.CronEntry(url='/foo/bar/biz', schedule='every 2 mins')
cron.CheckInitialized()
self.assertEqual('/foo/bar/biz', cron.url)
self.assertIsNone(cron.retry_parameters)
def testCronEntryConstructorWithRetryParameters(self):
"""Tests a normal cron entry with retry parameters can be constructed."""
retry_parameters = croninfo.RetryParameters(job_retry_limit=3)
cron = croninfo.CronEntry(url='/foo/bar/biz',
schedule='every 2 mins',
retry_parameters=retry_parameters)
cron.CheckInitialized()
self.assertEqual('/foo/bar/biz', cron.url)
self.assertEqual(3, cron.retry_parameters.job_retry_limit)
self.assertIsNotNone(cron.retry_parameters)
def testCronEntryConstructorWithAllRetryParameters(self):
"""Tests a normal cron entry with retry parameters can be constructed."""
retry_parameters = croninfo.RetryParameters(job_retry_limit=4,
job_age_limit='2d',
min_backoff_seconds=17,
max_backoff_seconds=17.77,
max_doublings=8)
cron = croninfo.CronEntry(url='/foo/bar/biz',
schedule='every 2 mins',
retry_parameters=retry_parameters)
cron.CheckInitialized()
self.assertEqual('/foo/bar/biz', cron.url)
self.assertEqual(4, cron.retry_parameters.job_retry_limit)
self.assertEqual('2d', cron.retry_parameters.job_age_limit)
self.assertEqual(17, cron.retry_parameters.min_backoff_seconds)
self.assertEqual(17.77, cron.retry_parameters.max_backoff_seconds)
self.assertEqual(8, cron.retry_parameters.max_doublings)
def testCronEntryConstructorWithEmptyRetryParameters(self):
"""Tests an empty cron entry with retry parameters can be constructed."""
retry_parameters = croninfo.RetryParameters()
cron = croninfo.CronEntry(url='/foo/bar/biz',
schedule='every 2 mins',
retry_parameters=retry_parameters)
self.assertIsNotNone(cron.retry_parameters)
self.assertIsNone(cron.retry_parameters.job_retry_limit)
self.assertIsNone(cron.retry_parameters.job_age_limit)
self.assertIsNone(cron.retry_parameters.min_backoff_seconds)
self.assertIsNone(cron.retry_parameters.max_backoff_seconds)
self.assertIsNone(cron.retry_parameters.max_doublings)
def testRetryParameterConstructorBadRetryAgeLimit(self):
"""Tests retry parameters with a bad age limit."""
self.assertRaises(validation.ValidationError,
croninfo.RetryParameters,
job_age_limit='2x')
def testRetryParameterConstructorNegativeValues(self):
"""Tests that (illegal) retry parameter negative values raise an error."""
self.assertRaises(validation.ValidationError,
croninfo.RetryParameters,
job_retry_limit=-1)
self.assertRaises(validation.ValidationError,
croninfo.RetryParameters,
max_doublings=-2.0)
self.assertRaises(validation.ValidationError,
croninfo.RetryParameters,
min_backoff_seconds=-3.0)
self.assertRaises(validation.ValidationError,
croninfo.RetryParameters,
max_backoff_seconds=-4)
def testCronEntryConstructorWithAttemptDeadline(self):
"""Tests a normal cron entry with retry parameters can be constructed."""
cron = croninfo.CronEntry(
url='/foo/bar/biz', schedule='every 2 mins', attempt_deadline='15.1s')
cron.CheckInitialized()
self.assertEqual(datetime.timedelta(seconds=15.1), cron.attempt_deadline)
  def testLoadCronWithAttemptDeadlineEndOfLineCharacters(self):
"""Tests that end-of-line characters are removed from cron entry."""
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' attempt_deadline: 25.1s\n\r')
config = croninfo.LoadSingleCron(input_data)
self.assertEqual('test-retry-app', config.application)
self.assertLen(config.cron, 1)
self.assertIsInstance(config.cron[0], croninfo.CronEntry)
self.assertEqual(
datetime.timedelta(seconds=25.1), config.cron[0].attempt_deadline)
def testCronAttemptDeadlineInvalidFormat(self):
"""Tests that a badly formatted attempt deadline string raise an exception."""
self.assertRaises(
validation.ValidationError,
croninfo.CronEntry,
url='/',
schedule='every 1 minutes',
attempt_deadline='20m')
self.assertRaises(
validation.ValidationError,
croninfo.CronEntry,
url='/',
schedule='every 1 minutes',
attempt_deadline='..223s')
def testBadCronEntryConstructor(self):
"""Tests that absolute URLs are rejected."""
self.assertRaises(validation.ValidationError, croninfo.CronEntry,
url='http://www.google.com/',
schedule='every 2 mins')
def testMissingCronEntryConstructor(self):
"""Tests for required values."""
cron = croninfo.CronEntry(schedule='every 2 mins')
self.assertRaises(validation.MissingAttribute, cron.CheckInitialized)
cron = croninfo.CronEntry(url='/cron.html')
self.assertRaises(validation.MissingAttribute, cron.CheckInitialized)
def testInvalidTimezoneConstructor(self):
"""Tests that an invalid timezone is rejected."""
self.assertRaises(validation.ValidationError, croninfo.CronEntry,
url='/foo/bar/baz', schedule='every 2 minutes',
timezone='orbiting jupiter')
class CronInfoTest(absltest.TestCase):
"""Tests for the croninfo.CronInfoExternal class."""
def testCronInfoConstructor(self):
info = croninfo.CronInfoExternal(cron=[
croninfo.CronEntry(url='/foo', schedule='every 2 mins'),
croninfo.CronEntry(url='/baz', schedule='every 20 hours'),
croninfo.CronEntry(url='/baz', schedule='every 20 hours',
timezone='PST8PDT'),
])
info.CheckInitialized()
class LoadSingleCronTest(absltest.TestCase):
def testLoaderSaneFile(self):
input_data = ('application: test-app\n'
'cron:\n'
'- url: /admin/hourly\n'
' schedule: every 60 mins\n'
'- url: /admin/daily\n'
' schedule: every 24 hours\n'
' timezone: Australia/NSW\n'
'- url: /admin/minute\n'
' schedule: every 1 mins\n'
' target: my-alternate-version\n'
'- url: /admin/description\n'
' schedule: every 2 mins\n'
' description: A task that runs every 2 minutes.\n'
)
config = croninfo.LoadSingleCron(input_data)
self.assertEqual('test-app', config.application)
self.assertLen(config.cron, 4)
self.assertIsInstance(config.cron[0], croninfo.CronEntry)
self.assertIsInstance(config.cron[1], croninfo.CronEntry)
self.assertEqual(config.cron[1].url, '/admin/daily')
self.assertEqual(config.cron[1].schedule, 'every 24 hours')
self.assertEqual(config.cron[1].timezone, 'Australia/NSW')
self.assertEqual(config.cron[2].url, '/admin/minute')
self.assertEqual(config.cron[2].schedule, 'every 1 mins')
self.assertEqual(config.cron[2].target, 'my-alternate-version')
self.assertEqual(config.cron[3].url, '/admin/description')
self.assertEqual(config.cron[3].schedule, 'every 2 mins')
self.assertEqual(config.cron[3].description,
'A task that runs every 2 minutes.')
def testLoaderSaneFileWithRetry(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_retry_limit: 4\n'
'- url: /a/retry/job2\n'
' schedule: every 14 hours\n'
' retry_parameters:\n'
' job_retry_limit: 2\n'
' job_age_limit: 1d\n'
' min_backoff_seconds: 1\n'
' max_backoff_seconds: 1800\n'
' max_doublings: 20\n'
)
config = croninfo.LoadSingleCron(input_data)
self.assertEqual('test-retry-app', config.application)
self.assertLen(config.cron, 2)
self.assertIsInstance(config.cron[0], croninfo.CronEntry)
self.assertIsNotNone(config.cron[0].retry_parameters)
self.assertEqual(4, config.cron[0].retry_parameters.job_retry_limit)
self.assertIsNone(config.cron[0].retry_parameters.job_age_limit)
self.assertIsNone(config.cron[0].retry_parameters.min_backoff_seconds)
self.assertIsNone(config.cron[0].retry_parameters.max_backoff_seconds)
self.assertIsNone(config.cron[0].retry_parameters.max_doublings)
self.assertIsNotNone(config.cron[1].retry_parameters)
self.assertEqual(2, config.cron[1].retry_parameters.job_retry_limit)
self.assertEqual('1d', config.cron[1].retry_parameters.job_age_limit)
self.assertEqual(1, config.cron[1].retry_parameters.min_backoff_seconds)
self.assertEqual(1800, config.cron[1].retry_parameters.max_backoff_seconds)
self.assertEqual(20, config.cron[1].retry_parameters.max_doublings)
def testLoaderInvalidRetry_1(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_retry_limit: -1\n'
)
with self.assertRaisesRegex(yaml_errors.EventListenerError, '-1'):
croninfo.LoadSingleCron(input_data)
def testLoaderInvalidRetry_2(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_age_limit: 0\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_3(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_age_limit: xx\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_4(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_age_limit: {}\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_5(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_age_limit: xx\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_6(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' job_age_limit: {}\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_7(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' max_doublings: -5\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_8(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' min_backoff_seconds: -55\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_9(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' max_backoff_seconds: -2\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderInvalidRetry_10(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters:\n'
' param_doesnt_exist: 2\n'
)
self.assertRaises(yaml_errors.EventListenerError,
croninfo.LoadSingleCron, input_data)
def testLoaderEmptyRetry_11(self):
input_data = ('application: test-retry-app\n'
'cron:\n'
'- url: /a/retry/job\n'
' schedule: every 12 mins\n'
' retry_parameters: {}\n'
)
config = croninfo.LoadSingleCron(input_data)
self.assertLen(config.cron, 1)
self.assertIsNotNone(config.cron[0].retry_parameters)
self.assertIsNone(config.cron[0].retry_parameters.job_retry_limit)
self.assertIsNone(config.cron[0].retry_parameters.job_age_limit)
self.assertIsNone(config.cron[0].retry_parameters.min_backoff_seconds)
self.assertIsNone(config.cron[0].retry_parameters.max_backoff_seconds)
self.assertIsNone(config.cron[0].retry_parameters.max_doublings)
def testLoaderUnicodeDescription(self):
input_data = ('cron:\n'
'- url: /admin/description\n'
' schedule: every 2 mins\n'
' description: A Chinese description - '
u'\u4e2d\u56fd\u63cf\u8ff0.\n'
)
config = croninfo.LoadSingleCron(input_data)
self.assertLen(config.cron, 1)
self.assertEqual(config.cron[0].url, '/admin/description')
self.assertEqual(config.cron[0].description,
u'A Chinese description - \u4e2d\u56fd\u63cf\u8ff0.')
def testLoaderWithoutPytz(self):
real_pytz_module = croninfo.pytz
croninfo.pytz = None
try:
self.testLoaderSaneFile()
finally:
croninfo.pytz = real_pytz_module
def testLoaderWithModuleTarget(self):
input_data = ('application: test-app\n'
'cron:\n'
'- url: /admin/hourly\n'
' schedule: every 60 mins\n'
' target: module\n'
)
config = croninfo.LoadSingleCron(input_data)
self.assertEqual('test-app', config.application)
self.assertLen(config.cron, 1)
self.assertIsInstance(config.cron[0], croninfo.CronEntry)
self.assertEqual(config.cron[0].url, '/admin/hourly')
self.assertEqual(config.cron[0].schedule, 'every 60 mins')
self.assertEqual(config.cron[0].target, 'module')
  def testLoaderWithNumericVersionedModuleTarget(self):
"""Test b/35767221.
apphosting/api/croninfo.py should have the same change as b/15887817.
"""
input_data = ('application: test-app\n'
'cron:\n'
'- url: /admin/hourly\n'
' schedule: every 60 mins\n'
' target: 1:module\n'
)
config = croninfo.LoadSingleCron(input_data)
self.assertEqual('test-app', config.application)
self.assertLen(config.cron, 1)
self.assertIsInstance(config.cron[0], croninfo.CronEntry)
self.assertEqual(config.cron[0].url, '/admin/hourly')
self.assertEqual(config.cron[0].schedule, 'every 60 mins')
self.assertEqual(config.cron[0].target, '1:module')
input_data = ('application: test-app\n'
'cron:\n'
'- url: /admin/hourly\n'
' schedule: every 60 mins\n'
' target: 1.module\n'
)
with self.assertRaisesRegex(
yaml_errors.EventError,
re.escape(
'Value \'1.module\' for target does not match expression \''
r'^(?:^(?:(?:((?!-)[a-z\d\-]{1,63}):)?)((?!-)[a-z\d\-]{1,100})$)$'
'\'')):
croninfo.LoadSingleCron(input_data)
if __name__ == '__main__':
absltest.main()
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
import webob.exc
from wsme.rest import json
from glance.api import policy
from glance.api.v2.model.metadef_resource_type import ResourceType
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations
from glance.api.v2.model.metadef_resource_type import ResourceTypes
from glance.common import exception
from glance.common import wsgi
import glance.db
import glance.gateway
from glance import i18n
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
class ResourceTypeController(object):
def __init__(self, db_api=None, policy_enforcer=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.gateway = glance.gateway.Gateway(db_api=self.db_api,
policy_enforcer=self.policy)
def index(self, req):
try:
filters = {}
filters['namespace'] = None
rs_type_repo = self.gateway.get_metadef_resource_type_repo(
req.context)
db_resource_type_list = rs_type_repo.list(filters=filters)
resource_type_list = [ResourceType.to_wsme_model(
resource_type) for resource_type in db_resource_type_list]
resource_types = ResourceTypes()
resource_types.resource_types = resource_type_list
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(e)
raise webob.exc.HTTPInternalServerError(e)
return resource_types
def show(self, req, namespace):
try:
filters = {}
filters['namespace'] = namespace
rs_type_repo = self.gateway.get_metadef_resource_type_repo(
req.context)
db_resource_type_list = rs_type_repo.list(filters=filters)
resource_type_list = [ResourceTypeAssociation.to_wsme_model(
resource_type) for resource_type in db_resource_type_list]
resource_types = ResourceTypeAssociations()
resource_types.resource_type_associations = resource_type_list
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(e)
raise webob.exc.HTTPInternalServerError(e)
return resource_types
def create(self, req, resource_type, namespace):
rs_type_factory = self.gateway.get_metadef_resource_type_factory(
req.context)
rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context)
try:
new_resource_type = rs_type_factory.new_resource_type(
namespace=namespace, **resource_type.to_dict())
rs_type_repo.add(new_resource_type)
except exception.Forbidden as e:
msg = (_LE("Forbidden to create resource type. "
"Reason: %(reason)s")
% {'reason': encodeutils.exception_to_unicode(e)})
LOG.error(msg)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(e)
raise webob.exc.HTTPInternalServerError()
return ResourceTypeAssociation.to_wsme_model(new_resource_type)
def delete(self, req, namespace, resource_type):
rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context)
try:
filters = {}
found = False
filters['namespace'] = namespace
db_resource_type_list = rs_type_repo.list(filters=filters)
for db_resource_type in db_resource_type_list:
if db_resource_type.name == resource_type:
db_resource_type.delete()
rs_type_repo.remove(db_resource_type)
found = True
if not found:
raise exception.NotFound()
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
msg = (_("Failed to find resource type %(resourcetype)s to "
"delete") % {'resourcetype': resource_type})
LOG.error(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except Exception as e:
LOG.error(e)
raise webob.exc.HTTPInternalServerError()
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ['created_at', 'updated_at']
def __init__(self, schema=None):
super(RequestDeserializer, self).__init__()
self.schema = schema or get_schema()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, image):
for key in cls._disallowed_properties:
if key in image:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(
explanation=encodeutils.exception_to_unicode(msg))
def create(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
resource_type = json.fromjson(ResourceTypeAssociation, body)
return dict(resource_type=resource_type)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema
def show(self, response, result):
resource_type_json = json.tojson(ResourceTypeAssociations, result)
body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, result):
resource_type_json = json.tojson(ResourceTypes, result)
body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def create(self, response, result):
resource_type_json = json.tojson(ResourceTypeAssociation, result)
response.status_int = 201
body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def delete(self, response, result):
response.status_int = 204
def _get_base_properties():
return {
'name': {
'type': 'string',
'description': _('Resource type names should be aligned with Heat '
'resource types whenever possible: '
'http://docs.openstack.org/developer/heat/'
'template_guide/openstack.html'),
'maxLength': 80,
},
'prefix': {
'type': 'string',
'description': _('Specifies the prefix to use for the given '
'resource type. Any properties in the namespace '
'should be prefixed with this prefix when being '
'applied to the specified resource type. Must '
'include prefix separator (e.g. a colon :).'),
'maxLength': 80,
},
'properties_target': {
'type': 'string',
'description': _('Some resource types allow more than one key / '
'value pair per instance. For example, Cinder '
'allows user and image metadata on volumes. Only '
'the image properties metadata is evaluated by '
'Nova (scheduling or drivers). This property '
'allows a namespace target to remove the '
'ambiguity.'),
'maxLength': 80,
},
"created_at": {
"type": "string",
"description": _("Date and time of resource type association"
" (READ-ONLY)"),
"format": "date-time"
},
"updated_at": {
"type": "string",
"description": _("Date and time of the last resource type "
"association modification (READ-ONLY)"),
"format": "date-time"
}
}
def get_schema():
properties = _get_base_properties()
mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs()
schema = glance.schema.Schema(
'resource_type_association',
properties,
required=mandatory_attrs,
)
return schema
def get_collection_schema():
resource_type_schema = get_schema()
return glance.schema.CollectionSchema('resource_type_associations',
resource_type_schema)
def create_resource():
"""ResourceTypeAssociation resource factory method"""
schema = get_schema()
deserializer = RequestDeserializer(schema)
serializer = ResponseSerializer(schema)
controller = ResourceTypeController()
return wsgi.Resource(controller, deserializer, serializer)
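# Illustrative wiring sketch (not part of this module): the factory above is
# typically hooked into the v2 API router. The route paths and mapper calls
# below are assumptions for illustration only.
#
#   resource = create_resource()
#   mapper.connect('/metadefs/resource_types',
#                  controller=resource, action='index',
#                  conditions={'method': ['GET']})
#   mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
#                  controller=resource, action='create',
#                  conditions={'method': ['POST']})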
|
|
"""
Stores customer, organization, and order information.
"""
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from l10n.models import Country
from satchmo_store.contact import CUSTOMER_ID
import datetime
import logging
log = logging.getLogger('contact.models')
class ContactRole(models.Model):
key = models.CharField(_('Key'), max_length=30, unique=True, primary_key=True)
name = models.CharField(_('Name'), max_length=40)
def __unicode__(self):
return ugettext(self.name)
class ContactOrganization(models.Model):
key = models.CharField(_('Key'), max_length=30, unique=True, primary_key=True)
name = models.CharField(_('Name'), max_length=40)
def __unicode__(self):
return ugettext(self.name)
class Meta:
verbose_name = _('Contact organization type')
class ContactOrganizationRole(models.Model):
key = models.CharField(_('Key'), max_length=30, unique=True, primary_key=True)
name = models.CharField(_('Name'), max_length=40)
def __unicode__(self):
return ugettext(self.name)
class ContactInteractionType(models.Model):
key = models.CharField(_('Key'), max_length=30, unique=True, primary_key=True)
name = models.CharField(_('Name'), max_length=40)
def __unicode__(self):
return ugettext(self.name)
class OrganizationManager(models.Manager):
def by_name(self, name, create=False, role='Customer', orgtype='Company'):
org = None
orgs = self.filter(name=name, role__key=role, type__key=orgtype)
if orgs.count() > 0:
org = orgs[0]
if not org:
if not create:
raise Organization.DoesNotExist()
else:
log.debug('Creating organization: %s', name)
role = ContactOrganizationRole.objects.get(pk=role)
orgtype = ContactOrganization.objects.get(pk=orgtype)
org = Organization(name=name, role=role, type=orgtype)
org.save()
return org
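# A minimal usage sketch for OrganizationManager.by_name (the name is
# illustrative; the default 'Customer' role and 'Company' type rows are
# assumed to already exist in the database):
#
#   org = Organization.objects.by_name('Acme Widgets', create=True)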
class Organization(models.Model):
"""
An organization can be a company, government or any kind of group.
"""
name = models.CharField(_("Name"), max_length=50, )
type = models.ForeignKey(ContactOrganization, verbose_name=_("Type"), null=True)
role = models.ForeignKey(ContactOrganizationRole, verbose_name=_("Role"), null=True)
create_date = models.DateField(_("Creation Date"))
notes = models.TextField(_("Notes"), max_length=200, blank=True, null=True)
objects = OrganizationManager()
def __unicode__(self):
return self.name
def save(self, **kwargs):
"""Ensure we have a create_date before saving the first time."""
if not self.pk:
self.create_date = datetime.date.today()
super(Organization, self).save(**kwargs)
class Meta:
verbose_name = _("Organization")
verbose_name_plural = _("Organizations")
class ContactManager(models.Manager):
def from_request(self, request, create=False):
"""Get the contact from the session, else look up using the logged-in
user. Create an unsaved new contact if `create` is true.
Returns:
- Contact object or None
"""
contact = None
if request.user.is_authenticated():
try:
contact = Contact.objects.get(user=request.user.id)
request.session[CUSTOMER_ID] = contact.id
except Contact.DoesNotExist:
pass
else:
# Don't create a Contact if the user isn't authenticated.
create = False
if request.session.get(CUSTOMER_ID):
try:
contactBySession = Contact.objects.get(id=request.session[CUSTOMER_ID])
if contact is None:
contact = contactBySession
elif contact != contactBySession:
# For some reason the authenticated id and the session customer ID don't match.
# Let's bias the authenticated ID and kill this customer ID:
log.debug("CURIOUS: The user authenticated as %r (contact id:%r) and a session as %r (contact id:%r)" %
(contact.user.get_full_name(), contact.id, Contact.objects.get(id=request.session[CUSTOMER_ID]).full_name, request.session[CUSTOMER_ID]))
log.debug("Deleting the session contact.")
del request.session[CUSTOMER_ID]
except Contact.DoesNotExist:
log.debug("This user has a session stored customer id (%r) which doesn't exist anymore. Removing it from the session." % request.session[CUSTOMER_ID])
del request.session[CUSTOMER_ID]
if contact is None:
if create:
contact = Contact(user=request.user)
else:
raise Contact.DoesNotExist()
return contact
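# A minimal usage sketch for ContactManager.from_request (hypothetical view
# code; ``request`` is the usual Django request object): fetch the contact
# tied to the current request, creating an unsaved one for authenticated
# users when none exists yet.
#
#   try:
#       contact = Contact.objects.from_request(request, create=True)
#   except Contact.DoesNotExist:
#       contact = None   # anonymous visitor with no session contact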
OCCUPATION_CHOICES = (
('Student', _('Student')),
('Professional', _('Professional')),
('Other', _('Other')),
)
class Contact(models.Model):
"""
A customer, supplier or any individual that a store owner might interact
with.
"""
title = models.CharField(_("Title"), max_length=30, blank=True, null=True)
first_name = models.CharField(_("First name"), max_length=30, )
last_name = models.CharField(_("Last name"), max_length=30, )
user = models.ForeignKey(User, blank=True, null=True, unique=True)
role = models.ForeignKey(ContactRole, verbose_name=_("Role"), null=True)
organization = models.ForeignKey(Organization, verbose_name=_("Organization"), blank=True, null=True)
dob = models.DateField(_("Date of birth"), blank=True, null=True)
email = models.EmailField(_("Email"), blank=True, max_length=75)
notes = models.TextField(_("Notes"), max_length=500, blank=True)
create_date = models.DateField(_("Creation date"))
occupation = models.CharField(_("Occupation"), choices=OCCUPATION_CHOICES,
max_length=20, blank=True)
objects = ContactManager()
def _get_full_name(self):
"""Return the person's full name."""
return u'%s %s' % (self.first_name, self.last_name)
full_name = property(_get_full_name)
def _shipping_address(self):
"""Return the default shipping address or None."""
try:
return self.addressbook_set.get(is_default_shipping=True)
except AddressBook.DoesNotExist:
return None
shipping_address = property(_shipping_address)
def _billing_address(self):
"""Return the default billing address or None."""
try:
return self.addressbook_set.get(is_default_billing=True)
except AddressBook.DoesNotExist:
return None
billing_address = property(_billing_address)
def _primary_phone(self):
"""Return the default phone number or None."""
try:
return self.phonenumber_set.get(primary=True)
except PhoneNumber.DoesNotExist:
return None
primary_phone = property(_primary_phone)
def __unicode__(self):
return self.full_name
def save(self, **kwargs):
"""Ensure we have a create_date before saving the first time."""
if not self.pk:
self.create_date = datetime.date.today()
# Validate contact to user sync
if self.user:
dirty = False
user = self.user
if user.email != self.email:
user.email = self.email
dirty = True
if user.first_name != self.first_name:
user.first_name = self.first_name
dirty = True
if user.last_name != self.last_name:
user.last_name = self.last_name
dirty = True
if dirty:
self.user = user
self.user.save()
super(Contact, self).save(**kwargs)
def _get_address_book_entries(self):
""" Return all non primary shipping and billing addresses
"""
return AddressBook.objects.filter(contact=self.pk).exclude(is_default_shipping=True).exclude(is_default_billing=True)
    address_book_entries = property(_get_address_book_entries)
class Meta:
verbose_name = _("Contact")
verbose_name_plural = _("Contacts")
PHONE_CHOICES = (
('Work', _('Work')),
('Home', _('Home')),
('Fax', _('Fax')),
('Mobile', _('Mobile')),
)
class Interaction(models.Model):
"""
A type of activity with the customer. Useful to track emails, phone calls,
or in-person interactions.
"""
contact = models.ForeignKey(Contact, verbose_name=_("Contact"))
type = models.ForeignKey(ContactInteractionType, verbose_name=_("Type"))
date_time = models.DateTimeField(_("Date and Time"), )
description = models.TextField(_("Description"), max_length=200)
def __unicode__(self):
return u'%s - %s' % (self.contact.full_name, self.type)
class Meta:
verbose_name = _("Interaction")
verbose_name_plural = _("Interactions")
class PhoneNumber(models.Model):
"""
Phone number associated with a contact.
"""
contact = models.ForeignKey(Contact)
type = models.CharField(_("Description"), choices=PHONE_CHOICES,
max_length=20, blank=True)
phone = models.CharField(_("Phone Number"), blank=True, max_length=30,
)
primary = models.BooleanField(_("Primary"), default=False)
def __unicode__(self):
return u'%s - %s' % (self.type, self.phone)
def save(self, **kwargs):
"""
If this number is the default, then make sure that it is the only
primary phone number. If there is no existing default, then make
this number the default.
"""
existing_number = self.contact.primary_phone
if existing_number:
if self.primary:
existing_number.primary = False
super(PhoneNumber, existing_number).save()
else:
self.primary = True
super(PhoneNumber, self).save(**kwargs)
class Meta:
ordering = ['-primary']
verbose_name = _("Phone Number")
verbose_name_plural = _("Phone Numbers")
class AddressBook(models.Model):
"""
Address information associated with a contact.
"""
contact = models.ForeignKey(Contact)
description = models.CharField(_("Description"), max_length=20, blank=True,
help_text=_('Description of address - Home, Office, Warehouse, etc.',))
addressee = models.CharField(_("Addressee"), max_length=80)
street1 = models.CharField(_("Street"), max_length=80)
street2 = models.CharField(_("Street"), max_length=80, blank=True)
state = models.CharField(_("State"), max_length=50, blank=True)
city = models.CharField(_("City"), max_length=50)
postal_code = models.CharField(_("Zip Code"), max_length=30)
country = models.ForeignKey(Country, verbose_name=_("Country"))
is_default_shipping = models.BooleanField(_("Default Shipping Address"),
default=False)
is_default_billing = models.BooleanField(_("Default Billing Address"),
default=False)
def __unicode__(self):
return u'%s - %s' % (self.contact.full_name, self.description)
def save(self, **kwargs):
"""
If this address is the default billing or shipping address, then
remove the old address's default status. If there is no existing
default, then make this address the default.
"""
existing_billing = self.contact.billing_address
if existing_billing:
if self.is_default_billing:
existing_billing.is_default_billing = False
super(AddressBook, existing_billing).save()
else:
self.is_default_billing = True
existing_shipping = self.contact.shipping_address
if existing_shipping:
if self.is_default_shipping:
existing_shipping.is_default_shipping = False
super(AddressBook, existing_shipping).save()
else:
self.is_default_shipping = True
super(AddressBook, self).save(**kwargs)
class Meta:
verbose_name = _("Address Book")
verbose_name_plural = _("Address Books")
import config
|
|
__author__ = 'Michael Isik'
from pybrain.structure.networks.network import Network
from pybrain.structure.modules.lstm import LSTMLayer
from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.modules.module import Module
from pybrain.structure.modules.biasunit import BiasUnit
from numpy import zeros, array, append, reshape
from copy import copy, deepcopy
class EvolinoNetwork(Module):
def __init__(self, indim, outdim, hiddim=6):
Module.__init__(self, indim, outdim)
self._network = Network()
self._in_layer = LinearLayer(indim+outdim)
self._hid_layer = LSTMLayer(hiddim)
self._out_layer = LinearLayer(outdim)
self._bias = BiasUnit()
self._network.addInputModule( self._in_layer )
self._network.addModule(self._hid_layer)
self._network.addModule(self._bias)
self._network.addOutputModule(self._out_layer)
self._hid_to_out_connection = FullConnection( self._hid_layer , self._out_layer )
self._in_to_hid_connection = FullConnection( self._in_layer , self._hid_layer )
self._network.addConnection( self._hid_to_out_connection )
self._network.addConnection( self._in_to_hid_connection )
self._network.addConnection( FullConnection( self._bias, self._hid_layer ) )
self._network.sortModules()
self.time = self._network.time
self.backprojectionFactor = 0.01
def reset(self):
self._network.reset()
def _washout(self, input, target, first_idx=None, last_idx=None):
assert self.indim == len(input[0])
assert self.outdim == len(target[0])
assert len(input) == len(target)
if first_idx is None: first_idx = 0
if last_idx is None: last_idx = len(target)-1
raw_outputs = []
for i in xrange(first_idx, last_idx+1):
backprojection = self._getLastOutput()
backprojection *= self.backprojectionFactor
full_inp = self._createFullInput( input[i], backprojection )
self._activateNetwork(full_inp)
raw_out = self._getRawOutput()
# print "RAWOUT: ", full_inp, " --> ", raw_out, self._getLastOutput()
raw_outputs.append(array(raw_out))
self._setLastOutput(target[i])
return array(raw_outputs)
def _activateNetwork(self, input):
assert len(input) == self._network.indim
output = self._network.activate(input)
self.time = self._network.time
# print "INNNNNNN=", input, " OUTPP=", output
return output
def activate(self, input):
assert len(input) == self.indim
backprojection = self._getLastOutput()
backprojection *= self.backprojectionFactor
full_inp = self._createFullInput(input, backprojection)
out = self._activateNetwork(full_inp)
# print "AAAAAACT: ", full_inp, "-->", out
# self._setLastOutput(last_out*5)
return out
def calculateOutput(self, dataset, washout_calculation_ratio=(1,2)):
washout_calculation_ratio=array(washout_calculation_ratio,float)
ratio = washout_calculation_ratio/sum(washout_calculation_ratio)
# iterate through all sequences
collected_input = None
collected_output = None
collected_target = None
for i in range(dataset.getNumSequences()):
seq = dataset.getSequence(i)
input = seq[0]
target = seq[1]
washout_steps = int( len(input) * ratio[0] )
washout_input = input [ : washout_steps ]
washout_target = target [ : washout_steps ]
calculation_target = target [ washout_steps : ]
# reset
self.reset()
# washout
self._washout(washout_input, washout_target)
# collect calculation data
outputs = []
inputs = []
# for i in xrange(washout_steps, len(input)):
for inp in input[washout_steps:]:
out = self.activate(inp)
# print out
# print inp
inputs.append(inp)
outputs.append(out)
# collect output and targets
if collected_input is not None:
collected_input = append( collected_input, inputs, axis=0 )
else:
collected_input = array(inputs)
# print collected_input; exit()
if collected_output is not None:
collected_output = append( collected_output, outputs, axis=0 )
else:
collected_output = array(outputs)
if collected_target is not None:
collected_target = append( collected_target, calculation_target, axis=0 )
else:
collected_target = calculation_target
return collected_input, collected_output, collected_target
def _createFullInput(self, input, output):
if self.indim>0:
return append(input, output)
else:
return array(output)
def _getLastOutput(self):
if self.time == 0:
return zeros(self.outdim)
else:
return self._out_layer.outputbuffer[self.time-1]
def _setLastOutput(self, output):
self._out_layer.outputbuffer[self.time-1][:] = output
# ======================================================== Genome related ===
def _validateGenomeLayer(self, layer):
""" Validates the type and state of a layer
"""
assert isinstance(layer,LSTMLayer)
assert not layer.peepholes
def getGenome(self):
""" Returns the Genome of the network.
See class description for more details.
"""
return self._getGenomeOfLayer(self._hid_layer)
def setGenome(self, weights):
""" Sets the Genome of the network.
See class description for more details.
"""
weights = deepcopy(weights)
self._setGenomeOfLayer(self._hid_layer, weights)
def _getGenomeOfLayer(self, layer):
""" Returns the genome of a single layer.
"""
self._validateGenomeLayer(layer)
dim = layer.outdim
layer_weights = []
connections = self._getInputConnectionsOfLayer(layer)
for cell_idx in range(dim):
# todo: the evolino paper uses a different order of weights for the genotype of a lstm cell
cell_weights = []
for c in connections:
cell_weights += [
c.params[ cell_idx + 0 * dim ],
c.params[ cell_idx + 1 * dim ],
c.params[ cell_idx + 2 * dim ],
c.params[ cell_idx + 3 * dim ] ]
layer_weights.append( cell_weights )
return layer_weights
def _setGenomeOfLayer(self, layer, weights):
""" Sets the genome of a single layer.
"""
self._validateGenomeLayer(layer)
dim = layer.outdim
connections = self._getInputConnectionsOfLayer(layer)
for cell_idx in range(dim):
cell_weights = weights.pop(0)
for c in connections:
params = c.params
params[cell_idx + 0 * dim] = cell_weights.pop(0)
params[cell_idx + 1 * dim] = cell_weights.pop(0)
params[cell_idx + 2 * dim] = cell_weights.pop(0)
params[cell_idx + 3 * dim] = cell_weights.pop(0)
assert not len(cell_weights)
# ============================================ Linear Regression related ===
def setOutputWeightMatrix(self, W):
""" Sets the weight matrix of the output layer's input connection.
"""
c = self._hid_to_out_connection
c.params[:] = W.flatten()
def getOutputWeightMatrix(self):
""" Sets the weight matrix of the output layer's input connection.
"""
c = self._hid_to_out_connection
p = c.getParameters()
return reshape( p, (c.outdim, c.indim) )
def _getRawOutput(self):
""" Returns the current output of the last hidden layer.
This is needed for linear regression, which calculates
the weight matrix W of the full connection between this layer
and the output layer.
"""
return copy(self._hid_layer.outputbuffer[self.time-1])
# ====================================================== Topology Helper ===
def _getInputConnectionsOfLayer(self, layer):
""" Returns a list of all input connections for the layer. """
connections = []
for c in sum( self._network.connections.values(), [] ):
if c.outmod is layer:
if not isinstance( c, FullConnection ):
raise NotImplementedError("At the time there is only support for FullConnection")
connections.append(c)
return connections
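# A minimal sketch of driving EvolinoNetwork directly (the dimensions and
# input values are illustrative): inputs are fed one timestep at a time and
# the previous output is backprojected internally.
#
#   net = EvolinoNetwork(indim=2, outdim=1, hiddim=6)
#   net.reset()
#   out = net.activate([0.1, 0.2])   # vector of length outdim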
class NetworkWrapper(object):
""" Network wrapper class for Evolino Networks
This class implements methods for extracting and setting the genome of
the supplied network to allow its evolving.
The genome of the network consists of the input weights of each hidden
lstm neuron. The structure of the genome will be a list of lists,
        where the inner lists bundle all input weights of one neuron:
[ [ neuron1's inweights ] , [ neuron2's inweights ] , ... ]
The inner lists will be used as chromosomes inside the evolino framework.
Also there are methods that help with the linear regression part.
        They can extract and set the weight matrix W for the last full-connection.
        At the moment the network must meet the following constraints:
- All hidden layers that have input connections must be of type LSTMLayer
            - The LSTMLayers must not use peepholes
- There must be exactly one output-layer
- There must be exactly one input-layer
- There must be only one layer, that is connected to the output layer
- The input layer must be connected to only one hidden layer
- All used connections must be of type FullConnection
When the network is supplied it will be augmented with a
recurrent full connection from the output layer to the first hidden layer.
So do not do this yourself.
"""
def __init__(self, network):
""" @param network: The network to be wrapped
"""
self.network = network
self._output_connection = None
self._last_hidden_layer = None
self._first_hidden_layer = None
self._establishRecurrence()
def getNetwork(self):
""" Returns the Network """
return self.network
def _establishRecurrence(self):
""" Adds a recurrent full connection from the output layer to the first
hidden layer.
"""
network = self.network
outlayer = self.getOutputLayer()
hid1layer = self.getFirstHiddenLayer()
network.addRecurrentConnection( FullConnection( outlayer, hid1layer ) )
# ======================================================== Genome related ===
def _validateGenomeLayer(self, layer):
""" Validates the type and state of a layer
"""
assert isinstance(layer,LSTMLayer)
assert not layer.peepholes
def getGenome(self):
""" Returns the Genome of the network.
See class description for more details.
"""
weights=[]
for layer in self.getHiddenLayers():
if isinstance(layer, LSTMLayer):
# if layer is not self._recurrence_layer:
weights += self._getGenomeOfLayer(layer)
return weights
def setGenome(self, weights):
""" Sets the Genome of the network.
See class description for more details.
"""
weights = deepcopy(weights)
for layer in self.getHiddenLayers():
if isinstance(layer, LSTMLayer):
# if layer is not self._recurrence_layer:
self._setGenomeOfLayer(layer, weights)
def _getGenomeOfLayer(self, layer):
""" Returns the genome of a single layer.
"""
self._validateGenomeLayer(layer)
dim = layer.outdim
layer_weights = []
connections = self._getInputConnectionsOfLayer(layer)
for cell_idx in range(dim):
# todo: the evolino paper uses a different order of weights for the genotype of a lstm cell
cell_weights = []
for c in connections:
cell_weights += [
c.getParameters()[ cell_idx + 0 * dim ],
c.getParameters()[ cell_idx + 1 * dim ],
c.getParameters()[ cell_idx + 2 * dim ],
c.getParameters()[ cell_idx + 3 * dim ] ]
layer_weights.append( cell_weights )
return layer_weights
def _setGenomeOfLayer(self, layer, weights):
""" Sets the genome of a single layer.
"""
self._validateGenomeLayer(layer)
dim = layer.outdim
connections = self._getInputConnectionsOfLayer(layer)
for cell_idx in range(dim):
cell_weights = weights.pop(0)
for c in connections:
params = c.getParameters()
params[cell_idx + 0 * dim] = cell_weights.pop(0)
params[cell_idx + 1 * dim] = cell_weights.pop(0)
params[cell_idx + 2 * dim] = cell_weights.pop(0)
params[cell_idx + 3 * dim] = cell_weights.pop(0)
assert not len(cell_weights)
# ============================================ Linear Regression related ===
def setOutputWeightMatrix(self, W):
""" Sets the weight matrix of the output layer's input connection.
"""
c = self.getOutputConnection()
p = c.getParameters()
p[:] = W.flatten()
def getOutputWeightMatrix(self):
""" Sets the weight matrix of the output layer's input connection.
"""
c = self.getOutputConnection()
p = c.getParameters()
return reshape( p, (c.outdim, c.indim) )
def injectBackproject(self, injection):
""" Injects a vector into the recurrent connection.
            This will be used in the evolino training phase, where the target
values need to be backprojected instead of the real output of the net.
@param injection: vector of length self.network.outdim
"""
outlayer = self.getOutputLayer()
outlayer.outputbuffer[self.network.time-1][:] = injection
def _getRawOutput(self):
""" Returns the current output of the last hidden layer.
This is needed for linear regression, which calculates
the weight matrix W of the full connection between this layer
and the output layer.
"""
return copy(self.getLastHiddenLayer().outputbuffer[self.network.time-1])
# ====================================================== Topology Helper ===
def getOutputLayer(self):
""" Returns the output layer """
assert len(self.network.outmodules)==1
return self.network.outmodules[0]
def getOutputConnection(self):
""" Returns the input connection of the output layer. """
if self._output_connection is None:
outlayer = self.getOutputLayer()
lastlayer = self.getLastHiddenLayer()
for c in self.getConnections():
if c.outmod is outlayer:
assert c.inmod is lastlayer
self._output_connection = c
return self._output_connection
def getLastHiddenLayer(self):
""" Returns the last hidden layer. """
if self._last_hidden_layer is None:
outlayer = self.getOutputLayer()
layers = []
for c in self.getConnections():
if c.outmod is outlayer:
# print c.inmod
layers.append(c.inmod)
assert len(layers)==1
self._last_hidden_layer = layers[0]
return self._last_hidden_layer
def getFirstHiddenLayer(self):
""" Returns the first hidden layer. """
if self._first_hidden_layer is None:
inlayer = self.getInputLayer()
layers = []
for c in self.getConnections():
if c.inmod is inlayer:
layers.append(c.outmod)
assert len(layers)==1
self._first_hidden_layer = layers[0]
return self._first_hidden_layer
def getConnections(self):
""" Returns a list of all connections. """
return sum( self.network.connections.values(), [] )
def getInputLayer(self):
""" Returns the input layer. """
assert len(self.network.inmodules)==1
return self.network.inmodules[0]
def _getInputConnectionsOfLayer(self, layer):
""" Returns a list of all input connections for the layer. """
connections = []
for c in sum( self.network.connections.values(), [] ):
if c.outmod is layer:
if not isinstance( c, FullConnection ):
raise NotImplementedError("At the time there is only support for FullConnection")
connections.append(c)
return connections
def getHiddenLayers(self):
""" Returns a list of all hidden layers. """
layers = []
network = self.network
for m in network.modules:
if m not in network.inmodules and m not in network.outmodules:
layers.append(m)
return layers
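# Genome round-trip sketch for NetworkWrapper (assuming ``net`` is a pybrain
# Network that satisfies the constraints listed in the class docstring): the
# genome is a list of per-neuron chromosomes that evolino mutates and writes
# back.
#
#   wrapper = NetworkWrapper(net)
#   genome = wrapper.getGenome()     # [[w1, w2, ...], [w1, w2, ...], ...]
#   wrapper.setGenome(genome)        # restores the same input weights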
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Testing utilities provided by ``flocker.volume``.
"""
import errno
import os
import uuid
from unittest import SkipTest
from characteristic import attributes
from twisted.python.filepath import FilePath
from twisted.internet.task import Clock
from twisted.internet import reactor
from twisted.trial.unittest import SynchronousTestCase
from ..common import ProcessNode
from ..testtools import run_process
from ._ipc import RemoteVolumeManager
from .filesystems.zfs import StoragePool
from .service import VolumeService
from .filesystems.memory import FilesystemStoragePool
def create_volume_service(test):
"""
Create a new ``VolumeService`` suitable for use in unit tests.
:param TestCase test: A unit test which will shut down the service
when done.
:return: The ``VolumeService`` created.
"""
service = VolumeService(FilePath(test.mktemp()),
FilesystemStoragePool(FilePath(test.mktemp())),
reactor=Clock())
service.startService()
test.addCleanup(service.stopService)
return service
def service_for_pool(test, pool):
"""
Create a ``VolumeService`` wrapping a pool suitable for use in tests.
:param TestCase test: A unit test which will shut down the service
when done.
:param IStoragePool pool: The pool to wrap.
:return: A ``VolumeService``.
"""
service = VolumeService(FilePath(test.mktemp()), pool, None)
service.startService()
test.addCleanup(service.stopService)
return service
def create_zfs_pool(test_case):
"""Create a new ZFS pool, then delete it after the test is over.
:param test_case: A ``unittest.TestCase``.
:return: The pool's name as ``bytes``.
"""
if os.getuid() != 0:
raise SkipTest("Functional tests must run as root.")
pool_name = b"testpool_%s" % (uuid.uuid4(),)
pool_path = FilePath(test_case.mktemp())
mount_path = FilePath(test_case.mktemp())
with pool_path.open("wb") as f:
f.truncate(100 * 1024 * 1024)
test_case.addCleanup(pool_path.remove)
try:
run_process([b"zpool", b"create", b"-m", mount_path.path,
pool_name, pool_path.path])
except OSError as e:
if e.errno == errno.ENOENT:
raise SkipTest(
"Install zpool to run these tests: "
"http://doc-dev.clusterhq.com/using/installing/index.html"
"#optional-zfs-backend-configuration")
raise
test_case.addCleanup(run_process, [b"zpool", b"destroy", pool_name])
return pool_name
class MutatingProcessNode(ProcessNode):
"""Mutate the command being run in order to make tests work.
Come up with something better in
https://clusterhq.atlassian.net/browse/FLOC-125
"""
def __init__(self, to_service):
"""
:param to_service: The VolumeService to which a push is being done.
"""
self.to_service = to_service
ProcessNode.__init__(self, initial_command_arguments=[])
def _mutate(self, remote_command):
"""
Add the pool and mountpoint arguments, which aren't necessary in real
code.
:param remote_command: Original command arguments.
:return: Modified command arguments.
"""
return remote_command[:1] + [
b"--pool", self.to_service.pool._name,
b"--mountpoint", self.to_service.pool._mount_root.path
] + remote_command[1:]
def run(self, remote_command):
return ProcessNode.run(self, self._mutate(remote_command))
def get_output(self, remote_command):
return ProcessNode.get_output(self, self._mutate(remote_command))
@attributes(["from_service", "to_service", "remote"])
class ServicePair(object):
"""
A configuration for testing ``IRemoteVolumeManager``.
:param VolumeService from_service: The origin service.
:param VolumeService to_service: The destination service.
:param IRemoteVolumeManager remote: Talks to ``to_service``.
"""
def create_realistic_servicepair(test):
"""
Create a ``ServicePair`` that uses ZFS for testing
``RemoteVolumeManager``.
:param TestCase test: A unit test.
:return: A new ``ServicePair``.
"""
from_pool = StoragePool(reactor, create_zfs_pool(test),
FilePath(test.mktemp()))
from_service = VolumeService(FilePath(test.mktemp()),
from_pool, reactor=Clock())
from_service.startService()
test.addCleanup(from_service.stopService)
to_pool = StoragePool(reactor, create_zfs_pool(test),
FilePath(test.mktemp()))
to_config = FilePath(test.mktemp())
to_service = VolumeService(to_config, to_pool, reactor=Clock())
to_service.startService()
test.addCleanup(to_service.stopService)
remote = RemoteVolumeManager(MutatingProcessNode(to_service),
to_config)
return ServicePair(from_service=from_service, to_service=to_service,
remote=remote)
def make_volume_options_tests(make_options, extra_arguments=None):
"""
Make a ``TestCase`` to test the ``VolumeService`` specific arguments added
to an ``Options`` class by the ``flocker_volume_options`` class decorator.
:param make_options: A zero-argument callable which will be called to
produce the ``Options`` instance under test.
:param extra_arguments: An optional ``list`` of non-VolumeService related
arguments which are required by the ``Options`` instance under test.
:return: A ``SynchronousTestCase``.
"""
if extra_arguments is None:
extra_arguments = []
def parseOptions(options, argv):
options.parseOptions(argv + extra_arguments)
class VolumeOptionsTests(SynchronousTestCase):
"""
Tests for ``Options`` subclasses decorated with
``flocker_volume_options``.
"""
def test_default_config(self):
"""
By default the config file is ``b'/etc/flocker/volume.json'``.
"""
options = make_options()
parseOptions(options, [])
self.assertEqual(
FilePath(b"/etc/flocker/volume.json"), options["config"])
def test_config(self):
"""
The options class accepts a ``--config`` parameter.
"""
path = b"/foo/bar"
options = make_options()
parseOptions(options, [b"--config", path])
self.assertEqual(FilePath(path), options["config"])
def test_pool(self):
"""
The options class accepts a ``--pool`` parameter.
"""
pool = b"foo-bar"
options = make_options()
parseOptions(options, [b"--pool", pool])
self.assertEqual(pool, options["pool"])
def test_mountpoint(self):
"""
The options class accepts a ``--mountpoint`` parameter.
"""
mountpoint = b"/bar/baz"
options = make_options()
parseOptions(options, [b"--mountpoint", mountpoint])
self.assertEqual(mountpoint, options["mountpoint"])
dummy_options = make_options()
VolumeOptionsTests.__name__ = dummy_options.__class__.__name__ + "Tests"
return VolumeOptionsTests
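# A minimal usage sketch (``VolumeScriptOptions`` is a hypothetical Options
# subclass decorated with ``flocker_volume_options``): the generated TestCase
# is assigned at module level so the test loader can discover it.
#
#   VolumeScriptOptionsTests = make_volume_options_tests(VolumeScriptOptions)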
|
|
import os, sys, codecs, shutil, filecmp, subprocess
# the template_dir is the path where this file lives on disk
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
def ensure_dev_path(debug=True):
rc = subprocess.call(["xcode-select", "-print-path"], stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
if rc == 0 :
return
if debug:
print '[INFO] XCode 4.3+ likely. Searching for developer folders.'
trypath = '/Developer'
if os.path.isdir(trypath):
os.putenv('DEVELOPER_DIR',trypath)
return
trypath = '/Applications/Xcode.app/Contents/Developer'
if os.path.isdir(trypath):
os.putenv('DEVELOPER_DIR',trypath)
return
spotlight_args = ['mdfind','kMDItemDisplayName==Xcode&&kMDItemKind==Application']
spotlight = subprocess.Popen(spotlight_args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
for line in spotlight.stdout.readlines():
trypath = line.rstrip()+'/Contents/Developer'
if os.path.isdir(trypath):
os.putenv('DEVELOPER_DIR',trypath)
return
def read_config(f):
props = {}
if os.path.exists(f):
contents = open(f).read()
for line in contents.splitlines(False):
if line[0:1]=='#': continue
(k,v) = line.split("=")
props[k]=v
return props
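# Illustrative read_config behaviour: given a file containing
#
#   # build settings
#   APP_ID=com.example.app
#   DEPLOYTYPE=development
#
# read_config(path) returns {'APP_ID': 'com.example.app',
# 'DEPLOYTYPE': 'development'}; lines starting with '#' are skipped and each
# remaining line must be a single key=value pair.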
def locate_modules(modules, project_dir, assets_dest_dir, log):
module_lib_search_path = []
module_asset_dirs = []
for module in modules:
if module.js:
# Skip CommonJS modules. These will be processed in a later pass.
continue
module_id = module.manifest.moduleid.lower()
module_version = module.manifest.version
module_lib_name = ('lib%s.a' % module_id).lower()
# check first in the local project
local_module_lib = os.path.join(project_dir, 'modules', 'iphone', module_lib_name)
local = False
if os.path.exists(local_module_lib):
module_lib_search_path.append([module_lib_name, local_module_lib])
local = True
log("[INFO] Detected (local) third-party module: %s" % (local_module_lib))
else:
if module.lib is None:
module_lib_path = module.get_resource(module_lib_name)
log("[ERROR] Third-party module: %s/%s missing library at %s" % (module_id, module_version, module_lib_path))
sys.exit(1)
module_lib_search_path.append([module_lib_name, os.path.abspath(module.lib).rsplit('/',1)[0]])
log("[INFO] Detected third-party module: %s/%s" % (module_id, module_version))
if not local:
# copy module resources
img_dir = module.get_resource('assets', 'images')
if os.path.exists(img_dir):
dest_img_dir = os.path.join(assets_dest_dir, 'modules', module_id, 'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
module_asset_dirs.append([img_dir, dest_img_dir])
# copy in any module assets
module_assets_dir = module.get_resource('assets')
if os.path.exists(module_assets_dir):
module_dir = os.path.join(assets_dest_dir, 'modules', module_id)
module_asset_dirs.append([module_assets_dir, module_dir])
return module_lib_search_path, module_asset_dirs
def link_modules(modules, name, proj_dir, relative=False):
if len(modules)>0:
from pbxproj import PBXProj
proj = PBXProj()
xcode_proj = os.path.join(proj_dir,'%s.xcodeproj'%name,'project.pbxproj')
current_xcode = open(xcode_proj).read()
for tp in modules:
proj.add_static_library(tp[0], tp[1], relative)
out = proj.parse(xcode_proj)
# since xcode changes can be destructive, only write as necessary (if changed)
if current_xcode!=out:
xo = open(xcode_proj, 'w')
xo.write(out)
xo.close()
def create_info_plist(tiapp, template_dir, project_dir, output):
def write_info_plist(infoplist_tmpl):
name = tiapp.properties['name']
appid = tiapp.properties['id']
plist = codecs.open(infoplist_tmpl, encoding='utf-8').read()
plist = plist.replace('__PROJECT_NAME__',name)
plist = plist.replace('__PROJECT_ID__',appid)
plist = plist.replace('__URL__',appid)
urlscheme = name.replace('.','_').replace(' ','').lower()
plist = plist.replace('__URLSCHEME__',urlscheme)
if tiapp.has_app_property('ti.facebook.appid'):
fbid = tiapp.get_app_property('ti.facebook.appid')
plist = plist.replace('__ADDITIONAL_URL_SCHEMES__', '<string>fb%s</string>' % fbid)
else:
plist = plist.replace('__ADDITIONAL_URL_SCHEMES__','')
pf = codecs.open(output,'w', encoding='utf-8')
pf.write(plist)
pf.close()
# if the user has a Info.plist in their project directory, consider
# that a custom override
infoplist_tmpl = os.path.join(project_dir,'Info.plist')
if os.path.exists(infoplist_tmpl):
shutil.copy(infoplist_tmpl,output)
else:
infoplist_tmpl = os.path.join(template_dir,'Info.plist')
write_info_plist(infoplist_tmpl)
def write_debugger_plist(debughost, debugport, debugairkey, debughosts, template_dir, debuggerplist):
debugger_tmpl = os.path.join(template_dir,'debugger.plist')
plist = codecs.open(debugger_tmpl, encoding='utf-8').read()
if debughost:
plist = plist.replace('__DEBUGGER_HOST__',debughost)
plist = plist.replace('__DEBUGGER_PORT__',debugport)
else:
plist = plist.replace('__DEBUGGER_HOST__','')
plist = plist.replace('__DEBUGGER_PORT__','')
if debugairkey:
plist = plist.replace('__DEBUGGER_AIRKEY__',debugairkey)
else:
plist = plist.replace('__DEBUGGER_AIRKEY__','')
if debughosts:
plist = plist.replace('__DEBUGGER_HOSTS__',debughosts)
else:
plist = plist.replace('__DEBUGGER_HOSTS__','')
tempfile = debuggerplist+'.tmp'
pf = codecs.open(tempfile,'w',encoding='utf-8')
pf.write(plist)
pf.close()
if os.path.exists(debuggerplist):
changed = not filecmp.cmp(tempfile, debuggerplist, shallow=False)
else:
changed = True
shutil.move(tempfile, debuggerplist)
return changed
def install_default(image, project_dir, template_dir, dest):
project_resources = os.path.join(project_dir, 'Resources')
platform_resources = os.path.join(project_resources, 'iphone')
template_resources = os.path.join(template_dir, 'resources')
if image is not None:
graphic_path = os.path.join(platform_resources,image)
else:
graphic_path = os.path.join(template_resources, image)
if not os.path.exists(graphic_path):
graphic_path = os.path.join(project_resources,image)
if not os.path.exists(graphic_path):
graphic_path = os.path.join(template_resources,image)
if os.path.exists(graphic_path):
dest_graphic_path = os.path.join(dest,image)
if os.path.exists(dest_graphic_path):
os.remove(dest_graphic_path)
shutil.copy(graphic_path, dest)
def install_logo(tiapp, applogo, project_dir, template_dir, dest):
# copy over the appicon
if applogo==None and tiapp.properties.has_key('icon'):
applogo = tiapp.properties['icon']
install_default(applogo, project_dir, template_dir, dest)
def install_defaults(project_dir, template_dir, dest):
for graphic in os.listdir(os.path.join(template_dir, 'resources')):
install_default(graphic, project_dir, template_dir, dest)
def fix_xcode_script(content,script_name,script_contents):
# fix up xcode compile scripts in build phase
start = 0
while start >= 0:
start = content.find("name = \"%s\";" % script_name, start)
if start > 0:
begin = content.find("shellScript = ",start)
if begin > 0:
end = content.find("};",begin+1)
if end > 0:
before = content[0:begin+15]
after = content[end:]
script = "%s\";\n " % script_contents
content = before + script + after
start = begin
return content
SPLICE_START_MARKER="TI_AUTOGEN_BEGIN"
SPLICE_END_MARKER="TI_AUTOGEN_END"
def splice_code(file, section, replacement):
if not os.path.exists(file):
return False
with open(file, 'r') as fd:
contents = fd.read()
# want to preserve this as part of the preamble
start_search = "//##%s %s" % (SPLICE_START_MARKER, section)
start_marker = contents.find(start_search)
if start_marker == -1:
return False
end_marker = contents.find("//##%s %s" % (SPLICE_END_MARKER, section), start_marker)
if end_marker == -1:
print "[ERROR] Couldn't splice section %s in %s: No end marker" % (section, file)
return False
preamble = contents[0:start_marker+len(start_search)] + "\n"
appendix = contents[end_marker:]
new_contents = preamble + replacement + appendix
if contents != new_contents:
with open(file, 'w') as fd:
fd.write(new_contents)
return True
return False
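# Illustrative splice_code usage (the file name and section are made up):
# given a source file containing
#
#   //##TI_AUTOGEN_BEGIN modules
#   ...previously generated code...
#   //##TI_AUTOGEN_END modules
#
# splice_code(path, 'modules', new_code) replaces everything between the two
# markers with new_code, keeps the marker lines themselves, and returns True
# only when the file contents actually changed.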
|
|
"""
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Dan Wendlandt, Nicira, Inc
#
"""
from abc import abstractmethod
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import exceptions as qexception
from quantum.extensions import extensions
from quantum import manager
from quantum.openstack.common import cfg
from quantum import quota
# L3 Exceptions
class RouterNotFound(qexception.NotFound):
message = _("Router %(router_id)s could not be found")
class RouterInUse(qexception.InUse):
message = _("Router %(router_id)s still has active ports")
class FloatingIPNotFound(qexception.NotFound):
message = _("Floating IP %(floatingip_id)s could not be found")
class ExternalGatewayForFloatingIPNotFound(qexception.NotFound):
message = _("External network %(external_network_id)s is not reachable "
"from subnet %(subnet_id)s. Therefore, cannot associate "
"Port %(port_id)s with a Floating IP.")
class FloatingIPPortAlreadyAssociated(qexception.InUse):
message = _("Cannot associate floating IP %(floating_ip_address)s "
"(%(fip_id)s) with port %(port_id)s "
"using fixed IP %(fixed_ip)s, as that fixed IP already "
"has a floating IP on external network %(net_id)s.")
class L3PortInUse(qexception.InUse):
message = _("Port %(port_id)s has owner %(device_owner)s and therefore"
" cannot be deleted directly via the port API.")
class ExternalNetworkInUse(qexception.InUse):
message = _("External network %(net_id)s cannot be updated to be made "
"non-external, since it has existing gateway ports")
def _validate_uuid_or_none(data, valid_values=None):
if data is None:
return None
return attr._validate_regex(data, attr.UUID_PATTERN)
attr.validators['type:uuid_or_none'] = _validate_uuid_or_none
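# Illustrative behaviour of the validator registered above (mirroring the
# convention of the other validators in quantum.api.v2.attributes, which
# return None on success and an error message string on failure):
#
#   _validate_uuid_or_none(None)             # -> None, explicitly allowed
#   _validate_uuid_or_none('not-a-uuid')     # -> error message string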
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'routers': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:regex': attr.UUID_PATTERN},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'validate': {'type:boolean': None},
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
'external_gateway_info': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None}
},
'floatingips': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'floating_ip_address': {'allow_post': False, 'allow_put': False,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'floating_network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:regex': attr.UUID_PATTERN},
'is_visible': True},
'router_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'port_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'fixed_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True}
},
}
EXTERNAL = 'router:external'
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {EXTERNAL: {'allow_post': True,
'allow_put': True,
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True,
'convert_to': attr.convert_to_boolean,
'validate': {'type:boolean': None},
'enforce_policy': True,
'required_by_policy': True}}}
l3_quota_opts = [
cfg.IntOpt('quota_router',
default=10,
help='number of routers allowed per tenant, -1 for unlimited'),
cfg.IntOpt('quota_floatingip',
default=50,
help='number of floating IPs allowed per tenant, '
'-1 for unlimited'),
]
cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS')
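# Illustrative sketch (not part of the original extension): because the options
# are registered under the 'QUOTAS' group, oslo.config exposes them on that
# group.  Defined for illustration only and never called here.
def _example_quota_defaults():
    return (cfg.CONF.QUOTAS.quota_router,      # 10 unless overridden
            cfg.CONF.QUOTAS.quota_floatingip)  # 50 unless overridden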
class L3(object):
@classmethod
def get_name(cls):
return "Quantum L3 Router"
@classmethod
def get_alias(cls):
return "router"
@classmethod
def get_description(cls):
return ("Router abstraction for basic L3 forwarding"
" between L2 Quantum networks and access to external"
" networks via a NAT gateway.")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/quantum/router/api/v1.0"
@classmethod
def get_updated(cls):
return "2012-07-20T10:00:00-00:00"
@classmethod
def get_resources(cls):
""" Returns Ext Resources """
exts = []
plugin = manager.QuantumManager.get_plugin()
for resource_name in ['router', 'floatingip']:
collection_name = resource_name + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
member_actions = {}
if resource_name == 'router':
member_actions = {'add_router_interface': 'PUT',
'remove_router_interface': 'PUT'}
quota.QUOTAS.register_resource_by_name(resource_name)
controller = base.create_resource(collection_name,
resource_name,
plugin, params,
member_actions=member_actions)
ex = extensions.ResourceExtension(collection_name,
controller,
member_actions=member_actions)
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
class RouterPluginBase(object):
@abstractmethod
def create_router(self, context, router):
pass
@abstractmethod
def update_router(self, context, id, router):
pass
@abstractmethod
def get_router(self, context, id, fields=None):
pass
@abstractmethod
def delete_router(self, context, id):
pass
@abstractmethod
def get_routers(self, context, filters=None, fields=None):
pass
@abstractmethod
def add_router_interface(self, context, router_id, interface_info):
pass
@abstractmethod
def remove_router_interface(self, context, router_id, interface_info):
pass
@abstractmethod
def create_floatingip(self, context, floatingip):
pass
@abstractmethod
def update_floatingip(self, context, id, floatingip):
pass
@abstractmethod
def get_floatingip(self, context, id, fields=None):
pass
@abstractmethod
def delete_floatingip(self, context, id):
pass
@abstractmethod
def get_floatingips(self, context, filters=None, fields=None):
pass
def get_routers_count(self, context, filters=None):
raise NotImplementedError()
def get_floatingips_count(self, context, filters=None):
raise NotImplementedError()
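# Illustrative sketch (not part of the original extension): a minimal stub built
# on RouterPluginBase.  Because the base class does not set ABCMeta as its
# metaclass, the @abstractmethod decorators are advisory only, so even a partial
# override like this is importable; a real plugin must implement the full
# interface it advertises.
class _ExampleNoopRouterPlugin(RouterPluginBase):
    def create_router(self, context, router):
        return router
    def get_routers(self, context, filters=None, fields=None):
        return []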
|
|
from stripe.test.helper import (
StripeApiTestCase, MyUpdateable
)
class UpdateableAPIResourceTests(StripeApiTestCase):
def setUp(self):
super(UpdateableAPIResourceTests, self).setUp()
self.mock_response({
'thats': 'it'
})
self.obj = MyUpdateable.construct_from({
'id': 'myid',
'foo': 'bar',
'baz': 'boz',
'metadata': {
'size': 'l',
'score': 4,
'height': 10
}
}, 'mykey')
def checkSave(self):
self.assertTrue(self.obj is self.obj.save())
self.assertEqual('it', self.obj.thats)
# TODO: Should we force id to be retained?
# self.assertEqual('myid', obj.id)
self.assertRaises(AttributeError, getattr, self.obj, 'baz')
def test_idempotent_save(self):
self.obj.baz = 'updated'
self.obj.save(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'metadata': {},
'baz': 'updated',
},
{
'Idempotency-Key': 'foo',
},
)
def test_save(self):
self.obj.baz = 'updated'
self.obj.other = 'newval'
self.obj.metadata.size = 'm'
self.obj.metadata.info = 'a2'
self.obj.metadata.height = None
self.checkSave()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'baz': 'updated',
'other': 'newval',
'metadata': {
'size': 'm',
'info': 'a2',
'height': '',
}
},
None
)
def test_add_key_to_nested_object(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'size': 'l',
'score': 4,
'height': 10
}
}, 'mykey')
acct.legal_entity['first_name'] = 'bob'
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'legal_entity': {
'first_name': 'bob',
}
},
None
)
def test_save_nothing(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'metadata': {
'key': 'value',
}
}, 'mykey')
self.assertTrue(acct is acct.save())
# Note: ideally, we'd want the library to NOT issue requests in this
# case (i.e. the assert should actually be `assert_not_called()`).
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{'metadata': {}},
None
)
def test_replace_nested_object(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'last_name': 'smith',
}
}, 'mykey')
acct.legal_entity = {
'first_name': 'bob',
}
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'legal_entity': {
'first_name': 'bob',
'last_name': '',
}
},
None
)
def test_array_setting(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {}
}, 'mykey')
acct.legal_entity.additional_owners = [{'first_name': 'Bob'}]
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'legal_entity': {
'additional_owners': [
{'first_name': 'Bob'}
]
}
},
None
)
def test_array_none(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'additional_owners': None,
}
}, 'mykey')
acct.foo = 'bar'
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'foo': 'bar',
'legal_entity': {},
},
None
)
def test_array_insertion(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'additional_owners': []
}
}, 'mykey')
acct.legal_entity.additional_owners.append({'first_name': 'Bob'})
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'legal_entity': {
'additional_owners': {
'0': {'first_name': 'Bob'},
}
}
},
None
)
def test_array_update(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'additional_owners': [
{'first_name': 'Bob'},
{'first_name': 'Jane'}
]
}
}, 'mykey')
acct.legal_entity.additional_owners[1].first_name = 'Janet'
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'legal_entity': {
'additional_owners': {
'0': {},
'1': {'first_name': 'Janet'}
}
}
},
None
)
def test_array_noop(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'additional_owners': [{'first_name': 'Bob'}]
},
'currencies_supported': ['usd', 'cad']
}, 'mykey')
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'legal_entity': {'additional_owners': {'0': {}}}
},
None
)
def test_hash_noop(self):
acct = MyUpdateable.construct_from({
'id': 'myid',
'legal_entity': {
'address': {'line1': '1 Two Three'}
}
}, 'mykey')
self.assertTrue(acct is acct.save())
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{'legal_entity': {'address': {}}},
None
)
def test_save_replace_metadata_with_number(self):
self.obj.baz = 'updated'
self.obj.other = 'newval'
self.obj.metadata = 3
self.checkSave()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'baz': 'updated',
'other': 'newval',
'metadata': 3,
},
None
)
def test_save_overwrite_metadata(self):
self.obj.metadata = {}
self.checkSave()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'metadata': {
'size': '',
'score': '',
'height': '',
}
},
None
)
def test_save_replace_metadata(self):
self.obj.baz = 'updated'
self.obj.other = 'newval'
self.obj.metadata = {
'size': 'm',
'info': 'a2',
'score': 4,
}
self.checkSave()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'baz': 'updated',
'other': 'newval',
'metadata': {
'size': 'm',
'info': 'a2',
'height': '',
'score': 4,
}
},
None
)
def test_save_update_metadata(self):
self.obj.baz = 'updated'
self.obj.other = 'newval'
self.obj.metadata.update({
'size': 'm',
'info': 'a2',
'score': 4,
})
self.checkSave()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/myupdateables/myid',
{
'baz': 'updated',
'other': 'newval',
'metadata': {
'size': 'm',
'info': 'a2',
'score': 4,
}
},
None
)
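# Summary of the serialization convention the assertions above exercise
# (derived from the expected request bodies in these tests, not from library
# internals):
#   * only modified top-level attributes are sent;
#   * a partially modified nested hash is sent with untouched keys omitted and
#     keys set to None serialized as '' (interpreted as "unset");
#   * replacing a whole nested hash sends '' for every key that disappeared;
#   * lists of nested objects are sent as indexed dicts ({'0': ..., '1': ...})
#     when appended to or updated in place, and as plain lists when assigned
#     wholesale.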
|
|
from dal_5 import DAL5
################################################################################
class DAL6:
# Disk Abstraction Layer
def __init__(self, blocks, size):
self.__disk = DAL5(blocks, size)
# Get Context Object
def new_context(self):
return Context(self.__disk)
# Seed Control Interface
def seed(self, data=None):
return self.__disk.seed(data)
# Probability Of Failure
def fail(self, probability):
self.__disk.fail(probability)
# Dump To File
def dump(self, name):
self.__disk.dump(name)
# Load From File
def load(self, name, abstract):
assert type(abstract) is bool
self.__disk.load(name, abstract)
################################################################################
class Context:
# DEFAULTS
BACK = '..'
RELATIVE = '.'
# Relative Path Context
def __init__(self, disk_object):
assert disk_object.__class__ is DAL5
self.__disk = disk_object
self.__cwd = ''
# Change Current Directory
def chdir(self, path):
path = self.__resolve_path(path)
assert self.__disk.is_directory(path)
self.__cwd = path
# Get Current Directory
def getcwd(self):
return self.__cwd
# List Directory Contents
def listdir(self, path):
path = self.__resolve_path(path)
assert self.__disk.is_directory(path)
return self.__disk.list_directory(path)
# Make New Directory
def mkdir(self, path):
path = self.__resolve_path(path)
assert not self.__disk.exists(path)
self.__disk.make_directory(path)
# Remove Old Directory
def rmdir(self, path):
path = self.__resolve_path(path)
assert self.__disk.is_directory(path)
self.__disk.remove_directory(path)
# Open A File
def file(self, path, mode):
path = self.__resolve_path(path)
return File(self.__disk, path, mode)
# Remove A File
def remove(self, path):
path = self.__resolve_path(path)
assert self.__disk.is_file(path)
self.__disk.remove_file(path)
# Test For Existence
def exists(self, path):
assert type(path) is str
if path:
try:
path = self.__resolve_path(path)
if path:
return self.__disk.exists(path)
else:
return True
except:
return False
return True
# Check If File
def isfile(self, path):
path = self.__resolve_path(path)
return self.__disk.is_file(path)
# Check If Directory
def isdir(self, path):
path = self.__resolve_path(path)
return self.__disk.is_directory(path)
# Private Utility Function
def __resolve_path(self, path):
assert type(path) is str
parts = path.split(self.__disk.PATH_SEPARATOR)
if parts[0] == self.RELATIVE:
if len(parts) == 1:
return self.__cwd
for part in parts[1:]:
assert part != self.BACK and part != self.RELATIVE
path = self.__disk.PATH_SEPARATOR.join(parts[1:])
if self.__cwd:
return self.__cwd + self.__disk.PATH_SEPARATOR + path
else:
return path
elif parts[0] == self.BACK:
assert self.__cwd
if len(parts) == 1:
cwd_parts = self.__cwd.split(self.__disk.PATH_SEPARATOR)
del cwd_parts[-1]
if cwd_parts:
return self.__disk.PATH_SEPARATOR.join(cwd_parts)
else:
return ''
else:
cwd_parts = self.__cwd.split(self.__disk.PATH_SEPARATOR)
del cwd_parts[-1]
index = 1
while index != len(parts) and parts[index] == self.BACK:
del cwd_parts[-1]
index += 1
parts = parts[index:]
for part in parts:
assert part != self.BACK and part != self.RELATIVE
path = cwd_parts + parts
if path:
return self.__disk.PATH_SEPARATOR.join(path)
else:
return ''
else:
return path
################################################################################
class File:
# MODES
READ = 'r'
WRITE = 'w'
APPEND = 'a'
# File Accessor Object
def __init__(self, disk_object, path, mode):
assert disk_object.__class__ is DAL5
assert type(path) is str and path
assert mode == self.READ or mode == self.WRITE or mode == self.APPEND
self.__disk = disk_object
if self.__disk.exists(path):
assert self.__disk.is_file(path)
else:
assert mode == self.WRITE
self.__disk.make_file(path)
parts = path.split(self.__disk.PATH_SEPARATOR)
self.closed = self.__closed = False
self.mode = self.__mode = mode
self.name = self.__name = parts[-1]
self.path = self.__path = path
if mode == self.WRITE:
self.__stream = ''
else:
self.__stream = self.__disk.read_file(path)
if mode == self.APPEND:
self.__pointer = len(self.__stream)
else:
self.__pointer = 0
# Permanently Close File
def close(self, force):
assert not self.__closed
success = True
if self.__disk.exists(self.__path):
self.__disk.write_file(self.__path, self.__stream)
else:
if force:
try:
self.__disk.make_file(self.__path)
self.__disk.write_file(self.__path, self.__stream)
except:
success = False
else:
success = False
self.closed = self.__closed = True
return success
# Read From File
def read(self, size=None):
assert not self.__closed
assert self.__mode == self.READ
if size is None:
size = (2 ** 31) - 1
else:
assert type(size) is int and size > 0
data = self.__stream[self.__pointer:self.__pointer+size]
pointer = self.__pointer + size
if pointer > len(self.__stream):
pointer = len(self.__stream)
self.__pointer = pointer
return data
# Change Pointer Value
def seek(self, offset, from_start=False):
assert type(offset) is int
assert type(from_start) is bool
assert not self.__closed
assert self.__mode != self.APPEND
if from_start:
assert 0 <= offset <= len(self.__stream)
self.__pointer = offset
else:
pointer = self.__pointer + offset
assert 0 <= pointer <= len(self.__stream)
self.__pointer = pointer
# Return Pointer Value
def tell(self):
return self.__pointer
# Truncate This File
def truncate(self, size=None):
assert not self.__closed
assert self.__mode == self.WRITE
if size is None:
self.__stream = self.__stream[:self.__pointer]
else:
assert type(size) is int and size >= 0
self.__stream = self.__stream[:size]
if self.__pointer > len(self.__stream):
self.__pointer = len(self.__stream)
# Write To File
def write(self, string):
assert type(string) is str
assert not self.__closed
assert self.__mode != self.READ
head = self.__stream[:self.__pointer]
tail = self.__stream[self.__pointer+len(string):]
self.__stream = head + string + tail
self.__pointer += len(string)
# Write All Lines
def writelines(self, sequence, separator=None):
assert type(sequence) is list
if separator is None:
for line in sequence:
self.write(line)
else:
assert type(separator) is str
self.write(separator.join(sequence))
# Return File Size
def size(self):
return len(self.__stream)
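# Illustrative usage sketch (not part of the original module): how the Context
# and File objects above are meant to be combined.  `disk` is assumed to be a
# DAL6 instance; the constructor arguments it needs are defined by the
# underlying DAL5 layer and are not shown here.
def _example_session(disk):
    ctx = disk.new_context()
    ctx.mkdir('docs')                   # top-level directory
    f = ctx.file('notes', File.WRITE)   # created on open because mode is WRITE
    f.write('hello')
    f.close(force=False)                # flush the in-memory stream to disk
    return ctx.isfile('notes'), ctx.listdir('docs')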
################################################################################
def test():
# Not Yet Implemented
pass
################################################################################
if __name__ == '__main__':
test()
|
|
# pylint: disable=missing-docstring
import mock
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS, connections
from django.test.utils import CaptureQueriesContext
from guardian.conf.settings import ANONYMOUS_USER_NAME
from guardian.models import UserObjectPermission
from guardian.shortcuts import assign_perm, remove_perm
from rest_framework import exceptions, status
from rest_framework.test import APIRequestFactory, force_authenticate
from resolwe.flow.models import Collection, Data, DescriptorSchema, Entity, Process
from resolwe.flow.views import CollectionViewSet, DataViewSet, EntityViewSet, ProcessViewSet
from resolwe.test import ResolweAPITestCase, TestCase
factory = APIRequestFactory() # pylint: disable=invalid-name
MESSAGES = {
'NOT_FOUND': 'Not found.',
'NO_PERMS': 'You do not have permission to perform this action.',
}
class TestDataViewSetCase(TestCase):
def setUp(self):
super().setUp()
self.data_viewset = DataViewSet.as_view(actions={
'get': 'list',
'post': 'create',
})
self.data_detail_viewset = DataViewSet.as_view(actions={
'get': 'retrieve',
})
self.collection = Collection.objects.create(contributor=self.contributor)
self.proc = Process.objects.create(
type='data:test:process',
slug='test-process',
version='1.0.0',
contributor=self.contributor,
entity_type='test-schema',
entity_descriptor_schema='test-schema',
input_schema=[{'name': 'input_data', 'type': 'data:test:', 'required': False}],
)
self.descriptor_schema = DescriptorSchema.objects.create(
slug='test-schema',
version='1.0.0',
contributor=self.contributor,
)
assign_perm('view_collection', self.contributor, self.collection)
assign_perm('add_collection', self.contributor, self.collection)
assign_perm('view_process', self.contributor, self.proc)
assign_perm('view_descriptorschema', self.contributor, self.descriptor_schema)
def test_prefetch(self):
request = factory.get('/', '', format='json')
force_authenticate(request, self.contributor)
for _ in range(10):
Data.objects.create(contributor=self.contributor, process=self.proc)
# Check prefetch. The number of queries without prefetch depends
# on the number of Data objects. With prefetch 56 queries,
# without prefetch 73 queries. Python 2 and 3 produce a slightly
# different number of queries, so we set a loose constraint in the test.
conn = connections[DEFAULT_DB_ALIAS]
with CaptureQueriesContext(conn) as captured_queries:
self.data_viewset(request)
self.assertLess(len(captured_queries), 62)
def test_descriptor_schema(self):
# Descriptor schema can be assigned by slug.
data = {'process': 'test-process', 'descriptor_schema': 'test-schema'}
request = factory.post('/', data, format='json')
force_authenticate(request, self.contributor)
self.data_viewset(request)
data = Data.objects.latest()
self.assertEqual(data.descriptor_schema, self.descriptor_schema)
# Descriptor schema can be assigned by id.
data = {'process': 'test-process', 'descriptor_schema': self.descriptor_schema.pk}
request = factory.post('/', data, format='json')
force_authenticate(request, self.contributor)
self.data_viewset(request)
data = Data.objects.latest()
self.assertEqual(data.descriptor_schema, self.descriptor_schema)
def test_use_latest_with_perm(self):
Process.objects.create(
type='test:process',
name='Test process',
slug='test-process',
version='2.0.0',
contributor=self.contributor,
)
DescriptorSchema.objects.create(
name='Test schema',
slug='test-schema',
version='2.0.0',
contributor=self.contributor,
)
data = {'process': 'test-process', 'descriptor_schema': 'test-schema'}
request = factory.post('/', data, format='json')
force_authenticate(request, self.contributor)
self.data_viewset(request)
data = Data.objects.latest()
# Check that older versions are used if the user doesn't have permissions on the latest
self.assertEqual(data.process, self.proc)
self.assertEqual(data.descriptor_schema, self.descriptor_schema)
def test_public_create(self):
assign_perm('view_process', AnonymousUser(), self.proc)
data = {'process': 'test-process'}
request = factory.post('/', data, format='json')
resp = self.data_viewset(request)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
self.assertEqual(Data.objects.count(), 1)
data = Data.objects.latest()
self.assertEqual(data.contributor.username, ANONYMOUS_USER_NAME)
self.assertEqual(data.process.slug, 'test-process')
def test_inherit_permissions(self):
data_ctype = ContentType.objects.get_for_model(Data)
entity_ctype = ContentType.objects.get_for_model(Entity)
assign_perm('view_collection', self.user, self.collection)
assign_perm('add_collection', self.user, self.collection)
post_data = {'process': 'test-process', 'collections': [self.collection.pk]}
request = factory.post('/', post_data, format='json')
force_authenticate(request, self.contributor)
resp = self.data_viewset(request)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
data = Data.objects.last()
entity = Entity.objects.last()
self.assertTrue(self.user.has_perm('view_data', data))
self.assertTrue(self.user.has_perm('view_entity', entity))
self.assertTrue(self.user.has_perm('add_entity', entity))
self.assertEqual(UserObjectPermission.objects.filter(content_type=data_ctype, user=self.user).count(), 1)
self.assertEqual(UserObjectPermission.objects.filter(content_type=entity_ctype, user=self.user).count(), 2)
# Add some permissions and run another process in same entity.
assign_perm('edit_collection', self.user, self.collection)
assign_perm('share_entity', self.user, entity)
post_data = {
'process': 'test-process',
'collections': [self.collection.pk],
'input': {'input_data': data.pk},
}
request = factory.post('/', post_data, format='json')
force_authenticate(request, self.contributor)
resp = self.data_viewset(request)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
data_2 = Data.objects.last()
self.assertTrue(self.user.has_perm('view_data', data_2))
self.assertTrue(self.user.has_perm('edit_data', data_2))
self.assertTrue(self.user.has_perm('share_data', data_2))
self.assertFalse(self.user.has_perm('edit_entity', entity))
self.assertEqual(UserObjectPermission.objects.filter(content_type=data_ctype, user=self.user).count(), 4)
self.assertEqual(UserObjectPermission.objects.filter(content_type=entity_ctype, user=self.user).count(), 3)
def test_create_entity(self):
data = {'process': 'test-process', 'collections': [self.collection.pk]}
request = factory.post('/', data, format='json')
force_authenticate(request, self.contributor)
resp = self.data_viewset(request)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# Test that one Entity was created and that it was added to the same collection as the Data object.
self.assertEqual(Entity.objects.count(), 1)
self.assertEqual(Entity.objects.first().collections.count(), 1)
self.assertEqual(Entity.objects.first().collections.first().pk, self.collection.pk)
def test_collections_fields(self):
# Create data object.
data = {'process': 'test-process', 'collections': [self.collection.pk]}
request = factory.post('/', data, format='json')
force_authenticate(request, self.contributor)
response = self.data_viewset(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data = Data.objects.last()
entity = Entity.objects.last()
# Ensure collections/entities are not present in lists.
request = factory.get('/', '', format='json')
force_authenticate(request, self.contributor)
response = self.data_viewset(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertNotIn('collections', response.data[0].keys())
self.assertNotIn('entities', response.data[0].keys())
# Check that query returns the correct collection ids.
request = factory.get('/', '', format='json')
force_authenticate(request, self.contributor)
response = self.data_detail_viewset(request, pk=data.pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['collections'], [self.collection.pk])
self.assertEqual(response.data['entities'], [entity.pk])
# Check that hydrate_{collections,entities} works. Also ensure that the serializer
# doesn't crash if hydrate_data is also set (could cause infinite recursion).
request = factory.get('/', {
'hydrate_collections': '1',
'hydrate_entities': '1',
'hydrate_data': '1',
}, format='json')
force_authenticate(request, self.contributor)
response = self.data_detail_viewset(request, pk=data.pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['collections'][0]['id'], self.collection.pk)
self.assertEqual(response.data['entities'][0]['id'], entity.pk)
def test_process_is_active(self):
# Do not allow creating data of inactive processes
Process.objects.filter(slug='test-process').update(is_active=False)
data = {'process': 'test-process'}
request = factory.post('/', data, format='json')
force_authenticate(request, self.contributor)
response = self.data_viewset(request)
self.assertEqual(response.status_code, 400)
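# Illustrative sketch (not part of the original tests): the request pattern
# repeated throughout these test cases -- build a request with
# APIRequestFactory, authenticate it, dispatch it to a viewset -- factored into
# a module-level helper.  Defined for illustration only and not used by the
# tests above or below.
def _dispatch(viewset, user, method='get', payload='', **view_kwargs):
    request = getattr(factory, method)('/', payload, format='json')
    force_authenticate(request, user)
    return viewset(request, **view_kwargs)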
class TestCollectionViewSetCase(TestCase):
def setUp(self):
super().setUp()
self.checkslug_viewset = CollectionViewSet.as_view(actions={
'get': 'slug_exists',
})
self.add_data_viewset = CollectionViewSet.as_view(actions={
'post': 'add_data',
})
self.remove_data_viewset = CollectionViewSet.as_view(actions={
'post': 'remove_data',
})
self.collection_detail_viewset = CollectionViewSet.as_view(actions={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy',
})
self.collection_list_viewset = CollectionViewSet.as_view(actions={
'get': 'list',
'post': 'create',
})
self.detail_url = lambda pk: reverse('resolwe-api:collection-detail', kwargs={'pk': pk})
def _create_data(self):
process = Process.objects.create(
name='Test process',
contributor=self.contributor,
)
return Data.objects.create(
name='Test data',
contributor=self.contributor,
process=process,
)
def _create_entity(self):
return Entity.objects.create(
name='Test entity',
contributor=self.contributor,
)
def test_set_descriptor_schema(self):
d_schema = DescriptorSchema.objects.create(slug="new-schema", name="New Schema", contributor=self.contributor)
data = {
'name': 'Test collection',
'descriptor_schema': 'new-schema',
}
request = factory.post('/', data=data, format='json')
force_authenticate(request, self.admin)
self.collection_list_viewset(request)
self.assertEqual(Collection.objects.count(), 1)
self.assertEqual(Collection.objects.first().descriptor_schema, d_schema)
def test_change_descriptor_schema(self):
collection = Collection.objects.create(slug="collection1", name="Collection 1", contributor=self.contributor)
d_schema = DescriptorSchema.objects.create(slug="new-schema", name="New Schema", contributor=self.contributor)
request = factory.patch(self.detail_url(collection.pk), {'descriptor_schema': 'new-schema'}, format='json')
force_authenticate(request, self.admin)
self.collection_detail_viewset(request, pk=collection.pk)
collection.refresh_from_db()
self.assertEqual(collection.descriptor_schema, d_schema)
def test_change_slug(self):
collection1 = Collection.objects.create(name="Collection", contributor=self.contributor)
collection2 = Collection.objects.create(name="Collection", contributor=self.contributor)
self.assertEqual(collection1.slug, 'collection')
self.assertEqual(collection2.slug, 'collection-2')
request = factory.patch(self.detail_url(collection1.pk), {'name': 'Collection', 'slug': None}, format='json')
force_authenticate(request, self.admin)
response = self.collection_detail_viewset(request, pk=collection1.pk)
self.assertEqual(response.data['slug'], 'collection')
request = factory.patch(self.detail_url(collection2.pk), {'slug': 'collection-3'}, format='json')
force_authenticate(request, self.admin)
response = self.collection_detail_viewset(request, pk=collection2.pk)
self.assertEqual(response.data['slug'], 'collection-3')
request = factory.patch(self.detail_url(collection2.pk), {'slug': 'collection'}, format='json')
force_authenticate(request, self.admin)
response = self.collection_detail_viewset(request, pk=collection2.pk)
self.assertContains(response, 'already taken', status_code=400)
def test_check_slug(self):
Collection.objects.create(slug="collection1", name="Collection 1", contributor=self.admin)
# unauthorized
request = factory.get('/', {'name': 'collection1'}, format='json')
resp = self.checkslug_viewset(request)
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.data, None)
# existing slug
request = factory.get('/', {'name': 'collection1'}, format='json')
force_authenticate(request, self.admin)
resp = self.checkslug_viewset(request)
self.assertEqual(resp.data, True)
# existing slug - iexact
request = factory.get('/', {'name': 'Collection1'}, format='json')
force_authenticate(request, self.admin)
resp = self.checkslug_viewset(request)
self.assertEqual(resp.data, True)
# non-existing slug
request = factory.get('/', {'name': 'new-collection'}, format='json')
force_authenticate(request, self.admin)
resp = self.checkslug_viewset(request)
self.assertEqual(resp.data, False)
# bad query parameter
request = factory.get('/', {'bad': 'parameter'}, format='json')
force_authenticate(request, self.admin)
resp = self.checkslug_viewset(request)
self.assertEqual(resp.status_code, 400)
def test_add_remove_data(self):
c = Collection.objects.create(slug="collection1", name="Collection 1", contributor=self.contributor)
assign_perm('view_collection', self.contributor, c)
assign_perm('edit_collection', self.contributor, c)
assign_perm('share_collection', self.contributor, c)
proc = Process.objects.create(type='test:process', name='Test process', contributor=self.contributor)
d = Data.objects.create(contributor=self.contributor, slug='test1', process=proc)
request = factory.post(self.detail_url(c.pk), {'ids': [str(d.pk)]}, format='json')
force_authenticate(request, self.contributor)
# user w/o permissions cannot add data
resp = self.add_data_viewset(request, pk=c.pk)
self.assertEqual(resp.data['detail'], MESSAGES['NO_PERMS'])
self.assertEqual(c.data.count(), 0)
assign_perm('add_collection', self.contributor, c)
# user w/ permissions can add data
resp = self.add_data_viewset(request, pk=c.pk)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(c.data.count(), 1)
request = factory.post(self.detail_url(c.pk), {'ids': [str(d.pk)]}, format='json')
force_authenticate(request, self.contributor)
remove_perm('add_collection', self.contributor, c)
# user w/o permissions cannot remove data
resp = self.remove_data_viewset(request, pk=c.pk)
self.assertEqual(resp.data['detail'], MESSAGES['NO_PERMS'])
self.assertEqual(c.data.count(), 1)
assign_perm('add_collection', self.contributor, c)
# user w/ permissions can remove data
resp = self.remove_data_viewset(request, pk=c.pk)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(c.data.count(), 0)
request = factory.post(self.detail_url(c.pk), {'ids': ['42']}, format='json')
force_authenticate(request, self.contributor)
resp = self.remove_data_viewset(request, pk=c.pk)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(c.data.count(), 0)
def test_delete(self):
collection = Collection.objects.create(
name="Test collection",
contributor=self.contributor,
)
data_1, data_2 = self._create_data(), self._create_data()
entity_1, entity_2 = self._create_entity(), self._create_entity()
collection.data.add(data_1, data_2)
collection.entity_set.add(entity_1, entity_2)
assign_perm("view_collection", self.user, collection)
assign_perm("edit_collection", self.user, collection)
assign_perm("view_data", self.user, data_1)
assign_perm("view_data", self.user, data_2)
assign_perm("edit_data", self.user, data_1)
assign_perm("view_entity", self.user, entity_1)
assign_perm("view_entity", self.user, entity_2)
assign_perm("edit_entity", self.user, entity_1)
request = factory.delete(self.detail_url(collection.pk))
force_authenticate(request, self.user)
self.collection_detail_viewset(request, pk=collection.pk)
self.assertTrue(Data.objects.filter(pk=data_1.pk).exists())
self.assertTrue(Data.objects.filter(pk=data_2.pk).exists())
self.assertTrue(Entity.objects.filter(pk=entity_1.pk).exists())
self.assertTrue(Entity.objects.filter(pk=entity_2.pk).exists())
# Recreate the initial state and test with `delete_content` flag.
collection = Collection.objects.create(
name="Test collection",
contributor=self.contributor,
)
collection.data.add(data_1, data_2)
collection.entity_set.add(entity_1, entity_2)
assign_perm("view_collection", self.user, collection)
assign_perm("edit_collection", self.user, collection)
request = factory.delete('{}?delete_content=1'.format(self.detail_url(collection.pk)))
force_authenticate(request, self.user)
self.collection_detail_viewset(request, pk=collection.pk)
# Only objects with `edit` permission can be deleted.
self.assertFalse(Data.objects.filter(pk=data_1.pk).exists())
self.assertTrue(Data.objects.filter(pk=data_2.pk).exists())
self.assertFalse(Entity.objects.filter(pk=entity_1.pk).exists())
self.assertTrue(Entity.objects.filter(pk=entity_2.pk).exists())
class ProcessTestCase(ResolweAPITestCase):
def setUp(self):
self.resource_name = 'process'
self.viewset = ProcessViewSet
super().setUp()
def test_create_new(self):
post_data = {
'name': 'Test process',
'slug': 'test-process',
'type': 'data:test:',
}
# Normal user is not allowed to create new processes.
resp = self._post(post_data, self.contributor)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
# Superuser can create process.
resp = self._post(post_data, self.admin)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
def test_is_active(self):
post_data = {
'name': 'Test process',
'slug': 'test-process',
'type': 'data:test:',
'is_active': False,
}
# is_active can not be set through API and is True by default
response = self._post(post_data, self.admin)
self.assertTrue(response.data['is_active'])
# is_active should not be changed through API
process_id = response.data['id']
response = self._patch(process_id, {'is_active': False}, self.admin)
self.assertEqual(response.status_code, 405) # PATCH not allowed on process
class EntityViewSetTest(TestCase):
def setUp(self):
super().setUp()
self.collection = Collection.objects.create(name="Test Collection", contributor=self.contributor)
self.collection2 = Collection.objects.create(name="Test Collection 2", contributor=self.contributor)
self.entity = Entity.objects.create(name="Test entity", contributor=self.contributor)
process = Process.objects.create(name="Test process", contributor=self.contributor)
self.data = Data.objects.create(name="Test data", contributor=self.contributor, process=process)
self.data_2 = Data.objects.create(name="Test data 2", contributor=self.contributor, process=process)
# another Data object to make sure that other objects are not processed
Data.objects.create(name="Dummy data", contributor=self.contributor, process=process)
self.entity.data.add(self.data)
self.entity.collections.add(self.collection2)
assign_perm('add_collection', self.contributor, self.collection)
assign_perm('add_entity', self.contributor, self.entity)
assign_perm('view_collection', self.contributor, self.collection)
assign_perm('view_collection', self.contributor, self.collection2)
assign_perm('view_entity', self.contributor, self.entity)
self.entityviewset = EntityViewSet()
self.entity_detail_viewset = EntityViewSet.as_view(actions={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy',
})
self.entity_list_viewset = EntityViewSet.as_view(actions={
'get': 'list',
'post': 'create',
})
self.detail_url = lambda pk: reverse('resolwe-api:entity-detail', kwargs={'pk': pk})
def _create_data(self):
process = Process.objects.create(
name='Test process',
contributor=self.contributor,
)
return Data.objects.create(
name='Test data',
contributor=self.contributor,
process=process,
)
def test_list_filter_collections(self):
request = factory.get('/', {}, format='json')
force_authenticate(request, self.contributor)
resp = self.entity_list_viewset(request)
self.assertEqual(len(resp.data), 1)
request = factory.get('/', {'collections': 999999}, format='json')
force_authenticate(request, self.contributor)
resp = self.entity_list_viewset(request)
self.assertEqual(len(resp.data), 0)
request = factory.get('/', {'collections': self.collection.pk}, format='json')
force_authenticate(request, self.contributor)
resp = self.entity_list_viewset(request)
self.assertEqual(len(resp.data), 0)
request = factory.get('/', {'collections': self.collection2.pk}, format='json')
force_authenticate(request, self.contributor)
resp = self.entity_list_viewset(request)
self.assertEqual(len(resp.data), 1)
def test_add_to_collection(self):
request_mock = mock.MagicMock(data={'ids': [self.collection.pk]}, user=self.contributor)
self.entityviewset.get_object = lambda: self.entity
self.assertEqual(self.entity.collections.count(), 1)
self.entityviewset.add_to_collection(request_mock)
self.assertEqual(self.collection.data.count(), 1)
self.assertEqual(self.entity.collections.count(), 2)
def test_remove_from_collection(self):
# Manually add Entity and its Data objects to the Collection
self.entity.collections.add(self.collection.pk)
self.collection.data.add(self.data)
request_mock = mock.MagicMock(data={'ids': [self.collection.pk]}, user=self.contributor)
self.entityviewset.get_object = lambda: self.entity
self.assertEqual(self.entity.collections.count(), 2)
self.entityviewset.remove_from_collection(request_mock)
self.assertEqual(self.collection.data.count(), 0)
self.assertEqual(self.entity.collections.count(), 1)
def test_add_remove_permissions(self):
request_mock = mock.MagicMock(data={'ids': [self.collection.pk]}, user=self.contributor)
self.entityviewset.get_object = lambda: self.entity
remove_perm('add_collection', self.contributor, self.collection)
with self.assertRaises(exceptions.PermissionDenied):
self.entityviewset.remove_from_collection(request_mock)
with self.assertRaises(exceptions.PermissionDenied):
self.entityviewset.add_to_collection(request_mock)
def test_add_data(self):
self.entity.collections.add(self.collection)
request_mock = mock.MagicMock(data={'ids': [self.data.pk]}, user=self.contributor)
self.entityviewset.get_object = lambda: self.entity
self.entityviewset.add_data(request_mock)
self.assertEqual(self.entity.data.count(), 1)
self.assertEqual(self.collection.data.count(), 1)
def test_delete(self):
entity = Entity.objects.create(
name="Test entity",
contributor=self.contributor,
)
data_1, data_2 = self._create_data(), self._create_data()
entity.data.add(data_1, data_2)
assign_perm('view_entity', self.user, entity)
assign_perm('edit_entity', self.user, entity)
assign_perm('view_data', self.user, data_1)
assign_perm('view_data', self.user, data_2)
assign_perm('edit_data', self.user, data_1)
request = factory.delete(self.detail_url(entity.pk))
force_authenticate(request, self.user)
self.entity_detail_viewset(request, pk=entity.pk)
self.assertTrue(Data.objects.filter(pk=data_1.pk).exists())
self.assertTrue(Data.objects.filter(pk=data_2.pk).exists())
# Recreate the initial state and test with `delete_content` flag.
entity = Entity.objects.create(
name="Test entity",
contributor=self.contributor,
)
entity.data.add(data_1, data_2)
assign_perm('view_entity', self.user, entity)
assign_perm('edit_entity', self.user, entity)
request = factory.delete('{}?delete_content=1'.format(self.detail_url(entity.pk)))
force_authenticate(request, self.user)
self.entity_detail_viewset(request, pk=entity.pk)
# Only objects with `edit` permission can be deleted.
self.assertFalse(Data.objects.filter(pk=data_1.pk).exists())
self.assertTrue(Data.objects.filter(pk=data_2.pk).exists())
# Ensure that deletion works correctly when all data objects of an entity
# are deleted.
entity = Entity.objects.create(
name="Test entity",
contributor=self.contributor,
)
assign_perm('view_entity', self.user, entity)
assign_perm('edit_entity', self.user, entity)
assign_perm('edit_data', self.user, data_2)
entity.data.add(data_2)
request = factory.delete('{}?delete_content=1'.format(self.detail_url(entity.pk)))
force_authenticate(request, self.user)
response = self.entity_detail_viewset(request, pk=entity.pk)
self.assertEqual(response.status_code, 204)
self.assertFalse(Entity.objects.filter(pk=entity.pk).exists())
self.assertFalse(Data.objects.filter(pk=data_2.pk).exists())
|
|
# -*- coding: utf-8 -*-
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseGone
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import DetailView
from django.views.generic import FormView
from django.views.generic import ListView
from django.views.generic import UpdateView
from organizations.backends import invitation_backend
from organizations.backends import registration_backend
from organizations.forms import OrganizationAddForm
from organizations.forms import OrganizationForm
from organizations.forms import OrganizationUserAddForm
from organizations.forms import OrganizationUserForm
from organizations.forms import SignUpForm
from organizations.utils import create_organization
from organizations.views.mixins import OrganizationMixin
from organizations.views.mixins import OrganizationUserMixin
class BaseOrganizationList(ListView):
context_object_name = "organizations"
def get_queryset(self):
return self.org_model.active.filter(users=self.request.user)
class BaseOrganizationDetail(OrganizationMixin, DetailView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["organization_users"] = self.organization.organization_users.all()
context["organization"] = self.organization
return context
class BaseOrganizationCreate(CreateView):
form_class = OrganizationAddForm
template_name = "organizations/organization_form.html"
def get_success_url(self):
return reverse("organization_list")
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"request": self.request})
return kwargs
class BaseOrganizationUpdate(OrganizationMixin, UpdateView):
form_class = OrganizationForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"request": self.request})
return kwargs
class BaseOrganizationDelete(OrganizationMixin, DeleteView):
def get_success_url(self):
return reverse("organization_list")
class BaseOrganizationUserList(OrganizationMixin, ListView):
def get(self, request, *args, **kwargs):
self.organization = self.get_organization()
self.object_list = self.organization.organization_users.all()
context = self.get_context_data(
object_list=self.object_list,
organization_users=self.object_list,
organization=self.organization,
)
return self.render_to_response(context)
class BaseOrganizationUserDetail(OrganizationUserMixin, DetailView):
pass
class BaseOrganizationUserCreate(OrganizationMixin, CreateView):
form_class = OrganizationUserAddForm
template_name = "organizations/organizationuser_form.html"
def get_success_url(self):
return reverse(
"organization_user_list",
kwargs={"organization_pk": self.object.organization.pk},
)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"organization": self.organization, "request": self.request})
return kwargs
def get(self, request, *args, **kwargs):
self.organization = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.organization = self.get_object()
return super().post(request, *args, **kwargs)
class BaseOrganizationUserRemind(OrganizationUserMixin, DetailView):
"""
Reminder view for already-linked org users
This is only applicable for invitation backends using the
strategy the original "default" backend uses, which is to
immediately add existing users to the organization after
invite, but leave new users inactive until confirmation.
"""
template_name = "organizations/organizationuser_remind.html"
# TODO move to invitations backend?
def get_success_url(self):
return reverse(
"organization_user_list",
kwargs={"organization_pk": self.object.organization.pk},
)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.user.is_active:
return HttpResponseGone(_("User is already active"))
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.user.is_active:
return HttpResponseGone(_("User is already active"))
invitation_backend().send_reminder(
self.object.user,
**{
"domain": get_current_site(self.request),
"organization": self.organization,
"sender": request.user,
}
)
return redirect(self.get_success_url())
class BaseOrganizationUserUpdate(OrganizationUserMixin, UpdateView):
form_class = OrganizationUserForm
class BaseOrganizationUserDelete(OrganizationUserMixin, DeleteView):
def get_success_url(self):
return reverse(
"organization_user_list",
kwargs={"organization_pk": self.object.organization.pk},
)
class OrganizationSignup(FormView):
"""
View that allows unregistered users to create an organization account.
It simply processes the form and then calls the specified registration
backend.
"""
form_class = SignUpForm
template_name = "organizations/signup_form.html"
# TODO get success from backend, because some backends may do something
# else, like require verification
backend = registration_backend()
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
return redirect("organization_add")
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
if getattr(self, "success_url", None):
return self.success_url
raise ImproperlyConfigured(
"{cls} must either have a `success_url` attribute defined"
"or override `get_success_url`".format(cls=self.__class__.__name__)
)
def form_valid(self, form):
"""
Register user and create the organization
"""
user = self.backend.register_by_email(form.cleaned_data["email"])
create_organization(
user=user,
name=form.cleaned_data["name"],
slug=form.cleaned_data["slug"],
is_active=False,
)
return redirect(self.get_success_url())
class ViewFactory:
"""
A class that can create a faked 'module' with model specific views
These views have NO access control applied.
"""
def __init__(self, org_model):
self.org_model = org_model
@property
def OrganizationList(self):
klass = BaseOrganizationList
klass.org_model = self.org_model
return klass
@property
def OrganizationDetail(self):
klass = BaseOrganizationDetail
klass.org_model = self.org_model
return klass
@property
def OrganizationCreate(self):
klass = BaseOrganizationCreate
klass.org_model = self.org_model
return klass
@property
def OrganizationUpdate(self):
klass = BaseOrganizationUpdate
klass.org_model = self.org_model
return klass
@property
def OrganizationDelete(self):
klass = BaseOrganizationDelete
klass.org_model = self.org_model
return klass
@property
def OrganizationUserList(self):
klass = BaseOrganizationUserList
klass.org_model = self.org_model
return klass
@property
def OrganizationUserDetail(self):
klass = BaseOrganizationUserDetail
klass.org_model = self.org_model
return klass
@property
def OrganizationUserUpdate(self):
klass = BaseOrganizationUserUpdate
klass.org_model = self.org_model
return klass
@property
def OrganizationUserCreate(self):
klass = BaseOrganizationUserCreate
klass.org_model = self.org_model
return klass
@property
def OrganizationUserDelete(self):
klass = BaseOrganizationUserDelete
klass.org_model = self.org_model
return klass
@property
def OrganizationUserRemind(self):
klass = BaseOrganizationUserRemind
klass.org_model = self.org_model
return klass
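# Illustrative usage sketch (not part of the original module): the factory is
# meant to be instantiated with a concrete organization model and its view
# properties wired into a URL conf.  The model import below is an assumption
# for illustration only.
#
#   from django.urls import path
#   from organizations.models import Organization
#
#   views = ViewFactory(Organization)
#   urlpatterns = [
#       path("", views.OrganizationList.as_view(), name="organization_list"),
#       path("<int:organization_pk>/", views.OrganizationDetail.as_view(),
#            name="organization_detail"),
#   ]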
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for DRAC inspection interface
"""
from dracclient import exceptions as drac_exceptions
import mock
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import inspect as drac_inspect
from ironic import objects
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_drac_info()
class DracInspectionTestCase(db_base.DbTestCase):
def setUp(self):
super(DracInspectionTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_drac')
self.node = obj_utils.create_test_node(self.context,
driver='fake_drac',
driver_info=INFO_DICT)
memory = [{'id': 'DIMM.Socket.A1',
'size_mb': 16384,
'speed': 2133,
'manufacturer': 'Samsung',
'model': 'DDR4 DIMM',
'state': 'ok'},
{'id': 'DIMM.Socket.B1',
'size_mb': 16384,
'speed': 2133,
'manufacturer': 'Samsung',
'model': 'DDR4 DIMM',
'state': 'ok'}]
cpus = [{'id': 'CPU.Socket.1',
'cores': 6,
'speed': 2400,
'model': 'Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz',
'state': 'ok',
'ht_enabled': True,
'turbo_enabled': True,
'vt_enabled': True,
'arch64': True},
{'id': 'CPU.Socket.2',
'cores': 6,
'speed': 2400,
'model': 'Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz',
'state': 'ok',
'ht_enabled': True,
'turbo_enabled': True,
'vt_enabled': True,
'arch64': True}]
virtual_disks = [
{'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
'name': 'disk 0',
'description': 'Virtual Disk 0 on Integrated RAID Controller 1',
'controller': 'RAID.Integrated.1-1',
'raid_level': '1',
'size_mb': 1143552,
'state': 'ok',
'raid_state': 'online',
'span_depth': 1,
'span_length': 2,
'pending_operations': None}]
physical_disks = [
{'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'description': ('Disk 1 in Backplane 1 of '
'Integrated RAID Controller 1'),
'controller': 'RAID.Integrated.1-1',
'manufacturer': 'SEAGATE',
'model': 'ST600MM0006',
'media_type': 'hdd',
'interface_type': 'sas',
'size_mb': 571776,
'free_size_mb': 571776,
'serial_number': 'S0M3EY2Z',
'firmware_version': 'LS0A',
'state': 'ok',
'raid_state': 'ready'},
{'id': 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'description': ('Disk 1 in Backplane 1 of '
'Integrated RAID Controller 1'),
'controller': 'RAID.Integrated.1-1',
'manufacturer': 'SEAGATE',
'model': 'ST600MM0006',
'media_type': 'hdd',
'interface_type': 'sas',
'size_mb': 285888,
'free_size_mb': 285888,
'serial_number': 'S0M3EY2Z',
'firmware_version': 'LS0A',
'state': 'ok',
'raid_state': 'ready'}]
nics = [
{'id': 'NIC.Embedded.1-1-1',
'mac': 'B0:83:FE:C6:6F:A1',
'model': 'Broadcom Gigabit Ethernet BCM5720 - B0:83:FE:C6:6F:A1',
'speed': '1000 Mbps',
'duplex': 'full duplex',
'media_type': 'Base T'},
{'id': 'NIC.Embedded.2-1-1',
'mac': 'B0:83:FE:C6:6F:A2',
'model': 'Broadcom Gigabit Ethernet BCM5720 - B0:83:FE:C6:6F:A2',
'speed': '1000 Mbps',
'duplex': 'full duplex',
'media_type': 'Base T'}]
self.memory = [test_utils.dict_to_namedtuple(values=m) for m in memory]
self.cpus = [test_utils.dict_to_namedtuple(values=c) for c in cpus]
self.virtual_disks = [test_utils.dict_to_namedtuple(values=vd)
for vd in virtual_disks]
self.physical_disks = [test_utils.dict_to_namedtuple(values=pd)
for pd in physical_disks]
self.nics = [test_utils.dict_to_namedtuple(values=n) for n in nics]
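    # Illustrative arithmetic (not part of the original tests), relating the
    # fixtures above to the expected node properties asserted below, on the
    # assumption that the driver sums DIMM sizes and converts MiB to GiB by
    # integer division:
    #   memory_mb: 16384 + 16384 = 32768
    #   local_gb with a virtual disk:    1143552 // 1024 = 1116
    #   local_gb without virtual disks:  285888 // 1024 = 279
    #     (285888 is the smaller physical disk, cf. test__guess_root_disk)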
def test_get_properties(self):
expected = drac_common.COMMON_PROPERTIES
driver = drac_inspect.DracInspect()
self.assertEqual(expected, driver.get_properties())
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_inspect_hardware(self, mock_port_create, mock_get_drac_client):
expected_node_properties = {
'memory_mb': 32768,
'local_gb': 1116,
'cpus': 2,
'cpu_arch': 'x86_64'}
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.list_memory.return_value = self.memory
mock_client.list_cpus.return_value = self.cpus
mock_client.list_virtual_disks.return_value = self.virtual_disks
mock_client.list_nics.return_value = self.nics
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
return_value = task.driver.inspect.inspect_hardware(task)
self.node.refresh()
self.assertEqual(expected_node_properties, self.node.properties)
self.assertEqual(states.MANAGEABLE, return_value)
self.assertEqual(2, mock_port_create.call_count)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_inspect_hardware_fail(self, mock_port_create,
mock_get_drac_client):
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.list_memory.return_value = self.memory
mock_client.list_cpus.return_value = self.cpus
mock_client.list_virtual_disks.side_effect = (
drac_exceptions.BaseClientException('boom'))
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.HardwareInspectionFailure,
task.driver.inspect.inspect_hardware, task)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_inspect_hardware_no_virtual_disk(self, mock_port_create,
mock_get_drac_client):
expected_node_properties = {
'memory_mb': 32768,
'local_gb': 279,
'cpus': 2,
'cpu_arch': 'x86_64'}
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.list_memory.return_value = self.memory
mock_client.list_cpus.return_value = self.cpus
mock_client.list_virtual_disks.return_value = []
mock_client.list_physical_disks.return_value = self.physical_disks
mock_client.list_nics.return_value = self.nics
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
return_value = task.driver.inspect.inspect_hardware(task)
self.node.refresh()
self.assertEqual(expected_node_properties, self.node.properties)
self.assertEqual(states.MANAGEABLE, return_value)
self.assertEqual(2, mock_port_create.call_count)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_inspect_hardware_no_cpu(
self, mock_port_create, mock_get_drac_client):
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.list_memory.return_value = self.memory
mock_client.list_cpus.return_value = []
mock_client.list_virtual_disks.return_value = []
mock_client.list_physical_disks.return_value = self.physical_disks
mock_client.list_nics.return_value = self.nics
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.HardwareInspectionFailure,
task.driver.inspect.inspect_hardware, task)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_inspect_hardware_with_existing_ports(self, mock_port_create,
mock_get_drac_client):
expected_node_properties = {
'memory_mb': 32768,
'local_gb': 1116,
'cpus': 2,
'cpu_arch': 'x86_64'}
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.list_memory.return_value = self.memory
mock_client.list_cpus.return_value = self.cpus
mock_client.list_virtual_disks.return_value = self.virtual_disks
mock_client.list_nics.return_value = self.nics
mock_port_create.side_effect = exception.MACAlreadyExists("boom")
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
return_value = task.driver.inspect.inspect_hardware(task)
self.node.refresh()
self.assertEqual(expected_node_properties, self.node.properties)
self.assertEqual(states.MANAGEABLE, return_value)
self.assertEqual(2, mock_port_create.call_count)
def test__guess_root_disk(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
root_disk = task.driver.inspect._guess_root_disk(
self.physical_disks)
self.assertEqual(285888, root_disk.size_mb)
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tasks Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import re
from analysis_module import AnalysisModule
from devlib.utils.misc import memoized
from trappy.utils import listify
class TasksAnalysis(AnalysisModule):
"""
Support for Tasks signals analysis.
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(TasksAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_top_big_tasks(self, min_samples=100, min_utilization=None):
"""
Tasks which had 'utilization' samples bigger than the specified
threshold
        :param min_samples: minimum number of samples over the min_utilization
:type min_samples: int
:param min_utilization: minimum utilization used to filter samples
default: capacity of a little cluster
:type min_utilization: int
"""
if not self._trace.hasEvents('sched_load_avg_task'):
self._log.warning('Events [sched_load_avg_task] not found')
return None
if min_utilization is None:
min_utilization = self._little_cap
# Get utilization samples >= min_utilization
df = self._dfg_trace_event('sched_load_avg_task')
big_tasks_events = df[df.util_avg > min_utilization]
if not len(big_tasks_events):
            self._log.warning('No tasks with utilization samples > %d',
min_utilization)
return None
# Report the number of tasks which match the min_utilization condition
big_tasks = big_tasks_events.pid.unique()
self._log.info('%5d tasks with samples of utilization > %d',
len(big_tasks), min_utilization)
# Compute number of samples above threshold
big_tasks_stats = big_tasks_events.groupby('pid')\
.describe(include=['object'])
big_tasks_stats = big_tasks_stats.unstack()['comm']\
.sort_values(by=['count'], ascending=False)
# Filter for number of occurrences
big_tasks_stats = big_tasks_stats[big_tasks_stats['count'] > min_samples]
if not len(big_tasks_stats):
self._log.warning(' but none with more than %d samples',
min_samples)
return None
self._log.info(' %d with more than %d samples',
len(big_tasks_stats), min_samples)
# Add task name column
big_tasks_stats['comm'] = big_tasks_stats.index.map(
lambda pid: ', '.join(self._trace.getTaskByPid(pid)))
# Filter columns of interest
big_tasks_stats = big_tasks_stats[['count', 'comm']]
big_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)
return big_tasks_stats
def _dfg_top_wakeup_tasks(self, min_wakeups=100):
"""
Tasks which wakeup more frequently than a specified threshold.
:param min_wakeups: minimum number of wakeups
:type min_wakeups: int
"""
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Events [sched_wakeup] not found')
return None
df = self._dfg_trace_event('sched_wakeup')
# Compute number of wakeups above threshold
wkp_tasks_stats = df.groupby('pid').describe(include=['object'])
wkp_tasks_stats = wkp_tasks_stats.unstack()['comm']\
.sort_values(by=['count'], ascending=False)
# Filter for number of occurrences
wkp_tasks_stats = wkp_tasks_stats[
wkp_tasks_stats['count'] > min_wakeups]
        if not len(wkp_tasks_stats):
            self._log.warning('No tasks with more than %d wakeups',
                              min_wakeups)
            return None
        self._log.info('%5d tasks with more than %d wakeups',
                       len(wkp_tasks_stats), min_wakeups)
# Add task name column
wkp_tasks_stats['comm'] = wkp_tasks_stats.index.map(
lambda pid: ', '.join(self._trace.getTaskByPid(pid)))
# Filter columns of interest
wkp_tasks_stats = wkp_tasks_stats[['count', 'comm']]
wkp_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)
return wkp_tasks_stats
def _dfg_rt_tasks(self, min_prio=100):
"""
Tasks with RT priority
        NOTE: priorities use scheduler values, thus: the lower the value, the
        higher the task priority.
        RT Priorities: [ 0..100]
        FAIR Priorities: [101..120]
        :param min_prio: minimum priority
:type min_prio: int
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Events [sched_switch] not found')
return None
df = self._dfg_trace_event('sched_switch')
        # Filter tasks whose priority value is lower than or equal to the
        # threshold, i.e. tasks with higher priority
df = df[df.next_prio <= min_prio]
# Filter columns of interest
rt_tasks = df[['next_pid', 'next_prio']]
        # Remove all duplicates
rt_tasks = rt_tasks.drop_duplicates()
# Order by priority
rt_tasks.sort_values(by=['next_prio', 'next_pid'], ascending=True,
inplace=True)
rt_tasks.rename(columns={'next_pid': 'pid', 'next_prio': 'prio'},
inplace=True)
# Set PID as index
rt_tasks.set_index('pid', inplace=True)
# Add task name column
rt_tasks['comm'] = rt_tasks.index.map(
lambda pid: ', '.join(self._trace.getTaskByPid(pid)))
return rt_tasks
###############################################################################
# Plotting Methods
###############################################################################
def plotTasks(self, tasks=None, signals=None):
"""
Generate a common set of useful plots for each of the specified tasks
        This method allows filtering which signals should be plotted, if data
        are available in the input trace. The supported signals are:
Tasks signals plot:
load_avg, util_avg, boosted_util, sched_overutilized
Tasks residencies on CPUs:
residencies, sched_overutilized
Tasks PELT signals:
load_sum, util_sum, period_contrib, sched_overutilized
At least one of the previous signals must be specified to get a valid
plot.
        Additional custom signals can be specified and they will be represented
in the "Task signals plots" if they represent valid keys of the task
load/utilization trace event (e.g. sched_load_avg_task).
Note:
sched_overutilized: enable the plotting of overutilization bands on
top of each subplot
residencies: enable the generation of the CPUs residencies plot
:param tasks: the list of task names and/or PIDs to plot.
Numerical PIDs and string task names can be mixed
in the same list.
default: all tasks defined in Trace
creation time are plotted
:type tasks: list(str) or list(int)
:param signals: list of signals (and thus plots) to generate
default: all the plots and signals available in the
current trace
:type signals: list(str)
"""
if not signals:
signals = ['load_avg', 'util_avg', 'boosted_util',
'sched_overutilized',
'load_sum', 'util_sum', 'period_contrib',
'residencies']
# Check for the minimum required signals to be available
if not self._trace.hasEvents('sched_load_avg_task'):
self._log.warning('Events [sched_load_avg_task] not found, '
'plot DISABLED!')
return
# Defined list of tasks to plot
if tasks and \
not isinstance(tasks, str) and \
not isinstance(tasks, list):
raise ValueError('Wrong format for tasks parameter')
if tasks:
tasks_to_plot = listify(tasks)
elif self._tasks:
tasks_to_plot = sorted(self._tasks)
else:
raise ValueError('No tasks to plot specified')
# Compute number of plots to produce
plots_count = 0
plots_signals = [
            # First plot: task's utilization
{'load_avg', 'util_avg', 'boosted_util'},
# Second plot: task residency
{'residencies'},
            # Third plot: task's load
{'load_sum', 'util_sum', 'period_contrib'}
]
hr = []
ysize = 0
for plot_id, signals_to_plot in enumerate(plots_signals):
signals_to_plot = signals_to_plot.intersection(signals)
if len(signals_to_plot):
plots_count = plots_count + 1
# Use bigger size only for the first plot
hr.append(3 if plot_id == 0 else 1)
ysize = ysize + (8 if plot_id else 4)
# Grid
gs = gridspec.GridSpec(plots_count, 1, height_ratios=hr)
gs.update(wspace=0.1, hspace=0.1)
# Build list of all PIDs for each task_name to plot
pids_to_plot = []
for task in tasks_to_plot:
# Add specified PIDs to the list
if isinstance(task, int):
pids_to_plot.append(task)
continue
# Otherwise: add all the PIDs for task with the specified name
pids_to_plot.extend(self._trace.getTaskByName(task))
for tid in pids_to_plot:
savefig = False
task_name = self._trace.getTaskByPid(tid)
if len(task_name) == 1:
task_name = task_name[0]
self._log.info('Plotting %5d: %s...', tid, task_name)
else:
self._log.info('Plotting %5d: %s...', tid, ', '.join(task_name))
plot_id = 0
# For each task create a figure with plots_count plots
plt.figure(figsize=(16, ysize))
plt.suptitle('Task Signals',
y=.94, fontsize=16, horizontalalignment='center')
# Plot load and utilization
signals_to_plot = {'load_avg', 'util_avg', 'boosted_util'}
signals_to_plot = list(signals_to_plot.intersection(signals))
if len(signals_to_plot) > 0:
axes = plt.subplot(gs[plot_id, 0])
axes.set_title('Task [{0:d}:{1:s}] Signals'
.format(tid, task_name))
plot_id = plot_id + 1
is_last = (plot_id == plots_count)
self._plotTaskSignals(axes, tid, signals, is_last)
savefig = True
# Plot CPUs residency
signals_to_plot = {'residencies'}
signals_to_plot = list(signals_to_plot.intersection(signals))
if len(signals_to_plot) > 0:
axes = plt.subplot(gs[plot_id, 0])
axes.set_title(
'Task [{0:d}:{1:s}] Residency (green: LITTLE, red: big)'
.format(tid, task_name)
)
plot_id = plot_id + 1
is_last = (plot_id == plots_count)
if 'sched_overutilized' in signals:
signals_to_plot.append('sched_overutilized')
self._plotTaskResidencies(axes, tid, signals_to_plot, is_last)
savefig = True
# Plot PELT signals
signals_to_plot = {'load_sum', 'util_sum', 'period_contrib'}
signals_to_plot = list(signals_to_plot.intersection(signals))
if len(signals_to_plot) > 0:
axes = plt.subplot(gs[plot_id, 0])
axes.set_title('Task [{0:d}:{1:s}] PELT Signals'
.format(tid, task_name))
plot_id = plot_id + 1
if 'sched_overutilized' in signals:
signals_to_plot.append('sched_overutilized')
self._plotTaskPelt(axes, tid, signals_to_plot)
savefig = True
if not savefig:
self._log.warning('Nothing to plot for %s', task_name)
continue
# Save generated plots into datadir
if isinstance(task_name, list):
task_name = re.sub('[:/]', '_', task_name[0])
else:
task_name = re.sub('[:/]', '_', task_name)
figname = '{}/{}task_util_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
tid, task_name)
pl.savefig(figname, bbox_inches='tight')
def plotBigTasks(self, max_tasks=10, min_samples=100,
min_utilization=None):
"""
For each big task plot utilization and show the smallest cluster
capacity suitable for accommodating task utilization.
:param max_tasks: maximum number of tasks to consider
:type max_tasks: int
        :param min_samples: minimum number of samples over the min_utilization
:type min_samples: int
:param min_utilization: minimum utilization used to filter samples
default: capacity of a little cluster
:type min_utilization: int
"""
# Get PID of big tasks
big_frequent_task_df = self._dfg_top_big_tasks(
min_samples, min_utilization)
        if big_frequent_task_df is None:
            self._log.warning('No big/frequent tasks to plot')
            return
        if max_tasks > 0:
            big_frequent_task_df = big_frequent_task_df.head(max_tasks)
big_frequent_task_pids = big_frequent_task_df.index.values
big_frequent_tasks_count = len(big_frequent_task_pids)
if big_frequent_tasks_count == 0:
self._log.warning('No big/frequent tasks to plot')
return
# Get the list of events for all big frequent tasks
df = self._dfg_trace_event('sched_load_avg_task')
big_frequent_tasks_events = df[df.pid.isin(big_frequent_task_pids)]
# Define axes for side-by-side plottings
fig, axes = plt.subplots(big_frequent_tasks_count, 1,
figsize=(16, big_frequent_tasks_count*4))
plt.subplots_adjust(wspace=0.1, hspace=0.2)
plot_idx = 0
for pid, group in big_frequent_tasks_events.groupby('pid'):
            # Build task names (there could be multiple during the task lifetime)
task_name = 'PID: {} | {}'.format(
pid, ' | '.join(self._trace.getTaskByPid(pid)))
# Plot title
if big_frequent_tasks_count == 1:
ax = axes
else:
ax = axes[plot_idx]
ax.set_title(task_name)
# Left axis: utilization
ax = group.plot(y=['util_avg', 'min_cluster_cap'],
style=['r.', '-b'],
drawstyle='steps-post',
linewidth=1,
ax=ax)
ax.set_xlim(self._trace.x_min, self._trace.x_max)
ax.set_ylim(0, 1100)
ax.set_ylabel('util_avg')
ax.set_xlabel('')
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
plot_idx += 1
ax.set_xlabel('Time [s]')
        self._log.info('Tasks which had a utilization above %d for at least %d samples',
                       self._little_cap, min_samples)
def plotWakeupTasks(self, max_tasks=10, min_wakeups=0, per_cluster=False):
"""
Show waking up tasks over time and newly forked tasks in two separate
plots.
:param max_tasks: maximum number of tasks to consider
        :type max_tasks: int
:param min_wakeups: minimum number of wakeups of each task
:type min_wakeups: int
:param per_cluster: if True get per-cluster wakeup events
:type per_cluster: bool
"""
if per_cluster is True and \
not self._trace.hasEvents('sched_wakeup_new'):
self._log.warning('Events [sched_wakeup_new] not found, '
'plots DISABLED!')
return
elif not self._trace.hasEvents('sched_wakeup') and \
not self._trace.hasEvents('sched_wakeup_new'):
self._log.warning('Events [sched_wakeup, sched_wakeup_new] not found, '
'plots DISABLED!')
return
# Define axes for side-by-side plottings
fig, axes = plt.subplots(2, 1, figsize=(14, 5))
plt.subplots_adjust(wspace=0.2, hspace=0.3)
if per_cluster:
# Get per cluster wakeup events
df = self._dfg_trace_event('sched_wakeup_new')
big_frequent = (
(df.target_cpu.isin(self._big_cpus))
)
ntbc = df[big_frequent]
ntbc_count = len(ntbc)
little_frequent = (
(df.target_cpu.isin(self._little_cpus))
)
ntlc = df[little_frequent];
ntlc_count = len(ntlc)
self._log.info('%5d tasks forked on big cluster (%3.1f %%)',
ntbc_count,
100. * ntbc_count / (ntbc_count + ntlc_count))
self._log.info('%5d tasks forked on LITTLE cluster (%3.1f %%)',
ntlc_count,
100. * ntlc_count / (ntbc_count + ntlc_count))
ax = axes[0]
ax.set_title('Tasks Forks on big CPUs');
ntbc.pid.plot(style=['g.'], ax=ax);
ax.set_xlim(self._trace.x_min, self._trace.x_max);
ax.set_xticklabels([])
ax.set_xlabel('')
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
ax = axes[1]
ax.set_title('Tasks Forks on LITTLE CPUs');
ntlc.pid.plot(style=['g.'], ax=ax);
ax.set_xlim(self._trace.x_min, self._trace.x_max);
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
return
# Keep events of defined big tasks
wkp_task_pids = self._dfg_top_wakeup_tasks(min_wakeups)
        if wkp_task_pids is None or not len(wkp_task_pids):
            self._log.warning('No frequent wakeup tasks to plot')
            return
        wkp_task_pids = wkp_task_pids.index.values[:max_tasks]
        self._log.info('Plotting %d frequent wakeup tasks',
                       len(wkp_task_pids))
ax = axes[0]
ax.set_title('Tasks WakeUps Events')
df = self._dfg_trace_event('sched_wakeup')
if len(df):
df = df[df.pid.isin(wkp_task_pids)]
df.pid.astype(int).plot(style=['b.'], ax=ax)
ax.set_xlim(self._trace.x_min, self._trace.x_max)
ax.set_xticklabels([])
ax.set_xlabel('')
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
ax = axes[1]
ax.set_title('Tasks Forks Events')
df = self._dfg_trace_event('sched_wakeup_new')
if len(df):
df = df[df.pid.isin(wkp_task_pids)]
df.pid.astype(int).plot(style=['r.'], ax=ax)
ax.set_xlim(self._trace.x_min, self._trace.x_max)
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
def plotBigTasksVsCapacity(self, min_samples=1,
min_utilization=None, big_cluster=True):
"""
Draw a plot that shows whether tasks are placed on the correct cluster
based on their utilization and cluster capacity. Green dots mean the
task was placed on the correct cluster, Red means placement was wrong
        :param min_samples: minimum number of samples over the min_utilization
:type min_samples: int
:param min_utilization: minimum utilization used to filter samples
default: capacity of a little cluster
:type min_utilization: int
:param big_cluster:
:type big_cluster: bool
"""
if not self._trace.hasEvents('sched_load_avg_task'):
self._log.warning('Events [sched_load_avg_task] not found')
return
if not self._trace.hasEvents('cpu_frequency'):
self._log.warning('Events [cpu_frequency] not found')
return
if big_cluster:
cluster_correct = 'big'
cpus = self._big_cpus
else:
cluster_correct = 'LITTLE'
cpus = self._little_cpus
# Get all utilization update events
df = self._dfg_trace_event('sched_load_avg_task')
# Keep events of defined big tasks
big_task_pids = self._dfg_top_big_tasks(
min_samples, min_utilization)
if big_task_pids is not None:
big_task_pids = big_task_pids.index.values
df = df[df.pid.isin(big_task_pids)]
if not df.size:
                self._log.warning('No events for tasks with more than %d '
                                  'utilization samples bigger than %s, '
                                  'plots DISABLED!', min_samples,
                                  min_utilization)
return
fig, axes = plt.subplots(2, 1, figsize=(14, 5))
plt.subplots_adjust(wspace=0.2, hspace=0.3)
# Add column of expected cluster depending on:
# a) task utilization value
# b) capacity of the selected cluster
bu_bc = ( \
(df['util_avg'] > self._little_cap) & \
(df['cpu'].isin(self._big_cpus))
)
su_lc = ( \
(df['util_avg'] <= self._little_cap) & \
(df['cpu'].isin(self._little_cpus))
)
# The Cluster CAPacity Matches the UTILization (ccap_mutil) iff:
# - tasks with util_avg > little_cap are running on a BIG cpu
        # - tasks with util_avg <= little_cap are running on a LITTLE cpu
df.loc[:,'ccap_mutil'] = np.select([(bu_bc | su_lc)], [True], False)
df_freq = self._dfg_trace_event('cpu_frequency')
df_freq = df_freq[df_freq.cpu == cpus[0]]
ax = axes[0]
ax.set_title('Tasks Utilization vs Allocation')
for ucolor, umatch in zip('gr', [True, False]):
cdata = df[df['ccap_mutil'] == umatch]
if len(cdata) > 0:
cdata['util_avg'].plot(ax=ax,
style=[ucolor+'.'], legend=False)
ax.set_xlim(self._trace.x_min, self._trace.x_max)
ax.set_xticklabels([])
ax.set_xlabel('')
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
ax = axes[1]
ax.set_title('Frequencies on "{}" cluster'.format(cluster_correct))
df_freq['frequency'].plot(style=['-b'], ax=ax, drawstyle='steps-post')
ax.set_xlim(self._trace.x_min, self._trace.x_max);
ax.grid(True)
self._trace.analysis.status.plotOverutilized(ax)
legend_y = axes[0].get_ylim()[1]
axes[0].annotate('Utilization-Capacity Matches',
xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
        axes[0].annotate('Task scheduled (green) or not (red) on min cluster',
xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
fontsize=14)
###############################################################################
# Utility Methods
###############################################################################
def _plotTaskSignals(self, axes, tid, signals, is_last=False):
"""
For task with ID `tid` plot the specified signals.
:param axes: axes over which to generate the plot
:type axes: :mod:`matplotlib.axes.Axes`
:param tid: task ID
:type tid: int
        :param signals: signals to be plotted
        :type signals: list(str)
:param is_last: if True this is the last plot
:type is_last: bool
"""
# Get dataframe for the required task
util_df = self._dfg_trace_event('sched_load_avg_task')
# Plot load and util
signals_to_plot = set(signals).difference({'boosted_util'})
for signal in signals_to_plot:
if signal not in util_df.columns:
continue
data = util_df[util_df.pid == tid][signal]
data.plot(ax=axes, drawstyle='steps-post', legend=True)
# Plot boost utilization if available
if 'boosted_util' in signals and \
self._trace.hasEvents('sched_boost_task'):
boost_df = self._dfg_trace_event('sched_boost_task')
data = boost_df[boost_df.pid == tid][['boosted_util']]
if len(data):
data.plot(ax=axes, style=['y-'], drawstyle='steps-post')
else:
task_name = self._trace.getTaskByPid(tid)
self._log.warning('No "boosted_util" data for task [%d:%s]',
tid, task_name)
        # Add capacities data if available
if 'nrg_model' in self._platform:
nrg_model = self._platform['nrg_model']
max_lcap = nrg_model['little']['cpu']['cap_max']
max_bcap = nrg_model['big']['cpu']['cap_max']
tip_lcap = 0.8 * max_lcap
tip_bcap = 0.8 * max_bcap
self._log.debug(
'LITTLE capacity tip/max: %d/%d, big capacity tip/max: %d/%d',
tip_lcap, max_lcap, tip_bcap, max_bcap
)
axes.axhline(tip_lcap, color='y', linestyle=':', linewidth=2)
axes.axhline(max_lcap, color='y', linestyle='--', linewidth=2)
axes.axhline(tip_bcap, color='r', linestyle=':', linewidth=2)
axes.axhline(max_bcap, color='r', linestyle='--', linewidth=2)
axes.set_ylim(0, 1100)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.grid(True)
if not is_last:
axes.set_xticklabels([])
axes.set_xlabel('')
if 'sched_overutilized' in signals:
self._trace.analysis.status.plotOverutilized(axes)
def _plotTaskResidencies(self, axes, tid, signals, is_last=False):
"""
For task with ID `tid` plot residency information.
:param axes: axes over which to generate the plot
:type axes: :mod:`matplotlib.axes.Axes`
:param tid: task ID
:type tid: int
        :param signals: signals to be plotted
        :type signals: list(str)
:param is_last: if True this is the last plot
:type is_last: bool
"""
util_df = self._dfg_trace_event('sched_load_avg_task')
data = util_df[util_df.pid == tid][['cluster', 'cpu']]
for ccolor, clabel in zip('gr', ['LITTLE', 'big']):
cdata = data[data.cluster == clabel]
if len(cdata) > 0:
cdata.plot(ax=axes, style=[ccolor+'+'], legend=False)
        # Y Axis - placeholder for legend, actual CPUs, topmost empty lane
cpus = [str(n) for n in range(self._platform['cpus_count'])]
ylabels = [''] + cpus
axes.set_yticklabels(ylabels)
axes.set_ylim(-1, self._platform['cpus_count'])
axes.set_ylabel('CPUs')
# X Axis
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.grid(True)
if not is_last:
axes.set_xticklabels([])
axes.set_xlabel('')
if 'sched_overutilized' in signals:
self._trace.analysis.status.plotOverutilized(axes)
def _plotTaskPelt(self, axes, tid, signals):
"""
For task with ID `tid` plot PELT-related signals.
:param axes: axes over which to generate the plot
:type axes: :mod:`matplotlib.axes.Axes`
:param tid: task ID
:type tid: int
        :param signals: signals to be plotted
        :type signals: list(str)
"""
util_df = self._dfg_trace_event('sched_load_avg_task')
data = util_df[util_df.pid == tid][['load_sum',
'util_sum',
'period_contrib']]
data.plot(ax=axes, drawstyle='steps-post')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.ticklabel_format(style='scientific', scilimits=(0, 0),
axis='y', useOffset=False)
axes.grid(True)
if 'sched_overutilized' in signals:
self._trace.analysis.status.plotOverutilized(axes)
# vim :set tabstop=4 shiftwidth=4 expandtab
|
|
"""
Copyright 2016 Pavlos Vougiouklis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import math
import pandas as pd
pd.set_option('display.max_colwidth', -1)
import re
from nltk.tokenize import RegexpTokenizer
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import os
import sys
import glob
import shutil
import cPickle as pickle
# Useful for .xml files manipulation
import dicttoxml
from xml.dom import minidom
import xml.etree.ElementTree as ET
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import KMeans
parser = argparse.ArgumentParser()
parser.add_argument('--evaluation', action='store_true', default=False)
args = parser.parse_args()
csv_dir = 'CrowdFlower/WikiAstronauts/f900315.csv'
output_csv_dir = '../Datasets/WikiAstronauts/WikiAstronauts-DBpedia.xls'
output_xml_dir = '../Datasets/WikiAstronauts/WikiAstronauts-DBpedia.xml'
output_eval_dir = '../Evaluation/WikiAstronauts-DBpedia.xls'
cache_dir = 'Caches/WikiAstronauts/'
num_tokens = []
def load_cache():
if os.path.isfile(cache_dir + 'dataset-WikiAstronauts.p') :
temp = pickle.load(open(cache_dir + 'dataset-WikiAstronauts.p', "rb"))
else:
temp = []
return temp
def shortest_length(sentences):
vectorizer = CountVectorizer(min_df=1, token_pattern='\w+', lowercase=True)
train = vectorizer.fit_transform(sentences)
num_samples, num_features = train.shape
length = np.zeros(num_samples)
for i in range(0, num_samples):
length[i] = sp.linalg.norm(train.getrow(i).toarray())
return sentences[np.argmin(length)]
# Computes distance on the normalised vectors
def distance(v1, v2):
v1_normalized = v1/sp.linalg.norm(v1)
v2_normalized = v2/sp.linalg.norm(v2)
delta = v1_normalized - v2_normalized
return sp.linalg.norm(delta)
def get_original(sentence):
while sentence.find('<') > -1:
start_flag = sentence.find('<')
end_flag = sentence.find('>')
sentence = sentence.replace(sentence[start_flag:end_flag + 1], '')
return sentence
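# Note on shortening() below: it weights each candidate simplification by its
# token count using a unit-variance Gaussian centred on the average count over
# all candidates, so sentences of typical length receive the highest weight
# while unusually short or long ones are penalised.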
def shortening(sentences):
vectorizer = CountVectorizer(min_df=1, token_pattern='\w+', lowercase=True)
train = vectorizer.fit_transform(sentences)
num_samples, num_features = train.shape
count = np.zeros(num_samples)
for i in range(0, num_samples):
count[i] = count[i] + np.sum(train.getrow(i).toarray())
avg = np.sum(count) / float(num_samples)
extent = np.zeros(num_samples)
for i in range(0, num_samples):
extent[i] = 1 / (1 * np.sqrt(2 * np.pi)) * np.exp( - (count[i] - avg)**2 / (2 * 1**2) )
#plt.plot(count, extent, 'o')
#plt.show()
return [extent[sentence] for sentence in range(0, extent.shape[0])]
def baseline(sentences):
count = np.zeros(len(sentences))
vectorizer = CountVectorizer(min_df=1, token_pattern='\w+', lowercase=True)
train = vectorizer.fit_transform(sentences)
num_samples, num_features = train.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
for i in range(0, num_samples):
for j in range(0, num_samples):
if np.array_equal(train.getrow(i).toarray(), train.getrow(j).toarray()):
count[i] = count[i] + 1
index = np.argmax(count)
#print count
return sentences[index]
def count_tokens(sentence):
tokenizer = RegexpTokenizer(r'\w+')
return len(tokenizer.tokenize(sentence))
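# Note on counter() below: it de-duplicates the rows of a term-count matrix;
# each returned row is a unique bag-of-words vector extended with two extra
# columns holding the row's original index and the number of times that exact
# vector occurred among the input sentences.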
def counter(tensor):
unique = np.zeros((tensor.shape[0], tensor.shape[1] + 2))
print unique.shape
print tensor.shape
# Flag for the number of duplicates.
flag = []
[flag.append(False) for row in range(0, tensor.shape[0])]
#print flag
for i in range(0, tensor.shape[0]):
count = 1
unique[i, :tensor.shape[1]] = tensor.getrow(i).toarray()
unique[i][tensor.shape[1]] = i
for j in range(i + 1, tensor.shape[0]):
if np.array_equal(tensor.getrow(i).toarray(), tensor.getrow(j).toarray()):
count = count + 1
flag[j] = True
unique[i][tensor.shape[1] + 1] = count
for i in range(len(flag) - 1 , - 1, - 1):
if flag[i]:
unique = np.delete(unique, i, 0)
#print unique
print unique.shape
#print flag
return unique
def distance_from_original(initial, simplifications):
vectorizer = CountVectorizer(min_df=1, token_pattern='\w+', lowercase=True)
full = simplifications[:]
#print len(full)
full.insert(0, initial)
#print len(full)
train = vectorizer.fit_transform(full)
distances = np.zeros(train.shape[0] - 1)
for i in range(0, train.shape[0] - 1):
distances[i] = distance(train.getrow(0).toarray(), train.getrow(i + 1).toarray())
print('Length of distances in function: %d' % (len(distances)))
return distances
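# Note on cluster() below: it groups the worker-provided simplifications by
# running k-means on their bag-of-words vectors (roughly one cluster per three
# unique sentences) and returns the sentences that fall in the dominant
# cluster, where cluster weight accounts for how often identical sentences
# were submitted.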
def cluster(sentences):
vectorizer = CountVectorizer(min_df=1, token_pattern='\w+', lowercase=True)
train = vectorizer.fit_transform(sentences)
num_samples, num_features = train.shape
num_clusters = 4
uniques = counter(train)
if math.ceil(uniques.shape[0] / 3.0) < 2:
num_clusters = uniques.shape[0]
else:
num_clusters = int(math.ceil (uniques.shape[0] / 3.0))
km = KMeans(n_clusters=num_clusters, init='random', n_init=1, verbose=1)
km.fit(uniques[:, :uniques.shape[1] - 2])
total = np.zeros(km.labels_.shape[0])
for i in range(0, km.labels_.shape[0]):
for j in range(0, km.labels_.shape[0]):
if km.labels_[j] == km.labels_[i]:
total[i] = total[i] + uniques[i][uniques.shape[1] - 1]
indeces = []
for i in range(0, km.labels_.shape[0]):
if km.labels_[np.argmax(total)] == km.labels_[i]:
indeces.append(int(uniques[i][uniques.shape[1] - 2]))
#print indeces
return [sentences[j] for j in indeces]
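# Note on get_annotations() below: it extracts the entity mentions shown to the
# crowd workers, i.e. every substring wrapped in the
# '<b><font color="purple">...</font></b>' markup, stripping that markup from
# its working copy of the sentence as it goes.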
def get_annotations(sentence):
annotations = []
print sentence
while sentence.find('<b><font color="purple">') > -1:
start_flag = sentence.find('<b><font color="purple">') + len('<b><font color="purple">')
end_flag = sentence.find('</font></b>')
annotations.append(sentence[start_flag:end_flag])
sentence = sentence[:start_flag - len('<b><font color="purple">')] + sentence[start_flag:end_flag] + sentence[end_flag + len('</font></b>'):]
#print sentence
return annotations
csv = pd.read_csv(csv_dir, header=0, usecols = ['result1', 'sentence'], skip_blank_lines=False)
dictionary = load_cache()
if len(dictionary) == 0:
raw_input('Cache file: ' + cache_dir + "dataset-WikiAstronauts.p was not found. You should try to execute Dataset-WikiAstronauts.py first. Press Enter to kill this process...")
sys.exit()
else:
raw_input('Cache file: ' + cache_dir + 'dataset-WikiAstronauts.p has been loaded successfully. Press Enter to continue...')
"""
for i in range(0, len(csv.sentence)):
csv.sentence[i] = get_original(csv.sentence[i])
print csv
"""
flag = 0
sentences = {}
for i in range(0, len(csv.sentence)):
if csv.sentence[i] not in sentences:
sentences[csv.sentence[i]] = [csv.result1[i]]
else:
sentences[csv.sentence[i]].append(csv.result1[i])
# Get results according to the baseline approach.
baseline_result = {}
for original in sentences:
baseline_result[original] = baseline(sentences[original])
# Get results according to the k-means clustering approach.
result = {}
for original in sentences:
result[original] = cluster(sentences[original])
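# For every original sentence, 'ratios' stores, per clustered simplification,
# the fraction of highlighted entity mentions that survive verbatim in the
# simplification; the matching below looks for whole-word occurrences at the
# start, end or middle of the lower-cased, comma-stripped text.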
ratios = {}
for original in sentences:
num_highlighted = []
annotations = get_annotations(original)
for i in range(0 , len(result[original])):
tmp = result[original][i].lower().replace(',', '')
num_highlighted.append(0)
#print annotations
for j in range(0, len(annotations)):
position = tmp.find(annotations[j].lower())
if position > -1:
if position == 0 and tmp.find(annotations[j].lower() + ' ') > -1:
start_flag = tmp.find(annotations[j].lower() + ' ')
tmp = tmp[:start_flag] + tmp[start_flag + len(annotations[j]) + 1:]
num_highlighted[i] = num_highlighted[i] + 1
elif (position == len(tmp) - len(annotations[j]) or position == len(tmp) - len(annotations[j]) - 1) and tmp.find(' ' + annotations[j].lower()) > -1:
start_flag = tmp.find(' ' + annotations[j].lower())
tmp = tmp[:start_flag] + tmp[start_flag + len(annotations[j]) + 1:]
num_highlighted[i] = num_highlighted[i] + 1
elif tmp.find(' ' + annotations[j].lower() + ' ') > -1:
start_flag = tmp.find(' ' + annotations[j].lower() + ' ')
tmp = tmp[:start_flag] + tmp[start_flag + len(annotations[j]) + 1:]
num_highlighted[i] = num_highlighted[i] + 1
num_highlighted[i] = num_highlighted[i] / float(len(annotations))
ratios[original] = num_highlighted
integrity = {}
distances = {}
shortening_extent = {}
for original in sentences:
distances[original] = distance_from_original(get_original(original), result[original])
shortening_extent[original] = shortening(result[original])
num_additional = []
initial = get_original(original).lower().replace(',', '').replace('.', '').split()
for i in range(0 , len(result[original])):
        tmpInitial = initial[:]  # copy, so removals below do not mutate 'initial' across iterations
tmp = result[original][i].lower().replace(',', '').replace('.', '').split()
num_additional.append(0)
for j in range(0, len(tmp)):
if tmp[j] not in tmpInitial:
num_additional[i] = num_additional[i] + 1
else:
tmpInitial.remove(tmp[j])
num_additional[i] = 1.0 - (num_additional[i] / float(len(tmp)))
#print('Length of distances: %d' % (len(distances[original])))
#print('Length of shortening: %d' % (len(shortening_extent[original])))
#print('Length of num_additional: %d' % (len(num_additional)))
#print('Length of shortening: %d' % (len(shortening_extent[original])))
#print('Length of result[original]: %d' % (len(result[original])))
integrity[original] = num_additional
#print('Length of integrity: %d' % (len(integrity[original])))
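# Final ranking score per simplification: the Gaussian length weight from
# shortening() multiplied by exp(ratio * integrity * distance), where 'ratio'
# rewards preserving the highlighted entities, 'integrity' penalises words that
# are absent from the original sentence, and 'distance' is the normalised
# bag-of-words distance from the original, so genuine rewording is preferred
# over a near-verbatim copy.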
score = {}
for original in sentences:
score[original] = []
for j in range(0, len(result[original])):
score[original].append(shortening_extent[original][j] * (math.exp(ratios[original][j] * integrity[original][j] * distances[original][j])))
unclustered_ratios = {}
for original in sentences:
num_highlighted = []
annotations = get_annotations(original)
for i in range(0 , len(sentences[original])):
tmp = sentences[original][i].lower().replace(',', '')
num_highlighted.append(0)
print annotations
for j in range(0, len(annotations)):
position = tmp.find(annotations[j].lower())
if position > -1:
if position == 0 and tmp.find(annotations[j].lower() + ' ') > -1:
start_flag = tmp.find(annotations[j].lower() + ' ')
tmp = tmp[:start_flag] + tmp[start_flag + len(annotations[j]) + 1:]
num_highlighted[i] = num_highlighted[i] + 1
elif (position == len(tmp) - len(annotations[j]) or position == len(tmp) - len(annotations[j]) - 1) and tmp.find(' ' + annotations[j].lower()) > -1:
start_flag = tmp.find(' ' + annotations[j].lower())
tmp = tmp[:start_flag] + tmp[start_flag + len(annotations[j]) + 1:]
num_highlighted[i] = num_highlighted[i] + 1
elif tmp.find(' ' + annotations[j].lower() + ' ') > -1:
start_flag = tmp.find(' ' + annotations[j].lower() + ' ')
tmp = tmp[:start_flag] + tmp[start_flag + len(annotations[j]) + 1:]
num_highlighted[i] = num_highlighted[i] + 1
num_highlighted[i] = num_highlighted[i] / float(len(annotations))
unclustered_ratios[original] = num_highlighted
unclustered_integrity = {}
unclustered_distances = {}
unclustered_shortening_extent = {}
for original in sentences:
unclustered_distances[original] = distance_from_original(get_original(original), sentences[original])
unclustered_shortening_extent[original] = shortening(sentences[original])
num_additional = []
initial = get_original(original).lower().replace(',', '').replace('.', '').split()
for i in range(0 , len(sentences[original])):
        tmpInitial = initial[:]  # copy, so removals below do not mutate 'initial' across iterations
tmp = sentences[original][i].lower().replace(',', '').replace('.', '').split()
num_additional.append(0)
for j in range(0, len(tmp)):
if tmp[j] not in tmpInitial:
num_additional[i] = num_additional[i] + 1
else:
tmpInitial.remove(tmp[j])
num_additional[i] = 1.0 - (num_additional[i] / float(len(tmp)))
print('Length of distances: %d' % (len(unclustered_distances[original])))
print('Length of num_additional: %d' % (len(num_additional)))
print('Length of shortening: %d' % (len(unclustered_shortening_extent[original])))
print('Length of result[original]: %d' % (len(sentences[original])))
unclustered_integrity[original] = num_additional
print('Length of integrity: %d' % (len(unclustered_integrity[original])))
unclustered_score = {}
clustered_distances = {}
unclustered_conformity = {}
for original in sentences:
clustered_distances[original] = shortest_length(result[original])
unclustered_conformity[original] = []
unclustered_score[original] = []
for j in range(0, len(sentences[original])):
unclustered_conformity[original].append(math.exp(unclustered_ratios[original][j] * unclustered_integrity[original][j]))
unclustered_score[original].append(unclustered_shortening_extent[original][j] * (math.exp(unclustered_ratios[original][j] * unclustered_integrity[original][j] * unclustered_distances[original][j])))
output={'Annotated Sentence': [], 'Simplification': []}
for original in sentences:
output['Annotated Sentence'].append(original)
output['Simplification'].append(unicode(result[original][score[original].index(max(score[original]))], 'ascii', errors='ignore'))
num_tokens.append(count_tokens(result[original][score[original].index(max(score[original]))]))
print('Total number of tokens of the simplifications: %d' % (sum(num_tokens)))
output_df = pd.DataFrame(output, index=[i for i in range(0, len(output['Annotated Sentence']))])
output_df['Annotated Sentence'] = output_df['Annotated Sentence'].replace(to_replace='\t', value='', regex=True)
output_df.to_html(output_csv_dir, index=False, escape=False)
if args.evaluation:
# Randomly select sentences for the evaluation tables.
evaluation={'Annotated Sentence': [], '#1 Simplification': [], '#1 Rating': [], '#2 Simplification': [], '#2 Rating': [], '#3 Simplification': [], '#3 Rating': [], '#4 Simplification': [], '#4 Rating': []}
indeces_eval_sentences = []
num_eval_sentences = 0
while num_eval_sentences < 30:
random = np.random.randint(0, len(output['Annotated Sentence']))
if random not in indeces_eval_sentences:
key = output['Annotated Sentence'][random]
evaluation['Annotated Sentence'].append(key.replace('\t', ''))
evaluation['#1 Simplification'].append(unicode(result[key][score[key].index(max(score[key]))], 'ascii', errors='ignore'))
evaluation['#1 Rating'].append(0)
evaluation['#2 Simplification'].append(unicode(sentences[key][unclustered_score[key].index(max(unclustered_score[key]))], 'ascii', errors='ignore'))
evaluation['#2 Rating'].append(0)
evaluation['#3 Simplification'].append(unicode(baseline_result[key], 'ascii', errors='ignore'))
evaluation['#3 Rating'].append(0)
evaluation['#4 Simplification'].append(unicode(clustered_distances[key], 'ascii', errors='ignore'))
evaluation['#4 Rating'].append(0)
num_eval_sentences = num_eval_sentences + 1
indeces_eval_sentences.append(random)
eval_df = pd.DataFrame(evaluation, index=[i for i in range(0, len(evaluation['Annotated Sentence']))], columns=['Annotated Sentence', '#1 Simplification', '#1 Rating', '#2 Simplification', '#2 Rating', '#3 Simplification', '#3 Rating', '#4 Simplification', '#4 Rating'])
eval_df.to_html(output_eval_dir, index=False, escape=False)
tokenizer = RegexpTokenizer(r'\w+')
for i in range(0, len(dictionary)):
flag = False
for original in output['Annotated Sentence']:
if tokenizer.tokenize(dictionary[i]['value']) == tokenizer.tokenize(get_original(original)):
dictionary[i]['simplification'] = result[original][score[original].index(max(score[original]))]
flag = True
break
if flag is False:
print dictionary[i]['annotated_sentence']
xml_dict = []
for i in range(0 , len(dictionary)):
xml_dict.append({'annotated_sentence': dictionary[i]['annotated_sentence'], \
'value': dictionary[i]['value'], \
'triples': dictionary[i]['triples'], \
'simplification': dictionary[i]['simplification']})
xml = dicttoxml.dicttoxml(xml_dict, attr_type=False, custom_root='WikiAstronauts')
root = ET.fromstring(xml)
for i in range(0, len(root)):
root[i][2], root[i][3] = root[i][3], root[i][2]
root[i][2], root[i][1] = root[i][1], root[i][2]
root[i][0], root[i][1] = root[i][1], root[i][0]
for child in root:
child.tag = 'sentence'
for grandchild in child:
if grandchild.tag == 'triples':
for item in grandchild:
item.tag = 'triple'
tree = ET.ElementTree(root)
tree.write(output_xml_dir, encoding='utf-8', xml_declaration=True)
xml = minidom.parse(output_xml_dir)
dom = xml.toprettyxml(encoding='utf-8')
with open(output_xml_dir, "w") as xml_file:
xml_file.write(dom)
xml_file.close()
|
|
##
# \namespace cross3d.studiomax.studiomaxsceneobject
#
# \remarks The StudiomaxSceneObject class provides the implementation of the AbstractSceneObject class as it applies
# to 3d Studio Max scenes
#
# \author eric
# \author Blur Studio
# \date 03/15/10
#
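# \example	(a hedged usage sketch, not part of the original header) instances
# 			of this class are normally obtained through the cross3d Scene
# 			wrapper rather than constructed directly; assuming the Scene
# 			wrapper exposes an objects() iterator, typical use looks like:
#
# 			from cross3d import Scene
# 			scene = Scene()
# 			for obj in scene.objects():
# 				print obj.displayName(), obj.isHidden()
#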
from Py3dsMax import mxs
from cross3d import UserProps
from cross3d.constants import ObjectType
from cross3d.abstract.abstractsceneobject import AbstractSceneObject
class StudiomaxSceneObject( AbstractSceneObject ):
_nativeToAbstractObjectType = { 'light' : ObjectType.Light,
'camera' : ObjectType.Camera,
'Thinking' : ObjectType.Particle | ObjectType.Thinking,
'PF_Source' : ObjectType.Particle,
'FumeFX' : ObjectType.FumeFX,
'GeometryClass' : ObjectType.Geometry,
'Targetobject' : ObjectType.CameraInterest }
_abstractToNativeObjectType = dict((v,k) for k, v in _nativeToAbstractObjectType.iteritems())
#------------------------------------------------------------------------------------------------------------------------
# protected methods
#------------------------------------------------------------------------------------------------------------------------
def _nativeType(self):
"""
			\remarks implements the AbstractSceneObject._nativeType method as a convenience function to return the
						native class information as a string
			\return <str> native class name
"""
classof = mxs.classof
classN = str(classof (self._nativePointer))
return classN
def _findNativeChild( self, name, recursive = False, parent = None ):
"""
\remarks implements the AbstractSceneObject._findNativeChild method to find
						the child by name and return it
\sa findChild
\param name <str>
\param recursive <bool>
\param parent <Py3dsMax.mxs.Object> nativeObject (used for recursive searches when necessary)
\return <Py3dsMax.mxs.Object> nativeObject || None
"""
if ( not parent ):
parent = self._nativePointer
# loop through all the objects
for child in parent.children:
if ( child.name == name ):
return child
# if recursive, lookup child nodes
if ( recursive ):
found = self._findNativeChild( name, recursive = True, parent = child )
if ( found ):
return found
return None
def _nativeCaches( self, cacheType = 0 ):
"""
\remarks implements the AbstractSceneObject._nativeCaches method to return a list of the native caches that are applied to this object
			\param cacheType <cross3d.constants.CacheType> filter by the inputed cache type
\return <list> [ <variant> nativeCache, .. ]
"""
output = []
from cross3d.constants import CacheType
# store maxscript methods used
classof = mxs.classof
# collect point cache modifiers
if ( not cacheType or cacheType & CacheType.Point_Cache ):
cls = mxs.Point_Cache
for modifier in self._nativePointer.modifiers:
if ( classof(modifier) == cls ):
output.append(modifier)
# collect transform cache controllers
if ( not cacheType or cacheType & CacheType.Transform_Cache ):
cls = mxs.Transform_Cache
controller = self._nativePointer.controller
while ( classof( controller ) == cls ):
output.append( controller )
controller = controller.basecontroller
return output
	def _nativeChildren( self, recursive = False, parent = None, childrenCollector = None ):
"""
\remarks implements the AbstractSceneObject._nativeChildren method to look up the native children for this object
\sa children
\return <list> [ <Py3dsMax.mxs.Object> nativeObject, .. ]
"""
		if childrenCollector is None:
			childrenCollector = []
		if recursive:
if parent:
children = parent.children
else:
children = self._nativePointer.children
for child in children:
childrenCollector.append( child )
self._nativeChildren( True, child, childrenCollector )
return childrenCollector
else:
return self._nativePointer.children
def _nativeLayer( self ):
"""
\remarks implements the AbstractSceneObject._nativeLayer method to return the native application's layer that the object is on
\sa layer, setLayer, _setNativeLayer
\return <Py3dsMax.mxs.Layer> nativeLayer || None
"""
return self._nativePointer.layer
def _nativeMaterial( self ):
"""
\remarks implements the AbstractSceneObject._nativeMaterial method to return the native material for this object
\sa material, setMaterial, _setNativeMaterial
\return <Py3dsMax.mxs.Material> nativeMaterial || None
"""
return self._nativePointer.material
def _nativeModel( self ):
"""
\remarks implements the AbstractSceneObject._nativeModel method to look up the native model for this object
\sa model, setModel, _setNativeModel
\return <Py3dsMax.mxs.Object> nativeObject || None
"""
fullname = self._nativePointer.name
if ( '.' in fullname ):
return mxs.getNodeByName( fullname.split('.')[0] )
return None
def _nativeParent( self ):
"""
\remarks implements the AbstractSceneObject._nativeParent method to look up the native parent for this object
\sa parent, setParent, _setNativeParent
\return <Py3dsMax.mxs.Object> nativeObject || None
"""
return self._nativePointer.parent
def _nativeWireColor( self ):
"""
\remarks implements the AbstractSceneObject._nativeWireColor to return the color for the wireframe of this object in the scene
\sa setWireColor
\return <QColor>
"""
return self._nativePointer.wireColor
def _setNativeController( self, name, nativeController ):
"""
\remarks implements the AbstractSceneObject._setNativeController method to set the controller type at the inputed name to the given controller
\param name <str>
\param nativeController <Py3dMax.mxs.Controller> || None
\return <bool> success
"""
# set a point cache controller
if ( name.startswith( 'modifiers[#Point_Cache]' ) ):
from cross3d.constants import CacheType
success = False
# set controllers within the cache system
for cache in self.caches( CacheType.Point_Cache ):
if ( cache._setNativeController( name.replace( 'modifiers[#Point_Cache].', '' ), nativeController ) ):
success = True
return success
return AbstractSceneObject._setNativeController( self, name, nativeController )
def _setNativeLayer( self, nativeLayer ):
"""
\remarks implements the AbstractSceneObject._setNativeLayer method to set the native layer for this object
\sa layer, setLayer, _nativeLayer
\param <Py3dsMax.mxs.Layer> nativeLayer || None
\return <bool> success
"""
if ( not nativeLayer ):
worldlayer = mxs.layerManager.getLayer(0)
worldlayer.addNodes( [ self._nativePointer ] )
else:
nativeLayer.addNodes( [ self._nativePointer ] )
return True
def _setNativeMaterial( self, nativeMaterial ):
"""
\remarks implements the AbstractSceneObject._setNativeMaterial method to set the native material for this object
\sa material, setMaterial, _nativeMaterial
\param <Py3dsMax.mxs.Material> nativeMaterial || None
\return <bool> success
"""
self._nativePointer.material = nativeMaterial
return True
def _setNativeModel( self, nativeModel ):
"""
\remarks implements the AbstractSceneObject._setNativeModel method to set the native model for this object
\sa model, setModel, _nativeModel
\param <Py3dsMax.mxs.Object> nativeObject || None
\return <bool> success
"""
model = self._nativeModel()
# don't process when we need to reset this model
if ( nativeModel == model ):
return True
# otherwise, we need to reparent this node and reset its name
obj = self._nativePointer
obj.parent = nativeModel
splt = obj.name.split( '.' )
if ( nativeModel and len( splt ) == 2 ):
obj.name = nativeModel.name + '.' + splt[1]
elif ( nativeModel ):
obj.name = nativeModel.name + '.' + obj.name
		elif ( len( splt ) == 2 ):
obj.name = splt[1]
return True
def _setNativeParent( self, nativeParent ):
"""
\remarks implements the AbstractSceneObject._setNativeParent method to set the native parent for this object
\sa parent, setParent, _nativeParent
\param <Py3dsMax.mxs.Object> nativeObject || None
\return <bool> success
"""
self._nativePointer.parent = nativeParent
return True
def _setNativeWireColor( self, color ):
"""
			\remarks implements the AbstractSceneObject._setNativeWireColor to set the wirecolor for the object to the inputed QColor
\sa wireColor
\param color <QColor>
\return <bool> success
"""
self._nativePointer.wireColor = color
return True
def _nativeModel( self ):
"""
\remarks implements the AbstractSceneObject._nativeModel method to look up the native model for this object
			\sa model, setModel, _setNativeModel
			\return <Py3dsMax.mxs.Object> nativeObject || None
"""
name = self.name()
split = name.split( '.' )
if len( split ) > 1:
modelName = split[0]
from cross3d import Scene
scene = Scene()
return scene._findNativeObject( modelName )
return None
#------------------------------------------------------------------------------------------------------------------------
# public methods
#------------------------------------------------------------------------------------------------------------------------
def isDeleted(self):
return (str(self._nativePointer) == '<Deleted scene node>')
def boundingBox(self):
""" Returns a blur3d.lib.cartesian.BoundingBox object representing the bounding box of the SceneObject.
"""
from blur3d.lib.cartesian import BoundingBox, Point
p1, p2 = mxs.nodeGetBoundingBox(self.nativePointer(), mxs.matrix3(1))
return BoundingBox(Point.newFromMaxPoint(p1), Point.newFromMaxPoint(p2))
def clone( self ):
"""
			\remarks implements the AbstractSceneObject.clone to make a clone of this object in the scene
\sa N/A
\return <Py3dsMax.mxs.Object>
"""
cloneObject = mxs.cross3dhelper.cloneObjects([self._nativePointer], expandHierarchy=True)
return self.__class__(self.scene(), cloneObject[0])
def keyframeTimeControllers(self, alembic=True):
""" Takes all Alembic, PC and TMC time controllers and keyframe their original time controllers.
This is used as a base setup for further time alterations.
Returns:
SceneAnimationController|boolean: The bezier float keyframed controller used to control time.
"""
np = self._nativePointer
timeController = None
frameRate = self._scene.animationFPS()
# Processing Alembic controllers.
alembicControllers = mxs.getClassInstances(mxs.Alembic_Float_Controller, target=np)
alembicControllers += mxs.getClassInstances(mxs.Alembic_Xform, target=np)
alembicControllers += mxs.getClassInstances(mxs.Alembic_Mesh_Geometry, target=np)
alembicControllers += mxs.getClassInstances(mxs.Alembic_Mesh_Normals, target=np)
for alembicController in alembicControllers:
# Instantiating if we already computed the time controller.
if not timeController:
# Unfortunately the start and end frame of the cache data is not stored on the controller so we have to parse the file.
import cask
archive = cask.Archive(str(alembicController.path))
item = archive.top.children[str(alembicController.identifier)]
# Sometimes the identifier will point to a Xform object.
# Unfortunately I did not find a way to access the sample count from there.
# So instead I am digging through the hierarchy.
while item.children:
item = item.children[item.children.keys()[0]]
properties = item.iobject.getProperties()
geometry = properties.getProperty(0)
core = geometry.getProperty(0)
sampleCount = core.getNumSamples()
startTime = core.getTimeSampling().getSampleTime(0)
endTime = core.getTimeSampling().getSampleTime((sampleCount - 1))
# Creating the controller.
timeController = mxs.bezier_float()
frames = [(round(startTime * frameRate), startTime), (round(endTime * frameRate), endTime)]
for frame, value in frames:
k = mxs.addNewKey(timeController, frame)
k.value = value
k.inTangentType = mxs.pyhelper.namify('linear')
k.outTangentType = mxs.pyhelper.namify('linear')
# Assigning the controller.
mxs.setPropertyController(alembicController, 'time', timeController)
# Processing TMCs and PCs.
nativeCaches = mxs.getClassInstances(mxs.Transform_Cache, target=np) + mxs.getClassInstances(mxs.Point_Cache, target=np)
for nativeCache in nativeCaches:
# Unfortunately the start and end frame of the cache data is not stored on the controller so we have to parse the file.
if mxs.classof(nativeCache) == mxs.Point_Cache:
from blur3d.lib.pclib import PointCacheInfo
cacheInfo = PointCacheInfo.read(nativeCache.filename, header_only=True)
elif mxs.classof(nativeCache) == mxs.Transform_Cache:
# Ensure file exists
try:
from blur3d.lib.tmclib import TMCInfo
cacheInfo = TMCInfo.read(nativeCache.CacheFile, header_only=True)
except IOError as e:
print "Cache file does not exist: {0}".format(nativeCache.CacheFile)
continue
# Playback type 3 is "Playback Graph".
nativeCache.playbackType = 3
# Set the playback frame to a float controller with start and end values pulled from the cache.
mxs.setPropertyController(nativeCache, 'playbackFrame', mxs.bezier_float())
timeController = mxs.getPropertyController(nativeCache, 'playbackFrame')
# Set keys on the playback frame cache that matches the current frame rate.
duration = cacheInfo.end_frame - cacheInfo.start_frame + 1
frames = [(cacheInfo.start_frame, 0), (cacheInfo.end_frame, duration)]
for frame, value in frames:
key = mxs.addNewKey(timeController, frame)
key.value = value
key.inTangentType = mxs.pyhelper.namify('linear')
key.outTangentType = mxs.pyhelper.namify('linear')
# Processing XMeshes.
xMeshes = mxs.getClassInstances(mxs.XMeshLoader, target=np)
for xMesh in xMeshes:
# Enable curve playback.
xMesh.enablePlaybackGraph = True
# Create a new bezier float controller for the time.
mxs.setPropertyController(xMesh, 'playbackGraphTime', mxs.bezier_float())
timeController = mxs.getPropertyController(xMesh, 'playbackGraphTime')
# Set keys on the playback in and out frames.
frames = (xMesh.rangeFirstFrame, xMesh.rangeLastFrame)
for frame in frames:
key = mxs.addNewKey(timeController, frame)
key.value = frame
key.inTangentType = mxs.pyhelper.namify('linear')
key.outTangentType = mxs.pyhelper.namify('linear')
# Processing Ray Fire caches.
rayFireCaches = mxs.getClassInstances(mxs.RF_Cache, target=np)
for rayFireCache in rayFireCaches:
# Enable curve playback.
			rayFireCache.playUseGraph = True
# Create a new bezier float controller for the time.
mxs.setPropertyController(rayFireCache, 'playFrame', mxs.bezier_float())
timeController = mxs.getPropertyController(rayFireCache, 'playFrame')
# Set keys on the playback in and out frames.
frames = (xMesh.rangeFirstFrame, xMesh.rangeLastFrame)
for frame in frames:
key = mxs.addNewKey(timeController, frame)
key.value = frame
key.inTangentType = mxs.pyhelper.namify('linear')
key.outTangentType = mxs.pyhelper.namify('linear')
# Returning the time controller if defined.
if timeController:
# Making the extrapolation linear.
linear = mxs.pyhelper.namify('linear')
mxs.setBeforeORT(timeController, linear)
mxs.setAfterORT(timeController, linear)
from cross3d import SceneAnimationController
return SceneAnimationController(self._scene, timeController)
return None
def isBoxMode( self ):
"""
\remarks implements the AbstractSceneObject.isBoxMode to return whether or not this object is in boxMode
\sa setBoxMode
\return <bool> boxMode
"""
return self._nativePointer.boxmode
def isFrozen( self ):
"""
\remarks implements the AbstractSceneObject.isFrozen to return whether or not this object is frozen(locked)
\sa freeze, setFrozen, unfreeze
\return <bool> frozen
"""
return self._nativePointer.isfrozen
def isHidden( self ):
"""
\remarks implements the AbstractSceneObject.isHidden to return whether or not this object is hidden
\sa hide, setHidden, unhide
\return <bool> hidden
"""
return self._nativePointer.ishiddeninvpt
def isSelected( self ):
"""
\remarks implements the AbstractSceneObject.isSelected to return whether or not this object is selected
\sa deselect, select, setSelected
\return <bool> selected
"""
return self._nativePointer.isselected
def _addNativeController(self, name, group='', tpe=float, default=0.0):
if not group:
group = 'Custom_Attributes'
types = {float: 'float', int:'integer'}
if tpe in [float, int] and isinstance(default, tpe):
maxScript = """fn addAttributeToObject obj = (
attribute = attributes {group} (
parameters main (
{name} type:#{tpe} default:{default}
)
)
CustAttributes.add obj attribute #Unique
return obj.{name}.controller
)"""
mxs.execute(maxScript.format(name=name, group=group, tpe=types[tpe], default=default))
return mxs.addAttributeToObject(self._nativePointer)
else:
			raise Exception('This method only supports int and float attributes')
return None
def key(self, target='keyable'):
"""
Set keys on the object parameters.
"""
mxs.addNewKey(self._nativePointer.controller, mxs.currentTime)
def transformLocks(self, manipulation=True, keyability=False):
""" Returns a dictionary of position, rotation and scale values. This dictionary
can be passed to setTransformsLocks.
:param manipulation: Flags if manipulation will be affected. Defaults to True.
:param keyability: Flags if keyability will be affected. Defaults to False. (Not implemented.)
"""
ret = {'position': 'xyz', 'rotation': 'xyz', 'scale': 'xyz'}
if manipulation:
# in python BitArrays appear to be zero indexed
keys = {0: ('position', 'x'), 1: ('position', 'y'), 2: ('position', 'z'),
3: ('rotation', 'x'), 4: ('rotation', 'y'), 5: ('rotation', 'z'),
6: ('scale', 'x'), 7: ('scale', 'y'), 8: ('scale', 'z'), }
for index, value in enumerate(mxs.getTransformLockFlags(self._nativePointer)):
if value:
name, axis = keys[index]
ret[name] = ret[name].replace(axis, axis.upper())
return ret
return False
def setTransformsLocks(self, position=None, rotation=None, scale=None, manipulation=True, keyability=False):
if manipulation:
position = 'XYZ' if position is True else position
rotation = 'XYZ' if rotation is True else rotation
scale = 'XYZ' if scale is True else scale
position = 'xyz' if position is False else position
rotation = 'xyz' if rotation is False else rotation
scale = 'xyz' if scale is False else scale
position = '' if position is None else position
rotation = '' if rotation is None else rotation
scale = '' if scale is None else scale
flags = []
initialFlags = mxs.getTransformLockFlags(self._nativePointer)
for i, transform in enumerate((position, rotation, scale)):
for j, key in enumerate(('x', 'y', 'z')):
index = j+3*i
if key.upper() in transform:
flags.append(index+1)
elif key not in transform:
if initialFlags[index]:
flags.append(index+1)
return mxs.setTransformLockFlags(self._nativePointer, mxs.pyHelper.tobits(flags))
return False
def setBoxMode( self, state ):
"""
\remarks implements the AbstractSceneObject.setBoxMode to set whether this object is in boxMode or not
\sa isBoxMode
\return <bool> success
"""
self._nativePointer.boxmode = state
return True
def setFrozen( self, state ):
"""
\remarks implements the AbstractSceneObject.setFrozen to freeze (lock) or unfreeze (unlock) this object
\sa freeze, isFrozen, unfreeze
\param state <bool>
\return <bool> success
"""
self._nativePointer.isfrozen = state
return True
def setHidden( self, state ):
"""
\remarks implements the AbstractSceneObject.setHidden to hide or unhide this object
\sa hide, isHidden, unhide
\param state <bool>
\return <bool> success
"""
self._nativePointer.ishidden = state
return True
def setSelected( self, state ):
"""
\remarks implements the AbstractSceneObject.setSelected to select or deselect this object
\sa deselect, isSelected, select
\param state <bool>
\return <bool> success
"""
self._nativePointer.isselected = state
return True
def uniqueId( self ):
"""
\remarks implements the AbstractSceneObject.uniqueId to look up the unique id for this object and return it
\sa displayName, setDisplayName, setName
\return <str> name
"""
return mxs.blurUtil.uniqueId( self._nativePointer )
#------------------------------------------------------------------------------------------------------------------------
# static methods
#------------------------------------------------------------------------------------------------------------------------
@classmethod
def _typeOfNativeObject(cls, nativeObject):
"""
\remarks reimplements the AbstractSceneObject._typeOfNativeObject method to return the ObjectType of the given nativeObject
\param <Py3dsMax.mxs.Object> nativeObject || None
\return <ObjectType> objectType
"""
# Checking for model.
if mxs.classOf(nativeObject) == mxs.Point:
userProps = UserProps(nativeObject)
if 'model' in userProps:
return ObjectType.Model
output = cls._nativeToAbstractObjectType.get(str(mxs.classOf(nativeObject)),
cls._nativeToAbstractObjectType.get(str(mxs.superClassOf(nativeObject)),
AbstractSceneObject._typeOfNativeObject(nativeObject)))
return output
# register the symbol
import cross3d
cross3d.registerSymbol( 'SceneObject', StudiomaxSceneObject )
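# Usage sketch (illustrative only; must run inside 3ds Max, and assumes the
# cross3d Scene selection API used elsewhere in this package):
#
#   from cross3d import Scene
#   scene = Scene()
#   obj = scene.selection()[0]
#   locks = obj.transformLocks()             # e.g. {'position': 'xyZ', 'rotation': 'xyz', 'scale': 'xyz'}
#   obj.setTransformsLocks(position='XYZ')   # lock all position axes
#   obj.setTransformsLocks(**locks)          # restore the previous lock state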
|
|
"""
This file is part of SleekXMPP.
SleekXMPP is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
SleekXMPP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SleekXMPP; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import logging
import sleekxmpp
from optparse import OptionParser
from xml.etree import cElementTree as ET
import os
import time
import sys
import Queue
import thread
class testps(sleekxmpp.ClientXMPP):
def __init__(self, jid, password, ssl=False, plugin_config = {}, plugin_whitelist=[], nodenum=0, pshost=None):
sleekxmpp.ClientXMPP.__init__(self, jid, password, ssl, plugin_config, plugin_whitelist)
self.registerPlugin('xep_0004')
self.registerPlugin('xep_0030')
self.registerPlugin('xep_0060')
self.registerPlugin('xep_0092')
self.add_handler("<message xmlns='jabber:client'><event xmlns='http://jabber.org/protocol/pubsub#event' /></message>", self.pubsubEventHandler, threaded=True)
self.add_event_handler("session_start", self.start, threaded=True)
self.add_handler("<iq type='error' />", self.handleError)
self.events = Queue.Queue()
self.default_config = None
self.ps = self.plugin['xep_0060']
self.node = "pstestnode_%s"
self.pshost = pshost
if pshost is None:
self.pshost = self.server
self.nodenum = int(nodenum)
self.leafnode = self.nodenum + 1
self.collectnode = self.nodenum + 2
self.lasterror = ''
self.sprintchars = 0
self.defaultconfig = None
self.tests = ['test_defaultConfig', 'test_createDefaultNode', 'test_getNodes', 'test_deleteNode', 'test_createWithConfig', 'test_reconfigureNode', 'test_subscribeToNode', 'test_addItem', 'test_updateItem', 'test_deleteItem', 'test_unsubscribeNode', 'test_createCollection', 'test_subscribeCollection', 'test_addNodeCollection', 'test_deleteNodeCollection', 'test_addCollectionNode', 'test_deleteCollectionNode', 'test_unsubscribeNodeCollection', 'test_deleteCollection']
self.passed = 0
self.width = 120
def start(self, event):
#TODO: make this configurable
self.getRoster()
self.sendPresence(ppriority=20)
thread.start_new(self.test_all, tuple())
def sprint(self, msg, end=False, color=False):
length = len(msg)
if color:
if color == "red":
color = "1;31"
elif color == "green":
color = "0;32"
msg = "%s%s%s" % ("\033[%sm" % color, msg, "\033[0m")
if not end:
sys.stdout.write(msg)
self.sprintchars += length
else:
self.sprint("%s%s" % ("." * (self.width - self.sprintchars - length), msg))
print('')
self.sprintchars = 0
sys.stdout.flush()
def pubsubEventHandler(self, xml):
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}items/{http://jabber.org/protocol/pubsub#event}item'):
self.events.put(item.get('id', '__unknown__'))
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}items/{http://jabber.org/protocol/pubsub#event}retract'):
self.events.put(item.get('id', '__unknown__'))
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}collection/{http://jabber.org/protocol/pubsub#event}disassociate'):
self.events.put(item.get('node', '__unknown__'))
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}collection/{http://jabber.org/protocol/pubsub#event}associate'):
self.events.put(item.get('node', '__unknown__'))
def handleError(self, xml):
error = xml.find('{jabber:client}error')
self.lasterror = error.getchildren()[0].tag.split('}')[-1]
def test_all(self):
print("Running Publish-Subscribe Tests")
version = self.plugin['xep_0092'].getVersion(self.pshost)
if version:
print("%s %s on %s" % (version.get('name', 'Unknown Server'), version.get('version', 'v?'), version.get('os', 'Unknown OS')))
print("=" * self.width)
for test in self.tests:
testfunc = getattr(self, test)
self.sprint("%s" % testfunc.__doc__)
if testfunc():
self.sprint("Passed", True, "green")
self.passed += 1
else:
if not self.lasterror:
self.lasterror = 'No response'
self.sprint("Failed (%s)" % self.lasterror, True, "red")
self.lasterror = ''
print("=" * self.width)
self.sprint("Cleaning up...")
#self.ps.deleteNode(self.pshost, self.node % self.nodenum)
self.ps.deleteNode(self.pshost, self.node % self.leafnode)
#self.ps.deleteNode(self.pshost, self.node % self.collectnode)
self.sprint("Done", True, "green")
self.disconnect()
self.sprint("%s" % self.passed, False, "green")
self.sprint("/%s Passed -- " % len(self.tests))
if len(self.tests) - self.passed:
self.sprint("%s" % (len(self.tests) - self.passed), False, "red")
else:
self.sprint("%s" % (len(self.tests) - self.passed), False, "green")
self.sprint(" Failed Tests")
print
#print "%s/%s Passed -- %s Failed Tests" % (self.passed, len(self.tests), len(self.tests) - self.passed)
def test_defaultConfig(self):
"Retreiving default configuration"
result = self.ps.getNodeConfig(self.pshost)
if result is False or result is None:
return False
else:
self.defaultconfig = result
try:
self.defaultconfig.field['pubsub#access_model'].setValue('open')
except KeyError:
pass
try:
self.defaultconfig.field['pubsub#notify_retract'].setValue(True)
except KeyError:
pass
return True
def test_createDefaultNode(self):
"Creating default node"
return self.ps.create_node(self.pshost, self.node % self.nodenum)
def test_getNodes(self):
"Getting list of nodes"
self.ps.getNodes(self.pshost)
self.ps.getItems(self.pshost, 'blog')
return True
def test_deleteNode(self):
"Deleting node"
return self.ps.deleteNode(self.pshost, self.node % self.nodenum)
def test_createWithConfig(self):
"Creating node with config"
if self.defaultconfig is None:
self.lasterror = "No Avail Config"
return False
return self.ps.create_node(self.pshost, self.node % self.leafnode, self.defaultconfig)
def test_reconfigureNode(self):
"Retrieving node config and reconfiguring"
nconfig = self.ps.getNodeConfig(self.pshost, self.node % self.leafnode)
if nconfig == False:
return False
return self.ps.setNodeConfig(self.pshost, self.node % self.leafnode, nconfig)
def test_subscribeToNode(self):
"Subscribing to node"
return self.ps.subscribe(self.pshost, self.node % self.leafnode)
def test_addItem(self):
"Adding item, waiting for notification"
item = ET.Element('test')
result = self.ps.setItem(self.pshost, self.node % self.leafnode, (('test_node1', item),))
if result == False:
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
return False
if event == 'test_node1':
return True
return False
def test_updateItem(self):
"Updating item, waiting for notification"
item = ET.Element('test')
item.attrib['crap'] = 'yup, right here'
result = self.ps.setItem(self.pshost, self.node % self.leafnode, (('test_node1', item),))
if result == False:
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
return False
if event == 'test_node1':
return True
return False
def test_deleteItem(self):
"Deleting item, waiting for notification"
result = self.ps.deleteItem(self.pshost, self.node % self.leafnode, 'test_node1')
if result == False:
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == 'test_node1':
return True
return False
def test_unsubscribeNode(self):
"Unsubscribing from node"
return self.ps.unsubscribe(self.pshost, self.node % self.leafnode)
def test_createCollection(self):
"Creating collection node"
return self.ps.create_node(self.pshost, self.node % self.collectnode, self.defaultconfig, True)
def test_subscribeCollection(self):
"Subscribing to collection node"
return self.ps.subscribe(self.pshost, self.node % self.collectnode)
def test_addNodeCollection(self):
"Assigning node to collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.leafnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#collection'].setValue(self.node % self.collectnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#collection', value=self.node % self.collectnode)
if not self.ps.setNodeConfig(self.pshost, self.node % self.leafnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_deleteNodeCollection(self):
"Removing node assignment to collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.leafnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#collection'].delValue(self.node % self.collectnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#collection', value='')
if not self.ps.setNodeConfig(self.pshost, self.node % self.leafnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_addCollectionNode(self):
"Assigning node from collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.collectnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#children'].setValue(self.node % self.leafnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#children', value=self.node % self.leafnode)
if not self.ps.setNodeConfig(self.pshost, self.node % self.collectnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_deleteCollectionNode(self):
"Removing node from collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.collectnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#children'].delValue(self.node % self.leafnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#children', value='')
if not self.ps.setNodeConfig(self.pshost, self.node % self.collectnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_unsubscribeNodeCollection(self):
"Unsubscribing from collection"
return self.ps.unsubscribe(self.pshost, self.node % self.collectnode)
def test_deleteCollection(self):
"Deleting collection"
return self.ps.deleteNode(self.pshost, self.node % self.collectnode)
if __name__ == '__main__':
#parse command line arguments
optp = OptionParser()
optp.add_option('-q','--quiet', help='set logging to ERROR', action='store_const', dest='loglevel', const=logging.ERROR, default=logging.INFO)
optp.add_option('-d','--debug', help='set logging to DEBUG', action='store_const', dest='loglevel', const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v','--verbose', help='set logging to COMM', action='store_const', dest='loglevel', const=5, default=logging.INFO)
optp.add_option("-c","--config", dest="configfile", default="config.xml", help="set config file to use")
optp.add_option("-n","--nodenum", dest="nodenum", default="1", help="set node number to use")
optp.add_option("-p","--pubsub", dest="pubsub", default="1", help="set pubsub host to use")
opts,args = optp.parse_args()
logging.basicConfig(level=opts.loglevel, format='%(levelname)-8s %(message)s')
#load xml config
logging.info("Loading config file: %s" % opts.configfile)
config = ET.parse(os.path.expanduser(opts.configfile)).find('auth')
#init
logging.info("Logging in as %s" % config.attrib['jid'])
plugin_config = {}
plugin_config['xep_0092'] = {'name': 'SleekXMPP Example', 'version': '0.1-dev'}
plugin_config['xep_0199'] = {'keepalive': True, 'timeout': 30, 'frequency': 300}
con = testps(config.attrib['jid'], config.attrib['pass'], plugin_config=plugin_config, plugin_whitelist=[], nodenum=opts.nodenum, pshost=opts.pubsub)
if not config.get('server', None):
# we don't know the server, but the lib can probably figure it out
con.connect()
else:
con.connect((config.attrib['server'], 5222))
con.process(threaded=False)
print("")
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations.
Example box operations that are supported:
* areas: compute bounding box areas
* iou: pairwise intersection-over-union scores
* sq_dist: pairwise distances between bounding boxes
Whenever box_list_ops functions output a BoxList, the fields of the incoming
BoxList are retained unless documented otherwise.
"""
import tensorflow as tf
from object_detection.core import box_list
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ascend = 1
descend = 2
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def height_width(boxlist, scope=None):
"""Computes height and width of boxes in boxlist.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
Height: A tensor with shape [N] representing box heights.
Width: A tensor with shape [N] representing box widths.
"""
with tf.name_scope(scope, 'HeightWidth'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])
def scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
"""Clip bounding boxes to a window.
This op clips any input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
scope: name scope.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
with tf.name_scope(scope, 'ClipToWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
clipped = box_list.BoxList(
tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
1))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = tf.cast(
tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
def prune_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
window. See also clip_to_window which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def prune_completely_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall completely outside of the given window.
The function clip_to_window prunes bounding boxes that fall
completely outside the window, but also clips any bounding boxes that
partially overflow. This function does not clip partially overflowing boxes.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
the window.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneCompletelyOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
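# A minimal, self-contained sketch contrasting clip_to_window,
# prune_outside_window and prune_completely_outside_window on a toy BoxList.
# `_example_window_ops` is a hypothetical helper, not part of this module's API;
# the values in the comments assume session/eager evaluation.
def _example_window_ops():
  window = tf.constant([0.0, 0.0, 1.0, 1.0])
  boxes = box_list.BoxList(tf.constant([
      [0.1, 0.1, 0.4, 0.4],    # fully inside the window
      [0.5, 0.5, 1.5, 1.5],    # partially outside
      [2.0, 2.0, 3.0, 3.0]]))  # completely outside
  clipped = clip_to_window(boxes, window)             # 2 boxes; the second is clipped
  pruned, _ = prune_outside_window(boxes, window)     # 1 box; only the fully-inside one
  kept, _ = prune_completely_outside_window(boxes, window)  # 2 boxes, left unclipped
  return clipped, pruned, kept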
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def matched_intersection(boxlist1, boxlist2, scope=None):
"""Compute intersection areas between corresponding boxes in two boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise intersections
"""
with tf.name_scope(scope, 'MatchedIntersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
min_ymax = tf.minimum(y_max1, y_max2)
max_ymin = tf.maximum(y_min1, y_min2)
intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)
min_xmax = tf.minimum(x_max1, x_max2)
max_xmin = tf.maximum(x_min1, x_min2)
intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)
return tf.reshape(intersect_heights * intersect_widths, [-1])
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
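# A minimal, self-contained sketch of pairwise IOU between two small BoxLists.
# `_example_iou` is a hypothetical helper, not part of this module's API; the
# commented values assume session/eager evaluation.
def _example_iou():
  boxlist1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  boxlist2 = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 1.0],
                                           [2.0, 2.0, 3.0, 3.0]]))
  # Shape [1, 2]: 0.5 for the overlapping pair (intersection 0.5 / union 1.0)
  # and 0.0 for the disjoint pair (the tf.where above returns 0 wherever the
  # intersection is 0).
  return iou(boxlist1, boxlist2)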
def matched_iou(boxlist1, boxlist2, scope=None):
"""Compute intersection-over-union between corresponding boxes in boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'MatchedIOU'):
intersections = matched_intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = areas1 + areas2 - intersections
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
def ioa(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-area between box collections.
intersection-over-area (IOA) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, ioa(box1, box2) != ioa(box2, box1).
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise ioa scores.
"""
with tf.name_scope(scope, 'IOA'):
intersections = intersection(boxlist1, boxlist2)
areas = tf.expand_dims(area(boxlist2), 0)
return tf.truediv(intersections, areas)
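# A minimal sketch of the asymmetry noted in the ioa docstring: the result is
# normalized by the areas of the *second* argument's boxes. `_example_ioa` is a
# hypothetical helper, not part of this module's API.
def _example_ioa():
  big = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  small = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5]]))
  forward = ioa(big, small)   # intersection / area(small) = 0.25 / 0.25 = 1.0
  backward = ioa(small, big)  # intersection / area(big)   = 0.25 / 1.0  = 0.25
  return forward, backward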
def prune_non_overlapping_boxes(
boxlist1, boxlist2, min_overlap=0.0, scope=None):
"""Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
For each box in boxlist1, we want its IOA to be more than min_overlap with
at least one of the boxes in boxlist2. If it is not, we remove it.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
min_overlap: Minimum required overlap between boxes, to count them as
overlapping.
scope: name scope.
Returns:
new_boxlist1: A pruned boxlist with size [N', 4].
keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
first input BoxList `boxlist1`.
"""
with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor
ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor
keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
keep_inds = tf.squeeze(tf.where(keep_bool), squeeze_dims=[1])
new_boxlist1 = gather(boxlist1, keep_inds)
return new_boxlist1, keep_inds
def prune_small_boxes(boxlist, min_side, scope=None):
"""Prunes small boxes in the boxlist which have a side smaller than min_side.
Args:
boxlist: BoxList holding N boxes.
min_side: Minimum width AND height of box to survive pruning.
scope: name scope.
Returns:
A pruned boxlist.
"""
with tf.name_scope(scope, 'PruneSmallBoxes'):
height, width = height_width(boxlist)
is_valid = tf.logical_and(tf.greater_equal(width, min_side),
tf.greater_equal(height, min_side))
return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
def change_coordinate_frame(boxlist, window, scope=None):
"""Change coordinate frame of the boxlist to be relative to window's frame.
Given a window of the form [ymin, xmin, ymax, xmax],
changes bounding box coordinates from boxlist to be relative to this window
(e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).
An example use case is data augmentation: where we are given groundtruth
boxes (boxlist) and would like to randomly crop the image to some
window (window). In this case we need to change the coordinate frame of
each groundtruth box to be relative to this new window.
Args:
boxlist: A BoxList object holding N boxes.
window: A rank 1 tensor [4].
scope: name scope.
Returns:
Returns a BoxList object with N boxes.
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
boxlist_new = scale(box_list.BoxList(
boxlist.get() - [window[0], window[1], window[0], window[1]]),
1.0 / win_height, 1.0 / win_width)
boxlist_new = _copy_extra_fields(boxlist_new, boxlist)
return boxlist_new
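# A minimal sketch of the data-augmentation use case described above: expressing
# a groundtruth box relative to a crop window. `_example_change_coordinate_frame`
# is a hypothetical helper, not part of this module's API.
def _example_change_coordinate_frame():
  window = tf.constant([0.25, 0.25, 0.75, 0.75])  # crop around the image center
  boxes = box_list.BoxList(tf.constant([[0.25, 0.25, 0.5, 0.5]]))
  # The box's min corner coincides with the window's min corner, so this should
  # evaluate to [[0.0, 0.0, 0.5, 0.5]] in the window's coordinate frame.
  return change_coordinate_frame(boxes, window)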
def sq_dist(boxlist1, boxlist2, scope=None):
"""Computes the pairwise squared distances between box corners.
This op treats each box as if it were a point in a 4d Euclidean space and
computes pairwise squared distances.
Mathematically, we are given two matrices of box coordinates X and Y,
where X(i,:) is the i'th row of X, containing the 4 numbers defining the
corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to
boxlist2. We compute
Z(i,j) = ||X(i,:) - Y(j,:)||^2
= ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise distances
"""
with tf.name_scope(scope, 'SqDist'):
sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True)
sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True)
innerprod = tf.matmul(boxlist1.get(), boxlist2.get(),
transpose_a=False, transpose_b=True)
return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
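# A worked instance of the identity ||X(i,:) - Y(j,:)||^2 =
# ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:) used by sq_dist above.
# `_example_sq_dist` is a hypothetical helper, not part of this module's API.
def _example_sq_dist():
  boxlist1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  boxlist2 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0],
                                           [1.0, 1.0, 2.0, 2.0]]))
  # Should evaluate to [[0.0, 4.0]]: the first pair is identical and the second
  # differs by 1 in each of the four coordinates (4 * 1^2 = 4).
  return sq_dist(boxlist1, boxlist2)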
def boolean_mask(boxlist, indicator, fields=None, scope=None,
use_static_shapes=False, indicator_sum=None):
"""Select boxes from BoxList according to indicator and return new BoxList.
`boolean_mask` returns the subset of boxes that are marked as "True" by the
indicator tensor. By default, `boolean_mask` returns the selected boxes
as well as all additional fields stored in the boxlist
(indexing into the first dimension). However one can optionally only draw
from a subset of fields.
Args:
boxlist: BoxList holding N boxes
indicator: a rank-1 boolean tensor
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
guarantees.
indicator_sum: An integer containing the sum of `indicator` vector. Only
required if `use_static_shape` is True.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indicator
Raises:
ValueError: if `indicator` is not a rank-1 boolean tensor.
"""
with tf.name_scope(scope, 'BooleanMask'):
if indicator.shape.ndims != 1:
raise ValueError('indicator should have rank 1')
if indicator.dtype != tf.bool:
raise ValueError('indicator should be a boolean tensor')
if use_static_shapes:
if not (indicator_sum and isinstance(indicator_sum, int)):
raise ValueError('`indicator_sum` must be of type int')
selected_positions = tf.to_float(indicator)
indexed_positions = tf.cast(
tf.multiply(
tf.cumsum(selected_positions), selected_positions),
dtype=tf.int32)
one_hot_selector = tf.one_hot(
indexed_positions - 1, indicator_sum, dtype=tf.float32)
sampled_indices = tf.cast(
tf.tensordot(
tf.to_float(tf.range(tf.shape(indicator)[0])),
one_hot_selector,
axes=[0, 0]),
dtype=tf.int32)
return gather(boxlist, sampled_indices, use_static_shapes=True)
else:
subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
subboxlist.add_field(field, subfieldlist)
return subboxlist
def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, `gather` returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a rank-1 tensor of type int32 / int64
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
guarantees.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int32
"""
with tf.name_scope(scope, 'Gather'):
if len(indices.shape.as_list()) != 1:
raise ValueError('indices should have rank 1')
if indices.dtype != tf.int32 and indices.dtype != tf.int64:
raise ValueError('indices should be an int32 / int64 tensor')
gather_op = tf.gather
if use_static_shapes:
gather_op = ops.matmul_gather_on_zeroth_axis
subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))
if fields is None:
fields = boxlist.get_extra_fields()
fields += ['boxes']
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = gather_op(boxlist.get_field(field), indices)
subboxlist.add_field(field, subfieldlist)
return subboxlist
def concatenate(boxlists, fields=None, scope=None):
"""Concatenate list of BoxLists.
This op concatenates a list of input BoxLists into a larger BoxList. It also
handles concatenation of BoxList fields as long as the field tensor shapes
are equal except for the first dimension.
Args:
boxlists: list of BoxList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxList in the list are included in the
concatenation.
scope: name scope.
Returns:
a BoxList with number of boxes equal to
sum([boxlist.num_boxes() for boxlist in boxlists])
Raises:
ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
contains non BoxList objects), or if requested fields are not contained in
all boxlists
"""
with tf.name_scope(scope, 'Concatenate'):
if not isinstance(boxlists, list):
raise ValueError('boxlists should be a list')
if not boxlists:
raise ValueError('boxlists should have nonzero length')
for boxlist in boxlists:
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('all elements of boxlists should be BoxList objects')
concatenated = box_list.BoxList(
tf.concat([boxlist.get() for boxlist in boxlists], 0))
if fields is None:
fields = boxlists[0].get_extra_fields()
for field in fields:
first_field_shape = boxlists[0].get_field(field).get_shape().as_list()
first_field_shape[0] = -1
if None in first_field_shape:
raise ValueError('field %s must have fully defined shape except for the'
' 0th dimension.' % field)
for boxlist in boxlists:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all requested fields')
field_shape = boxlist.get_field(field).get_shape().as_list()
field_shape[0] = -1
if field_shape != first_field_shape:
raise ValueError('field %s must have same shape for all boxlists '
'except for the 0th dimension.' % field)
concatenated_field = tf.concat(
[boxlist.get_field(field) for boxlist in boxlists], 0)
concatenated.add_field(field, concatenated_field)
return concatenated
def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
boxlist: BoxList holding N boxes.
field: A BoxList field for sorting and reordering the BoxList.
order: (Optional) descend or ascend. Default is descend.
scope: name scope.
Returns:
sorted_boxlist: A sorted BoxList with the field in the specified order.
Raises:
ValueError: if specified field does not exist
ValueError: if the order is not either descend or ascend
"""
with tf.name_scope(scope, 'SortByField'):
if order != SortOrder.descend and order != SortOrder.ascend:
raise ValueError('Invalid sort order')
field_to_sort = boxlist.get_field(field)
if len(field_to_sort.shape.as_list()) != 1:
raise ValueError('Field should have rank 1')
num_boxes = boxlist.num_boxes()
num_entries = tf.size(field_to_sort)
length_assert = tf.Assert(
tf.equal(num_boxes, num_entries),
['Incorrect field size: actual vs expected.', num_entries, num_boxes])
with tf.control_dependencies([length_assert]):
_, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)
if order == SortOrder.ascend:
sorted_indices = tf.reverse_v2(sorted_indices, [0])
return gather(boxlist, sorted_indices)
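# A minimal sketch of the common "order detections by descending score" use of
# sort_by_field. `_example_sort_by_scores` is a hypothetical helper, not part of
# this module's API.
def _example_sort_by_scores():
  boxes = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0],
                                        [0.0, 0.0, 0.5, 0.5]]))
  boxes.add_field('scores', tf.constant([0.1, 0.9]))
  # Returns a BoxList whose boxes and 'scores' field are reordered so that the
  # 0.9-score box comes first.
  return sort_by_field(boxes, 'scores')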
def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):
"""Overlay bounding box list on image.
Currently this visualization plots a 1 pixel thick red bounding box on top
of the image. Note that tf.image.draw_bounding_boxes is essentially
1-indexed.
Args:
image: an image tensor with shape [height, width, 3]
boxlist: a BoxList
normalized: (boolean) specify whether corners are to be interpreted
as absolute coordinates in image space or normalized with respect to the
image size.
scope: name scope.
Returns:
image_and_boxes: an image tensor with shape [height, width, 3]
"""
with tf.name_scope(scope, 'VisualizeBoxesInImage'):
if not normalized:
height, width, _ = tf.unstack(tf.shape(image))
boxlist = scale(boxlist,
1.0 / tf.cast(height, tf.float32),
1.0 / tf.cast(width, tf.float32))
corners = tf.expand_dims(boxlist.get(), 0)
image = tf.expand_dims(image, 0)
return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0])
def filter_field_value_equals(boxlist, field, value, scope=None):
"""Filter to keep only boxes with field entries equal to the given value.
Args:
boxlist: BoxList holding N boxes.
field: field name for filtering.
value: scalar value.
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not have
the specified field.
"""
with tf.name_scope(scope, 'FilterFieldValueEquals'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field(field):
raise ValueError('boxlist must contain the specified field')
filter_field = boxlist.get_field(field)
gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1])
return gather(boxlist, gather_index)
def filter_greater_than(boxlist, thresh, scope=None):
"""Filter to keep only boxes with score exceeding a given threshold.
This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold.
TODO(jonathanhuang): Change function name to filter_scores_greater_than
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not
have a scores field
"""
with tf.name_scope(scope, 'FilterGreaterThan'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape.as_list()) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = tf.cast(tf.reshape(
tf.where(tf.greater(scores, thresh)),
[-1]), tf.int32)
return gather(boxlist, high_score_indices)
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. Note that this only works for a single class ---
to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
max_output_size: maximum number of retained boxes
scope: name scope.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if thresh is not in [0, 1]
"""
with tf.name_scope(scope, 'NonMaxSuppression'):
if not 0 <= thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
selected_indices = tf.image.non_max_suppression(
boxlist.get(), boxlist.get_field('scores'),
max_output_size, iou_threshold=thresh)
return gather(boxlist, selected_indices)
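# A minimal sketch of single-class greedy NMS: two heavily overlapping boxes and
# one distinct box. `_example_non_max_suppression` is a hypothetical helper, not
# part of this module's API.
def _example_non_max_suppression():
  boxes = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0],
                                        [0.0, 0.0, 1.0, 0.9],
                                        [2.0, 2.0, 3.0, 3.0]]))
  boxes.add_field('scores', tf.constant([0.9, 0.8, 0.7]))
  # With thresh=0.5 the second box (IOU 0.9 with the higher-scoring first box)
  # is suppressed, leaving two boxes.
  return non_max_suppression(boxes, thresh=0.5, max_output_size=3)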
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def to_normalized_coordinates(boxlist, height, width,
check_range=True, scope=None):
"""Converts absolute box coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(images)[1],
tf.shape(images)[2]),
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
boxlist: BoxList with coordinates in terms of pixel-locations.
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
boxlist with normalized coordinates in [0, 1].
"""
with tf.name_scope(scope, 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, 1 / height, 1 / width)
def to_absolute_coordinates(boxlist,
height,
width,
check_range=True,
maximum_normalized_coordinate=1.1,
scope=None):
"""Converts normalized box coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum box coordinate
value is larger than maximum_normalized_coordinate (in which case coordinates
are already absolute).
Args:
boxlist: BoxList with coordinates in range [0, 1].
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
scope: name scope.
Returns:
boxlist with absolute coordinates in terms of the image size.
"""
with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input boxes is correct.
if check_range:
box_maximum = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(
tf.greater_equal(maximum_normalized_coordinate, box_maximum),
['maximum box coordinate value is larger '
'than %f: ' % maximum_normalized_coordinate, box_maximum])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, height, width)
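# A minimal sketch of round-tripping between absolute and normalized coordinates
# for a 100x200 (height x width) image. `_example_coordinate_round_trip` is a
# hypothetical helper, not part of this module's API.
def _example_coordinate_round_trip():
  absolute = box_list.BoxList(tf.constant([[10.0, 20.0, 50.0, 100.0]]))
  normalized = to_normalized_coordinates(absolute, height=100, width=200)
  # `normalized` should evaluate to [[0.1, 0.1, 0.5, 0.5]]; scaling back with the
  # same height and width recovers the original pixel coordinates.
  return to_absolute_coordinates(normalized, height=100, width=200)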
def refine_boxes_multi_class(pool_boxes,
num_classes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Box refinement is done independently for each class.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field and a rank 1 'classes' field.
num_classes: (int scalar) Number of classes.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores and classes field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
if not pool_boxes.has_field('classes'):
raise ValueError('pool_boxes must have a \'classes\' field')
refined_boxes = []
for i in range(num_classes):
boxes_class = filter_field_value_equals(pool_boxes, 'classes', i)
refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh,
nms_max_detections, voting_iou_thresh)
refined_boxes.append(refined_boxes_class)
return sort_by_field(concatenate(refined_boxes), 'scores')
def refine_boxes(pool_boxes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
nms_boxes = non_max_suppression(
pool_boxes, nms_iou_thresh, nms_max_detections)
return box_voting(nms_boxes, pool_boxes, voting_iou_thresh)
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
"""Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.
Performs box voting as described in 'Object detection via a multi-region &
semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
with iou overlap >= iou_thresh. The location of B is set to the weighted
average location of boxes in S (scores are used for weighting). And the score
of B is set to the average score of boxes in S.
Args:
selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
boxes are usually selected from pool_boxes using non max suppression.
pool_boxes: BoxList containing a set of (possibly redundant) boxes.
iou_thresh: (float scalar) iou threshold for matching boxes in
selected_boxes and pool_boxes.
Returns:
BoxList containing averaged locations and scores for each box in
selected_boxes.
Raises:
ValueError: if
a) selected_boxes or pool_boxes is not a BoxList.
b) if iou_thresh is not in [0, 1].
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= iou_thresh <= 1.0:
raise ValueError('iou_thresh must be between 0 and 1')
if not isinstance(selected_boxes, box_list.BoxList):
raise ValueError('selected_boxes must be a BoxList')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
iou_ = iou(selected_boxes, pool_boxes)
match_indicator = tf.to_float(tf.greater(iou_, iou_thresh))
num_matches = tf.reduce_sum(match_indicator, 1)
# TODO(kbanoop): Handle the case where some boxes in selected_boxes do not
# match to any boxes in pool_boxes. For such boxes without any matches, we
# should return the original boxes without voting.
match_assert = tf.Assert(
tf.reduce_all(tf.greater(num_matches, 0)),
['Each box in selected_boxes must match with at least one box '
'in pool_boxes.'])
scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
scores_assert = tf.Assert(
tf.reduce_all(tf.greater_equal(scores, 0)),
['Scores must be non negative.'])
with tf.control_dependencies([scores_assert, match_assert]):
sum_scores = tf.matmul(match_indicator, scores)
averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches
box_locations = tf.matmul(match_indicator,
pool_boxes.get() * scores) / sum_scores
averaged_boxes = box_list.BoxList(box_locations)
_copy_extra_fields(averaged_boxes, selected_boxes)
averaged_boxes.add_field('scores', averaged_scores)
return averaged_boxes
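# A minimal sketch of box voting where one NMS-selected box matches two pool
# boxes. `_example_box_voting` is a hypothetical helper, not part of this
# module's API.
def _example_box_voting():
  pool = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0],
                                       [0.0, 0.0, 1.0, 0.8]]))
  pool.add_field('scores', tf.constant([0.6, 0.2]))
  selected = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
  # Both pool boxes match the selected box at iou_thresh=0.5, so its location
  # becomes their score-weighted average and its score their mean (0.4).
  return box_voting(selected, pool, iou_thresh=0.5)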
def pad_or_clip_box_list(boxlist, num_boxes, scope=None):
"""Pads or clips all fields of a BoxList.
Args:
boxlist: A BoxList with an arbitrary number of boxes.
num_boxes: First num_boxes in boxlist are kept.
The fields are zero-padded if num_boxes is bigger than the
actual number of boxes.
scope: name scope.
Returns:
BoxList with all fields padded or clipped.
"""
with tf.name_scope(scope, 'PadOrClipBoxList'):
subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor(
boxlist.get(), num_boxes))
for field in boxlist.get_extra_fields():
subfield = shape_utils.pad_or_clip_tensor(
boxlist.get_field(field), num_boxes)
subboxlist.add_field(field, subfield)
return subboxlist
def select_random_box(boxlist,
default_box=None,
seed=None,
scope=None):
"""Selects a random bounding box from a `BoxList`.
Args:
boxlist: A BoxList.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[-1., -1., -1., -1.]].
seed: Random seed.
scope: Name scope.
Returns:
bbox: A [1, 4] tensor with a random bounding box.
valid: A bool tensor indicating whether a valid bounding box is returned
(True) or whether the default box is returned (False).
"""
with tf.name_scope(scope, 'SelectRandomBox'):
bboxes = boxlist.get()
combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)
number_of_boxes = combined_shape[0]
default_box = default_box or tf.constant([[-1., -1., -1., -1.]])
def select_box():
random_index = tf.random_uniform([],
maxval=number_of_boxes,
dtype=tf.int32,
seed=seed)
return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)
return tf.cond(
tf.greater_equal(number_of_boxes, 1),
true_fn=select_box,
false_fn=lambda: (default_box, tf.constant(False)))
def get_minimal_coverage_box(boxlist,
default_box=None,
scope=None):
"""Creates a single bounding box which covers all boxes in the boxlist.
Args:
boxlist: A Boxlist.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[0., 0., 1., 1.]].
scope: Name scope.
Returns:
A [1, 4] float32 tensor with a bounding box that tightly covers all the
boxes in the box list. If the boxlist does not contain any boxes, the
default box is returned.
"""
with tf.name_scope(scope, 'CreateCoverageBox'):
num_boxes = boxlist.num_boxes()
def coverage_box(bboxes):
y_min, x_min, y_max, x_max = tf.split(
value=bboxes, num_or_size_splits=4, axis=1)
y_min_coverage = tf.reduce_min(y_min, axis=0)
x_min_coverage = tf.reduce_min(x_min, axis=0)
y_max_coverage = tf.reduce_max(y_max, axis=0)
x_max_coverage = tf.reduce_max(x_max, axis=0)
return tf.stack(
[y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
axis=1)
default_box = default_box or tf.constant([[0., 0., 1., 1.]])
return tf.cond(
tf.greater_equal(num_boxes, 1),
true_fn=lambda: coverage_box(boxlist.get()),
false_fn=lambda: default_box)
def sample_boxes_by_jittering(boxlist,
num_boxes_to_sample,
stddev=0.1,
scope=None):
"""Samples num_boxes_to_sample boxes by jittering around boxlist boxes.
It is possible that this function might generate boxes with size 0. The larger
the stddev, the more probable this is. For a small stddev of 0.1 this probability
is very small.
Args:
boxlist: A boxlist containing N boxes in normalized coordinates.
num_boxes_to_sample: A positive integer containing the number of boxes to
sample.
stddev: Standard deviation. This is used to draw random offsets for the
box corners from a normal distribution. The offset is multiplied by the
box size so will be larger in terms of pixels for larger boxes.
scope: Name scope.
Returns:
sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in
normalized coordinates.
"""
with tf.name_scope(scope, 'SampleBoxesByJittering'):
num_boxes = boxlist.num_boxes()
box_indices = tf.random_uniform(
[num_boxes_to_sample],
minval=0,
maxval=num_boxes,
dtype=tf.int32)
sampled_boxes = tf.gather(boxlist.get(), box_indices)
sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0]
sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1]
rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0]
minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1]
maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2]
maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3]
maxy = tf.maximum(miny, maxy)
maxx = tf.maximum(minx, maxx)
sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1)
sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0)
return box_list.BoxList(sampled_boxes)
|
|
import numpy as np
import pandas as pd
import cPickle as pickle
import keras
from keras import optimizers
from keras import backend as K
from keras import regularizers
from keras.models import Sequential
from keras.layers import LSTM, Embedding, TimeDistributed, Dense
from keras.layers import RepeatVector, Merge, Activation, Flatten
from keras.preprocessing import image, sequence
EMBEDDING_DIM = 128
DATA_PATH = "/data/vision/fisher/data1/Flickr8k/"
class CaptionGenerator():
def __init__(self):
self.max_cap_len = None
self.vocab_size = None
self.index_word = None
self.word_index = None
self.total_samples = None
self.encoded_images = pickle.load(open(DATA_PATH + "encoded_images.dat", "rb"))
self.variable_initializer()
def variable_initializer(self):
df_train = pd.read_csv(DATA_PATH + 'Flickr8k_text/flickr_8k_train_dataset.txt', delimiter='\t')
df_val = pd.read_csv(DATA_PATH + 'Flickr8k_text/flickr_8k_val_dataset.txt', delimiter='\t')
df = pd.concat([df_train, df_val], axis=0)
df['cap_len'] = df['captions'].apply(lambda words: len(words.split()))
nb_samples = df.shape[0]
iter = df.iterrows()
caps = []
for i in range(nb_samples):
x = iter.next()
caps.append(x[1][1])
self.total_samples=0
for text in caps:
self.total_samples+=len(text.split())-1
print "Total samples : "+str(self.total_samples)
words = [txt.split() for txt in caps]
unique = []
for word in words:
unique.extend(word)
unique = list(set(unique))
self.vocab_size = len(unique)
self.word_index = {}
self.index_word = {}
for i, word in enumerate(unique):
self.word_index[word]=i
self.index_word[i]=word
self.max_cap_len = np.int(df['cap_len'].mean() + 2*df['cap_len'].std())
print "Vocabulary size: "+str(self.vocab_size)
print "Maximum caption length: "+str(self.max_cap_len)
print "Variables initialization done!"
def data_generator_train(self, batch_size = 64):
partial_caps = []
next_words = []
images = []
#print "generating training data..."
gen_count = 0
df = pd.read_csv(DATA_PATH + 'Flickr8k_text/flickr_8k_train_dataset.txt', delimiter='\t')
nb_samples = df.shape[0]
iter = df.iterrows()
caps = []
imgs = []
for i in range(nb_samples):
x = iter.next()
caps.append(x[1][1])
imgs.append(x[1][0])
total_count = 0
while 1:
image_counter = -1
for text in caps:
image_counter+=1
current_image = self.encoded_images[imgs[image_counter]]
for i in range(len(text.split())-1):
total_count+=1
partial = [self.word_index[txt] for txt in text.split()[:i+1]]
partial_caps.append(partial)
next = np.zeros(self.vocab_size)
next[self.word_index[text.split()[i+1]]] = 1
next_words.append(next)
images.append(current_image)
if total_count>=batch_size:
next_words = np.asarray(next_words)
images = np.asarray(images)
partial_caps = sequence.pad_sequences(partial_caps, maxlen=self.max_cap_len, padding='post')
total_count = 0
gen_count+=1
#print "yielding count: "+str(gen_count)
#images: CNN encodings
#partial_caps: embedding indices
#next_words: one-hot encodings
yield [[images, partial_caps], next_words]
partial_caps = []
next_words = []
images = []
#end if
#end for
#end for
#end while
def data_generator_val(self, batch_size = 64):
partial_caps = []
next_words = []
images = []
#print "generating validation data..."
gen_count = 0
df = pd.read_csv(DATA_PATH + 'Flickr8k_text/flickr_8k_val_dataset.txt', delimiter='\t')
nb_samples = df.shape[0]
iter = df.iterrows()
caps = []
imgs = []
for i in range(nb_samples):
x = iter.next()
caps.append(x[1][1])
imgs.append(x[1][0])
total_count = 0
while 1:
image_counter = -1
for text in caps:
image_counter+=1
current_image = self.encoded_images[imgs[image_counter]]
for i in range(len(text.split())-1):
total_count+=1
partial = [self.word_index[txt] for txt in text.split()[:i+1]]
partial_caps.append(partial)
next = np.zeros(self.vocab_size)
next[self.word_index[text.split()[i+1]]] = 1
next_words.append(next)
images.append(current_image)
if total_count>=batch_size:
next_words = np.asarray(next_words)
images = np.asarray(images)
partial_caps = sequence.pad_sequences(partial_caps, maxlen=self.max_cap_len, padding='post')
total_count = 0
gen_count+=1
#print "yielding count: "+str(gen_count)
#images: CNN encodings
#partial_caps: embedding indices
#next_words: one-hot encodings
yield [[images, partial_caps], next_words]
partial_caps = []
next_words = []
images = []
#end if
#end for
#end for
#end while
def load_image(self, path):
img = image.load_img(path, target_size=(224,224))
x = image.img_to_array(img)
return np.asarray(x)
def create_model(self, ret_model = False):
#image branch
image_model = Sequential()
image_model.add(Dense(EMBEDDING_DIM, input_dim = 2048, activation='relu'))
image_model.add(RepeatVector(self.max_cap_len))
#text branch
lang_model = Sequential()
lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
lang_model.add(LSTM(256,return_sequences=True))
lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))
#concatenated
model = Sequential()
model.add(Merge([image_model, lang_model], mode='concat'))
model.add(LSTM(1024, dropout=0.2, recurrent_dropout=0.2, return_sequences=False))
model.add(Dense(self.vocab_size))
model.add(Activation('softmax'))
print "Model created!"
        if ret_model:
return model
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
return model
def get_word(self,index):
return self.index_word[index]
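# Usage sketch (an assumption, not part of the original script): build the
# model and train it from the generators. Requires the Flickr8k text files
# and the pre-computed "encoded_images.dat" pickle under DATA_PATH. Note that
# fit_generator keyword names differ between Keras 1 (samples_per_epoch,
# nb_epoch) and Keras 2 (steps_per_epoch, epochs); adjust to the installed
# version.
if __name__ == '__main__':
    cg = CaptionGenerator()
    model = cg.create_model()
    model.summary()
    # Example training call (Keras 1 style keywords):
    # model.fit_generator(cg.data_generator_train(batch_size=64),
    #                     samples_per_epoch=cg.total_samples,
    #                     nb_epoch=1)
    # model.save_weights(DATA_PATH + 'caption_model_weights.h5')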
|
|
"""Closable and thread-safe queues."""
__all__ = [
'Closed',
'Empty',
'Full',
'Queue',
'PriorityQueue',
'LifoQueue',
'ForwardingQueue',
]
import collections
import heapq
import time
import threading
from queue import Empty
from queue import Full
class Closed(Exception):
"""Exception raised by put() and get() when the queue is closed."""
class _QueueBase:
def __init__(self, capacity):
self._capacity = capacity
self._lock = threading.Lock()
self._not_empty = threading.Condition(self._lock)
self._not_full = threading.Condition(self._lock)
self._closed = False
_queue = None
def _put(self, _):
raise NotImplementedError
def _get(self):
raise NotImplementedError
def __bool__(self):
with self._lock:
return bool(self._queue)
def __len__(self):
"""Return the size, not the capacity, of the queue."""
with self._lock:
return len(self._queue)
def is_full(self):
"""True if size is equal to or greater than capacity."""
with self._lock:
return self._capacity > 0 and len(self._queue) >= self._capacity
def is_closed(self):
with self._lock:
return self._closed
def close(self, graceful=True):
"""Close the queue and return the items (if you need to release
them).
NOTE: All blocking put() and get() will raise Closed; so only
call close() when you really have to.
"""
with self._lock:
if self._closed:
return []
if graceful:
items = []
else: # Drain the queue.
items, self._queue = list(self._queue), ()
self._closed = True
self._not_empty.notify_all()
self._not_full.notify_all()
return items
def put(self, item, block=True, timeout=None):
"""Same as standard library's put() of queue module except that
it will raise Closed in all blocked producer threads after
the queue is being closed.
"""
with self._not_full:
if self._closed:
raise Closed
if self._capacity > 0:
waiter = _make_waiter(block, timeout)
waiter.send(self._not_full)
keep_waiting = True
while True:
if self._closed:
raise Closed
if len(self._queue) < self._capacity:
break
if not keep_waiting:
raise Full
try:
next(waiter)
except StopIteration:
keep_waiting = False
self._put(item)
self._not_empty.notify()
def get(self, block=True, timeout=None):
"""Same as standard library's get() of queue module except that
it will raise Closed in all blocked consumer threads after
the queue is empty and is being closed.
"""
with self._not_empty:
waiter = _make_waiter(block, timeout)
waiter.send(self._not_empty)
keep_waiting = True
while True:
if self._queue:
break
if self._closed:
raise Closed
if not keep_waiting:
raise Empty
try:
next(waiter)
except StopIteration:
keep_waiting = False
item = self._get()
self._not_full.notify()
return item
def _make_waiter(block, timeout):
"""Return a generator that calls Condition.wait.
You first call waiter.send(cond) to give it a condition variable
to wait for, and then every time you call next(waiter), it will
either call Condition.wait or raise StopIteration.
"""
if not block:
waiter = _non_blocking()
elif timeout is None:
waiter = _blocking()
else:
if timeout < 0:
raise ValueError('timeout must be non-negative')
waiter = _blocking_timeout(timeout)
next(waiter)
return waiter
def _non_blocking():
_ = yield
yield
def _blocking():
cond = yield
while True:
yield
cond.wait()
def _blocking_timeout(timeout):
cond = yield
end_time = time.monotonic() + timeout
while True:
remaining = end_time - time.monotonic()
if remaining <= 0.0:
yield
break
yield
cond.wait(remaining)
class Queue(_QueueBase):
def __init__(self, capacity=0):
super().__init__(capacity)
self._queue = collections.deque()
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.popleft()
class PriorityQueue(_QueueBase):
def __init__(self, capacity=0):
super().__init__(capacity)
self._queue = []
def _put(self, item):
heapq.heappush(self._queue, item)
def _get(self):
return heapq.heappop(self._queue)
class LifoQueue(_QueueBase):
def __init__(self, capacity=0):
super().__init__(capacity)
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
class ForwardingQueue:
"""Wrap around a queue and replace its internal lock with a RLock.
All calls are forwarded to the queue. You may sub-class this
class instead of a queue class to alter the its behavior.
"""
def __init__(self, queue):
self.queue = queue
# Replace Lock with RLock for easier sub-classing.
self.queue._lock = threading.RLock()
self.queue._not_empty = threading.Condition(self.queue._lock)
self.queue._not_full = threading.Condition(self.queue._lock)
@property
def lock(self):
return self.queue._lock
def __bool__(self):
return bool(self.queue)
def __len__(self):
return len(self.queue)
def is_full(self):
return self.queue.is_full()
def is_closed(self):
return self.queue.is_closed()
def close(self, graceful=True):
return self.queue.close(graceful=graceful)
def put(self, item, block=True, timeout=None):
self.queue.put(item, block=block, timeout=timeout)
def get(self, block=True, timeout=None):
return self.queue.get(block=block, timeout=timeout)
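# A minimal self-test sketch (not part of the original module): run the file
# directly to exercise Queue across threads. A bounded Queue makes the
# producer block until the consumer drains items, and a graceful close()
# wakes the consumer with Closed once everything has been delivered.
if __name__ == '__main__':
    def _producer(q, n):
        for i in range(n):
            q.put(i)  # blocks while the queue is full
        q.close()  # graceful close: queued items remain readable
    def _consumer(q, out):
        while True:
            try:
                out.append(q.get())  # raises Closed once closed and empty
            except Closed:
                return
    q = Queue(capacity=4)
    results = []
    consumer = threading.Thread(target=_consumer, args=(q, results))
    producer = threading.Thread(target=_producer, args=(q, 20))
    consumer.start()
    producer.start()
    producer.join()
    consumer.join()
    assert results == list(range(20))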
|
|
from __future__ import absolute_import, division, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
)
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from . import utils
@python_2_unicode_compatible
class ThemeContent(ClusterableModel):
name = models.CharField(max_length=255)
contact_email = models.EmailField(
blank=True,
null=True,
help_text="Only provide if this should be different from the site default email contact address.",
)
default = models.BooleanField(default=False)
panels = [
FieldPanel('name'),
FieldPanel('contact_email'),
FieldPanel('default'),
InlinePanel('block_links', label="Content Blocks"),
InlinePanel('follow_links', label="Follow Links"),
InlinePanel('logo_links', label="Logos"),
]
def __str__(self):
return self.name
register_snippet(ThemeContent)
@python_2_unicode_compatible
class Theme(models.Model):
name = models.CharField(max_length=1024)
folder = models.CharField(max_length=1024, default="themes/default")
content = models.ForeignKey(ThemeContent, null=True)
def __str__(self):
return self.name
panels = [
FieldPanel('name'),
FieldPanel('folder'),
SnippetChooserPanel('content'),
]
register_snippet(Theme)
class ThemeablePage(Page):
'''
    Abstract model class to inherit from for themeable pages
'''
is_creatable = False
class Meta:
abstract = True
theme = models.ForeignKey(
Theme,
on_delete=models.SET_NULL,
blank=True,
null=True,
)
def get_template(self, request, *args, **kwargs):
original_template = super(ThemeablePage, self).get_template(request, *args, **kwargs)
if self.theme is None:
return original_template
custom_template = utils.get_themed_template_name(self.theme, original_template)
if utils.template_exists(custom_template):
return custom_template
return original_template
style_panels = [
MultiFieldPanel(
[
SnippetChooserPanel('theme'),
],
heading="Theme"
),
]
@python_2_unicode_compatible
class TextBlock(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
heading = models.TextField(blank=True, default="")
content = RichTextField(blank=True, default="")
panels = [
FieldPanel('name'),
FieldPanel('heading'),
FieldPanel('content'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(TextBlock)
@python_2_unicode_compatible
class FollowLink(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
link = models.CharField(max_length=1024)
panels = [
FieldPanel('name'),
FieldPanel('link'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(FollowLink)
@python_2_unicode_compatible
class LogoBlock(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
logo = models.ForeignKey(
'images.AttributedImage',
)
link = models.CharField(max_length=2048, blank=True, null=True)
panels = [
FieldPanel('name'),
ImageChooserPanel('logo'),
FieldPanel('link'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(LogoBlock)
class ContentBlockLink(models.Model):
block = models.ForeignKey(
"TextBlock",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='block_links'
)
panels = [SnippetChooserPanel("block")]
class ContentFollowLink(models.Model):
block = models.ForeignKey(
"FollowLink",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='follow_links'
)
panels = [SnippetChooserPanel("block")]
class ContentLogoLink(models.Model):
block = models.ForeignKey(
"LogoBlock",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='logo_links'
)
panels = [SnippetChooserPanel("block")]
|
|
#
# salt api wrapper for horizon dashboard
#
#
#
import salt
import salt.config
from salt import runner
import salt.client
import salt.gdc.groups
#
from openstack_dashboard.settings import SALT_MASTER_CONFIG
#
# Determine package management system type
#
from openstack_dashboard.settings import OS_PACKAGE_SPEC
#
#
opts = salt.config.master_config(SALT_MASTER_CONFIG)
#
#
# database for completed tasks
from openstack_dashboard.dashboards.tasks.models import SaltReturns
from openstack_dashboard.dashboards.groups.models import GroupMember
#
#
##
from openstack_dashboard.dashboards.tasks.task import ActiveTask
##
from openstack_dashboard.dashboards.patch_management.package import Package as Patch_Package
##
from openstack_dashboard.dashboards.patch_management.package import Instance as Patch_Instance
#
from openstack_dashboard.dashboards.patch_management.repository import DebRepository, RpmRepository
#
from openstack_dashboard.dashboards.groups.groupmember import Member,Group
##
from horizon import exceptions
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.api.salt_sls_api import minions_list_sls
#
#
def active_jobs():
    runner_client = salt.runner.RunnerClient(opts)
    try:
        active_tasks_list = runner_client.cmd('jobs.active', [])
    except Exception:
        raise exceptions.NotAvailable(_('Salt-master is not available'))
    active_task_list_converted = []
    # bug is here
    for task_id, single_active_task in active_tasks_list.items():
        active_task_list_converted.append(ActiveTask(id=task_id,
                                                     function=single_active_task['Function'],
                                                     user=single_active_task['User'],
                                                     target_type=single_active_task['Target-type'],
                                                     returned=single_active_task['Returned'],
                                                     running_on=single_active_task['Running'],
                                                     arguments=single_active_task['Arguments']))
    return active_task_list_converted
def delete_task(jid, scope='*'):
    local = salt.client.LocalClient()
    try:
        local.cmd(scope, 'saltutil.kill_job', [jid])
    except Exception:
        raise exceptions.NotAvailable(_('Unable to delete task %s') % str(jid))
def pkg_list_for_upgrade(instance_name=None):
local = salt.client.LocalClient()
pkg_list=local.cmd(instance_name,'pkg.list_upgrades')
pkg_list_m = []
pkg_list = pkg_list[instance_name]
for i in pkg_list.keys():
pkg_list_m.append(Patch_Package(name=i,version=pkg_list[i]))
return pkg_list_m
def get_grains(instance_name=None,*args):
local = salt.client.LocalClient()
grains_names=list(args)
grains_list = local.cmd(instance_name,'grains.item',grains_names)
#grains_list.get('group_name',None)
#return grains_list[instance_name]
return grains_list.get(instance_name,{})
def minions_list():
runner = salt.runner.RunnerClient(opts)
minions_list = runner.cmd('manage.up', [])
return minions_list
def minions_list_sls():
gm = salt.gdc.groups.GdcMatcher()
return gm.get_all_hosts()
def minions_list_custom():
"""Minions list table with os column """
minions_list_base = minions_list_sls()
minions_list_m = []
GRAIN_NAME = 'os'
for instance_name in minions_list_base:
os = get_grains(instance_name,GRAIN_NAME).get(GRAIN_NAME,"Unknown")
minions_list_m.append(Patch_Instance(name=instance_name, os=os))
return minions_list_m
def install_packages(instance_name=None,packages=[]):
local = salt.client.LocalClient()
PKG_INSTALL = "pkg.install"
return local.run_job(instance_name,PKG_INSTALL,["""pkgs='%s'"""%(str(packages).replace("'",'"'))])
def add_zypper_repo(instance_name=None, repo = None , baseurl = None , enabled = None , refresh=None , cache = None , gpgcheck = None , gpgautoimport=None ):
local = salt.client.LocalClient()
MOD_REPO = "pkg.mod_repo"
local.run_job(instance_name,MOD_REPO,[repo],kwarg={'baseurl': baseurl, #'repo': repo,
'url': baseurl,
'enabled': enabled,
'refresh':refresh ,
'cache':cache ,
'gpgcheck':gpgcheck ,
'gpgautoimport':gpgautoimport })
def add_yum_repo(instance_name = None, repo = None , name = None , baseurl = None , mirrorlist = None ):
local = salt.client.LocalClient()
MOD_REPO = "pkg.mod_repo"
local.run_job(instance_name,MOD_REPO ,[repo],kwarg={ 'name': name ,
'baseurl': baseurl })
def get_repo_list_rpm(package_manager=None):
class EmptyClass():
pass
local = salt.client.LocalClient()
GRAIN_NAME = 'os'
minions_objects = minions_list_custom()
rpm_based_machines_names = []
repo_fields = {
"zypper":["alias","autorefresh","baseurl","cache","enabled","gpgcheck","gpgautoimport","keeppackages","mirrorlist","metadataPath","name","packagesPath","priority","refresh","type","url"],
"yum":["baseurl","comments","enabled","failovermethod","file","gpgcheck","gpgkey","metadata_expire","mirrorlist","metalink","name","skip_if_unavailable","file"] }
if package_manager == None: package_manager="rpm"
for machine in minions_objects:
if machine.os in OS_PACKAGE_SPEC[package_manager]:
rpm_based_machines_names.append(machine.id)
instance_list=','.join(rpm_based_machines_names)
repositories = {}
for instance_name in rpm_based_machines_names:
instance_repo_list=local.cmd(instance_name,'pkg.list_repos')
repository_set = instance_repo_list[instance_name]
if type(repository_set) == type("Hello world !"): break
for key in repository_set.keys():
current_repo = repository_set[key]
repo_instance = EmptyClass()
if key not in repositories.keys():
setattr(repo_instance,'id',key)
setattr(repo_instance,'instances',[instance_name])
for field in repo_fields[package_manager]:
setattr(repo_instance,field,current_repo.get(field,""))
repositories[key]=repo_instance
else:
current_repo_clients = getattr(repositories[key],'instances')
current_repo_clients.append(instance_name)
setattr(repositories[key],'instances',current_repo_clients)
return repositories.values()
def get_repo_list_deb():
local = salt.client.LocalClient()
GRAIN_NAME = 'os'
minions_objects = minions_list_custom()
deb_based_machines_names = []
for machine in minions_objects:
if machine.os in OS_PACKAGE_SPEC["deb"]:
deb_based_machines_names.append(machine.id)
#OS_PACKAGE_SPEC["deb"]
instance_list=','.join(deb_based_machines_names)
repositories = []
uniq_repository_names = []
for instance_name in deb_based_machines_names:
instance_repo_list=local.cmd(instance_name,'pkg.list_repos')
repository_set = instance_repo_list[instance_name]
if type(repository_set) == type("Hello world !"): break
for key in repository_set.keys():
current_repo = repository_set[key]
current_repo=current_repo[0]
if key not in uniq_repository_names:
uniq_repository_names.append(key)
repositories.append(DebRepository(id = key,
architectures = current_repo.get("architectures",""),
comps=current_repo.get("comps",""),
disabled=current_repo.get("disabled",""),
dist=current_repo.get("dist",""),
repo_file=current_repo.get("file"),
line=current_repo.get("line",""),
repo_type=current_repo.get("type",""),
uri=current_repo.get("uri","")))
return repositories
def grains_ls():
local = salt.client.LocalClient()
instances_hash=local.cmd('*','grains.ls')
grains = []
for key in instances_hash:
for grain in instances_hash[key]:
if grain not in grains:
grains.append(grain)
return grains
def match_by_grain(grain=None,value=None):
local = salt.client.LocalClient()
instances_list=local.cmd(str(grain+":"+value),'test.ping',expr_form='grain')
return instances_list.keys()
def add_rpm_repo(grain_hash={}):
local = salt.client.LocalClient()
pass
def join_scope(scope=[]):
hash={}
for subscope in scope:
for item in subscope:
hash[item]=1
return hash.keys()
def collect_scope(grainpairs=[]):
# grainpairs == [u'os:Ubuntu', u'os:openSUSE']
for grainpair in grainpairs:
pass
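#
# Usage sketch (an assumption, not part of the original wrapper): build a
# simple report of pending package upgrades per minion. Requires a reachable
# salt-master configured at SALT_MASTER_CONFIG.
def pending_upgrades_report():
    report = {}
    for machine in minions_list_custom():
        # ".id" mirrors the attribute already used in get_repo_list_rpm above.
        report[machine.id] = pkg_list_for_upgrade(instance_name=machine.id)
    return report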
|
|
"""test_poisson.py [options]
Solves the Heterogeneous Poisson equation on a unit cube. A full
script for testing generation and tools provided by proteus.
"""
import numpy as np
import sys
from proteus import Comm, Profiling, NumericalSolution, TransportCoefficients, default_so, default_s
from proteus.FemTools import C0_AffineLinearOnSimplexWithNodalBasis
from proteus.LinearSolvers import LU
from proteus.NonlinearSolvers import Newton
from proteus.NumericalFlux import Advection_DiagonalUpwind_Diffusion_IIPG_exterior
from proteus.Quadrature import SimplexGaussQuadrature
from proteus.superluWrappers import SparseMatrix
from proteus.TimeIntegration import NoIntegration
from ignition.utils.proteus.defaults import ProteusProblem, ProteusNumerics
from ignition.utils.proteus.optparser import get_prog_opts
log = Profiling.logEvent
nd = 3
class Poisson(ProteusProblem):
"""
    Heterogeneous Poisson's equation, -div(a(x) grad u) = f(x), on the unit domain [0,1]x[0,1]x[0,1]
"""
##\page Tests Test Problems
# \ref poisson_3d_p.py "Heterogeneous Poisson's equation, -div(a(x)u) = f(x), on unit domain [0,1]x[0,1]x[0,1]"
#
##\ingroup test
#\file poisson_3d_p.py
#
    #\brief Heterogeneous Poisson's equation in the 3D unit domain [0,1]x[0,1]x[0,1]
def __init__(self):
self.name = "Poisson"
#space dimension
self.nd = 3
#if unstructured would need variable polyfile or meshfile set
#steady-state so no initial conditions
self.initialConditions = None
#use sparse diffusion representation
self.sd=True
#identity tensor for defining analytical heterogeneity functions
self.Ident = np.zeros((nd,nd),'d')
self.Ident[0,0]=1.0; self.Ident[1,1] = 1.0; self.Ident[2,2]=1.0
#store a,f in dictionaries since coefficients class allows for one entry per component
self.aOfX = {0:self.a5}; self.fOfX = {0:self.f5}
#one component
self.nc = 1
#load analytical solution, dirichlet conditions, flux boundary conditions into the expected variables
self.analyticalSolution = {0:self.u5Ex()}
self.analyticalSolutionVelocity = {0:self.velEx(self.analyticalSolution[0],self.aOfX[0])}
#
self.dirichletConditions = {0:self.getDBC5}
self.advectiveFluxBoundaryConditions = {0:self.getAdvFluxBC5}
self.diffusiveFluxBoundaryConditions = {0:{0:self.getDiffFluxBC5}}
self.fluxBoundaryConditions = {0:'setFlow'} #options are 'setFlow','noFlow','mixedFlow'
#equation coefficient names
self.coefficients = TransportCoefficients.PoissonEquationCoefficients(self.aOfX,
self.fOfX, self.nc, self.nd)
#
self.coefficients.variableNames=['u0']
#for computing exact 'Darcy' velocity
class velEx:
def __init__(self,duex,aex):
self.duex = duex
self.aex = aex
def uOfX(self,X):
du = self.duex.duOfX(X)
A = np.reshape(self.aex(X),(3,3))
return -np.dot(A,du)
def uOfXT(self,X,T):
return self.uOfX(X)
##################################################
#define coefficients a(x)=[a_{ij}] i,j=0,2, right hand side f(x) and analytical solution u(x)
    #u = x*x + y*y + z*z, a_00 = x + 5.0, a_11 = y + 5.0, a_22 = z + 10.0
#f = -2*x -2*(5+x) -2*y-2*(5+y) -2*z-2*(10+z)
#
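    # Sanity check of the manufactured solution: grad u = (2x, 2y, 2z), so
    #   -div(a grad u) = -(d/dx[(x+5)*2x] + d/dy[(y+5)*2y] + d/dz[(z+10)*2z])
    #                  = -2x - 2(x+5) - 2y - 2(y+5) - 2z - 2(z+10),
    # which matches f5 below.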
def a5(self, x):
return np.array([[x[0] + 5.0,0.0,0.0],[0.0,x[1] + 5.0,0.0],[0.0,0.0,x[2]+10.0]],'d')
def f5(self, x):
return -2.0*x[0] -2*(5.+x[0]) -2.*x[1]-2.*(5.+x[1]) -2.*x[2]-2.*(10+x[2])
#'manufactured' analytical solution
class u5Ex:
def __init__(self):
pass
def uOfX(self,x):
return x[0]**2+x[1]**2+x[2]**2
def uOfXT(self,X,T):
return self.uOfX(X)
def duOfX(self,X):
du = 2.0*np.reshape(X[0:3],(3,))
return du
def duOfXT(self,X,T):
return self.duOfX(X)
#dirichlet boundary condition functions on (x=0,y,z), (x,y=0,z), (x,y=1,z), (x,y,z=0), (x,y,z=1)
def getDBC5(self, x,flag):
if x[0] in [0.0] or x[1] in [0.0,1.0] or x[2] in [0.0,1.0]:
return lambda x,t: self.u5Ex().uOfXT(x,t)
def getAdvFluxBC5(self, x,flag):
pass
#specify flux on (x=1,y,z)
def getDiffFluxBC5(self, x,flag):
if x[0] == 1.0:
n = np.zeros((nd,),'d'); n[0]=1.0
return lambda x,t: np.dot(self.velEx(self.u5Ex(),self.a5).uOfXT(x,t),n)
if not (x[0] in [0.0] or x[1] in [0.0,1.0] or x[2] in [0.0,1.0]):
return lambda x,t: 0.0
class C0P1_Poisson_Numerics(ProteusNumerics):
#steady-state so no time integration
timeIntegration = NoIntegration
#number of output timesteps
nDTout = 1
#finite element spaces
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
#numerical quadrature choices
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
#number of nodes in x,y,z
nnx = 7
nny = 7
nnz = 7
#if unstructured would need triangleOptions flag to be set
#number of levels in mesh
nLevels = 1
#no stabilization or shock capturing
subgridError = None
shockCapturing = None
#nonlinear solver choices
multilevelNonlinearSolver = Newton
levelNonlinearSolver = Newton
#linear problem so force 1 iteration allowed
maxNonlinearIts = 2
maxLineSearches = 1
fullNewtonFlag = True
#absolute nonlinear solver residual tolerance
nl_atol_res = 1.0e-8
#relative nonlinear solver convergence tolerance as a function of h
#(i.e., tighten relative convergence test as we refine)
tolFac = 0.0
#matrix type
matrix = SparseMatrix
#convenience flag
parallel = False
if parallel:
multilevelLinearSolver = KSP_petsc4py
        #for petsc do things like
#"-ksp_type cg -pc_type asm -pc_asm_type basic -ksp_atol 1.0e-10 -ksp_rtol 1.0e-10 -ksp_monitor_draw" or
#-pc_type lu -pc_factor_mat_solver_package
#can also set -pc_asm_overlap 2 with default asm type (restrict)
levelLinearSolver = KSP_petsc4py#
#for petsc do things like
#"-ksp_type cg -pc_type asm -pc_asm_type basic -ksp_atol 1.0e-10 -ksp_rtol 1.0e-10 -ksp_monitor_draw" or
#-pc_type lu -pc_factor_mat_solver_package
#can also set -pc_asm_overlap 2 with default asm type (restrict)
#levelLinearSolver = PETSc#
#pick number of layers to use in overlap
nLayersOfOverlapForParallel = 0
#type of partition
parallelPartitioningType = MeshParallelPartitioningTypes.node
#parallelPartitioningType = MeshParallelPartitioningTypes.element
#have to have a numerical flux in parallel
numericalFluxType = Advection_DiagonalUpwind_Diffusion_IIPG_exterior
#for true residual test
linearSolverConvergenceTest = 'r-true'
#to allow multiple models to set different ksp options
#linear_solver_options_prefix = 'poisson_'
linearSmoother = None
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
numericalFluxType = Advection_DiagonalUpwind_Diffusion_IIPG_exterior
#linear solver relative convergence test
linTolFac = 0.0
#linear solver absolute convergence test
l_atol_res = 1.0e-10
#conservativeFlux = {0:'pwl'}
def init_mpi_petsc(opts):
log("Initializing MPI")
if opts.petscOptions != None:
petsc_argv = sys.argv[:1]+opts.petscOptions.split()
log("PETSc options from commandline")
log(str(petsc_argv))
else:
petsc_argv=sys.argv[:1]
if opts.petscOptionsFile != None:
petsc_argv=[sys.argv[0]]
petsc_argv += open(opts.petscOptionsFile).read().split()
log("PETSc options from commandline")
log(str(petsc_argv))
return Comm.init(argv=petsc_argv)
def main(*args):
opts, args = get_prog_opts(args, __doc__)
comm = init_mpi_petsc(opts)
problem_list = [Poisson(),]
simulation_list = [default_s]
numerics_list = [C0P1_Poisson_Numerics(),]
numerics_list[0].periodicDirichletConditions = problem_list[0].periodicDirichletConditions
numerics_list[0].T = problem_list[0].T
simulation_name = problem_list[0].name + "_" + numerics_list[0].__class__.__name__
simulation_name_proc = simulation_name + "_" + repr(comm.rank())
simFlagsList = [{ 'simulationName': simulation_name,
'simulationNameProc': simulation_name_proc,
'dataFile': simulation_name_proc + '.dat',
'components' : [ci for ci in range(problem_list[0].coefficients.nc)],
}]
so = default_so
so.name = problem_list[0].name
so.pnList = problem_list
so.sList = [default_s]
try:
so.systemStepControllerType = numerics_list[0].systemStepControllerType
except AttributeError:
pass
try:
so.tnList = numerics_list[0].tnList
so.archiveFlag = numerics_list[0].archiveFlag
except AttributeError:
pass
runNumber = 0
runName = so.name + repr(runNumber)
Profiling.procID=comm.rank()
if simulation_list[0].logAllProcesses or opts.logAllProcesses:
Profiling.logAllProcesses = True
Profiling.flushBuffer=simulation_list[0].flushBuffer
if opts.logLevel > 0:
Profiling.openLog(runName+".log",opts.logLevel)
ns = NumericalSolution.NS_base(default_so, problem_list, numerics_list, simulation_list,
opts, simFlagsList)
ns.calculateSolution(runName)
if __name__ == "__main__":
main(sys.argv[1:])
|
|
import logging
import pymongo
from collections import OrderedDict
from datetime import datetime, timedelta
from facepy.exceptions import FacepyError
from pyusps import address_information
from pygeocode import geocoder
from ubernear.util import mongo
from ubernear.util import (
utc_from_iso8601,
address as addr_util,
)
log = logging.getLogger(__name__)
facebook_batch_size = 50
usps_batch_size = 5
def _mark_as_failed(
events_coll,
event_id,
now,
field,
reason='',
):
save = OrderedDict([
('ubernear', OrderedDict([
(field, OrderedDict([
# If event is retried and it is
# successful it would be useful
# to know when it failed.
('when', now),
('reason', reason),
]),
),
]),
),
])
mongo.save_no_replace(
events_coll,
_id=event_id,
save=save,
)
def _save_venues(
events,
events_coll,
usps_id,
now,
):
# Don't waste a call to the USPS API
if not events:
return
venues = [event['facebook']['venue'] for event in events]
usps_venues = [
OrderedDict([
('address', venue['street']),
('city', venue['city']),
('state', venue['state']),
])
for venue in venues
]
matches = address_information.verify(
usps_id,
*usps_venues
)
# TODO fugly
if len(usps_venues) == 1:
matches = [matches]
for (event,match) in zip(events,matches):
if isinstance(match, ValueError):
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='normalization_failed',
reason=str(match),
)
continue
match['country'] = 'US'
save = OrderedDict([
('normalized', match),
('ubernear.normalization_completed', now),
('ubernear.normalization_source', 'usps'),
])
log.debug(
'Storing normalized venue for {event_id}'.format(
event_id=event['_id'],
)
)
mongo.save_no_replace(
events_coll,
_id=event['_id'],
save=save,
)
def _save_events(
events,
events_coll,
graph,
now,
_log=None,
):
if _log is None:
_log = log
# Don't waste a call to the Facebook Graph API
if not events:
return
batch = [
OrderedDict([
('method', 'GET'),
('relative_url',
'{event_id}?date_format=c'.format(
event_id=event['_id']
),
),
])
for event in events
]
    responses = graph.batch(batch)
    for event, response in zip(events, responses):
if isinstance(response, FacepyError):
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='lookup_failed',
reason=str(response),
)
continue
# Event does not exist anymore
if response is False:
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='lookup_failed',
reason='False response',
)
continue
if response is None:
# None has special significance in mongodb searches
# so use 'null' instead.
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='lookup_failed',
reason='Null response',
)
continue
# We seem to have a valid response but ids are different?
if response['id'] != event['_id']:
_log.error(
'Facebook returned information for an event other than '
'{event_id}. Skipping event.'.format(
event_id=event['_id'],
)
)
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='lookup_failed',
reason='Response id is different',
)
continue
save = OrderedDict([
('facebook', response),
('ubernear', OrderedDict([
# Depending on where the event came from,
# the event source may not have already
# been set
('source', 'facebook'),
('lookup_completed', now),
]),
),
])
# Skip responses without a start_time or end_time.
# Sometimes the Graph API returns events without these
if (
'start_time' in save['facebook']
and
'end_time' in save['facebook']
):
save['facebook']['start_time'] = utc_from_iso8601(
save['facebook']['start_time'],
naive=True,
)
save['facebook']['end_time'] = utc_from_iso8601(
save['facebook']['end_time'],
naive=True,
)
else:
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='lookup_failed',
reason='Missing start_time or end_time',
)
continue
if 'updated_time' in save['facebook']:
save['facebook']['updated_time'] = utc_from_iso8601(
save['facebook']['updated_time'],
naive=True,
)
_log.debug(
'Storing event {event_id}'.format(
event_id=event['_id'],
)
)
mongo.save_no_replace(
events_coll,
_id=event['_id'],
save=save,
)
def update_facebook(
events_coll,
graph,
process_all=False,
_log=None,
_datetime=None,
):
if _log is None:
_log = log
if _datetime is None:
_datetime = datetime
now = _datetime.utcnow()
if process_all:
events = events_coll.find()
else:
# The next three represent transitional graph API errors
null_query = OrderedDict([
('ubernear.lookup_failed.reason',
'Null response',
),
])
validating_query = OrderedDict([
('ubernear.lookup_failed.reason', OrderedDict([
('$regex',
'OAuthException error on get.*: '
'Error validating application..'
)
]),
)
])
retry_query = OrderedDict([
('ubernear.lookup_failed.reason', OrderedDict([
('$regex',
'OAuthException error on get.*: '
'An unexpected error has occurred. '
'Please retry your request later..'
)
]),
)
])
or_query = OrderedDict([
('$or',
[null_query,
validating_query,
retry_query,
],
),
])
match_query = OrderedDict([
('ubernear.lookup_completed',
OrderedDict([
('$exists', False),
]),
),
])
and_query = OrderedDict([
('$and', [match_query, or_query]),
])
events = events_coll.find(
OrderedDict([
('$or', [match_query, and_query]),
]),
sort=[('ubernear.fetched', pymongo.ASCENDING)],
)
count = events.count()
if count != 0:
_log.info(
'Fetching {count} event{s}'.format(
count=count,
s='' if count == 1 else 's',
),
)
event_batch = []
found_work = False
# TODO This cursor may timeout if there are too many results
for event in events:
found_work = True
event_batch.append(event)
if len(event_batch) == facebook_batch_size:
_save_events(
events=event_batch,
events_coll=events_coll,
graph=graph,
now=now,
_log=_log,
)
event_batch = []
_save_events(
events=event_batch,
events_coll=events_coll,
graph=graph,
now=now,
_log=_log,
)
return found_work
def expire(
events_coll,
expired_coll,
_datetime=None,
):
if _datetime is None:
_datetime = datetime
last_week = _datetime.utcnow() - timedelta(days=7)
end_parts = [
# No guarantees in documentation that
# $lt doesn't return rows where
# the field doesn't exist
OrderedDict([
('facebook.end_time', OrderedDict([
('$exists', True),
]),
),
]),
OrderedDict([
('facebook.end_time', OrderedDict([
('$lt', last_week),
]),
),
]),
]
end_query = OrderedDict([
('$and', end_parts),
])
false_query = OrderedDict([
('ubernear.lookup_failed.reason', 'False response'),
])
# It seems facebook should return false instead of this error,
# i.e., the id cannot be found. No bug report has been found to
# confirm this although some reports suggest it.
unsupported_query = OrderedDict([
('ubernear.lookup_failed.reason', OrderedDict([
('$regex',
'GraphMethodException error on get.*'
': Unsupported get request..',
),
('$options', 'i'),
]),
)
])
alias_query = OrderedDict([
('ubernear.lookup_failed.reason', OrderedDict([
('$regex',
'OAuthException error on get.*Some '
'of the aliases you requested do not exist.*'
),
('$options', 'i'),
]),
)
])
or_query = OrderedDict([
('$or',
[false_query,
unsupported_query,
alias_query,
],
),
])
facebook_query = OrderedDict([
('ubernear.lookup_completed', OrderedDict([
('$exists', False),
]),
),
])
failed_query = OrderedDict([
('$and', [facebook_query, or_query]),
])
cursor = events_coll.find(
OrderedDict([
('$or',
[end_query,
failed_query,
]
),
]),
sort=[('facebook.end_time', pymongo.ASCENDING)],
)
for event in cursor:
event_id = event.pop('_id')
kwargs = OrderedDict([
('_id', event_id),
('save', event),
])
ubernear = event['ubernear']
place_ids = ubernear.get('place_ids')
if place_ids is not None:
# Add to a set of ubernear.place_ids
kwargs['add_each'] = OrderedDict([
('ubernear.place_ids', place_ids),
])
del ubernear['place_ids']
mongo.save_no_replace(
expired_coll,
**kwargs
)
events_coll.remove(
OrderedDict([
('_id', event_id),
])
)
def update_venue(
events_coll,
usps_id,
process_all,
):
now = datetime.utcnow()
if process_all:
events = events_coll.find()
else:
completed_query = OrderedDict([
('ubernear.normalization_completed',
OrderedDict([
('$exists', False),
]),
),
])
failed_query = OrderedDict([
('ubernear.normalization_failed',
OrderedDict([
('$exists', False),
]),
),
])
lookup_query = OrderedDict([
('ubernear.lookup_completed',
OrderedDict([
('$exists', True),
]),
),
])
events = events_coll.find(
OrderedDict([
('$and',
[completed_query,
failed_query,
lookup_query,
]
),
]),
sort=[('ubernear.fetched', pymongo.ASCENDING)],
)
count = events.count()
if count != 0:
log.info(
'Normalizing {count} event{s}'.format(
count=count,
s='' if count == 1 else 's',
),
)
event_batch = []
found_work = False
# TODO This cursor may timeout if there are too many results
for event in events:
found_work = True
# Don't send venues in the batch that can't be used
# Check for missing values here instead of in the query
# so it is explicitly known which events are not
# eligible for normalization
if not 'venue' in event['facebook']:
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='normalization_failed',
reason='No venue',
)
continue
venue = event['facebook']['venue']
# The minimal requirements for the USPS API
if (
not 'street' in venue
or not 'city' in venue
or not 'state' in venue
):
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='normalization_failed',
reason='No street, city or state',
)
continue
# USPS doesn't take long names for states
venue['state'] = addr_util.normalize_state(
venue['state']
)
# Make sure it's a valid state abbreviation
if venue['state'] not in addr_util.state_abbrev.keys():
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='normalization_failed',
reason='Invalid state',
)
continue
event_batch.append(event)
if len(event_batch) == usps_batch_size:
_save_venues(
events=event_batch,
events_coll=events_coll,
usps_id=usps_id,
now=now,
)
event_batch = []
_save_venues(
events=event_batch,
events_coll=events_coll,
usps_id=usps_id,
now=now,
)
return found_work
def update_coordinate(
events_coll,
yahoo_id,
process_all,
):
now = datetime.utcnow()
if process_all:
events = events_coll.find()
else:
latitude_query = OrderedDict([
('facebook.venue.latitude',
OrderedDict([
('$exists', False),
]),
),
])
longitude_query = OrderedDict([
('facebook.venue.longitude',
OrderedDict([
('$exists', False),
]),
),
])
or_query = OrderedDict([
('$or',
[latitude_query,
longitude_query,
]
),
])
failed_query = OrderedDict([
('ubernear.geocoding_failed',
OrderedDict([
('$exists', False),
]),
),
])
completed_query = OrderedDict([
('ubernear.geocoding_completed',
OrderedDict([
('$exists', False),
]),
),
])
lookup_query = OrderedDict([
('ubernear.lookup_completed',
OrderedDict([
('$exists', True),
]),
),
])
query = OrderedDict([
('$and',
[or_query,
failed_query,
completed_query,
lookup_query,
]
),
])
events = events_coll.find(
query,
sort=[('ubernear.fetched', pymongo.ASCENDING)],
)
count = events.count()
if count != 0:
log.info(
'Geocoding {count} event{s}'.format(
count=count,
s='' if count == 1 else 's',
),
)
found_work = OrderedDict([
('found_work', False),
('sleep', None),
])
# TODO This cursor may timeout if there are too many results
for event in events:
found_work['found_work'] = True
# Check for missing values here instead of in the query
# so it is explicitly known which events are not
# eligible for geocoding
if not 'venue' in event['facebook']:
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='geocoding_failed',
reason='No venue',
)
continue
venue = event['facebook']['venue']
# The minimal requirements for geocoding
if 'normalized' in event:
address = event['normalized']['address']
city = event['normalized']['city']
elif (
not 'street' in venue
or not 'city' in venue
):
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='geocoding_failed',
reason='No street or city',
)
continue
else:
address = venue['street']
city = venue['city']
request = '{address},{city}'.format(
address=address.encode('utf-8'),
city=city.encode('utf-8'),
)
try:
# TODO figure out which error corresponds to the
# rate limit reached and return the number of hours
# to sleep
response = geocoder.geocode_yahoo(request, yahoo_id)
except geocoder.GeocoderAmbiguousResultError, e:
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='geocoding_failed',
reason=str(e),
)
continue
if response is None:
_mark_as_failed(
events_coll=events_coll,
event_id=event['_id'],
now=now,
field='geocoding_failed',
reason='Null response',
)
continue
save = OrderedDict([
('facebook.venue.latitude', response['lat']),
('facebook.venue.longitude', response['lng']),
('ubernear.geocoding_completed', now),
('ubernear.geocoding_source', 'yahoo'),
])
log.debug(
'Storing coordinates for {event_id}'.format(
event_id=event['_id'],
)
)
mongo.save_no_replace(
events_coll,
_id=event['_id'],
save=save,
)
return found_work
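def run_update_cycle(events_coll, expired_coll, graph, usps_id, yahoo_id):
    """Driver sketch (an assumption, not part of the original module).
    Runs one pass over the pipeline implemented above: Facebook lookups,
    USPS address normalization, Yahoo geocoding and expiry of stale events.
    The caller supplies the pymongo collections and API credentials.
    """
    found_work = update_facebook(events_coll=events_coll, graph=graph)
    found_work |= update_venue(events_coll, usps_id, process_all=False)
    coordinate_work = update_coordinate(events_coll, yahoo_id, process_all=False)
    expire(events_coll=events_coll, expired_coll=expired_coll)
    return found_work or coordinate_work['found_work']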
|
|
import django
from django.contrib.contenttypes.fields import GenericRel, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import DEFAULT_DB_ALIAS
from django.db.models.query_utils import Q
from django.utils.functional import lazy
from django.utils.text import capfirst
from fluent_contents import appsettings
from fluent_contents.forms.fields import PlaceholderFormField
from .db import ContentItem, Placeholder
__all__ = ("PlaceholderRelation", "ContentItemRelation", "PlaceholderField")
# The PlaceholderField is inspired by Django CMS
# Yet it uses a different methodology to access the fields.
#
# In Django CMS it's a ForeignKey to Placeholder.
# Here, the Placeholder has a GenericForeignKey to the parent - hence it will be deleted when the parent is removed -
# so the PlaceholderField is merely a reverse GenericRelation.
#
# In the admin, the logic of the PlaceholderEditor code can be reused.
class PlaceholderRelation(GenericRelation):
"""
A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
    is expected to be referenced by a :class:`~fluent_contents.models.Placeholder`. For example:
.. code-block:: python
class Page(models.Model):
placeholder_set = PlaceholderRelation()
"""
def __init__(self, **kwargs):
defaults = {
"limit_choices_to": Q(
parent_type=lazy(
lambda: ContentType.objects.get_for_model(Placeholder), ContentType
)()
)
}
defaults.update(kwargs)
super(PlaceholderRelation, self).__init__(
to=Placeholder,
object_id_field="parent_id",
content_type_field="parent_type",
**defaults
)
class ContentItemRelation(GenericRelation):
"""
A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
is expected to be referenced by the :class:`~fluent_contents.models.ContentItem` classes. For example:
.. code-block:: python
class Page(models.Model):
contentitem_set = ContentItemRelation()
Adding this relation also causes the admin delete page to list the
:class:`~fluent_contents.models.ContentItem` objects which will be deleted.
"""
def __init__(self, **kwargs):
super(ContentItemRelation, self).__init__(
to=ContentItem,
object_id_field="parent_id",
content_type_field="parent_type",
**kwargs
)
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
# Fix delete screen. Workaround for https://github.com/django-polymorphic/django-polymorphic/issues/34
return (
super(ContentItemRelation, self)
.bulk_related_objects(objs)
.non_polymorphic()
)
class PlaceholderRel(GenericRel):
"""
The internal :class:`~django.contrib.contenttypes.generic.GenericRel`
that is used by the :class:`PlaceholderField` to support queries.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
):
# Note: all other args are provided for Django 1.9 compatibility
limit_choices_to = Q(
parent_type=lazy(
lambda: ContentType.objects.get_for_model(Placeholder), ContentType
)(),
slot=field.slot,
)
# TODO: make sure reverse queries work properly
super(PlaceholderRel, self).__init__(
field=field,
to=Placeholder,
related_name=None, # NOTE: must be unique for app/model/slot.
limit_choices_to=limit_choices_to,
)
class PlaceholderFieldDescriptor(object):
"""
This descriptor is placed on the PlaceholderField model instance
by the :func:`~PlaceholderField.contribute_to_class` function.
This causes ``instance.field`` to return a :class:`~fluent_contents.models.Placeholder` object.
"""
def __init__(self, slot):
"""Set the slot this descriptor is created for."""
self.slot = slot
def __get__(self, instance, instance_type=None):
"""Return the placeholder by slot."""
if instance is None:
return self
try:
placeholder = Placeholder.objects.get_by_slot(instance, self.slot)
except Placeholder.DoesNotExist:
raise Placeholder.DoesNotExist(
"Placeholder does not exist for parent {0} (type_id: {1}, parent_id: {2}), slot: '{3}'".format(
repr(instance),
ContentType.objects.get_for_model(
instance, for_concrete_model=False
).pk,
instance.pk,
self.slot,
)
)
else:
placeholder.parent = instance # fill the reverse cache
return placeholder
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Descriptor must be accessed via instance")
if value == "-DUMMY-":
return
raise NotImplementedError(
"Setting Placeholder value is not supported, use Placeholder.objects.create_for_object() instead."
)
class PlaceholderField(PlaceholderRelation):
"""
The model field to add :class:`~fluent_contents.models.ContentItem` objects to a model.
:param slot: A programmatic name to identify the placeholder.
:param plugins: Optional, define which plugins are allowed to be used. This can be a list of names, or :class:`~fluent_contents.extensions.ContentPlugin` references.
:type slot: str
:type plugins: list
This class provides the form fields for the field. Use this class in a model to use it:
.. code-block:: python
class Article(models.Model):
contents = PlaceholderField("article_contents")
The data itself is stored as reverse relation in the :class:`~fluent_contents.models.ContentItem` object.
Hence, all contents will be cleaned up properly when the parent model is deleted.
The placeholder will be displayed in the admin:
.. image:: /images/admin/placeholderfieldadmin1.png
:width: 770px
:height: 562px
:alt: django-fluent-contents placeholder field preview
"""
rel_class = PlaceholderRel # Django 1.9
def __init__(self, slot, plugins=None, **kwargs):
"""
Initialize the placeholder field.
"""
self.slot = slot
super(PlaceholderField, self).__init__(**kwargs)
# See if a plugin configuration is defined in the settings
self._slot_config = (
appsettings.FLUENT_CONTENTS_PLACEHOLDER_CONFIG.get(slot) or {}
)
self._plugins = plugins or self._slot_config.get("plugins") or None
# Overwrite some hardcoded defaults from the base class.
# TODO: support blank: False to enforce adding at least one plugin.
self.editable = True
self.blank = True
def formfield(self, **kwargs):
"""
Returns a :class:`PlaceholderFormField` instance for this database Field.
"""
defaults = {
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
"required": not self.blank,
}
defaults.update(kwargs)
return PlaceholderFormField(slot=self.slot, plugins=self._plugins, **defaults)
def contribute_to_class(self, cls, name, **kwargs):
"""
Internal Django method to associate the field with the Model; it assigns the descriptor.
"""
super(PlaceholderField, self).contribute_to_class(cls, name, **kwargs)
# overwrites what instance.<colname> returns; give direct access to the placeholder
setattr(cls, name, PlaceholderFieldDescriptor(self.slot))
# Make placeholder fields easy to find
# Can't assign this to cls._meta because that gets overwritten by every level of model inheritance.
if not hasattr(cls, "_meta_placeholder_fields"):
cls._meta_placeholder_fields = {}
cls._meta_placeholder_fields[name] = self
        # Configure the reverse relation if possible.
# TODO: make sure reverse queries work properly
if django.VERSION >= (1, 11):
rel = self.remote_field
else:
rel = self.rel
if rel.related_name is None:
            # Make unique for model (multiple models can use the same slot name)
rel.related_name = "{app}_{model}_{slot}_FIXME".format(
app=cls._meta.app_label,
model=cls._meta.object_name.lower(),
slot=self.slot,
)
        # The attribute on the related model must exist for the admin delete page. Currently it's not actively used.
# The regular ForeignKey assigns a ForeignRelatedObjectsDescriptor to it for example.
# In this case, the PlaceholderRelation is already the reverse relation.
# Being able to move forward from the Placeholder to the derived models does not have that much value.
setattr(rel.to, self.rel.related_name, None)
@property
def plugins(self):
"""
Get the set of plugins that this field may display.
"""
from fluent_contents import extensions
if self._plugins is None:
return extensions.plugin_pool.get_plugins()
else:
try:
return extensions.plugin_pool.get_plugins_by_name(*self._plugins)
except extensions.PluginNotFound as e:
raise extensions.PluginNotFound(
str(e)
+ " Update the plugin list of '{0}.{1}' field or FLUENT_CONTENTS_PLACEHOLDER_CONFIG['{2}'] setting.".format(
self.model._meta.object_name, self.name, self.slot
)
)
def value_from_object(self, obj):
"""
Internal Django method, used to return the placeholder ID when exporting the model instance.
"""
try:
# not using self.attname, access the descriptor instead.
placeholder = getattr(obj, self.name)
except Placeholder.DoesNotExist:
# Still allow ModelForm / admin to open and create a new Placeholder if the table was truncated.
return None
# Be consistent with other fields, like ForeignKey:
return placeholder.id if placeholder else None
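# Usage sketch (an assumption, not from this module): when no ``plugins``
# argument is passed, PlaceholderField falls back to the per-slot
# FLUENT_CONTENTS_PLACEHOLDER_CONFIG setting. The slot and plugin names below
# are placeholders.
#
#   # settings.py
#   FLUENT_CONTENTS_PLACEHOLDER_CONFIG = {
#       "article_contents": {
#           "plugins": ("TextPlugin",),
#       },
#   }
#
#   # models.py
#   class Article(models.Model):
#       contents = PlaceholderField("article_contents")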
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import re
from nova import exception as nova_exception
from nova import utils
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
from nova.virt.powervm import constants
from nova.virt.powervm import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PowerVMDiskAdapter(object):
pass
class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
"""Default block device providor for PowerVM
This disk adapter uses logical volumes on the hosting VIOS
to provide backing block devices for instances/LPARs
"""
def __init__(self, connection):
super(PowerVMLocalVolumeAdapter, self).__init__()
self.command = command.IVMCommand()
self._connection = None
self.connection_data = connection
def _set_connection(self):
if self._connection is None:
self._connection = common.ssh_connect(self.connection_data)
def create_volume(self, size):
"""Creates a logical volume with a minimum size
:param size: size of the logical volume in bytes
:returns: string -- the name of the new logical volume.
:raises: PowerVMNoSpaceLeftOnVolumeGroup
"""
return self._create_logical_volume(size)
def delete_volume(self, disk_name):
"""Removes the Logical Volume and its associated vSCSI connection
:param disk_name: name of Logical Volume device in /dev/
"""
LOG.debug(_("Removing the logical volume '%s'") % disk_name)
self._remove_logical_volume(disk_name)
def create_volume_from_image(self, context, instance, image_id):
"""Creates a Logical Volume and copies the specified image to it
:param context: nova context used to retrieve image from glance
:param instance: instance to create the volume for
        :param image_id: image reference used to locate the image in glance
:returns: dictionary with the name of the created
Logical Volume device in 'device_name' key
"""
file_name = '.'.join([image_id, 'gz'])
file_path = os.path.join(CONF.powervm_img_local_path,
file_name)
if not os.path.isfile(file_path):
LOG.debug(_("Fetching image '%s' from glance") % image_id)
images.fetch_to_raw(context, image_id, file_path,
instance['user_id'],
project_id=instance['project_id'])
else:
LOG.debug((_("Using image found at '%s'") % file_path))
LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
remote_path = CONF.powervm_img_remote_path
remote_file_name, size = self._copy_image_file(file_path, remote_path)
# calculate root device size in bytes
# we respect the minimum root device size in constants
size_gb = max(instance['instance_type']['root_gb'],
constants.POWERVM_MIN_ROOT_GB)
size = size_gb * 1024 * 1024 * 1024
try:
LOG.debug(_("Creating logical volume of size %s bytes") % size)
disk_name = self._create_logical_volume(size)
LOG.debug(_("Copying image to the device '%s'") % disk_name)
self._copy_file_to_device(remote_file_name, disk_name)
except Exception:
LOG.error(_("Error while creating logical volume from image. "
"Will attempt cleanup."))
# attempt cleanup of logical volume before re-raising exception
with excutils.save_and_reraise_exception():
try:
self.delete_volume(disk_name)
except Exception:
msg = _('Error while attempting cleanup of failed '
'deploy to logical volume.')
LOG.exception(msg)
return {'device_name': disk_name}
def create_image_from_volume(self):
raise NotImplementedError()
def migrate_volume(self):
raise NotImplementedError()
def attach_volume_to_host(self, *args, **kargs):
pass
def detach_volume_from_host(self, *args, **kargs):
pass
def _create_logical_volume(self, size):
"""Creates a logical volume with a minimum size.
:param size: size of the logical volume in bytes
:returns: string -- the name of the new logical volume.
:raises: PowerVMNoSpaceLeftOnVolumeGroup
"""
vgs = self.run_vios_command(self.command.lsvg())
cmd = self.command.lsvg('%s -field vgname freepps -fmt :' %
' '.join(vgs))
output = self.run_vios_command(cmd)
found_vg = None
# If it's not a multiple of 1MB we get the next
# multiple and use it as the megabyte_size.
megabyte = 1024 * 1024
if (size % megabyte) != 0:
megabyte_size = int(size / megabyte) + 1
else:
megabyte_size = size / megabyte
# Search for a volume group with enough free space for
# the new logical volume.
for vg in output:
# Returned output example: 'rootvg:396 (25344 megabytes)'
match = re.search(r'^(\w+):\d+\s\((\d+).+$', vg)
if match is None:
continue
vg_name, avail_size = match.groups()
if megabyte_size <= int(avail_size):
found_vg = vg_name
break
if not found_vg:
LOG.error(_('Could not create logical volume. '
'No space left on any volume group.'))
raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))
lv_name = self.run_vios_command(cmd)[0]
return lv_name
def _remove_logical_volume(self, lv_name):
"""Removes the lv and the connection between its associated vscsi.
:param lv_name: a logical volume name
"""
cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)
self.run_vios_command(cmd)
def _copy_file_to_device(self, source_path, device, decompress=True):
"""Copy file to device.
:param source_path: path to input source file
:param device: output device name
:param decompress: if True (default) the file will be decompressed
on the fly while being copied to the drive
"""
if decompress:
cmd = ('gunzip -c %s | dd of=/dev/%s bs=1024k' %
(source_path, device))
else:
cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)
self.run_vios_command_as_root(cmd)
def _copy_image_file(self, source_path, remote_path, decompress=False):
"""Copy file to VIOS, decompress it, and return its new size and name.
:param source_path: source file path
        :param remote_path: remote file path
        :param decompress: if True, decompresses the file after copying;
            if False (default), just copies the file
"""
# Calculate source image checksum
hasher = hashlib.md5()
block_size = 0x10000
img_file = file(source_path, 'r')
buf = img_file.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = img_file.read(block_size)
source_cksum = hasher.hexdigest()
comp_path = os.path.join(remote_path, os.path.basename(source_path))
uncomp_path = comp_path.rstrip(".gz")
if not decompress:
final_path = comp_path
else:
final_path = "%s.%s" % (uncomp_path, source_cksum)
# Check whether the image is already on IVM
output = self.run_vios_command("ls %s" % final_path,
check_exit_code=False)
# If the image does not exist already
if not len(output):
# Copy file to IVM
common.ftp_put_command(self.connection_data, source_path,
remote_path)
# Verify image file checksums match
cmd = ("/usr/bin/csum -h MD5 %s |"
"/usr/bin/awk '{print $1}'" % comp_path)
output = self.run_vios_command_as_root(cmd)
if not len(output):
LOG.error(_("Unable to get checksum"))
raise exception.PowerVMFileTransferFailed()
if source_cksum != output[0]:
LOG.error(_("Image checksums do not match"))
raise exception.PowerVMFileTransferFailed()
if decompress:
# Unzip the image
cmd = "/usr/bin/gunzip %s" % comp_path
output = self.run_vios_command_as_root(cmd)
# Remove existing image file
cmd = "/usr/bin/rm -f %s.*" % uncomp_path
output = self.run_vios_command_as_root(cmd)
# Rename unzipped image
cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
output = self.run_vios_command_as_root(cmd)
# Remove compressed image file
cmd = "/usr/bin/rm -f %s" % comp_path
output = self.run_vios_command_as_root(cmd)
else:
LOG.debug(_("Image found on host at '%s'") % final_path)
# Calculate file size in multiples of 512 bytes
output = self.run_vios_command("ls -o %s|awk '{print $4}'" %
final_path, check_exit_code=False)
if len(output):
size = int(output[0])
else:
LOG.error(_("Uncompressed image file not found"))
raise exception.PowerVMFileTransferFailed()
if (size % 512 != 0):
size = (int(size / 512) + 1) * 512
return final_path, size
def run_vios_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
        :param cmd: String with the command to run.
"""
self._set_connection()
stdout, stderr = utils.ssh_execute(self._connection, cmd,
check_exit_code=check_exit_code)
return stdout.strip().splitlines()
def run_vios_command_as_root(self, command, check_exit_code=True):
"""Run a remote command as root using an active ssh connection.
        :param command: String with the command to run as root.
"""
self._set_connection()
stdout, stderr = common.ssh_command_as_root(
self._connection, command, check_exit_code=check_exit_code)
return stdout.read().splitlines()
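# --- Illustrative sketch (not part of the original driver) ---
# _create_logical_volume above rounds the requested byte size up to whole
# megabytes before checking volume-group free space, and _copy_image_file
# rounds the copied image size up to a multiple of 512 bytes. A minimal
# standalone version of that round-up arithmetic; the demo values are made up:
def _round_up(value, multiple):
    """Round value up to the nearest multiple of `multiple`."""
    if value % multiple != 0:
        return (value // multiple + 1) * multiple
    return value
if __name__ == '__main__':
    size_bytes = 3 * 1024 * 1024 + 1              # just over 3 MB
    print(_round_up(size_bytes, 1024 * 1024))     # 4194304 (next whole MB)
    print(_round_up(size_bytes, 512))             # 3146240 (next 512-byte block)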
|
|
"""
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import os
import re
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.utils.functional import LazyObject, empty
from django.utils import importlib
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except KeyError:
# NOTE: This is arguably an EnvironmentError, but that causes
# problems with Python's interactive help.
raise ImportError("Settings cannot be imported, because environment variable %s is undefined." % ENVIRONMENT_VARIABLE)
self._wrapped = Settings(settings_module)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
warnings.warn("If set, %s must end with a slash" % name,
DeprecationWarning)
elif name == "ADMIN_MEDIA_PREFIX":
warnings.warn("The ADMIN_MEDIA_PREFIX setting has been removed; "
"use STATIC_URL instead.", DeprecationWarning)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except ImportError, e:
raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
# Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
# of all those apps.
new_installed_apps = []
for app in self.INSTALLED_APPS:
if app.endswith('.*'):
app_mod = importlib.import_module(app[:-2])
appdir = os.path.dirname(app_mod.__file__)
app_subdirs = os.listdir(appdir)
app_subdirs.sort()
name_pattern = re.compile(r'[a-zA-Z]\w*')
for d in app_subdirs:
if name_pattern.match(d) and os.path.isdir(os.path.join(appdir, d)):
new_installed_apps.append('%s.%s' % (app[:-2], d))
else:
new_installed_apps.append(app)
self.INSTALLED_APPS = new_installed_apps
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
# Settings are configured, so we can set up the logger if required
if self.LOGGING_CONFIG:
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = importlib.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
# Backwards-compatibility shim for #16288 fix
compat_patch_logging_config(self.LOGGING)
# ... then invoke it with the logging settings
logging_config_func(self.LOGGING)
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.default_settings = default_settings
def __getattr__(self, name):
return getattr(self.default_settings, name)
def __dir__(self):
return self.__dict__.keys() + dir(self.default_settings)
# For Python < 2.6:
__members__ = property(lambda self: self.__dir__())
settings = LazySettings()
def compat_patch_logging_config(logging_config):
"""
Backwards-compatibility shim for #16288 fix. Takes initial value of
``LOGGING`` setting and patches it in-place (issuing deprecation warning)
if "mail_admins" logging handler is configured but has no filters.
"""
# Shim only if LOGGING["handlers"]["mail_admins"] exists,
# but has no "filters" key
if "filters" not in logging_config.get(
"handlers", {}).get(
"mail_admins", {"filters": []}):
warnings.warn(
"You have no filters defined on the 'mail_admins' logging "
"handler: adding implicit debug-false-only filter. "
"See http://docs.djangoproject.com/en/dev/releases/1.4/"
"#request-exceptions-are-now-always-logged",
PendingDeprecationWarning)
filter_name = "require_debug_false"
filters = logging_config.setdefault("filters", {})
while filter_name in filters:
filter_name = filter_name + "_"
def _callback(record):
from django.conf import settings
return not settings.DEBUG
filters[filter_name] = {
"()": "django.utils.log.CallbackFilter",
"callback": _callback
}
logging_config["handlers"]["mail_admins"]["filters"] = [filter_name]
|
|
# encoding: utf-8
"""
Test suite for pptx.parts.slide module
"""
from __future__ import absolute_import
import pytest
from pptx.chart.data import ChartData
from pptx.enum.base import EnumValue
from pptx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from pptx.opc.packuri import PackURI
from pptx.opc.package import Part
from pptx.oxml.parts.slide import CT_Slide
from pptx.oxml.shapes.groupshape import CT_GroupShape
from pptx.package import Package
from pptx.parts.chart import ChartPart
from pptx.parts.image import Image, ImagePart
from pptx.parts.slide import BaseSlide, Slide, _SlidePlaceholders
from pptx.parts.slidelayout import SlideLayout
from pptx.shapes.placeholder import _BaseSlidePlaceholder
from pptx.shapes.shapetree import SlideShapeTree
from ..unitutil.cxml import element
from ..unitutil.file import absjoin, test_file_dir
from ..unitutil.mock import (
call, class_mock, function_mock, initializer_mock, instance_mock,
method_mock, property_mock
)
class DescribeBaseSlide(object):
def it_knows_its_name(self, name_fixture):
base_slide, expected_value = name_fixture
assert base_slide.name == expected_value
def it_can_get_a_related_image_by_rId(self, get_image_fixture):
slide, rId, image_ = get_image_fixture
assert slide.get_image(rId) is image_
def it_can_add_an_image_part(self, image_part_fixture):
slide, image_file, image_part_, rId_ = image_part_fixture
image_part, rId = slide.get_or_add_image_part(image_file)
slide._package.get_or_add_image_part.assert_called_once_with(
image_file
)
slide.relate_to.assert_called_once_with(image_part_, RT.IMAGE)
assert image_part is image_part_
assert rId is rId_
def it_provides_access_to_its_spTree_element_to_help(self, slide):
spTree = slide.spTree
assert isinstance(spTree, CT_GroupShape)
# fixtures -------------------------------------------------------
@pytest.fixture
def get_image_fixture(self, related_parts_prop_, image_part_, image_):
slide = BaseSlide(None, None, None, None)
rId = 'rId42'
related_parts_prop_.return_value = {rId: image_part_}
image_part_.image = image_
return slide, rId, image_
@pytest.fixture
def image_part_fixture(
self, partname_, package_, image_part_, relate_to_):
slide = BaseSlide(partname_, None, None, package_)
image_file, rId = 'foobar.png', 'rId6'
package_.get_or_add_image_part.return_value = image_part_
relate_to_.return_value = rId
return slide, image_file, image_part_, rId
@pytest.fixture
def name_fixture(self):
sld_cxml, expected_value = 'p:sld/p:cSld{name=Foobar}', 'Foobar'
sld = element(sld_cxml)
base_slide = BaseSlide(None, None, sld, None)
return base_slide, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def image_(self, request):
return instance_mock(request, Image)
@pytest.fixture
def image_part_(self, request):
return instance_mock(request, ImagePart)
@pytest.fixture
def package_(self, request):
return instance_mock(request, Package)
@pytest.fixture
def partname_(self):
return PackURI('/foo/bar.xml')
@pytest.fixture
def relate_to_(self, request):
return method_mock(request, BaseSlide, 'relate_to')
@pytest.fixture
def related_parts_prop_(self, request):
return property_mock(request, BaseSlide, 'related_parts')
@pytest.fixture
def slide(self):
sld = element('p:sld/p:cSld/p:spTree')
return BaseSlide(None, None, sld, None)
class DescribeSlide(object):
def it_can_add_a_chart_part(self, add_chart_part_fixture):
slide, chart_type_, chart_data_ = add_chart_part_fixture[:3]
ChartPart_, chart_part_, package_, rId = add_chart_part_fixture[3:]
_rId = slide.add_chart_part(chart_type_, chart_data_)
ChartPart_.new.assert_called_once_with(
chart_type_, chart_data_, package_
)
slide.relate_to.assert_called_once_with(slide, chart_part_, RT.CHART)
assert _rId is rId
def it_provides_access_to_the_shapes_on_the_slide(self, shapes_fixture):
slide, SlideShapeTree_, slide_shape_tree_ = shapes_fixture
shapes = slide.shapes
SlideShapeTree_.assert_called_once_with(slide)
assert shapes is slide_shape_tree_
def it_provides_access_to_its_placeholders(self, placeholders_fixture):
slide, _SlidePlaceholders_, slide_placeholders_ = (
placeholders_fixture
)
placeholders = slide.placeholders
_SlidePlaceholders_.assert_called_once_with(
slide._element.spTree, slide
)
assert placeholders is slide_placeholders_
def it_can_create_a_new_slide(self, new_fixture):
slide_layout_, partname_, package_ = new_fixture[:3]
Slide_init_, slide_elm_, shapes_, relate_to_ = new_fixture[3:]
slide = Slide.new(slide_layout_, partname_, package_)
Slide_init_.assert_called_once_with(
partname_, CT.PML_SLIDE, slide_elm_, package_
)
shapes_.clone_layout_placeholders.assert_called_once_with(
slide_layout_
)
relate_to_.assert_called_once_with(
slide, slide_layout_, RT.SLIDE_LAYOUT
)
assert isinstance(slide, Slide)
def it_knows_the_slide_layout_it_inherits_from(self, layout_fixture):
slide, slide_layout_ = layout_fixture
slide_layout = slide.slide_layout
slide.part_related_by.assert_called_once_with(RT.SLIDE_LAYOUT)
assert slide_layout is slide_layout_
def it_knows_the_minimal_element_xml_for_a_slide(self, slide):
path = absjoin(test_file_dir, 'minimal_slide.xml')
sld = CT_Slide.new()
with open(path, 'r') as f:
expected_xml = f.read()
assert sld.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture
def add_chart_part_fixture(
self, package_, chart_type_, chart_data_, ChartPart_,
chart_part_, rId, relate_to_):
slide = Slide(None, None, None, package_)
return (
slide, chart_type_, chart_data_, ChartPart_, chart_part_,
package_, rId
)
@pytest.fixture
def layout_fixture(self, slide_layout_, part_related_by_):
slide = Slide(None, None, None, None)
return slide, slide_layout_
@pytest.fixture
def placeholders_fixture(self, slide_elm_, _SlidePlaceholders_,
slide_placeholders_):
slide = Slide(None, None, slide_elm_, None)
return slide, _SlidePlaceholders_, slide_placeholders_
@pytest.fixture
def shapes_fixture(self, SlideShapeTree_, slide_shape_tree_):
slide = Slide(None, None, None, None)
return slide, SlideShapeTree_, slide_shape_tree_
@pytest.fixture
def new_fixture(
self, slide_layout_, partname_, package_, Slide_init_,
CT_Slide_, slide_elm_, shapes_prop_, shapes_,
relate_to_):
return (
slide_layout_, partname_, package_, Slide_init_, slide_elm_,
shapes_, relate_to_
)
    # fixture components ---------------------------------------------
@pytest.fixture
def ChartPart_(self, request, chart_part_):
ChartPart_ = class_mock(request, 'pptx.parts.slide.ChartPart')
ChartPart_.new.return_value = chart_part_
return ChartPart_
@pytest.fixture
def chart_data_(self, request):
return instance_mock(request, ChartData)
@pytest.fixture
def chart_part_(self, request):
return instance_mock(request, ChartPart)
@pytest.fixture
def chart_type_(self, request):
return instance_mock(request, EnumValue)
@pytest.fixture
def CT_Slide_(self, request, slide_elm_):
CT_Slide_ = class_mock(request, 'pptx.parts.slide.CT_Slide')
CT_Slide_.new.return_value = slide_elm_
return CT_Slide_
@pytest.fixture
def package_(self, request):
return instance_mock(request, Package)
@pytest.fixture
def part_related_by_(self, request, slide_layout_):
return method_mock(
request, Slide, 'part_related_by',
return_value=slide_layout_
)
@pytest.fixture
def partname_(self, request):
return instance_mock(request, PackURI)
@pytest.fixture
def relate_to_(self, request, rId):
return method_mock(
request, Part, 'relate_to', autospec=True, return_value=rId
)
@pytest.fixture
def rId(self):
return 'rId42'
@pytest.fixture
def shapes_(self, request):
return instance_mock(request, SlideShapeTree)
@pytest.fixture
def shapes_prop_(self, request, shapes_):
return property_mock(request, Slide, 'shapes', return_value=shapes_)
@pytest.fixture
def slide(self):
return Slide(None, None, None, None)
@pytest.fixture
def _SlidePlaceholders_(self, request, slide_placeholders_):
return class_mock(
request, 'pptx.parts.slide._SlidePlaceholders',
return_value=slide_placeholders_
)
@pytest.fixture
def SlideShapeTree_(self, request, slide_shape_tree_):
return class_mock(
request, 'pptx.parts.slide.SlideShapeTree',
return_value=slide_shape_tree_
)
@pytest.fixture
def slide_elm_(self, request):
return instance_mock(request, CT_Slide)
@pytest.fixture
def Slide_init_(self, request):
return initializer_mock(request, Slide)
@pytest.fixture
def slide_layout_(self, request):
return instance_mock(request, SlideLayout)
@pytest.fixture
def slide_placeholders_(self, request):
return instance_mock(request, _SlidePlaceholders)
@pytest.fixture
def slide_shape_tree_(self, request):
return instance_mock(request, SlideShapeTree)
class Describe_SlidePlaceholders(object):
def it_can_get_a_placeholder_by_idx(self, getitem_fixture):
placeholders, idx, SlideShapeFactory_ = getitem_fixture[:3]
shape_elm, placeholder_ = getitem_fixture[3:]
placeholder = placeholders[idx]
SlideShapeFactory_.assert_called_once_with(shape_elm, placeholders)
assert placeholder is placeholder_
def it_can_iterate_over_its_placeholders(self, iter_fixture):
placeholders, SlideShapeFactory_ = iter_fixture[:2]
expected_calls, expected_values = iter_fixture[2:]
ps = [p for p in placeholders]
assert SlideShapeFactory_.call_args_list == expected_calls
assert ps == expected_values
def it_knows_how_many_placeholders_it_contains(self, len_fixture):
placeholders, expected_value = len_fixture
assert len(placeholders) == expected_value
# fixtures -------------------------------------------------------
@pytest.fixture(params=[
('p:spTree/p:sp/p:nvSpPr/p:nvPr/p:ph{type=pic,idx=1}', 1, 0),
('p:spTree/p:pic/p:nvPicPr/p:nvPr/p:ph{type=pic,idx=1}', 1, 0),
('p:spTree/(p:sp,p:sp/p:nvSpPr/p:nvPr/p:ph{type=title})', 0, 1),
('p:spTree/(p:sp,p:pic/p:nvPicPr/p:nvPr/p:ph{type=pic,idx=1})',
1, 1),
('p:spTree/(p:sp/p:nvSpPr/p:nvPr/p:ph{type=title},'
'p:sp/p:nvSpPr/p:nvPr/p:ph{type=pic,idx=3})', 3, 1),
])
def getitem_fixture(self, request, SlideShapeFactory_, placeholder_):
spTree_cxml, idx, offset = request.param
spTree = element(spTree_cxml)
placeholders = _SlidePlaceholders(spTree, None)
shape_elm = spTree[offset]
SlideShapeFactory_.return_value = placeholder_
return placeholders, idx, SlideShapeFactory_, shape_elm, placeholder_
@pytest.fixture(params=[
('p:spTree/('
'p:sp/p:nvSpPr/p:nvPr/p:ph{type=body,idx=1},'
'p:sp/p:nvSpPr/p:nvPr/p:ph{type=title},'
'p:pic/p:nvPicPr/p:nvPr/p:ph{type=pic,idx=3})', (1, 0, 2)),
])
def iter_fixture(self, request, SlideShapeFactory_, placeholder_):
spTree_cxml, sequence = request.param
spTree = element(spTree_cxml)
placeholders = _SlidePlaceholders(spTree, None)
SlideShapeFactory_.return_value = placeholder_
calls = [call(spTree[i], placeholders) for i in sequence]
values = [placeholder_] * len(sequence)
return placeholders, SlideShapeFactory_, calls, values
@pytest.fixture(params=[
('p:spTree', 0),
('p:spTree/(p:sp,p:pic,p:sp)', 0),
('p:spTree/(p:sp,p:sp/p:nvSpPr/p:nvPr/p:ph{type=title},p:pic)', 1),
('p:spTree/('
'p:sp/p:nvSpPr/p:nvPr/p:ph{type=body,idx=1},'
'p:sp/p:nvSpPr/p:nvPr/p:ph{type=title},'
'p:pic/p:nvPicPr/p:nvPr/p:ph{type=pic,idx=3})', 3),
])
def len_fixture(self, request):
spTree_cxml, length = request.param
placeholders = _SlidePlaceholders(element(spTree_cxml), None)
return placeholders, length
# fixture components ---------------------------------------------
@pytest.fixture
def placeholder_(self, request):
return instance_mock(request, _BaseSlidePlaceholder)
@pytest.fixture
def SlideShapeFactory_(self, request, placeholder_):
return function_mock(
request, 'pptx.parts.slide.SlideShapeFactory',
return_value=placeholder_
)
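# --- Illustrative sketch (not part of the test suite) ---
# The fixtures above exercise Slide.shapes, Slide.placeholders and
# _SlidePlaceholders lookup by placeholder idx. Through the public
# python-pptx API the same behaviour looks roughly like this; it assumes a
# reasonably recent python-pptx where Presentation() loads the default
# template and slide layout 0 carries a title placeholder:
if __name__ == '__main__':
    from pptx import Presentation
    prs = Presentation()                                   # default template
    slide = prs.slides.add_slide(prs.slide_layouts[0])
    for ph in slide.placeholders:                          # document-order iteration
        print('%d %s' % (ph.placeholder_format.idx, ph.name))
    title = slide.placeholders[0]                          # lookup by idx
    title.text = 'Hello'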
|
|
import inspect
import functools
import mongoengine as me
from marshmallow_mongoengine import fields as ma_fields
from marshmallow_mongoengine.conversion import params
from marshmallow_mongoengine.exceptions import ModelConversionError
class MetaFieldBuilder(object):
"""
Convert a given Mongoengine Field to an equivalent Marshmallow Field
"""
BASE_AVAILABLE_PARAMS = (params.DescriptionParam, params.AllowNoneParam,
params.ChoiceParam, params.RequiredParam)
AVAILABLE_PARAMS = ()
MARSHMALLOW_FIELD_CLS = None
def __init__(self, field):
self.mongoengine_field = field
self.params = [paramCls(field)
for paramCls in self.BASE_AVAILABLE_PARAMS + self.AVAILABLE_PARAMS]
def build_marshmallow_field(self, **kwargs):
"""
        :return: The Marshmallow Field instantiated and configured
"""
field_kwargs = None
for param in self.params:
field_kwargs = param.apply(field_kwargs)
field_kwargs.update(kwargs)
return self.marshmallow_field_cls(**field_kwargs)
def _get_marshmallow_field_cls(self):
"""
        Return the marshmallow Field class; overload this method to
        generate a more dynamic field class
"""
return self.MARSHMALLOW_FIELD_CLS
@property
def marshmallow_field_cls(self):
return self._get_marshmallow_field_cls()
class ListBuilder(MetaFieldBuilder):
AVAILABLE_PARAMS = (params.LenghtParam,)
MARSHMALLOW_FIELD_CLS = ma_fields.List
def _get_marshmallow_field_cls(self):
sub_field = get_field_builder_for_data_type(
self.mongoengine_field.field)
return functools.partial(
self.MARSHMALLOW_FIELD_CLS,
sub_field.build_marshmallow_field()
)
class ReferenceBuilder(MetaFieldBuilder):
AVAILABLE_PARAMS = ()
MARSHMALLOW_FIELD_CLS = ma_fields.Reference
def _get_marshmallow_field_cls(self):
return functools.partial(
self.MARSHMALLOW_FIELD_CLS,
self.mongoengine_field.document_type
)
class GenericReferenceBuilder(MetaFieldBuilder):
BASE_AVAILABLE_PARAMS = tuple([p for p in MetaFieldBuilder.BASE_AVAILABLE_PARAMS
if p is not params.ChoiceParam])
AVAILABLE_PARAMS = ()
MARSHMALLOW_FIELD_CLS = ma_fields.GenericReference
def build_marshmallow_field(self, **kwargs):
        # Special handling for the choices option, given it represents the
        # reference's document class
kwargs['choices'] = getattr(self.mongoengine_field, 'choices', None)
return super(GenericReferenceBuilder, self).build_marshmallow_field(**kwargs)
class EmbeddedDocumentBuilder(MetaFieldBuilder):
AVAILABLE_PARAMS = ()
MARSHMALLOW_FIELD_CLS = ma_fields.Nested
BASE_NESTED_SCHEMA_CLS = None
def _get_marshmallow_field_cls(self):
# Recursive build of marshmallow schema
from marshmallow_mongoengine.schema import ModelSchema
base_nested_schema_cls = self.BASE_NESTED_SCHEMA_CLS or ModelSchema
class NestedSchema(base_nested_schema_cls):
class Meta:
model = self.mongoengine_field.document_type
return functools.partial(
self.MARSHMALLOW_FIELD_CLS,
NestedSchema
)
class MapBuilder(MetaFieldBuilder):
AVAILABLE_PARAMS = ()
MARSHMALLOW_FIELD_CLS = ma_fields.Map
def _get_marshmallow_field_cls(self):
# Recursive build of marshmallow schema
from marshmallow_mongoengine.convert import convert_field
return functools.partial(
self.MARSHMALLOW_FIELD_CLS,
convert_field(self.mongoengine_field.field)
)
def get_field_builder_for_data_type(field_me):
field_me_types = inspect.getmro(type(field_me))
for field_me_type in field_me_types:
if field_me_type in FIELD_MAPPING:
field_ma_cls = FIELD_MAPPING[field_me_type]
break
else:
raise ModelConversionError(
'Could not find field of type {0}.'.format(field_me))
return field_ma_cls(field_me)
FIELD_MAPPING = {
}
def register_field_builder(mongo_field_cls, builder):
"""
Register a :class MetaFieldBuilder: to a given Mongoengine Field
:param mongo_field_cls: Mongoengine Field
    :param builder: field builder to register
"""
FIELD_MAPPING[mongo_field_cls] = builder
def register_field(mongo_field_cls, marshmallow_field_cls,
available_params=()):
"""
    Bind a marshmallow field to its corresponding mongoengine field
:param mongo_field_cls: Mongoengine Field
:param marshmallow_field_cls: Marshmallow Field
    :param available_params: List of :class marshmallow_mongoengine.conversion.params.MetaParam:
instances to import the mongoengine field config to marshmallow
"""
class Builder(MetaFieldBuilder):
AVAILABLE_PARAMS = available_params
MARSHMALLOW_FIELD_CLS = marshmallow_field_cls
register_field_builder(mongo_field_cls, Builder)
register_field(me.fields.BinaryField, ma_fields.Integer,
available_params=(params.SizeParam,))
register_field(me.fields.BooleanField, ma_fields.Boolean)
register_field(me.fields.ComplexDateTimeField, ma_fields.DateTime)
register_field(me.fields.DateTimeField, ma_fields.DateTime)
register_field(me.fields.DecimalField, ma_fields.Decimal,
available_params=(params.SizeParam, params.PrecisionParam))
register_field(me.fields.DictField, ma_fields.Raw)
register_field(me.fields.DynamicField, ma_fields.Raw)
register_field(me.fields.EmailField, ma_fields.Email,
available_params=(params.LenghtParam,))
register_field(me.fields.FloatField, ma_fields.Float,
available_params=(params.SizeParam,))
register_field(me.fields.GenericEmbeddedDocumentField,
ma_fields.GenericEmbeddedDocument)
register_field_builder(me.fields.GenericReferenceField, GenericReferenceBuilder)
register_field_builder(me.fields.ReferenceField, ReferenceBuilder)
# FileField and ImageField can't be simply displayed...
register_field(me.fields.FileField, ma_fields.Skip)
register_field(me.fields.ImageField, ma_fields.Skip)
register_field(me.fields.IntField, ma_fields.Integer,
available_params=(params.SizeParam,))
register_field(me.fields.LongField, ma_fields.Integer,
available_params=(params.SizeParam,))
register_field(me.fields.ObjectIdField, ma_fields.ObjectId)
register_field(me.fields.UUIDField, ma_fields.UUID)
register_field(me.fields.PointField, ma_fields.Point)
register_field(me.fields.SequenceField, ma_fields.Integer,
available_params=(params.SizeParam,)) # TODO: handle value_decorator
register_field(me.fields.StringField, ma_fields.String,
available_params=(params.LenghtParam,))
register_field(me.fields.URLField, ma_fields.URL,
available_params=(params.LenghtParam,))
register_field_builder(me.fields.EmbeddedDocumentField, EmbeddedDocumentBuilder)
register_field_builder(me.fields.ListField, ListBuilder)
register_field_builder(me.fields.MapField, MapBuilder)
register_field_builder(me.fields.SortedListField, ListBuilder)
# TODO: finish fields...
# me.fields.GeoPointField: ma_fields.GeoPoint,
# me.fields.LineStringField: ma_fields.LineString,
# me.fields.PolygonField: ma_fields.Polygon,
# me.fields.MultiPointField: ma_fields.MultiPoint,
# me.fields.MultiLineStringField: ma_fields.MultiLineString,
# me.fields.MultiPolygonField: ma_fields.MultiPolygon,
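# --- Illustrative sketch (not part of the original module) ---
# register_field above binds a Mongoengine field class to a marshmallow field
# plus optional parameter importers. Registering a custom field follows the
# same pattern; TagStringField below is purely illustrative:
if __name__ == '__main__':
    class TagStringField(me.fields.StringField):
        """Demo-only custom Mongoengine field."""
    register_field(TagStringField, ma_fields.String,
                   available_params=(params.LenghtParam,))
    # The generated builder now handles conversions of TagStringField:
    print(FIELD_MAPPING[TagStringField].MARSHMALLOW_FIELD_CLS is ma_fields.String)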
|
|
import tensorflow as tf
import numpy as np
import os
import sys
import random
import subprocess
from redis import Redis
import time
sys.path.append(os.path.realpath(".."))
import helpers.utils as hlp
from models.feed_forward import FFDiscrete
class A3CDiscreteTrainer(FFDiscrete):
def __init__(self, sess, args):
FFDiscrete.__init__(self, sess, args)
self.sess = sess
self.config = args['config']
self.env = args['environment']
self.timesteps_per_launch = args['max_pathlength']
self.n_workers = args['n_workers']
self.distributed = args['distributed']
self.n_tests = args['n_tests']
self.entropy_coef = args['entropy_coef']
self.learning_rate = args['learning_rate']
self.n_steps = args['n_steps']
self.scale = args['scale']
self.gamma = args['gamma']
self.save_every = args.get('save_every', 1)
self.test_every = args.get('test_every', 10)
self.sums = self.sumsqrs = self.sumtime = 0
self.timestep = 0
self.create_internal()
self.train_scores = []
self.test_scores = []
np.set_printoptions(precision=6)
# Worker parameters:
self.id_worker = args['id_worker']
self.test_mode = args['test_mode']
def create_internal(self):
self.targets = {
"advantage": tf.placeholder(dtype=tf.float32, shape=[None]),
"return": tf.placeholder(dtype=tf.float32, shape=[None]),
}
for i in range(len(self.n_actions)):
self.targets["action_{}".format(i)] = tf.placeholder(dtype=tf.int32, shape=[None])
N = tf.shape(self.targets["advantage"])[0]
base = [N] + [1 for _ in range(len(self.n_actions))]
log_dist = tf.zeros(shape=[N] + self.n_actions)
p_n = tf.zeros(shape=[N])
for i, n in enumerate(self.n_actions):
right_shape = base[:]
right_shape[i + 1] = n
actions = self.targets["action_{}".format(i)]
action_log_dist = tf.reshape(self.action_logprobs[i], [-1])
p = tf.reshape(tf.gather(action_log_dist, tf.range(0, N) * n + actions), [-1])
p_n += p
log_dist += tf.reshape(action_log_dist, right_shape)
N = tf.cast(N, tf.float32)
self.loss = -tf.reduce_mean(p_n * self.targets["advantage"])
self.entropy = tf.reduce_sum(-tf.exp(log_dist) * log_dist) / N
value_loss = tf.reduce_mean((self.targets["return"] - self.value) ** 2)
self.loss += -self.entropy_coef * self.entropy + value_loss / 2
self.weights += self.value_weights
self.gradients = tf.gradients(self.loss, self.weights)
def save(self, name):
directory = 'saves/' + name + '/'
if not os.path.exists(directory):
os.makedirs(directory)
directory += 'iteration_{}'.format(self.timestep) + '/'
if not os.path.exists(directory):
os.makedirs(directory)
for i, tensor in enumerate(tf.global_variables()):
value = self.sess.run(tensor)
np.save(directory + 'weight_{}'.format(i), value)
if self.scale != 'off':
np.save(directory + 'sums', self.sums)
np.save(directory + 'sumsquares', self.sumsqrs)
np.save(directory + 'sumtime', self.sumtime)
np.save(directory + 'timestep', np.array([self.timestep]))
np.save(directory + 'train_scores', np.array(self.train_scores))
np.save(directory + 'test_scores', np.array(self.test_scores))
print("Agent successfully saved in folder {}".format(directory))
def load(self, name, iteration=None):
try:
directory = 'saves/' + name + '/'
if not os.path.exists(directory):
print('That directory does not exist!')
raise Exception
if iteration is None:
iteration = np.max([int(x[10:]) for x in [dir for dir in os.walk(directory)][0][1]])
directory += 'iteration_{}'.format(iteration) + '/'
for i, tensor in enumerate(tf.global_variables()):
arr = np.load(directory + 'weight_{}.npy'.format(i))
self.sess.run(tensor.assign(arr))
if self.scale != 'off':
self.sums = np.load(directory + 'sums.npy')
self.sumsqrs = np.load(directory + 'sumsquares.npy')
self.sumtime = np.load(directory + 'sumtime.npy')
self.timestep = np.load(directory + 'timestep.npy')[0]
self.train_scores = np.load(directory + 'train_scores.npy').tolist()
self.test_scores = np.load(directory + 'test_scores.npy').tolist()
print("Agent successfully loaded from folder {}".format(directory))
except:
print("Something is wrong, loading failed")
def apply_adam_updates(self, variables_server, gradients, learning_rate, epsilon=1e-6):
update_steps = hlp.load_object(variables_server.get('update_steps')) + 1
variables_server.set('update_steps', hlp.dump_object(update_steps))
learning_rate = learning_rate * ((1 - 0.999 ** update_steps) ** 0.5) / (1 - 0.9 ** update_steps)
for i, gradient in enumerate(gradients):
momentum = hlp.load_object(variables_server.get('momentum_{}'.format(i)))
momentum = 0.999 * momentum + (1 - 0.999) * gradient * gradient
variables_server.set('momentum_{}'.format(i), hlp.dump_object(momentum))
velocity = hlp.load_object(variables_server.get('velocity_{}'.format(i)))
velocity = 0.9 * velocity + (1 - 0.9) * gradient
variables_server.set('velocity_{}'.format(i), hlp.dump_object(velocity))
weight = hlp.load_object(variables_server.get('weight_{}'.format(i)))
new_weight = weight - velocity * learning_rate / ((momentum ** 0.5) + epsilon)
variables_server.set('weight_{}'.format(i), hlp.dump_object(new_weight))
return update_steps
def work(self):
variables_server = Redis(port=12000)
if self.scale != 'off':
try:
means = hlp.load_object(variables_server.get("means"))
stds = hlp.load_object(variables_server.get("stds"))
self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
except:
pass
try:
weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
range(len(self.weights))]
self.set_weights(weights)
except:
pass
env = self.env
while True:
observations, action_tuples, rewards, timestamps = [], [], [], []
for _ in range(self.n_steps):
observations.append(env.features[0])
timestamps.append(env.timestamp)
actions = self.act(env.features)
env.step(actions)
action_tuples.append(actions)
rewards.append(env.reward)
if env.done or env.timestamp > self.timesteps_per_launch:
variables_server.lpush('results', hlp.dump_object(env.get_total_reward()))
print("Episode reward: {}".format(env.get_total_reward()), "Length: {}".format(env.timestamp))
break
timestamps.append(env.timestamp)
observations_batch = np.array(observations)
actions_batch = np.array(action_tuples)
feed_dict = {self.state_input: observations_batch}
for i in range(len(self.n_actions)):
feed_dict[self.targets["action_{}".format(i)]] = actions_batch[:, i]
if env.done or env.timestamp > self.timesteps_per_launch:
rewards.append(0)
env.reset()
else:
obs = observations[-1]
rewards.append(self.sess.run(self.value, feed_dict={self.state_input: obs.reshape((1,) + obs.shape)}))
returns_batch = hlp.discount(np.array(rewards), self.gamma, np.array(timestamps))[:-1]
values = self.sess.run(self.value, feed_dict)
feed_dict[self.targets["advantage"]] = returns_batch - values
feed_dict[self.targets["return"]] = returns_batch
gradients = self.sess.run(self.gradients, feed_dict)
self.apply_adam_updates(variables_server, gradients, self.learning_rate)
weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
range(len(self.weights))]
self.set_weights(weights)
def make_rollout(self):
variables_server = Redis(port=12000)
if self.scale != 'off':
try:
means = hlp.load_object(variables_server.get("means"))
stds = hlp.load_object(variables_server.get("stds"))
self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
except:
pass
try:
weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
range(len(self.weights))]
self.set_weights(weights)
except:
pass
env = self.env
n_tasks = self.n_tests
timestep = 0
i_task = 0
paths = []
while i_task < n_tasks:
path = {}
observations, action_tuples, rewards, dist_tuples, timestamps = [], [], [], [], []
sums = np.zeros((1, env.get_observation_space()))
sumsqrs = np.zeros(sums.shape)
env.reset()
while not env.done and env.timestamp < self.timesteps_per_launch:
sums += env.features
sumsqrs += np.square(env.features)
observations.append(env.features[0])
timestamps.append(env.timestamp)
if not self.test_mode:
actions, dist_tuple = self.act(env.features, return_dists=True)
dist_tuples.append(dist_tuple)
else:
actions = self.act(env.features, exploration=False)
env.step(actions)
timestep += 1
action_tuples.append(actions)
rewards.append(env.reward)
path["observations"] = np.array(observations)
path["action_tuples"] = np.array(action_tuples)
path["rewards"] = np.array(rewards)
if not self.test_mode:
path["dist_tuples"] = np.array(dist_tuples)
path["timestamps"] = np.array(timestamps)
path["sumobs"] = sums
path["sumsqrobs"] = sumsqrs
path["terminated"] = env.done
path["total"] = env.get_total_reward()
paths.append(path)
i_task += 1
if self.distributed:
variables_server.set("paths_{}".format(self.id_worker), hlp.dump_object(paths))
else:
self.paths = paths
def train(self):
cmd_server = 'redis-server --port 12000'
p = subprocess.Popen(cmd_server, shell=True, preexec_fn=os.setsid)
self.variables_server = Redis(port=12000)
means = "-"
stds = "-"
if self.scale != 'off':
if self.timestep == 0:
print("Time to measure features!")
if self.distributed:
worker_args = \
{
'config': self.config,
'test_mode': False,
}
hlp.launch_workers(worker_args, self.n_workers)
paths = []
for i in range(self.n_workers):
paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
else:
self.test_mode = False
self.make_rollout()
paths = self.paths
for path in paths:
self.sums += path["sumobs"]
self.sumsqrs += path["sumsqrobs"]
self.sumtime += path["observations"].shape[0]
stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))
means = self.sums / self.sumtime
print("Init means: {}".format(means))
print("Init stds: {}".format(stds))
self.variables_server.set("means", hlp.dump_object(means))
self.variables_server.set("stds", hlp.dump_object(stds))
self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
weights = self.get_weights()
for i, weight in enumerate(weights):
self.variables_server.set("weight_" + str(i), hlp.dump_object(weight))
self.variables_server.set('momentum_{}'.format(i), hlp.dump_object(np.zeros(weight.shape)))
self.variables_server.set('velocity_{}'.format(i), hlp.dump_object(np.zeros(weight.shape)))
self.variables_server.set('update_steps', hlp.dump_object(0))
worker_args = \
{
'config': self.config,
'test_mode': False,
}
hlp.launch_workers(worker_args, self.n_workers, command='work', wait=False)
while True:
time.sleep(self.test_every)
print("Time for testing!")
if self.distributed:
worker_args = \
{
'config': self.config,
'test_mode': True,
}
hlp.launch_workers(worker_args, self.n_workers)
paths = []
for i in range(self.n_workers):
paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
else:
self.test_mode = True
self.make_rollout()
paths = self.paths
total_rewards = np.array([path["total"] for path in paths])
eplens = np.array([len(path["rewards"]) for path in paths])
print("""
-------------------------------------------------------------
Mean test score: {test_scores}
Mean test episode length: {test_eplengths}
Max test score: {max_test}
Number of train episodes: {number}
Mean of features: {means}
Std of features: {stds}
-------------------------------------------------------------
""".format(
means=means,
stds=stds,
test_scores=np.mean(total_rewards),
test_eplengths=np.mean(eplens),
max_test=np.max(total_rewards),
number=self.variables_server.llen('results')
))
self.timestep += 1
self.train_scores = [hlp.load_object(res) for res in self.variables_server.lrange('results', 0, -1)][::-1]
self.test_scores.append(np.mean(total_rewards))
if self.timestep % self.save_every == 0:
self.save(self.config[:-5])
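# --- Illustrative sketch (not part of the trainer) ---
# apply_adam_updates above keeps the Adam statistics in Redis under
# 'momentum_{i}' (running average of squared gradients) and 'velocity_{i}'
# (running average of gradients) and folds the bias correction into the
# learning rate. The same update for a single in-memory weight, with
# made-up numbers:
def _adam_step(weight, gradient, momentum, velocity, step,
               learning_rate=1e-3, epsilon=1e-6):
    lr_t = learning_rate * ((1 - 0.999 ** step) ** 0.5) / (1 - 0.9 ** step)
    momentum = 0.999 * momentum + (1 - 0.999) * gradient * gradient
    velocity = 0.9 * velocity + (1 - 0.9) * gradient
    weight = weight - velocity * lr_t / ((momentum ** 0.5) + epsilon)
    return weight, momentum, velocity
if __name__ == '__main__':
    w, m, v = np.ones(3), np.zeros(3), np.zeros(3)
    g = np.array([0.1, -0.2, 0.3])
    for step in range(1, 4):
        w, m, v = _adam_step(w, g, m, v, step)
    print(w)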
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.util import nest
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_string('model_dir', os.environ.get('TEST_TMPDIR'),
'A temporary directory.')
class TPUEmbeddingCorrectness(parameterized.TestCase, test.TestCase):
def setUp(self):
super(TPUEmbeddingCorrectness, self).setUp()
self.embedding_values = np.array(list(range(32)), dtype=np.float64)
self.initializer = init_ops_v2.Constant(self.embedding_values)
# Embedding for video initialized to
# 0 1 2 3
# 4 5 6 7
# ...
self.table_video = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=8,
dim=4,
initializer=self.initializer,
combiner='sum',
name='video')
# Embedding for user initialized to
# 0 1
# 2 3
# 4 5
# 6 7
# ...
self.table_user = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=16,
dim=2,
initializer=self.initializer,
combiner='mean',
name='user')
self.feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends'))
self.batch_size = 2
self.data_batch_size = 4
# One (global) batch of inputs
# sparse tensor for watched:
# row 0: 0
# row 1: 0, 1
# row 2: 0, 1
# row 3: 1
self.feature_watched_indices = [[0, 0], [1, 0], [1, 1],
[2, 0], [2, 1], [3, 0]]
self.feature_watched_values = [0, 0, 1, 0, 1, 1]
self.feature_watched_row_lengths = [1, 2, 2, 1]
# sparse tensor for favorited:
# row 0: 0, 1
# row 1: 1
# row 2: 0
# row 3: 0, 1
self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0],
[2, 0], [3, 0], [3, 1]]
self.feature_favorited_values = [0, 1, 1, 0, 0, 1]
self.feature_favorited_row_lengths = [2, 1, 1, 2]
# sparse tensor for friends:
# row 0: 3
# row 1: 0, 1, 2
# row 2: 3
# row 3: 0, 1, 2
self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2],
[2, 0], [3, 0], [3, 1], [3, 2]]
self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]
self.feature_friends_row_lengths = [1, 3, 1, 3]
self.resolver = None
def tearDown(self):
if self.resolver:
tpu_strategy_util.shutdown_tpu_system(self.resolver)
super(TPUEmbeddingCorrectness, self).tearDown()
def _get_strategy(self):
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
remote.connect_to_cluster(self.resolver)
tpu_strategy_util.initialize_tpu_system(self.resolver)
return tpu_strategy.TPUStrategy(self.resolver)
def _create_strategy_and_mid_level(self, optimizer_name):
strategy = self._get_strategy()
with strategy.scope():
if optimizer_name == 'sgd':
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
elif optimizer_name == 'adagrad':
optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
elif optimizer_name == 'adam':
optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
else:
raise ValueError('optimizer is not recognized: ', optimizer_name)
mid_level_api = self._create_mid_level(optimizer=optimizer)
return strategy, mid_level_api, optimizer
@parameterized.parameters(
*itertools.product(
['sgd', 'adagrad', 'adam'],
[True, False]))
def test_embedding(self, optimizer_name, training):
strategy, mid_level_api, optimizer = (
self._create_strategy_and_mid_level(optimizer_name))
dataset = self._create_sparse_dataset(strategy)
dist = strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
"""Create and run computation that returns the embedding activations."""
if not training:
activations = mid_level_api.dequeue()
total_loss = _get_total_loss_tensor(activations)
ret_val = [total_loss] + list(activations)
return ret_val
else:
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
total_loss = _get_total_loss_tensor(activations)
loss_per_replica = total_loss / strategy.num_replicas_in_sync
gradients = tape.gradient(loss_per_replica, activations)
mid_level_api.apply_gradients(gradients)
ret_val = [total_loss] + list(activations)
return ret_val
mid_level_api.enqueue(next(dist_iter), training=training)
result = strategy.run(step)
return result
# Run model.
shard_out_val = test_fn()
# Retrieve TPU weights to CPU.
mid_level_api._retrieve_variables()
# Compute sparse tensors for global batch.
input_data = next(iter(self._create_sparse_dataset(strategy)))
# Check results.
self._check_results(strategy, shard_out_val, training, input_data,
mid_level_api._variables,
optimizer)
def _create_mid_level(self, optimizer=None):
# Create `TPUEmbedding` object.
if optimizer is None:
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
return tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
optimizer=optimizer)
def _create_sparse_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = (
sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_favorited_indices,
values=self.feature_favorited_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_friends_indices,
values=self.feature_friends_values,
dense_shape=[self.data_batch_size, 3]))
if include_weights:
weights = []
for sparse in sparse_features:
values = (
array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)
weights.append(sparse_tensor.SparseTensor(
indices=sparse.indices,
values=values,
dense_shape=sparse.dense_shape))
sparse_features = (sparse_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_dense_input_fn(self, strategy, include_weights=False, weight=0.5):
def input_fn(ctx):
del ctx
features = (
constant_op.constant(self.feature_watched_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_favorited_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_friends_values[-2:],
dtype=dtypes.int32))
if include_weights:
weights = [array_ops.ones_like(t, dtype=dtypes.float32) * weight
for t in features]
features = (features, tuple(weights))
return dataset_ops.DatasetV2.from_tensors(features).repeat()
return input_fn
def _check_results(self, strategy, shard_out_val, training, input_data,
table_to_variable, optimizer):
num_replicas = strategy.num_replicas_in_sync
# Unpack the values `strategy.run()` returns.
loss = _unpack(strategy, shard_out_val[0])
activation_watched = _unpack(strategy, shard_out_val[1])
activation_favorited = _unpack(strategy, shard_out_val[2])
activation_friends = _unpack(strategy, shard_out_val[3])
# Core 0:
# Calculate the values of embedding activations.
activation_watched_gold0 = np.array([[0, 1, 2, 3], [4, 6, 8, 10]])
activation_favorited_gold0 = np.array([[4, 6, 8, 10], [4, 5, 6, 7]])
# Second row of `activation_friends_gold0` is the mean of the following.
# row 0: 0 1
# row 1: 2 3
# row 2: 4 5
activation_friends_gold0 = np.array([[6, 7], [2, 3]])
loss_gold0 = _compute_loss(activation_watched_gold0,
activation_favorited_gold0,
activation_friends_gold0)
# Add on values from other cores:
# Activations for watched are an alternating sequence of
# activation_watched_gold0 and activation_favorited_gold0.
# For favorited it is the same but in the opposite order.
activation_watched_gold = np.concatenate(
(np.concatenate((np.expand_dims(activation_watched_gold0, axis=0),) *
(num_replicas // 2)),
np.concatenate((np.expand_dims(activation_favorited_gold0, axis=0),) *
(num_replicas // 2))),
axis=1).reshape([self.batch_size * num_replicas, 4])
activation_favorited_gold = np.concatenate(
(activation_watched_gold[self.batch_size:,],
activation_watched_gold[0:self.batch_size,]))
activation_friends_gold = np.concatenate(
(activation_friends_gold0,) * num_replicas)
loss_gold = [loss_gold0] * num_replicas
# Test values.
self.assertAllClose(activation_watched_gold, activation_watched)
self.assertAllClose(activation_favorited_gold, activation_favorited)
self.assertAllClose(activation_friends_gold, activation_friends)
self.assertAllClose(loss_gold, loss)
embedding_table_video_before = np.copy(
np.reshape(self.embedding_values, [8, 4]))
embedding_table_user_before = np.copy(
np.reshape(self.embedding_values, [16, 2]))
global_batch_size = self.batch_size * num_replicas
if training:
gradient_wrt_watched_gold = (2 * activation_watched_gold /
global_batch_size)
gradient_wrt_favorited_gold = (2 * activation_favorited_gold /
global_batch_size)
gradient_wrt_friends_gold = (2 * activation_friends_gold /
global_batch_size)
# Calculate gradients wrt embedding tables.
gradients_wrt_user = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_friends_gold,
embedding_table_user_before, input_data[2].indices.numpy(),
input_data[2].values.numpy(), self.table_user.combiner))
gradients_wrt_video = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_favorited_gold,
embedding_table_video_before, input_data[1].indices.numpy(),
input_data[1].values.numpy(), self.table_video.combiner) +
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_watched_gold,
embedding_table_video_before, input_data[0].indices.numpy(),
input_data[0].values.numpy(), self.table_video.combiner))
self._check_embedding_and_slot_variables(embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable)
def _check_embedding_and_slot_variables(self, embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable):
if isinstance(optimizer, tpu_embedding_v2_utils.SGD):
check_fn = self._check_embedding_and_slot_variables_for_sgd
elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):
check_fn = self._check_embedding_and_slot_variables_for_adagrad
elif isinstance(optimizer, tpu_embedding_v2_utils.Adam):
check_fn = self._check_embedding_and_slot_variables_for_adam
else:
raise ValueError('optimizer is not recognized: ', type(optimizer))
check_fn(embedding_table_user_before, gradients_wrt_user,
optimizer, table_to_variable[self.table_user.name])
check_fn(embedding_table_video_before, gradients_wrt_video,
optimizer, table_to_variable[self.table_video.name])
def _check_embedding_and_slot_variables_for_sgd(self, embedding_table_before,
gradients,
optimizer,
variables):
embedding_table = np.copy(embedding_table_before)
embedding_table -= optimizer.learning_rate * np.sum(gradients, axis=0)
self.assertAllClose(_get_variable(variables['parameters']).numpy(),
embedding_table)
def _check_embedding_and_slot_variables_for_adagrad(self,
embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
accumulator = (
optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)
embedding_table -= (
optimizer.learning_rate * np.sum(gradients, axis=0) /
np.sqrt(accumulator))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table)
self.assertAllClose(_get_variable(variable['accumulators']).numpy(),
accumulator)
def _check_embedding_and_slot_variables_for_adam(self, embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
g = np.sum(gradients, axis=0)
v = g**2 * (1 - optimizer.beta_2)
m = g * (1 - optimizer.beta_1)
epsilon = optimizer.epsilon
# TPU Embeddings don't have the LR decay factor for Adam.
lr_modifier = 1
embedding_table -= (
m * optimizer.learning_rate * lr_modifier / (np.sqrt(v) + epsilon))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table, rtol=1e-4)
self.assertAllClose(_get_variable(variable['momenta']).numpy(),
m, rtol=1e-4)
self.assertAllClose(_get_variable(variable['velocities']).numpy(),
v, rtol=1e-4)
def _get_replica_numpy(self, structured, strategy, replica_id):
def select_replica(x):
x = strategy.experimental_local_results(x)
if len(x) == 1:
        return x[0].numpy()
return x[replica_id].numpy()
return nest.map_structure(select_replica, structured)
def test_dense_lookup(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy)
dist = strategy.distribute_datasets_from_function(
input_fn,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
mid_level_api.enqueue(next(dist_iter), training=False)
return strategy.run(step)
# Run model.
shard0 = self._get_replica_numpy(test_fn(), strategy, 0)
# embedding_values is a linear list, so we reshape to match the correct
# shape of the corresponding table before performing the lookup.
numpy_videos = np.reshape(self.embedding_values, (8, 4))
numpy_users = np.reshape(self.embedding_values, (16, 2))
golden = ((numpy_videos[self.feature_watched_values[-2:]],
numpy_videos[self.feature_favorited_values[-2:]],
numpy_users[self.feature_friends_values[-2:]]))
self.assertAllClose(shard0, golden)
def test_sequence_embeddings(self):
feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends',
max_sequence_length=3))
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config=feature_config,
optimizer=optimizer)
# Call build here. We call 'next' outside of the tf.function and this
# results in data where the shape of the sparse tensor is a tensor which we
# can't tell the shape of at tracing time.
mid_level.build(self.batch_size)
dataset = self._create_sparse_dataset(strategy)
data = next(iter(strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_prefetch_to_device=False))))
@def_function.function
def embedding_and_set_gradients(data):
def tpu_fn():
activations = mid_level.dequeue()
mid_level.apply_gradients(nest.map_structure(array_ops.ones_like,
activations))
return activations
mid_level.enqueue(data)
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
def tpu_fn():
return mid_level.dequeue()
mid_level.enqueue(data)
return strategy.run(tpu_fn)
# Only check core 0.
before_update = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
after_update = self._get_replica_numpy(embedding_only(data), strategy, 0)
# For videos table, row 0 and row 1 are looked up 3*num_replicas times as
# they occur 3 times per replica (considering the features 0 and 1 which are
# both looked up in the videos table).
# Feature 0 has ids [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1 has ids [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# This means that both rows 0 and 1 get a -0.1*3*num_replicas update
# For users table, each row is looked up twice:
# Feature 2 has ids [3, 0, 1, 2], .. repeated over num_replicas
# This means that we get a -0.1*num_replicas update to the third feature.
# In general this means that after the update, if we lookup feature 0 and 1
# the values will be 0.3*num_replicas lower per entry and for feature 2 they
# will be 0.1*num_replicas lower.
    # The one issue is that these lookups contain padding values.
# For core 0, we get the first 2 elements of the 4 element batch.
# For feature 0, the indices are [[0, 0], [1, 0], [1, 1]] with max sequence
# length of 2, which means that [0, 1] will be 0s.
# For feature 1, the indices are [[0, 0], [0, 1], [1, 0]] with max sequence
# length of 2, which means that [1, 1] will be 0s.
# For feature 2, the indices are [[0, 0], [1, 0], [1, 1], [1, 2]] with max
# sequence length of 3, which means that [0, 1], [0, 2] will be 0s.
# The following masks represent that so that we only apply the above updates
# to the non-padding rows:
masks = (
np.array([[[1], [0]], [[1], [1]]]),
np.array([[[1], [1]], [[1], [0]]]),
np.array([[[1], [0], [0]], [[1], [1], [1]]]))
per_row_update = (0.3 * num_replicas,
0.3 * num_replicas,
0.1 * num_replicas)
golden = tuple([before - update * mask for before, update, mask in
zip(before_update, per_row_update, masks)])
self.assertAllClose(golden, after_update)
def _compute_gradients_wrt_embedding_table(batch_size,
gradient_wrt_activation,
embedding_table,
feature_indices,
feature_values,
combiner,
max_sequence_length=0):
"""Compute gradients wrt embedding_table.
Args:
batch_size: `int`, batch size.
gradient_wrt_activation: `np.array` with shape `batch_size` by
embedding `dimension`.
embedding_table: `np.array` with shape `vocabulary_size` by embedding
`dimension`.
feature_indices: `indices` as used to construct `SparseTensor`.
feature_values: `values` as used to construct `SparseTensor`.
combiner: `String`, 'mean' or 'sum'.
max_sequence_length: If non-zero, a sequence feature with the given length.
Returns:
Gradients wrt `embedding_table`, an `np.array` with shape
`batch_size` by `vocabulary_size` by embedding `dimension`.
Raises:
ValueError: if `combiner` is not one of 'mean' or 'sum'.
"""
if combiner not in ('mean', 'sum'):
raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))
grads = []
for i in range(batch_size):
grad = np.zeros_like(embedding_table)
count = 0
for (batch_i, seq_index), vocabulary_id in zip(feature_indices,
feature_values):
if batch_i == i:
count += 1
if max_sequence_length > 0:
if seq_index < max_sequence_length:
grad[vocabulary_id, :] += gradient_wrt_activation[i, seq_index, :]
else:
grad[vocabulary_id, :] += gradient_wrt_activation[i, :]
if combiner == 'mean' and not max_sequence_length:
grad = grad / count
grads.append(grad)
return np.stack(grads)
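# A minimal, hypothetical usage sketch of the helper above (the shapes and ids
# are invented purely for illustration and are not part of the test data):
#   grad = _compute_gradients_wrt_embedding_table(
#       batch_size=2,
#       gradient_wrt_activation=np.ones((2, 4)),
#       embedding_table=np.zeros((8, 4)),
#       feature_indices=[(0, 0), (1, 0), (1, 1)],
#       feature_values=[3, 0, 1],
#       combiner='sum')
#   # grad has shape (2, 8, 4): grad[0, 3], grad[1, 0] and grad[1, 1] are ones,
#   # everything else is zero.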
def _unpack(strategy, per_replica_output):
per_replica_output = strategy.experimental_local_results(per_replica_output)
per_replica_output = array_ops.concat(per_replica_output, axis=0).numpy()
return per_replica_output
def _get_total_loss_tensor(activations):
losses = []
for activation in activations:
losses.append(
math_ops.reduce_mean(
math_ops.reduce_sum(
gen_math_ops.squared_difference(activation, 0), 1)))
total_loss = array_ops.expand_dims_v2(sum(losses), 0)
return total_loss
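# _compute_loss below is the NumPy analogue of _get_total_loss_tensor above:
# the mean over the batch of the per-example sum of squared activations,
# summed across the three features.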
def _compute_loss(activation_watched, activation_favorited, activation_friends):
watched_loss = np.mean(np.sum(activation_watched**2, axis=1))
if len(activation_favorited.shape) == 2:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=1))
else:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=(1, 2)))
if len(activation_friends.shape) == 2:
friends_loss = np.mean(np.sum(activation_friends**2, axis=1))
else:
friends_loss = np.mean(np.sum(activation_friends**2, axis=(1, 2)))
loss = watched_loss + favorited_loss + friends_loss
return loss
def _get_variable(variable):
if isinstance(variable, tpu_embedding_v2.TPUShardedVariable):
return variable.variables[0]
return variable
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from sahara.plugins import provisioning as p
from sahara.utils import files as f
CDH5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cdh5'
'/ubuntu/precise/amd64/cdh precise-cdh5.3.0 contrib'
'\ndeb-src http://archive.cloudera.com/cdh5/ubuntu'
'/precise/amd64/cdh precise-cdh5.3.0 contrib')
DEFAULT_CDH5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cdh5/ubuntu'
'/precise/amd64/cdh/archive.key')
CM5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cm5'
'/ubuntu/precise/amd64/cm precise-cm5.3.0 contrib'
'\ndeb-src http://archive.cloudera.com/cm5/ubuntu'
'/precise/amd64/cm precise-cm5.3.0 contrib')
DEFAULT_CM5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cm5/ubuntu'
'/precise/amd64/cm/archive.key')
CDH5_CENTOS_REPO = ('[cloudera-cdh5]'
'\nname=Cloudera\'s Distribution for Hadoop, Version 5'
'\nbaseurl=http://archive.cloudera.com/cdh5/redhat/6'
'/x86_64/cdh/5.3.0/'
'\ngpgkey = http://archive.cloudera.com/cdh5/redhat/6'
'/x86_64/cdh/RPM-GPG-KEY-cloudera'
'\ngpgcheck = 1')
CM5_CENTOS_REPO = ('[cloudera-manager]'
'\nname=Cloudera Manager'
'\nbaseurl=http://archive.cloudera.com/cm5/redhat/6'
'/x86_64/cm/5.3.0/'
'\ngpgkey = http://archive.cloudera.com/cm5/redhat/6'
'/x86_64/cm/RPM-GPG-KEY-cloudera'
'\ngpgcheck = 1')
DEFAULT_SWIFT_LIB_URL = ('https://repository.cloudera.com/artifactory/repo/org'
'/apache/hadoop/hadoop-openstack/2.5.0-cdh5.3.0'
'/hadoop-openstack-2.5.0-cdh5.3.0.jar')
DEFAULT_EXTJS_LIB_URL = 'http://dev.sencha.com/deploy/ext-2.2.zip'
HIVE_SERVER2_SENTRY_SAFETY_VALVE = (
'<property>'
'\n <name>hive.security.authorization.task.factory</name>'
'\n <value>org.apache.sentry.binding.hive.SentryHiveAuthorizationTask'
'FactoryImpl</value>'
'\n</property>'
'\n<property>'
'\n <name>hive.server2.session.hook</name>'
'\n <value>org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook'
'</value>'
'\n</property>'
'\n<property>'
'\n <name>hive.sentry.conf.url</name>'
'\n <value>file:///{{CMF_CONF_DIR}}/sentry-site.xml</value>'
'\n</property>')
HIVE_METASTORE_SENTRY_SAFETY_VALVE = (
'<property>'
'\n <name>hive.metastore.client.impl</name>'
'\n <value>org.apache.sentry.binding.metastore.SentryHiveMetaStore'
'Client</value>'
'\n <description>Sets custom Hive metastore client which Sentry uses'
' to filter out metadata.</description>'
'\n</property>'
'\n<property>'
'\n <name>hive.metastore.pre.event.listeners</name>'
'\n <value>org.apache.sentry.binding.metastore.MetastoreAuthzBinding'
'</value>'
'\n <description>list of comma separated listeners for metastore'
' events.</description>'
'\n</property>'
'\n<property>'
'\n <name>hive.metastore.event.listeners</name>'
'\n <value>org.apache.sentry.binding.metastore.SentryMetastorePost'
'EventListener</value>'
'\n <description>list of comma separated listeners for metastore,'
' post events.</description>'
'\n</property>')
SENTRY_IMPALA_CLIENT_SAFETY_VALVE = (
'<property>'
'\n <name>sentry.service.client.server.rpc-port</name>'
'\n <value>3893</value>'
'\n</property>'
'\n<property>'
'\n <name>sentry.service.client.server.rpc-address</name>'
'\n <value>hostname</value>'
'\n</property>'
'\n<property>'
'\n <name>sentry.service.client.server.rpc-connection-timeout</name>'
'\n <value>200000</value>'
'\n</property>'
'\n<property>'
'\n <name>sentry.service.security.mode</name>'
'\n <value>none</value>'
'\n</property>')
CDH5_REPO_URL = p.Config(
'CDH5 repo list URL', 'general', 'cluster', priority=1,
default_value="")
CDH5_REPO_KEY_URL = p.Config(
'CDH5 repo key URL (for debian-based only)', 'general', 'cluster',
priority=1, default_value="")
CM5_REPO_URL = p.Config(
'CM5 repo list URL', 'general', 'cluster', priority=1,
default_value="")
CM5_REPO_KEY_URL = p.Config(
'CM5 repo key URL (for debian-based only)', 'general', 'cluster',
priority=1, default_value="")
ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
config_type='bool', priority=1,
default_value=True)
ENABLE_HBASE_COMMON_LIB = p.Config('Enable HBase Common Lib',
'general', 'cluster', config_type='bool',
priority=1, default_value=True)
SWIFT_LIB_URL = p.Config(
'Hadoop OpenStack library URL', 'general', 'cluster', priority=1,
default_value=DEFAULT_SWIFT_LIB_URL,
description=("Library that adds Swift support to CDH. The file will be "
"downloaded from VM."))
EXTJS_LIB_URL = p.Config(
"ExtJS library URL", 'general', 'cluster', priority=1,
default_value=DEFAULT_EXTJS_LIB_URL,
description=("Ext 2.2 library is required for Oozie Web Console. "
"The file will be downloaded from VM with oozie."))
AWAIT_AGENTS_TIMEOUT = p.Config(
'Await Cloudera agents timeout', 'general', 'cluster', config_type='int',
priority=1, default_value=300, is_optional=True,
description='Timeout for Cloudera agents connecting to Cloudera'
' Manager, in seconds')
AWAIT_MANAGER_STARTING_TIMEOUT = p.Config(
'Timeout for Cloudera Manager starting', 'general', 'cluster',
config_type='int', priority=1, default_value=300, is_optional=True,
description='Timeout for Cloudera Manager starting, in seconds')
def _get_cluster_plugin_configs():
return [CDH5_REPO_URL, CDH5_REPO_KEY_URL, CM5_REPO_URL, CM5_REPO_KEY_URL,
ENABLE_SWIFT, ENABLE_HBASE_COMMON_LIB, SWIFT_LIB_URL,
EXTJS_LIB_URL, AWAIT_AGENTS_TIMEOUT,
AWAIT_MANAGER_STARTING_TIMEOUT]
# Node group (ng) wide configs
def _load_json(path_to_file):
data = f.get_file_text(path_to_file)
return json.loads(data)
path_to_config = 'plugins/cdh/v5_3_0/resources/'
hdfs_confs = _load_json(path_to_config + 'hdfs-service.json')
namenode_confs = _load_json(path_to_config + 'hdfs-namenode.json')
datanode_confs = _load_json(path_to_config + 'hdfs-datanode.json')
secnamenode_confs = _load_json(path_to_config + 'hdfs-secondarynamenode.json')
yarn_confs = _load_json(path_to_config + 'yarn-service.json')
resourcemanager_confs = _load_json(
path_to_config + 'yarn-resourcemanager.json')
mapred_confs = _load_json(path_to_config + 'yarn-gateway.json')
nodemanager_confs = _load_json(path_to_config + 'yarn-nodemanager.json')
jobhistory_confs = _load_json(path_to_config + 'yarn-jobhistory.json')
oozie_service_confs = _load_json(path_to_config + 'oozie-service.json')
oozie_role_confs = _load_json(path_to_config + 'oozie-oozie_server.json')
hive_service_confs = _load_json(path_to_config + 'hive-service.json')
hive_metastore_confs = _load_json(path_to_config + 'hive-hivemetastore.json')
hive_hiveserver_confs = _load_json(path_to_config + 'hive-hiveserver2.json')
hive_webhcat_confs = _load_json(path_to_config + 'hive-webhcat.json')
hue_service_confs = _load_json(path_to_config + 'hue-service.json')
hue_role_confs = _load_json(path_to_config + 'hue-hue_server.json')
spark_service_confs = _load_json(path_to_config + 'spark-service.json')
spark_role_confs = _load_json(
path_to_config + 'spark-spark_yarn_history_server.json')
zookeeper_service_confs = _load_json(path_to_config + 'zookeeper-service.json')
zookeeper_server_confs = _load_json(path_to_config + 'zookeeper-server.json')
hbase_confs = _load_json(path_to_config + 'hbase-service.json')
master_confs = _load_json(path_to_config + 'hbase-master.json')
regionserver_confs = _load_json(path_to_config + 'hbase-regionserver.json')
flume_service_confs = _load_json(path_to_config + 'flume-service.json')
flume_agent_confs = _load_json(path_to_config + 'flume-agent.json')
sentry_service_confs = _load_json(path_to_config + 'sentry-service.json')
sentry_server_confs = _load_json(path_to_config +
'sentry-sentry_server.json')
solr_service_confs = _load_json(path_to_config + 'solr-service.json')
solr_server_confs = _load_json(path_to_config + 'solr-solr_server.json')
sqoop_service_confs = _load_json(path_to_config + 'sqoop-service.json')
sqoop_server_confs = _load_json(path_to_config +
'sqoop-sqoop_server.json')
ks_indexer_service_confs = _load_json(path_to_config +
'ks_indexer-service.json')
ks_indexer_role_confs = _load_json(path_to_config +
'ks_indexer-hbase_indexer.json')
impala_service_confs = _load_json(path_to_config + 'impala-service.json')
impala_catalogserver_confs = _load_json(path_to_config +
'impala-catalogserver.json')
impala_impalad_confs = _load_json(path_to_config +
'impala-impalad.json')
impala_llama_confs = _load_json(path_to_config +
'impala-llama.json')
impala_statestore_confs = _load_json(path_to_config +
'impala-statestore.json')
priority_one_confs = _load_json(path_to_config + 'priority-one-confs.json')
def _prepare_value(value):
if not value:
return ""
return value.replace('\n', ' ')
def _init_configs(confs, app_target, scope):
cfgs = []
for cfg in confs:
priority = 1 if cfg['name'] in priority_one_confs else 2
c = p.Config(cfg['name'], app_target, scope, priority=priority,
default_value=_prepare_value(cfg['value']),
description=cfg['desc'], is_optional=True)
cfgs.append(c)
return cfgs
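# Illustrative sketch only (the entry below is hypothetical, not taken from the
# bundled resource files): a JSON item such as
#   {"name": "dfs_replication", "value": "3", "desc": "Replication factor"}
# passed through _init_configs(confs, 'HDFS', 'cluster') becomes
#   p.Config('dfs_replication', 'HDFS', 'cluster', priority=2,
#            default_value='3', description='Replication factor',
#            is_optional=True)
# with priority=1 instead when the name is listed in priority-one-confs.json.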
def _get_ng_plugin_configs():
cfg = []
cfg += _init_configs(hdfs_confs, 'HDFS', 'cluster')
cfg += _init_configs(namenode_confs, 'NAMENODE', 'node')
cfg += _init_configs(datanode_confs, 'DATANODE', 'node')
cfg += _init_configs(secnamenode_confs, 'SECONDARYNAMENODE', 'node')
cfg += _init_configs(yarn_confs, 'YARN', 'cluster')
cfg += _init_configs(resourcemanager_confs, 'RESOURCEMANAGER', 'node')
cfg += _init_configs(mapred_confs, 'MAPREDUCE', 'node')
cfg += _init_configs(nodemanager_confs, 'NODEMANAGER', 'node')
cfg += _init_configs(jobhistory_confs, 'JOBHISTORY', 'node')
cfg += _init_configs(oozie_service_confs, 'OOZIE', 'cluster')
cfg += _init_configs(oozie_role_confs, 'OOZIE', 'node')
cfg += _init_configs(hive_service_confs, 'HIVE', 'cluster')
cfg += _init_configs(hive_metastore_confs, 'HIVEMETASTORE', 'node')
cfg += _init_configs(hive_hiveserver_confs, 'HIVESERVER', 'node')
cfg += _init_configs(hive_webhcat_confs, 'WEBHCAT', 'node')
cfg += _init_configs(hue_service_confs, 'HUE', 'cluster')
cfg += _init_configs(hue_role_confs, 'HUE', 'node')
cfg += _init_configs(spark_service_confs, 'SPARK_ON_YARN', 'cluster')
cfg += _init_configs(spark_role_confs, 'SPARK_ON_YARN', 'node')
cfg += _init_configs(zookeeper_service_confs, 'ZOOKEEPER', 'cluster')
cfg += _init_configs(zookeeper_server_confs, 'ZOOKEEPER', 'node')
cfg += _init_configs(hbase_confs, 'HBASE', 'cluster')
cfg += _init_configs(master_confs, 'MASTER', 'node')
cfg += _init_configs(regionserver_confs, 'REGIONSERVER', 'node')
cfg += _init_configs(flume_service_confs, 'FLUME', 'cluster')
cfg += _init_configs(flume_agent_confs, 'FLUME', 'node')
cfg += _init_configs(sentry_service_confs, 'SENTRY', 'cluster')
cfg += _init_configs(sentry_server_confs, 'SENTRY', 'node')
cfg += _init_configs(solr_service_confs, 'SOLR', 'cluster')
cfg += _init_configs(solr_server_confs, 'SOLR', 'node')
cfg += _init_configs(sqoop_service_confs, 'SQOOP', 'cluster')
cfg += _init_configs(sqoop_server_confs, 'SQOOP', 'node')
cfg += _init_configs(ks_indexer_service_confs, 'KS_INDEXER', 'cluster')
cfg += _init_configs(ks_indexer_role_confs, 'KS_INDEXER', 'node')
cfg += _init_configs(impala_service_confs, 'IMPALA', 'cluster')
cfg += _init_configs(impala_catalogserver_confs, 'CATALOGSERVER', 'node')
cfg += _init_configs(impala_impalad_confs, 'IMPALAD', 'node')
cfg += _init_configs(impala_statestore_confs, 'STATESTORE', 'node')
return cfg
def get_plugin_configs():
cluster_wide = _get_cluster_plugin_configs()
ng_wide = _get_ng_plugin_configs()
return cluster_wide + ng_wide
def _get_config_value(cluster, key):
return cluster.cluster_configs.get(
'general', {}).get(key.name, key.default_value)
def get_cdh5_repo_url(cluster):
return _get_config_value(cluster, CDH5_REPO_URL)
def get_cdh5_key_url(cluster):
return _get_config_value(cluster, CDH5_REPO_KEY_URL)
def get_cm5_repo_url(cluster):
return _get_config_value(cluster, CM5_REPO_URL)
def get_cm5_key_url(cluster):
return _get_config_value(cluster, CM5_REPO_KEY_URL)
def is_swift_enabled(cluster):
return _get_config_value(cluster, ENABLE_SWIFT)
def is_hbase_common_lib_enabled(cluster):
return _get_config_value(cluster, ENABLE_HBASE_COMMON_LIB)
def get_swift_lib_url(cluster):
return _get_config_value(cluster, SWIFT_LIB_URL)
def get_extjs_lib_url(cluster):
return _get_config_value(cluster, EXTJS_LIB_URL)
|
|
# Copyright (c) 2017, John Skinner
import database.entity
import enum
class JobState(enum.Enum):
UNSTARTED = 0
RUNNING = 1
DONE = 2
class TaskType(enum.Enum):
"""
These are the 8 kinds of tasks in this system.
They are things that are done asynchronously, and take significant time.
"""
GENERATE_DATASET = 0
IMPORT_DATASET = 1
TRAIN_SYSTEM = 2
IMPORT_SYSTEM = 3
TEST_SYSTEM = 4
BENCHMARK_RESULT = 5
COMPARE_TRIALS = 6
COMPARE_BENCHMARKS = 7
class Task(database.entity.Entity, metaclass=database.entity.AbstractEntityMetaclass):
"""
A Task entity tracks the execution of a specific task.
The only two properties you should use here are 'is_finished' and 'result',
to check if your tasks are done and get their output.
NEVER EVER CREATE THESE OUTSIDE TASK MANAGER.
Instead, call the appropriate get method on task manager to get a new task instance.
"""
def __init__(self, state=JobState.UNSTARTED, node_id=None, job_id=None, result=None, num_cpus=1, num_gpus=0,
memory_requirements='3GB', expected_duration='1:00:00', id_=None):
super().__init__(id_=id_)
self._state = JobState(state)
self._node_id = node_id
self._job_id = int(job_id) if job_id is not None else None
self._result = result
self._num_cpus = int(num_cpus)
self._num_gpus = int(num_gpus)
self._memory_requirements = memory_requirements
self._expected_duration = expected_duration
self._updates = {}
@property
def is_finished(self):
"""
Is the job already done?
Experiments should use this to check if result will be set.
:return: True iff the task has been completed
"""
return JobState.DONE == self._state
@property
def result(self):
"""
Get the result from running this task, usually a database id.
:return:
"""
return self._result
@property
def node_id(self):
"""
Get the id of the job system node running this task if it is running.
You should not need this property; TaskManager handles it.
:return: String node id from the job system configuration.
"""
return self._node_id
@property
def job_id(self):
"""
Get the id of the job with the job system.
You should not need this property; TaskManager handles it.
:return: Integer
"""
return self._job_id
@property
def is_unstarted(self):
"""
Is the job unstarted? This is used internally by TaskManager to choose which new tasks to queue;
you shouldn't need to use it.
:return:
"""
return JobState.UNSTARTED == self._state
@property
def num_cpus(self):
return self._num_cpus
@property
def num_gpus(self):
return self._num_gpus
@property
def memory_requirements(self):
return self._memory_requirements
@property
def expected_duration(self):
return self._expected_duration
def run_task(self, db_client):
"""
Actually perform the task.
Different subtypes do different things.
:return:
"""
pass
def mark_job_started(self, node_id, job_id):
if JobState.UNSTARTED == self._state:
self._state = JobState.RUNNING
self._node_id = node_id
self._job_id = job_id
if '$set' not in self._updates:
self._updates['$set'] = {}
self._updates['$set']['state'] = JobState.RUNNING.value
self._updates['$set']['node_id'] = node_id
self._updates['$set']['job_id'] = job_id
# Don't unset the node/job ids anymore; we're setting them to something else instead
if '$unset' in self._updates:
if 'node_id' in self._updates['$unset']:
del self._updates['$unset']['node_id']
if 'job_id' in self._updates['$unset']:
del self._updates['$unset']['job_id']
if self._updates['$unset'] == {}:
del self._updates['$unset']
def mark_job_complete(self, result):
if JobState.RUNNING == self._state:
self._state = JobState.DONE
self._result = result
self._node_id = None
self._job_id = None
if '$set' not in self._updates:
self._updates['$set'] = {}
self._updates['$set']['state'] = JobState.DONE.value
self._updates['$set']['result'] = result
if '$unset' not in self._updates:
self._updates['$unset'] = {}
self._updates['$unset']['node_id'] = True
self._updates['$unset']['job_id'] = True
# Don't set the node/job ids anymore; they're being unset
if 'node_id' in self._updates['$set']:
del self._updates['$set']['node_id']
if 'job_id' in self._updates['$set']:
del self._updates['$set']['job_id']
def mark_job_failed(self):
if JobState.RUNNING == self._state:
self._state = JobState.UNSTARTED
self._node_id = None
self._job_id = None
if '$set' not in self._updates:
self._updates['$set'] = {}
self._updates['$set']['state'] = JobState.UNSTARTED.value
if '$unset' not in self._updates:
self._updates['$unset'] = {}
self._updates['$unset']['node_id'] = True
self._updates['$unset']['job_id'] = True
# Don't set the node/job ids anymore; they're being unset
if 'node_id' in self._updates['$set']:
del self._updates['$set']['node_id']
if 'job_id' in self._updates['$set']:
del self._updates['$set']['job_id']
def save_updates(self, collection):
if self.identifier is None:
s_task = self.serialize()
id_ = collection.insert(s_task)
self.refresh_id(id_)
elif len(self._updates) > 0:
collection.update({'_id': self.identifier}, self._updates)
self._updates = {}
def serialize(self):
serialized = super().serialize()
serialized['state'] = self._state.value
serialized['num_cpus'] = self._num_cpus
serialized['num_gpus'] = self._num_gpus
serialized['memory_requirements'] = self._memory_requirements
serialized['expected_duration'] = self._expected_duration
if self._state:
serialized['node_id'] = self.node_id
serialized['job_id'] = self.job_id
if self.is_finished:
serialized['result'] = self.result
return serialized
@classmethod
def deserialize(cls, serialized_representation, db_client, **kwargs):
if 'state' in serialized_representation:
kwargs['state'] = serialized_representation['state']
if 'num_cpus' in serialized_representation:
kwargs['num_cpus'] = serialized_representation['num_cpus']
if 'num_gpus' in serialized_representation:
kwargs['num_gpus'] = serialized_representation['num_gpus']
if 'memory_requirements' in serialized_representation:
kwargs['memory_requirements'] = serialized_representation['memory_requirements']
if 'expected_duration' in serialized_representation:
kwargs['expected_duration'] = serialized_representation['expected_duration']
if 'node_id' in serialized_representation:
kwargs['node_id'] = serialized_representation['node_id']
if 'job_id' in serialized_representation:
kwargs['job_id'] = serialized_representation['job_id']
if 'result' in serialized_representation:
kwargs['result'] = serialized_representation['result']
return super().deserialize(serialized_representation, db_client, **kwargs)
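# A rough lifecycle sketch (illustrative only; 'MyTask', 'tasks_collection' and
# 'result_id' are hypothetical names, and tasks should normally be obtained via
# the task manager as the class docstring insists):
#   task = MyTask(num_cpus=2, memory_requirements='8GB')
#   task.save_updates(tasks_collection)   # no id yet, so this inserts the document
#   task.mark_job_started('node-1', 42)
#   task.save_updates(tasks_collection)   # applies {'$set': {'state': 1, ...}}
#   task.mark_job_complete(result_id)
#   task.save_updates(tasks_collection)   # sets 'result', unsets node_id/job_id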
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class NotificationMessages(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'additional_info': 'dict(str, str)',
'content': 'str',
'created_epoch_millis': 'int',
'creator_id': 'str',
'deleted': 'bool',
'id': 'str',
'method': 'str',
'name': 'str',
'subject': 'str',
'updated_epoch_millis': 'int',
'updater_id': 'str'
}
attribute_map = {
'additional_info': 'additionalInfo',
'content': 'content',
'created_epoch_millis': 'createdEpochMillis',
'creator_id': 'creatorId',
'deleted': 'deleted',
'id': 'id',
'method': 'method',
'name': 'name',
'subject': 'subject',
'updated_epoch_millis': 'updatedEpochMillis',
'updater_id': 'updaterId'
}
def __init__(self, additional_info=None, content=None, created_epoch_millis=None, creator_id=None, deleted=None, id=None, method=None, name=None, subject=None, updated_epoch_millis=None, updater_id=None, _configuration=None): # noqa: E501
"""NotificationMessages - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._additional_info = None
self._content = None
self._created_epoch_millis = None
self._creator_id = None
self._deleted = None
self._id = None
self._method = None
self._name = None
self._subject = None
self._updated_epoch_millis = None
self._updater_id = None
self.discriminator = None
if additional_info is not None:
self.additional_info = additional_info
if content is not None:
self.content = content
if created_epoch_millis is not None:
self.created_epoch_millis = created_epoch_millis
if creator_id is not None:
self.creator_id = creator_id
if deleted is not None:
self.deleted = deleted
if id is not None:
self.id = id
if method is not None:
self.method = method
if name is not None:
self.name = name
if subject is not None:
self.subject = subject
if updated_epoch_millis is not None:
self.updated_epoch_millis = updated_epoch_millis
if updater_id is not None:
self.updater_id = updater_id
@property
def additional_info(self):
"""Gets the additional_info of this NotificationMessages. # noqa: E501
:return: The additional_info of this NotificationMessages. # noqa: E501
:rtype: dict(str, str)
"""
return self._additional_info
@additional_info.setter
def additional_info(self, additional_info):
"""Sets the additional_info of this NotificationMessages.
:param additional_info: The additional_info of this NotificationMessages. # noqa: E501
:type: dict(str, str)
"""
self._additional_info = additional_info
@property
def content(self):
"""Gets the content of this NotificationMessages. # noqa: E501
:return: The content of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this NotificationMessages.
:param content: The content of this NotificationMessages. # noqa: E501
:type: str
"""
self._content = content
@property
def created_epoch_millis(self):
"""Gets the created_epoch_millis of this NotificationMessages. # noqa: E501
:return: The created_epoch_millis of this NotificationMessages. # noqa: E501
:rtype: int
"""
return self._created_epoch_millis
@created_epoch_millis.setter
def created_epoch_millis(self, created_epoch_millis):
"""Sets the created_epoch_millis of this NotificationMessages.
:param created_epoch_millis: The created_epoch_millis of this NotificationMessages. # noqa: E501
:type: int
"""
self._created_epoch_millis = created_epoch_millis
@property
def creator_id(self):
"""Gets the creator_id of this NotificationMessages. # noqa: E501
:return: The creator_id of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._creator_id
@creator_id.setter
def creator_id(self, creator_id):
"""Sets the creator_id of this NotificationMessages.
:param creator_id: The creator_id of this NotificationMessages. # noqa: E501
:type: str
"""
self._creator_id = creator_id
@property
def deleted(self):
"""Gets the deleted of this NotificationMessages. # noqa: E501
:return: The deleted of this NotificationMessages. # noqa: E501
:rtype: bool
"""
return self._deleted
@deleted.setter
def deleted(self, deleted):
"""Sets the deleted of this NotificationMessages.
:param deleted: The deleted of this NotificationMessages. # noqa: E501
:type: bool
"""
self._deleted = deleted
@property
def id(self):
"""Gets the id of this NotificationMessages. # noqa: E501
:return: The id of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this NotificationMessages.
:param id: The id of this NotificationMessages. # noqa: E501
:type: str
"""
self._id = id
@property
def method(self):
"""Gets the method of this NotificationMessages. # noqa: E501
The notification method, can either be WEBHOOK, EMAIL or PAGERDUTY # noqa: E501
:return: The method of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this NotificationMessages.
The notification method, can either be WEBHOOK, EMAIL or PAGERDUTY # noqa: E501
:param method: The method of this NotificationMessages. # noqa: E501
:type: str
"""
allowed_values = ["WEBHOOK", "PAGERDUTY", "EMAIL"] # noqa: E501
if (self._configuration.client_side_validation and
method not in allowed_values):
raise ValueError(
"Invalid value for `method` ({0}), must be one of {1}" # noqa: E501
.format(method, allowed_values)
)
self._method = method
@property
def name(self):
"""Gets the name of this NotificationMessages. # noqa: E501
The alert target name, easier to read than ID # noqa: E501
:return: The name of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this NotificationMessages.
The alert target name, easier to read than ID # noqa: E501
:param name: The name of this NotificationMessages. # noqa: E501
:type: str
"""
self._name = name
@property
def subject(self):
"""Gets the subject of this NotificationMessages. # noqa: E501
:return: The subject of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._subject
@subject.setter
def subject(self, subject):
"""Sets the subject of this NotificationMessages.
:param subject: The subject of this NotificationMessages. # noqa: E501
:type: str
"""
self._subject = subject
@property
def updated_epoch_millis(self):
"""Gets the updated_epoch_millis of this NotificationMessages. # noqa: E501
:return: The updated_epoch_millis of this NotificationMessages. # noqa: E501
:rtype: int
"""
return self._updated_epoch_millis
@updated_epoch_millis.setter
def updated_epoch_millis(self, updated_epoch_millis):
"""Sets the updated_epoch_millis of this NotificationMessages.
:param updated_epoch_millis: The updated_epoch_millis of this NotificationMessages. # noqa: E501
:type: int
"""
self._updated_epoch_millis = updated_epoch_millis
@property
def updater_id(self):
"""Gets the updater_id of this NotificationMessages. # noqa: E501
:return: The updater_id of this NotificationMessages. # noqa: E501
:rtype: str
"""
return self._updater_id
@updater_id.setter
def updater_id(self, updater_id):
"""Sets the updater_id of this NotificationMessages.
:param updater_id: The updater_id of this NotificationMessages. # noqa: E501
:type: str
"""
self._updater_id = updater_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NotificationMessages, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NotificationMessages):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NotificationMessages):
return True
return self.to_dict() != other.to_dict()
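# Minimal usage sketch (illustrative; the field values are invented):
#   msg = NotificationMessages(method='EMAIL', subject='Alert fired')
#   msg.to_dict()    # plain dict keyed by the swagger_types attribute names
#   msg == NotificationMessages(method='EMAIL', subject='Alert fired')  # True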
|
|
# -*- coding: utf-8 -*-
# pylint: disable=W0102
from datetime import datetime, date
import nose
import numpy as np
import re
import itertools
from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex,
Series, Categorical)
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import (BlockPlacement, SingleBlockManager,
make_block, BlockManager)
import pandas.core.algorithms as algos
import pandas.util.testing as tm
import pandas as pd
from pandas import lib
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
randn, assert_series_equal)
from pandas.compat import zip, u
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
assert (left.dtype == right.dtype)
tm.assertIsInstance(left.mgr_locs, lib.BlockPlacement)
tm.assertIsInstance(right.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array,
right.mgr_locs.as_array)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(
arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N, )
shape = (num_items, ) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('b', 'bool', ):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
# datetime with tz
m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category', ):
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
elif typestr in ('category2', ):
values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
])
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
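# For example (illustrative), create_block('f8', [0, 2, 4]) returns a float64
# block of shape (3, N) whose mgr_locs point at positions 0, 2 and 4.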
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
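For example (illustrative), 'a,b: f8; c: object' yields a manager with one
float64 block holding items a and b and one object block holding item c.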
"""
if item_shape is None:
item_shape = (N, )
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
if not len(d):
continue
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr,
placement,
item_shape=item_shape,
num_offset=num_offset, ))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
class TestBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
self.assertEqual(int32block.dtype, np.int32)
def test_pickle(self):
def _check(blk):
assert_block_equal(self.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
tm.assertIsInstance(self.fblock.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
np.array([0, 2, 4], dtype=np.int64))
def test_attrs(self):
self.assertEqual(self.fblock.shape, self.fblock.values.shape)
self.assertEqual(self.fblock.dtype, self.fblock.values.dtype)
self.assertEqual(len(self.fblock), len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
np.array([0, 1, 2, 3], dtype=np.int64))
tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assertIsNot(cop, self.fblock)
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
tm.assertIsInstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([2, 4], dtype=np.int64))
self.assertTrue((newb.values[0] == 1).all())
newb = self.fblock.copy()
newb.delete(1)
tm.assertIsInstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 4], dtype=np.int64))
self.assertTrue((newb.values[1] == 2).all())
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 2], dtype=np.int64))
self.assertTrue((newb.values[1] == 1).all())
newb = self.fblock.copy()
self.assertRaises(Exception, newb.delete, 3)
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
# bblock = get_bool_ex(['f'])
# bs = list(bblock.split_block_at('f'))
# self.assertEqual(len(bs), 0)
class TestDatetimeBlock(tm.TestCase):
_multiprocess_can_split_ = True
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[2]
self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
# coerce different types of date objects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[2]
self.assertEqual(np.int64, type(coerced))
self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.mgr = create_mgr(
'a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
self.assertEqual(mgr.nblocks, 2)
self.assertEqual(len(mgr), 6)
def test_is_mixed_dtype(self):
self.assertFalse(create_mgr('a,b:f8').is_mixed_type)
self.assertFalse(create_mgr('a:f8-1; b:f8-2').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: f4').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: object').is_mixed_type)
def test_is_indexed_like(self):
mgr1 = create_mgr('a,b: f8')
mgr2 = create_mgr('a:i8; b:bool')
mgr3 = create_mgr('a,b,c: f8')
self.assertTrue(mgr1._is_indexed_like(mgr1))
self.assertTrue(mgr1._is_indexed_like(mgr2))
self.assertTrue(mgr1._is_indexed_like(mgr3))
self.assertFalse(mgr1._is_indexed_like(mgr1.get_slice(
slice(-1), axis=1)))
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
self.assertRaises(AssertionError, BlockManager, blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self):
self.assertIn('a', self.mgr)
self.assertNotIn('baz', self.mgr)
def test_pickle(self):
mgr2 = self.round_trip_pickle(self.mgr)
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
# self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
smgr2 = self.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item, fastpath=False)[i]
self.assertEqual(res, exp)
exp = self.mgr.get(item).internal_values()[i]
self.assertEqual(res, exp)
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(), placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').internal_values(), values[0])
assert_almost_equal(mgr.get('b').internal_values(), values[1])
assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3, ))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
np.array([0] * 3))
tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
np.array(['bar'] * 3, dtype=np.object_))
tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
np.array([2] * 3))
tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
np.array(['foo'] * 3, dtype=np.object_))
def test_insert(self):
self.mgr.insert(0, 'inserted', np.arange(N))
self.assertEqual(self.mgr.items[0], 'inserted')
assert_almost_equal(self.mgr.get('inserted'), np.arange(N))
for blk in self.mgr.blocks:
yield self.assertIs, self.mgr.items, blk.ref_items
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assertEqual(self.mgr.get('baz').dtype, np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assertEqual(mgr2.get('baz').dtype, np.object_)
mgr2.set('quux', randn(N).astype(int))
self.assertEqual(mgr2.get('quux').dtype, np.int_)
mgr2.set('quux', randn(N))
self.assertEqual(mgr2.get('quux').dtype, np.float_)
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
self.assertEqual(sorted(df.blocks.keys()), ['float64', 'int64'])
assert_frame_equal(df.blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(df.blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
def test_copy(self):
cp = self.mgr.copy(deep=False)
for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
# view assertion
self.assertTrue(cp_blk.equals(blk))
self.assertTrue(cp_blk.values.base is blk.values.base)
cp = self.mgr.copy(deep=True)
for blk, cp_blk in zip(self.mgr.blocks, cp.blocks):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
self.assertTrue(cp_blk.equals(blk))
if cp_blk.values.base is not None and blk.values.base is not None:
self.assertFalse(cp_blk.values.base is blk.values.base)
else:
self.assertTrue(cp_blk.values.base is None and blk.values.base
is None)
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
self.assertEqual(mgr.as_matrix().dtype, np.float64)
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
self.assertEqual(len(mgr.blocks), 3)
self.assertIsInstance(mgr, BlockManager)
# what to test here?
def test_as_matrix_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
self.assertEqual(mgr.as_matrix().dtype, np.float64)
mgr = create_mgr('c: f4; d: f2')
self.assertEqual(mgr.as_matrix().dtype, np.float32)
def test_as_matrix_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
self.assertEqual(mgr.as_matrix().dtype, np.bool_)
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int64)
mgr = create_mgr('c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int32)
def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]')
def test_as_matrix_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
self.assertEqual(mgr.get('h').dtype, 'datetime64[ns, US/Eastern]')
self.assertEqual(mgr.get('g').dtype, 'datetime64[ns, CET]')
self.assertEqual(mgr.as_matrix().dtype, 'object')
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('d').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, raise_on_error=False)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
self.assertEqual(tmgr.get('f').dtype.type, t)
self.assertEqual(tmgr.get('g').dtype.type, t)
self.assertEqual(tmgr.get('a').dtype.type, np.object_)
self.assertEqual(tmgr.get('b').dtype.type, np.object_)
if t != np.int64:
self.assertEqual(tmgr.get('d').dtype.type, np.datetime64)
else:
self.assertEqual(tmgr.get('d').dtype.type, t)
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
self.assertEqual(len(old_blocks), len(new_blocks))
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
self.assertTrue(found)
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
self.assertTrue(found)
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int32)
self.assertEqual(new_mgr.get('bool').dtype, np.bool_)
self.assertEqual(new_mgr.get('dt').dtype.type, np.datetime64)
self.assertEqual(new_mgr.get('i').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
self.assertEqual(new_mgr.get('h').dtype, np.float16)
def test_interleave(self):
# self
for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
'm8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
self.assertEqual(mgr.as_matrix().dtype, dtype)
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
self.assertEqual(mgr.as_matrix().dtype, dtype)
# will be converted according to the actual dtype of the underlying values
mgr = create_mgr('a: category')
self.assertEqual(mgr.as_matrix().dtype, 'i8')
mgr = create_mgr('a: category; b: category')
self.assertEqual(mgr.as_matrix().dtype, 'i8')
mgr = create_mgr('a: category; b: category2')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: category2')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: category2; b: category2')
self.assertEqual(mgr.as_matrix().dtype, 'object')
# combinations
mgr = create_mgr('a: f8')
self.assertEqual(mgr.as_matrix().dtype, 'f8')
mgr = create_mgr('a: f8; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'f8')
mgr = create_mgr('a: f4; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'f4')
mgr = create_mgr('a: f4; b: i8; d: object')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: bool; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: complex')
self.assertEqual(mgr.as_matrix().dtype, 'complex')
mgr = create_mgr('a: f8; b: category')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: category')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: bool')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: m8[ns]; b: bool')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: m8[ns]; b: i8')
self.assertEqual(mgr.as_matrix().dtype, 'object')
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
self.assertEqual(mgr.as_matrix().dtype, 'object')
def test_interleave_non_unique_cols(self):
df = DataFrame([
[pd.Timestamp('20130101'), 3.5],
[pd.Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
self.assertEqual(df_unique.values.shape, df.values.shape)
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self):
self.mgr.set('f', randn(N))
self.mgr.set('d', randn(N))
self.mgr.set('b', randn(N))
self.mgr.set('g', randn(N))
self.mgr.set('h', randn(N))
# we have datetime/tz blocks in self.mgr
cons = self.mgr.consolidate()
self.assertEqual(cons.nblocks, 4)
cons = self.mgr.consolidate().get_numeric_data()
self.assertEqual(cons.nblocks, 1)
tm.assertIsInstance(cons.blocks[0].mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
np.arange(len(cons.items), dtype=np.int64))
def test_reindex_index(self):
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
self.assertEqual(reindexed.nblocks, 2)
tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
assert_almost_equal(
mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
assert_almost_equal(
mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
assert_almost_equal(
mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
assert_almost_equal(
mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
assert_almost_equal(
mgr.get('g').internal_values(),
reindexed.get('g').internal_values())
assert_almost_equal(
mgr.get('c').internal_values(),
reindexed.get('c').internal_values())
assert_almost_equal(
mgr.get('a').internal_values(),
reindexed.get('a').internal_values())
assert_almost_equal(
mgr.get('d').internal_values(),
reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
self.assertEqual(result.shape, (6, 2))
self.assertEqual(result.axes[1][0], ('bar', 'one'))
self.assertEqual(result.axes[1][1], ('bar', 'two'))
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
assert_almost_equal(
mgr.get('float', fastpath=False), numeric.get('float',
fastpath=False))
assert_almost_equal(
mgr.get('float').internal_values(),
numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, pd.Index(['bool']))
assert_almost_equal(mgr.get('bool', fastpath=False),
bools.get('bool', fastpath=False))
assert_almost_equal(
mgr.get('bool').internal_values(),
bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr(u('b,\u05d0: object')))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.ix[:, u("\u05d0")] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
self.assertTrue(bm1.equals(bm2))
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
self.assertTrue(bm1.equals(bm2))
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
self.assertTrue(bm.equals(bm_this))
self.assertTrue(bm_this.equals(bm))
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
self.assertEqual(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.])
class TestIndexing(object):
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
# NOTE: sparse blocks (SparseBlock with fill_value != np.nan) fail a lot of
# tests and are disabled.
MANAGERS = [
create_single_mgr('f8', N),
create_single_mgr('i8', N),
# create_single_mgr('sparse', N),
create_single_mgr('sparse_na', N),
# 2-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
# create_mgr('a: sparse', item_shape=(N,)),
create_mgr('a: sparse_na', item_shape=(N,)),
# 3-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
# create_mgr('a: sparse', item_shape=(1, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
mat = mgr.as_matrix()
# we may be using an ndarray to test slicing and
# it might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate([slobj, np.zeros(
len(ax) - len(slobj), dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None), ) * axis + (slobj, )
tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_matrix(),
check_dtype=False)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
yield assert_slice_ok, mgr, ax, slice(None)
yield assert_slice_ok, mgr, ax, slice(3)
yield assert_slice_ok, mgr, ax, slice(100)
yield assert_slice_ok, mgr, ax, slice(1, 4)
yield assert_slice_ok, mgr, ax, slice(3, 0, -2)
# boolean mask
yield assert_slice_ok, mgr, ax, np.array([], dtype=np.bool_)
yield (assert_slice_ok, mgr, ax,
np.ones(mgr.shape[ax], dtype=np.bool_))
yield (assert_slice_ok, mgr, ax,
np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
yield (assert_slice_ok, mgr, ax,
np.arange(mgr.shape[ax]) % 3 == 0)
yield (assert_slice_ok, mgr, ax, np.array(
[True, True, False], dtype=np.bool_))
# fancy indexer
yield assert_slice_ok, mgr, ax, []
yield assert_slice_ok, mgr, ax, lrange(mgr.shape[ax])
if mgr.shape[ax] >= 3:
yield assert_slice_ok, mgr, ax, [0, 1, 2]
yield assert_slice_ok, mgr, ax, [-1, -2, -3]
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_matrix()
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
taken.as_matrix(), check_dtype=False)
tm.assert_index_equal(mgr.axes[axis].take(indexer),
taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
yield assert_take_ok, mgr, ax, []
yield assert_take_ok, mgr, ax, [0, 0, 0]
yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax])
if mgr.shape[ax] >= 3:
yield assert_take_ok, mgr, ax, [0, 1, 2]
yield assert_take_ok, mgr, ax, [-1, -2, -3]
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
mat = mgr.as_matrix()
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
fill_value=fill_value),
reindexed.as_matrix(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
yield (assert_reindex_axis_is_ok, mgr, ax,
pd.Index([]), fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax, mgr.axes[ax],
fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][[0, 0, 0]], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']), fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
fill_value)
if mgr.shape[ax] >= 3:
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][:-3], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][-3::-1], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
def test_reindex_indexer(self):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
fill_value):
mat = mgr.as_matrix()
reindexed_mat = algos.take_nd(mat, indexer, axis,
fill_value=fill_value)
reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(reindexed_mat,
reindexed.as_matrix(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index([]), [], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo'] * mgr.shape[ax]),
np.arange(mgr.shape[ax]), fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax, mgr.axes[ax],
np.arange(mgr.shape[ax])[::-1], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 0, 0], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[-1, 0, -1], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
[-1, -1, -1], fill_value)
if mgr.shape[ax] >= 3:
yield (assert_reindex_indexer_is_ok, mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement(tm.TestCase):
_multiprocess_can_split_ = True
def test_slice_len(self):
self.assertEqual(len(BlockPlacement(slice(0, 4))), 4)
self.assertEqual(len(BlockPlacement(slice(0, 4, 2))), 2)
self.assertEqual(len(BlockPlacement(slice(0, 3, 2))), 2)
self.assertEqual(len(BlockPlacement(slice(0, 1, 2))), 1)
self.assertEqual(len(BlockPlacement(slice(1, 0, -1))), 1)
def test_zero_step_raises(self):
self.assertRaises(ValueError, BlockPlacement, slice(1, 1, 0))
self.assertRaises(ValueError, BlockPlacement, slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
self.assertRaisesRegexp(ValueError, "unbounded slice",
lambda: BlockPlacement(slc))
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
assert_unbounded_slice_error(slice(None, None, -1))
assert_unbounded_slice_error(slice(None, 10, -1))
# These are "unbounded" because a negative index changes meaning depending
# on the container shape.
assert_unbounded_slice_error(slice(-1, None))
assert_unbounded_slice_error(slice(None, -1))
assert_unbounded_slice_error(slice(-1, -1))
assert_unbounded_slice_error(slice(-1, None, -1))
assert_unbounded_slice_error(slice(None, -1, -1))
assert_unbounded_slice_error(slice(-1, -1, -1))
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
self.assertTrue(not BlockPlacement(slc).is_slice_like)
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
self.assertTrue(not BlockPlacement(slice(0, 0)).is_slice_like)
self.assertTrue(not BlockPlacement(slice(100, 100)).is_slice_like)
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
self.assertEqual(BlockPlacement(arr).as_slice, slc)
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
self.assertTrue(not BlockPlacement(arr).is_slice_like)
assert_not_slice_like([])
assert_not_slice_like([-1])
assert_not_slice_like([-1, -2, -3])
assert_not_slice_like([-10])
assert_not_slice_like([-1])
assert_not_slice_like([-1, 0, 1, 2])
assert_not_slice_like([-2, 0, 2, 4])
assert_not_slice_like([1, 0, -1])
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
self.assertEqual(list(BlockPlacement(slice(0, 3))), [0, 1, 2])
self.assertEqual(list(BlockPlacement(slice(0, 0))), [])
self.assertEqual(list(BlockPlacement(slice(3, 0))), [])
self.assertEqual(list(BlockPlacement(slice(3, 0, -1))), [3, 2, 1])
self.assertEqual(list(BlockPlacement(slice(3, None, -1))),
[3, 2, 1, 0])
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray, dtype=np.int64))
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
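# add() is element-wise (as the assertions below show): [0..4] + 1 -> slice(1, 6, 1);
# [0..4] + [0..4] -> [0, 2, 4, 6, 8] == slice(0, 10, 2); [0..4] + [5, 4, 3, 2, 1] -> [5] * 5.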
self.assertEqual(bpl.add(1).as_slice, slice(1, 6, 1))
self.assertEqual(bpl.add(np.arange(5)).as_slice, slice(0, 10, 2))
self.assertEqual(list(bpl.add(np.arange(5, 0, -1))), [5, 5, 5, 5, 5])
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
self.assertEqual(list(BlockPlacement(val).add(inc)), result)
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
self.assertRaises(ValueError,
lambda: BlockPlacement(slice(1, 4)).add(-10))
self.assertRaises(ValueError,
lambda: BlockPlacement([1, 2, 4]).add(-10))
self.assertRaises(ValueError,
lambda: BlockPlacement(slice(2, None, -1)).add(-1))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
|
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory Fibre Channel Driver for OpenStack Cinder
Provides fibre channel specific LUN services for V6000 series flash
arrays.
This driver requires VMOS v6.3.0.4 or newer software on the array.
You will need to install the Violin Memory REST client library:
sudo pip install vmemclient
Set the following in the cinder.conf file to enable the VMEM V6000
Fibre Channel Driver along with the required flags:
volume_driver=cinder.volume.drivers.violin.v6000_fcp.V6000FCDriver
NOTE: this driver file requires the use of synchronization points for
certain types of backend operations, and as a result may not work
properly in an active-active HA configuration. See OpenStack Cinder
driver documentation for more information.
"""
from oslo_log import log as logging
from oslo_utils import units
from six.moves import range
from cinder import context
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.violin import v6000_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class V6000FCDriver(driver.FibreChannelDriver):
"""Executes commands relating to fibre channel based Violin Memory Arrays.
Version history:
1.0 - Initial driver
1.0.1 - Fixes polling for export completion
"""
VERSION = '1.0.1'
def __init__(self, *args, **kwargs):
super(V6000FCDriver, self).__init__(*args, **kwargs)
self.gateway_fc_wwns = []
self.stats = {}
self.configuration.append_config_values(v6000_common.violin_opts)
self.configuration.append_config_values(san.san_opts)
self.common = v6000_common.V6000Common(self.configuration)
self.lookup_service = fczm_utils.create_lookup_service()
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
{'name': self.__class__.__name__, 'vers': self.VERSION})
def do_setup(self, context):
"""Any initialization the driver does while starting."""
super(V6000FCDriver, self).do_setup(context)
self.common.do_setup(context)
self.gateway_fc_wwns = self._get_active_fc_targets()
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self.common.check_for_setup_error()
if len(self.gateway_fc_wwns) == 0:
raise exception.ViolinInvalidBackendConfig(
reason=_('No FCP targets found'))
def create_volume(self, volume):
"""Creates a volume."""
self.common._create_lun(volume)
def delete_volume(self, volume):
"""Deletes a volume."""
self.common._delete_lun(volume)
def extend_volume(self, volume, new_size):
"""Deletes a volume."""
self.common._extend_lun(volume, new_size)
def create_snapshot(self, snapshot):
"""Creates a snapshot from an existing volume."""
self.common._create_lun_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.common._delete_lun_snapshot(snapshot)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
ctxt = context.get_admin_context()
snapshot['size'] = snapshot['volume']['size']
self.common._create_lun(volume)
self.copy_volume_data(ctxt, snapshot, volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a full clone of the specified volume."""
ctxt = context.get_admin_context()
self.common._create_lun(volume)
self.copy_volume_data(ctxt, src_vref, volume)
def ensure_export(self, context, volume):
"""Synchronously checks and re-exports volumes at cinder start time."""
pass
def create_export(self, context, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection (target<-->initiator)."""
igroup = None
if self.configuration.use_igroups:
#
# Most drivers don't use igroups, because there are a
# number of issues with multipathing and iscsi/fcp where
# lun devices either aren't cleaned up properly or are
# stale (from previous scans).
#
# If the customer really wants igroups for whatever
# reason, we create a new igroup for each host/hypervisor.
# Every lun that is exported to the particular
# hypervisor/host will be contained in this igroup. This
# should prevent other hosts from seeing luns they aren't
# using when they perform scans.
#
igroup = self.common._get_igroup(volume, connector)
self._add_igroup_member(connector, igroup)
if isinstance(volume, models.Volume):
lun_id = self._export_lun(volume, connector, igroup)
else:
lun_id = self._export_snapshot(volume, connector, igroup)
self.common.vip.basic.save_config()
target_wwns, init_targ_map = self._build_initiator_target_map(
connector)
properties = {}
properties['target_discovered'] = True
properties['target_wwn'] = target_wwns
properties['target_lun'] = lun_id
properties['initiator_target_map'] = init_targ_map
LOG.debug("Return FC data for zone addition: %(properties)s.",
{'properties': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties}
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, force=False, **kwargs):
"""Terminates the connection (target<-->initiator)."""
if isinstance(volume, models.Volume):
self._unexport_lun(volume)
else:
self._unexport_snapshot(volume)
self.common.vip.basic.save_config()
properties = {}
if not self._is_initiator_connected_to_array(connector):
target_wwns, init_targ_map = self._build_initiator_target_map(
connector)
properties['target_wwn'] = target_wwns
properties['initiator_target_map'] = init_targ_map
LOG.debug("Return FC data for zone deletion: %(properties)s.",
{'properties': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties}
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
if refresh or not self.stats:
self._update_stats()
return self.stats
@utils.synchronized('vmem-export')
def _export_lun(self, volume, connector=None, igroup=None):
"""Generates the export configuration for the given volume.
The equivalent CLI command is "lun export container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
connector -- connector object provided by the Manager
igroup -- name of igroup to use for exporting
Returns:
lun_id -- the LUN ID assigned by the backend
"""
lun_id = -1
export_to = ''
v = self.common.vip
if igroup:
export_to = igroup
elif connector:
export_to = self._convert_wwns_openstack_to_vmem(
connector['wwpns'])
else:
raise exception.Error(_("No initiators found, cannot proceed"))
LOG.debug("Exporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
v.lun.export_lun, self.common._wait_for_export_state, '',
[self.common.container, volume['id'], 'all', export_to,
'auto'], [volume['id'], None, True])
except Exception:
LOG.exception(_LE("LUN export for %s failed!"), volume['id'])
raise
lun_id = self.common._get_lun_id(volume['id'])
return lun_id
@utils.synchronized('vmem-export')
def _unexport_lun(self, volume):
"""Removes the export configuration for the given volume.
The equivalent CLI command is "no lun export container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
"""
v = self.common.vip
LOG.debug("Unexporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
v.lun.unexport_lun, self.common._wait_for_export_state, '',
[self.common.container, volume['id'], 'all', 'all', 'auto'],
[volume['id'], None, False])
except exception.ViolinBackendErrNotFound:
LOG.debug("Lun %s already unexported, continuing.", volume['id'])
except Exception:
LOG.exception(_LE("LUN unexport for %s failed!"), volume['id'])
raise
@utils.synchronized('vmem-export')
def _export_snapshot(self, snapshot, connector=None, igroup=None):
"""Generates the export configuration for the given snapshot.
The equivalent CLI command is "snapshot export container
<container_name> lun <snapshot_name> name <volume_name>"
Arguments:
snapshot -- snapshot object provided by the Manager
connector -- connector object provided by the Manager
igroup -- name of igroup to use for exporting
Returns:
lun_id -- the LUN ID assigned by the backend
"""
lun_id = -1
export_to = ''
v = self.common.vip
if igroup:
export_to = igroup
elif connector:
export_to = self._convert_wwns_openstack_to_vmem(
connector['wwpns'])
else:
raise exception.Error(_("No initiators found, cannot proceed"))
LOG.debug("Exporting snapshot %s.", snapshot['id'])
try:
self.common._send_cmd(v.snapshot.export_lun_snapshot, '',
self.common.container, snapshot['volume_id'],
snapshot['id'], export_to, 'all', 'auto')
except Exception:
LOG.exception(_LE("Snapshot export for %s failed!"),
snapshot['id'])
raise
else:
self.common._wait_for_export_state(snapshot['volume_id'],
snapshot['id'], state=True)
lun_id = self.common._get_snapshot_id(snapshot['volume_id'],
snapshot['id'])
return lun_id
@utils.synchronized('vmem-export')
def _unexport_snapshot(self, snapshot):
"""Removes the export configuration for the given snapshot.
The equivalent CLI command is "no snapshot export container
<container_name> lun <snapshot_name> name <volume_name>"
Arguments:
snapshot -- snapshot object provided by the Manager
"""
v = self.common.vip
LOG.debug("Unexporting snapshot %s.", snapshot['id'])
try:
self.common._send_cmd(v.snapshot.unexport_lun_snapshot, '',
self.common.container, snapshot['volume_id'],
snapshot['id'], 'all', 'all', 'auto', False)
except Exception:
LOG.exception(_LE("Snapshot unexport for %s failed!"),
snapshot['id'])
raise
else:
self.common._wait_for_export_state(snapshot['volume_id'],
snapshot['id'], state=False)
def _add_igroup_member(self, connector, igroup):
"""Add an initiator to the openstack igroup so it can see exports.
The equivalent CLI command is "igroup addto name <igroup_name>
initiators <initiator_name>"
Arguments:
connector -- connector object provided by the Manager
"""
v = self.common.vip
wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])
LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s.",
{'wwpns': wwpns, 'igroup': igroup})
resp = v.igroup.add_initiators(igroup, wwpns)
if resp['code'] != 0:
raise exception.Error(
_('Failed to add igroup member: %(code)d, %(message)s') % resp)
def _build_initiator_target_map(self, connector):
"""Build the target_wwns and the initiator target map."""
target_wwns = []
init_targ_map = {}
if self.lookup_service:
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'], self.gateway_fc_wwns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(
set(init_targ_map[initiator]))
target_wwns = list(set(target_wwns))
else:
initiator_wwns = connector['wwpns']
target_wwns = self.gateway_fc_wwns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns
return target_wwns, init_targ_map
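# Illustrative shape of the return value (made-up WWNs): without a lookup
# service every initiator maps to every gateway target, e.g.
#   target_wwns   = ['2001000dec0fa001', '2001000dec0fa002']
#   init_targ_map = {'50014380186b3f65': ['2001000dec0fa001', '2001000dec0fa002']}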
def _is_initiator_connected_to_array(self, connector):
"""Check array to see if any initiator wwns still have active sessions.
We only need to check whether any one initiator WWN is
connected, since for Fibre Channel all initiators are connected
to all targets on a LUN export.
"""
v = self.common.vip
initiator_wwns = self._convert_wwns_openstack_to_vmem(
connector['wwpns'])
bn = "/vshare/config/export/container/%s/lun/**" \
% self.common.container
global_export_config = v.basic.get_node_values(bn)
for node in global_export_config:
if node.endswith(initiator_wwns[0]):
return True
return False
def _update_stats(self):
"""Update array stats.
Gathers array stats from the backend and converts them to GB values.
"""
data = {}
total_gb = 0
free_gb = 0
v = self.common.vip
master_cluster_id = list(v.basic.get_node_values(
'/cluster/state/master_id').values())[0]
bn1 = "/vshare/state/global/%s/container/%s/total_bytes" \
% (master_cluster_id, self.common.container)
bn2 = "/vshare/state/global/%s/container/%s/free_bytes" \
% (master_cluster_id, self.common.container)
resp = v.basic.get_node_values([bn1, bn2])
if bn1 in resp:
total_gb = resp[bn1] // units.Gi
else:
LOG.warning(_LW("Failed to receive update for total_gb stat!"))
if 'total_capacity_gb' in self.stats:
total_gb = self.stats['total_capacity_gb']
if bn2 in resp:
free_gb = resp[bn2] // units.Gi
else:
LOG.warning(_LW("Failed to receive update for free_gb stat!"))
if 'free_capacity_gb' in self.stats:
free_gb = self.stats['free_capacity_gb']
backend_name = self.configuration.volume_backend_name
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['vendor_name'] = 'Violin Memory, Inc.'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'fibre_channel'
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['total_capacity_gb'] = total_gb
data['free_capacity_gb'] = free_gb
for i in data:
LOG.debug("stat update: %(name)s=%(data)s.",
{'name': i, 'data': data[i]})
self.stats = data
def _get_active_fc_targets(self):
"""Get a list of gateway WWNs that can be used as FCP targets.
Arguments:
mg_conn -- active XG connection to one of the gateways
Returns:
active_gw_fcp_wwns -- list of WWNs
"""
v = self.common.vip
active_gw_fcp_wwns = []
gateway_ids = v.basic.get_node_values(
'/vshare/state/global/*').values()
for i in gateway_ids:
bn = "/vshare/state/global/%d/target/fc/**" % i
resp = v.basic.get_node_values(bn)
for node in resp:
if node.endswith('/wwn'):
active_gw_fcp_wwns.append(resp[node])
return self._convert_wwns_vmem_to_openstack(active_gw_fcp_wwns)
def _convert_wwns_openstack_to_vmem(self, wwns):
"""Convert a list of OpenStack WWNs to VMEM compatible WWN strings.
Input format is '50014380186b3f65', output format is
'wwn.50:01:43:80:18:6b:3f:65'.
Arguments:
wwns -- list of OpenStack-based WWN strings.
Returns:
output -- list of VMEM-based WWN strings.
"""
output = []
for w in wwns:
output.append('wwn.{0}'.format(
':'.join(w[x:x + 2] for x in range(0, len(w), 2))))
return output
def _convert_wwns_vmem_to_openstack(self, wwns):
"""Convert a list of VMEM WWNs to OpenStack compatible WWN strings.
Input format is 'wwn.50:01:43:80:18:6b:3f:65', output format
is '50014380186b3f65'.
Arguments:
wwns -- list of VMEM-based WWN strings.
Returns:
output -- list of OpenStack-based WWN strings.
"""
output = []
for w in wwns:
output.append(''.join(w[4:].split(':')))
return output
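# Worked example (values taken from the docstrings above): the two converters
# are inverses of each other:
#   _convert_wwns_openstack_to_vmem(['50014380186b3f65'])
#       -> ['wwn.50:01:43:80:18:6b:3f:65']
#   _convert_wwns_vmem_to_openstack(['wwn.50:01:43:80:18:6b:3f:65'])
#       -> ['50014380186b3f65']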
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
"""Reads an HTTP response from `stream` and runs callback with its
start_line, headers and body."""
chunks = []
class Delegate(HTTPMessageDelegate):
def headers_received(self, start_line, headers):
self.headers = headers
self.start_line = start_line
def data_received(self, chunk):
chunks.append(chunk)
def finish(self):
callback((self.start_line, self.headers, b''.join(chunks)))
conn = HTTP1Connection(stream, True)
conn.read_response(Delegate())
class HandlerBaseTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', self.__class__.Handler)])
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
class HelloWorldRequestHandler(RequestHandler):
def initialize(self, protocol="http"):
self.expected_protocol = protocol
def get(self):
if self.request.protocol != self.expected_protocol:
raise Exception("unexpected protocol")
self.finish("Hello world")
def post(self):
self.finish("Got %d bytes in POST" % len(self.request.body))
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
skipIfOldSSL = unittest.skipIf(
getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
"old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler,
dict(protocol="https"))])
class SSLTestMixin(object):
def get_ssl_options(self):
return dict(ssl_version=self.get_ssl_version(), # type: ignore
**AsyncHTTPSTestCase.get_ssl_options())
def get_ssl_version(self):
raise NotImplementedError()
def test_ssl(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
def test_large_post(self):
response = self.fetch('/',
method='POST',
body='A' * 5000)
self.assertEqual(response.body, b"Got 5000 bytes in POST")
def test_non_ssl_request(self):
# Make sure the server closes the connection when it gets a non-ssl
# connection, rather than waiting for a timeout or otherwise
# misbehaving.
with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
with ExpectLog(gen_log, 'Uncaught exception', required=False):
self.http_client.fetch(
self.get_url("/").replace('https:', 'http:'),
self.stop,
request_timeout=3600,
connect_timeout=3600)
response = self.wait()
self.assertEqual(response.code, 599)
def test_error_logging(self):
# No stack traces are logged for SSL errors.
with ExpectLog(gen_log, 'SSL Error') as expect_log:
self.http_client.fetch(
self.get_url("/").replace("https:", "http:"),
self.stop)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertFalse(expect_log.logged_stack)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_TLSv1
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class SSLContextTest(BaseSSLTest, SSLTestMixin):
def get_ssl_options(self):
context = ssl_options_to_context(
AsyncHTTPSTestCase.get_ssl_options(self))
assert isinstance(context, ssl.SSLContext)
return context
class BadSSLOptionsTest(unittest.TestCase):
def test_missing_arguments(self):
application = Application()
self.assertRaises(KeyError, HTTPServer, application, ssl_options={
"keyfile": "/__missing__.crt",
})
def test_missing_key(self):
"""A missing SSL key should cause an immediate exception."""
application = Application()
module_dir = os.path.dirname(__file__)
existing_certificate = os.path.join(module_dir, 'test.crt')
existing_key = os.path.join(module_dir, 'test.key')
self.assertRaises((ValueError, IOError),
HTTPServer, application, ssl_options={
"certfile": "/__mising__.crt",
})
self.assertRaises((ValueError, IOError),
HTTPServer, application, ssl_options={
"certfile": existing_certificate,
"keyfile": "/__missing__.key"
})
# This actually works because both files exist
HTTPServer(application, ssl_options={
"certfile": existing_certificate,
"keyfile": existing_key,
})
class MultipartTestHandler(RequestHandler):
def post(self):
self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
"argument": self.get_argument("argument"),
"filename": self.request.files["files"][0].filename,
"filebody": _unicode(self.request.files["files"][0]["body"]),
})
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
def get_handlers(self):
return [("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler)]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body, newline=b"\r\n"):
with closing(IOStream(socket.socket())) as stream:
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
stream.write(
newline.join(headers +
[utf8("Content-Length: %d" % len(body))]) +
newline + newline + body)
read_stream_body(stream, self.stop)
start_line, headers, body = self.wait()
return body
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch([
b"POST /multipart HTTP/1.0",
b"Content-Type: multipart/form-data; boundary=1234567890",
b"X-Header-encoding-test: \xe9",
],
b"\r\n".join([
b"Content-Disposition: form-data; name=argument",
b"",
u"\u00e1".encode("utf-8"),
b"--1234567890",
u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"),
b"",
u"\u00fa".encode("utf-8"),
b"--1234567890--",
b"",
]))
data = json_decode(response)
self.assertEqual(u"\u00e9", data["header"])
self.assertEqual(u"\u00e1", data["argument"])
self.assertEqual(u"\u00f3", data["filename"])
self.assertEqual(u"\u00fa", data["filebody"])
def test_newlines(self):
# We support both CRLF and bare LF as line separators.
for newline in (b"\r\n", b"\n"):
response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
newline=newline)
self.assertEqual(response, b'Hello world')
def test_100_continue(self):
# Run through a 100-continue interaction by hand:
# When given Expect: 100-continue, we get a 100 response after the
# headers, and then the real response after the body.
stream = IOStream(socket.socket())
stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
b"Content-Length: 1024",
b"Expect: 100-continue",
b"Connection: close",
b"\r\n"]), callback=self.stop)
self.wait()
stream.read_until(b"\r\n\r\n", self.stop)
data = self.wait()
self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
stream.write(b"a" * 1024)
stream.read_until(b"\r\n", self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
stream.read_until(b"\r\n\r\n", self.stop)
header_data = self.wait()
headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Got 1024 bytes in POST")
stream.close()
class EchoHandler(RequestHandler):
def get(self):
self.write(recursive_unicode(self.request.arguments))
def post(self):
self.write(recursive_unicode(self.request.arguments))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
fields = [
('method', str),
('uri', str),
('version', str),
('remote_ip', str),
('protocol', str),
('host', str),
('path', str),
('query', str),
]
for field, expected_type in fields:
self.check_type(field, getattr(self.request, field), expected_type)
self.check_type('header_key', list(self.request.headers.keys())[0], str)
self.check_type('header_value', list(self.request.headers.values())[0], str)
self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
# secure cookies
self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)
def post(self):
self.check_type('body', self.request.body, bytes)
self.write(self.errors)
def get(self):
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class HTTPServerTest(AsyncHTTPTestCase):
def get_app(self):
return Application([("/echo", EchoHandler),
("/typecheck", TypeCheckHandler),
("//doubleslash", EchoHandler),
])
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u"\u00e9"]})
def test_empty_query_string(self):
response = self.fetch("/echo?foo=&foo=")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u"", u""]})
def test_empty_post_parameters(self):
response = self.fetch("/echo", method="POST", body="foo=&bar=")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
response = self.fetch("/typecheck?foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
def test_double_slash(self):
# urlparse.urlsplit (which tornado.httpserver used to use
# incorrectly) would parse paths beginning with "//" as
# protocol-relative urls.
response = self.fetch("//doubleslash")
self.assertEqual(200, response.code)
self.assertEqual(json_decode(response.body), {})
def test_malformed_body(self):
# parse_qs is pretty forgiving, but it will fail on python 3
# if the data is not utf8. On python 2 parse_qs will work,
# but then the recursive_unicode call in EchoHandler will
# fail.
if str is bytes:
return
with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
response = self.fetch(
'/echo', method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body=b'\xe9')
self.assertEqual(200, response.code)
self.assertEqual(b'{}', response.body)
class HTTPServerRawTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/echo', EchoHandler),
])
def setUp(self):
super(HTTPServerRawTest, self).setUp()
self.stream = IOStream(socket.socket())
self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
def tearDown(self):
self.stream.close()
super(HTTPServerRawTest, self).tearDown()
def test_empty_request(self):
self.stream.close()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
def test_malformed_first_line_response(self):
with ExpectLog(gen_log, '.*Malformed HTTP request line'):
self.stream.write(b'asdf\r\n\r\n')
read_stream_body(self.stream, self.stop)
start_line, headers, response = self.wait()
self.assertEqual('HTTP/1.1', start_line.version)
self.assertEqual(400, start_line.code)
self.assertEqual('Bad Request', start_line.reason)
def test_malformed_first_line_log(self):
with ExpectLog(gen_log, '.*Malformed HTTP request line'):
self.stream.write(b'asdf\r\n\r\n')
# TODO: need an async version of ExpectLog so we don't need
# hard-coded timeouts here.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
self.stop)
self.wait()
def test_malformed_headers(self):
with ExpectLog(gen_log, '.*Malformed HTTP headers'):
self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
self.stop)
self.wait()
def test_chunked_request_body(self):
# Chunked requests are not widely supported and we don't have a way
# to generate them in AsyncHTTPClient, but HTTPServer will read them.
self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded
4
foo=
3
bar
0
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
start_line, headers, response = self.wait()
self.assertEqual(json_decode(response), {u'foo': [u'bar']})
def test_chunked_request_uppercase(self):
# As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is
# case-insensitive.
self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: Chunked
Content-Type: application/x-www-form-urlencoded
4
foo=
3
bar
0
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
start_line, headers, response = self.wait()
self.assertEqual(json_decode(response), {u'foo': [u'bar']})
def test_invalid_content_length(self):
with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'):
self.stream.write(b"""\
POST /echo HTTP/1.1
Content-Length: foo
bar
""".replace(b"\n", b"\r\n"))
self.stream.read_until_close(self.stop)
self.wait()
class XHeaderTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(remote_ip=self.request.remote_ip,
remote_protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(xheaders=True, trusted_downstream=['5.5.5.5'])
def test_ip_headers(self):
self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
"4.4.4.4")
valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
"4.4.4.4")
valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars)["remote_ip"],
"127.0.0.1")
invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars_list)["remote_ip"],
"127.0.0.1")
invalid_host = {"X-Real-IP": "www.google.com"}
self.assertEqual(
self.fetch_json("/", headers=invalid_host)["remote_ip"],
"127.0.0.1")
def test_trusted_downstream(self):
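# 5.5.5.5 appears in trusted_downstream (see get_httpserver_options above), so
# it is skipped when resolving X-Forwarded-For and 4.4.4.4 is reported instead.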
valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4, 5.5.5.5"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
"4.4.4.4")
def test_scheme_headers(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "http")
https_scheme = {"X-Scheme": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_scheme)["remote_protocol"],
"https")
https_forwarded = {"X-Forwarded-Proto": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_forwarded)["remote_protocol"],
"https")
bad_forwarded = {"X-Forwarded-Proto": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_forwarded)["remote_protocol"],
"http")
class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase):
def get_app(self):
return Application([('/', XHeaderTest.Handler)])
def get_httpserver_options(self):
output = super(SSLXHeaderTest, self).get_httpserver_options()
output['xheaders'] = True
return output
def test_request_without_xprotocol(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")
http_scheme = {"X-Scheme": "http"}
self.assertEqual(
self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http")
bad_scheme = {"X-Scheme": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https")
class ManualProtocolTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(protocol='https')
def test_manual_protocol(self):
self.assertEqual(self.fetch_json('/')['protocol'], 'https')
@unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin',
"unix sockets not supported on this platform")
class UnixSocketTest(AsyncTestCase):
"""HTTPServers can listen on Unix sockets too.
Why would you want to do this? Nginx can proxy to backends listening
on unix sockets, for one thing (and managing a namespace for unix
sockets can be easier than managing a bunch of TCP port numbers).
Unfortunately, there's no way to specify a unix socket in a url for
an HTTP client, so we have to test this by hand.
"""
def setUp(self):
super(UnixSocketTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
self.sockfile = os.path.join(self.tmpdir, "test.sock")
sock = netutil.bind_unix_socket(self.sockfile)
app = Application([("/hello", HelloWorldRequestHandler)])
self.server = HTTPServer(app)
self.server.add_socket(sock)
self.stream = IOStream(socket.socket(socket.AF_UNIX))
self.stream.connect(self.sockfile, self.stop)
self.wait()
def tearDown(self):
self.stream.close()
self.server.stop()
shutil.rmtree(self.tmpdir)
super(UnixSocketTest, self).tearDown()
def test_unix_socket(self):
self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
self.stream.read_until(b"\r\n", self.stop)
response = self.wait()
self.assertEqual(response, b"HTTP/1.1 200 OK\r\n")
self.stream.read_until(b"\r\n\r\n", self.stop)
headers = HTTPHeaders.parse(self.wait().decode('latin1'))
self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Hello world")
def test_unix_socket_bad_request(self):
# Unix sockets don't have remote addresses so they just return an
# empty string.
with ExpectLog(gen_log, "Malformed HTTP message from"):
self.stream.write(b"garbage\r\n\r\n")
self.stream.read_until_close(self.stop)
response = self.wait()
self.assertEqual(response, b"HTTP/1.1 400 Bad Request\r\n\r\n")
class KeepAliveTest(AsyncHTTPTestCase):
"""Tests various scenarios for HTTP 1.1 keep-alive support.
These tests don't use AsyncHTTPClient because we want to control
connection reuse and closing.
"""
def get_app(self):
class HelloHandler(RequestHandler):
def get(self):
self.finish('Hello world')
def post(self):
self.finish('Hello world')
class LargeHandler(RequestHandler):
def get(self):
# 512KB should be bigger than the socket buffers so it will
# be written out in chunks.
self.write(''.join(chr(i % 256) * 1024 for i in range(512)))
class FinishOnCloseHandler(RequestHandler):
@asynchronous
def get(self):
self.flush()
def on_connection_close(self):
# This is not very realistic, but finishing the request
# from the close callback has the right timing to mimic
# some errors seen in the wild.
self.finish('closed')
return Application([('/', HelloHandler),
('/large', LargeHandler),
('/finish_on_close', FinishOnCloseHandler)])
def setUp(self):
super(KeepAliveTest, self).setUp()
self.http_version = b'HTTP/1.1'
def tearDown(self):
# We just closed the client side of the socket; let the IOLoop run
# once to make sure the server side got the message.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
if hasattr(self, 'stream'):
self.stream.close()
super(KeepAliveTest, self).tearDown()
# The next few methods are a crude manual http client
def connect(self):
self.stream = IOStream(socket.socket())
self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
def read_headers(self):
self.stream.read_until(b'\r\n', self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line)
self.stream.read_until(b'\r\n\r\n', self.stop)
header_bytes = self.wait()
headers = HTTPHeaders.parse(header_bytes.decode('latin1'))
return headers
def read_response(self):
self.headers = self.read_headers()
self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
body = self.wait()
self.assertEqual(b'Hello world', body)
def close(self):
self.stream.close()
del self.stream
def test_two_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.close()
def test_request_close(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.assertEqual(self.headers['Connection'], 'close')
self.close()
# keepalive is supported for http 1.0 too, but it's opt-in
def test_http10(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.assertTrue('Connection' not in self.headers)
self.close()
def test_http10_keepalive(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_http10_keepalive_extra_crlf(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_pipelined_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
self.read_response()
self.read_response()
self.close()
def test_pipelined_cancel(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
# only read once
self.read_response()
self.close()
def test_cancel_during_download(self):
self.connect()
self.stream.write(b'GET /large HTTP/1.1\r\n\r\n')
self.read_headers()
self.stream.read_bytes(1024, self.stop)
self.wait()
self.close()
def test_finish_while_closed(self):
self.connect()
self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
self.read_headers()
self.close()
def test_keepalive_chunked(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n0\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
class GzipBaseTest(object):
def get_app(self):
return Application([('/', EchoHandler)])
def post_gzip(self, body):
bytesio = BytesIO()
gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
gzip_file.write(utf8(body))
gzip_file.close()
compressed_body = bytesio.getvalue()
return self.fetch('/', method='POST', body=compressed_body,
headers={'Content-Encoding': 'gzip'})
def test_uncompressed(self):
response = self.fetch('/', method='POST', body='foo=bar')
self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
def get_httpserver_options(self):
return dict(decompress_request=True)
def test_gzip(self):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
def test_gzip_unsupported(self):
# Gzip support is opt-in; without it the server fails to parse
# the body (but parsing form bodies is currently just a log message,
# not a fatal error).
with ExpectLog(gen_log, "Unsupported Content-Encoding"):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {})
class StreamingChunkSizeTest(AsyncHTTPTestCase):
# 50 characters long, and repetitive so it can be compressed.
BODY = b'01234567890123456789012345678901234567890123456789'
CHUNK_SIZE = 16
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient()
def get_httpserver_options(self):
return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def headers_received(self, start_line, headers):
self.chunk_lengths = []
def data_received(self, chunk):
self.chunk_lengths.append(len(chunk))
def finish(self):
response_body = utf8(json_encode(self.chunk_lengths))
self.connection.write_headers(
ResponseStartLine('HTTP/1.1', 200, 'OK'),
HTTPHeaders({'Content-Length': str(len(response_body))}))
self.connection.write(response_body)
self.connection.finish()
def get_app(self):
class App(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
return StreamingChunkSizeTest.MessageDelegate(request_conn)
return App()
def fetch_chunk_sizes(self, **kwargs):
response = self.fetch('/', method='POST', **kwargs)
response.rethrow()
chunks = json_decode(response.body)
self.assertEqual(len(self.BODY), sum(chunks))
for chunk_size in chunks:
self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
'oversized chunk: ' + str(chunks))
self.assertGreater(chunk_size, 0,
'empty chunk: ' + str(chunks))
return chunks
def compress(self, body):
bytesio = BytesIO()
gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
gzfile.write(body)
gzfile.close()
compressed = bytesio.getvalue()
if len(compressed) >= len(body):
raise Exception("body did not shrink when compressed")
return compressed
def test_regular_body(self):
chunks = self.fetch_chunk_sizes(body=self.BODY)
# Without compression we know exactly what to expect.
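# 50-byte body split at CHUNK_SIZE == 16: 16 + 16 + 16 + 2 == 50.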
self.assertEqual([16, 16, 16, 2], chunks)
def test_compressed_body(self):
self.fetch_chunk_sizes(body=self.compress(self.BODY),
headers={'Content-Encoding': 'gzip'})
# Compression creates irregular boundaries so the assertions
# in fetch_chunk_sizes are as specific as we can get.
def test_chunked_body(self):
def body_producer(write):
write(self.BODY[:20])
write(self.BODY[20:])
chunks = self.fetch_chunk_sizes(body_producer=body_producer)
# HTTP chunk boundaries translate to application-visible breaks
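# The producer writes 20 then 30 bytes; each write is re-split at CHUNK_SIZE,
# so 20 -> 16 + 4 and 30 -> 16 + 14.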
self.assertEqual([16, 4, 16, 14], chunks)
def test_chunked_compressed(self):
compressed = self.compress(self.BODY)
self.assertGreater(len(compressed), 20)
def body_producer(write):
write(compressed[:20])
write(compressed[20:])
self.fetch_chunk_sizes(body_producer=body_producer,
headers={'Content-Encoding': 'gzip'})
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(max_header_size=1024)
def test_small_headers(self):
response = self.fetch("/", headers={'X-Filler': 'a' * 100})
response.rethrow()
self.assertEqual(response.body, b"Hello world")
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read", required=False):
response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
# 431 is "Request Header Fields Too Large", defined in RFC
# 6585. However, many implementations just close the
# connection in this case, resulting in a 599.
self.assertIn(response.code, (431, 599))
@skipOnTravis
class IdleTimeoutTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(idle_connection_timeout=0.1)
def setUp(self):
super(IdleTimeoutTest, self).setUp()
self.streams = []
def tearDown(self):
super(IdleTimeoutTest, self).tearDown()
for stream in self.streams:
stream.close()
def connect(self):
stream = IOStream(socket.socket())
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
self.streams.append(stream)
return stream
def test_unused_connection(self):
stream = self.connect()
stream.set_close_callback(self.stop)
self.wait()
def test_idle_after_use(self):
stream = self.connect()
stream.set_close_callback(lambda: self.stop("closed"))
# Use the connection twice to make sure keep-alives are working
for i in range(2):
stream.write(b"GET / HTTP/1.1\r\n\r\n")
stream.read_until(b"\r\n\r\n", self.stop)
self.wait()
stream.read_bytes(11, self.stop)
data = self.wait()
self.assertEqual(data, b"Hello world")
# Now let the timeout trigger and close the connection.
data = self.wait()
self.assertEqual(data, "closed")
class BodyLimitsTest(AsyncHTTPTestCase):
def get_app(self):
class BufferedHandler(RequestHandler):
def put(self):
self.write(str(len(self.request.body)))
@stream_request_body
class StreamingHandler(RequestHandler):
def initialize(self):
self.bytes_read = 0
def prepare(self):
if 'expected_size' in self.request.arguments:
self.request.connection.set_max_body_size(
int(self.get_argument('expected_size')))
if 'body_timeout' in self.request.arguments:
self.request.connection.set_body_timeout(
float(self.get_argument('body_timeout')))
def data_received(self, data):
self.bytes_read += len(data)
def put(self):
self.write(str(self.bytes_read))
return Application([('/buffered', BufferedHandler),
('/streaming', StreamingHandler)])
def get_httpserver_options(self):
return dict(body_timeout=3600, max_body_size=4096)
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient()
def test_small_body(self):
response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
def test_large_body_buffered(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 400)
def test_large_body_buffered_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/buffered', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
# this test is flaky on windows; accept 400 (expected) or 599
self.assertIn(response.code, [400, 599])
def test_large_body_streaming(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 400)
def test_large_body_streaming_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/streaming', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
# this test is flaky on windows; accept 400 (expected) or 599
self.assertIn(response.code, [400, 599])
def test_large_body_streaming_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body=b'a' * 10240)
self.assertEqual(response.body, b'10240')
def test_large_body_streaming_chunked_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.body, b'10240')
@gen_test
def test_timeout(self):
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream because AsyncHTTPClient won't let us read a
# response without finishing a body.
stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
b'Content-Length: 42\r\n\r\n')
with ExpectLog(gen_log, 'Timeout reading body'):
response = yield stream.read_until_close()
self.assertEqual(response, b'')
finally:
stream.close()
@gen_test
def test_body_size_override_reset(self):
# The max_body_size override is reset between requests.
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream so we can make sure it's all on one connection.
stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
stream.write(b'a' * 10240)
start_line, headers, response = yield gen.Task(read_stream_body, stream)
self.assertEqual(response, b'10240')
# Without the ?expected_size parameter, we get the old default value
stream.write(b'PUT /streaming HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
with ExpectLog(gen_log, '.*Content-Length too long'):
data = yield stream.read_until_close()
self.assertEqual(data, b'HTTP/1.1 400 Bad Request\r\n\r\n')
finally:
stream.close()
class LegacyInterfaceTest(AsyncHTTPTestCase):
def get_app(self):
# The old request_callback interface does not implement the
# delegate interface, and writes its response via request.write
# instead of request.connection.write_headers.
def handle_request(request):
self.http1 = request.version.startswith("HTTP/1.")
if not self.http1:
# This test will be skipped if we're using HTTP/2,
# so just close it out cleanly using the modern interface.
request.connection.write_headers(
ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
message = b"Hello world"
request.write(utf8("HTTP/1.1 200 OK\r\n"
"Content-Length: %d\r\n\r\n" % len(message)))
request.write(message)
request.finish()
return handle_request
def test_legacy_interface(self):
response = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(response.body, b"Hello world")
|
|
"""Shelly entity helper."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from dataclasses import dataclass
import logging
from typing import Any, Final, cast
from aioshelly.block_device import Block
import async_timeout
from homeassistant.components.sensor import ATTR_STATE_CLASS
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import (
device_registry,
entity,
entity_registry,
update_coordinator,
)
from homeassistant.helpers.entity import DeviceInfo, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import StateType
from . import BlockDeviceWrapper, RpcDeviceWrapper, ShellyDeviceRestWrapper
from .const import (
AIOSHELLY_DEVICE_TIMEOUT_SEC,
BLOCK,
DATA_CONFIG_ENTRY,
DOMAIN,
REST,
RPC,
)
from .utils import (
async_remove_shelly_entity,
get_block_entity_name,
get_rpc_entity_name,
get_rpc_key_instances,
)
_LOGGER: Final = logging.getLogger(__name__)
async def async_setup_entry_attribute_entities(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
sensors: dict[tuple[str, str], BlockAttributeDescription],
sensor_class: Callable,
) -> None:
"""Set up entities for attributes."""
wrapper: BlockDeviceWrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][
config_entry.entry_id
][BLOCK]
if wrapper.device.initialized:
await async_setup_block_attribute_entities(
hass, async_add_entities, wrapper, sensors, sensor_class
)
else:
await async_restore_block_attribute_entities(
hass, config_entry, async_add_entities, wrapper, sensors, sensor_class
)
async def async_setup_block_attribute_entities(
hass: HomeAssistant,
async_add_entities: AddEntitiesCallback,
wrapper: BlockDeviceWrapper,
sensors: dict[tuple[str, str], BlockAttributeDescription],
sensor_class: Callable,
) -> None:
"""Set up entities for block attributes."""
blocks = []
assert wrapper.device.blocks
for block in wrapper.device.blocks:
for sensor_id in block.sensor_ids:
description = sensors.get((block.type, sensor_id))
if description is None:
continue
# Filter out non-existing sensors and sensors without a value
if getattr(block, sensor_id, None) in (-1, None):
continue
            # Filter out and remove entities that, according to the device settings, should not be created
if description.removal_condition and description.removal_condition(
wrapper.device.settings, block
):
domain = sensor_class.__module__.split(".")[-1]
unique_id = f"{wrapper.mac}-{block.description}-{sensor_id}"
await async_remove_shelly_entity(hass, domain, unique_id)
else:
blocks.append((block, sensor_id, description))
if not blocks:
return
async_add_entities(
[
sensor_class(wrapper, block, sensor_id, description)
for block, sensor_id, description in blocks
]
)
async def async_restore_block_attribute_entities(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
wrapper: BlockDeviceWrapper,
sensors: dict[tuple[str, str], BlockAttributeDescription],
sensor_class: Callable,
) -> None:
"""Restore block attributes entities."""
entities = []
ent_reg = await entity_registry.async_get_registry(hass)
entries = entity_registry.async_entries_for_config_entry(
ent_reg, config_entry.entry_id
)
domain = sensor_class.__module__.split(".")[-1]
for entry in entries:
if entry.domain != domain:
continue
attribute = entry.unique_id.split("-")[-1]
description = BlockAttributeDescription(
name="",
icon=entry.original_icon,
unit=entry.unit_of_measurement,
device_class=entry.original_device_class,
)
entities.append(
sensor_class(wrapper, None, attribute, description, entry, sensors)
)
if not entities:
return
async_add_entities(entities)
async def async_setup_entry_rpc(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
sensors: dict[str, RpcAttributeDescription],
sensor_class: Callable,
) -> None:
"""Set up entities for REST sensors."""
wrapper: RpcDeviceWrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][
config_entry.entry_id
][RPC]
entities = []
for sensor_id in sensors:
description = sensors[sensor_id]
key_instances = get_rpc_key_instances(wrapper.device.status, description.key)
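        # get_rpc_key_instances expands a generic key into the concrete
        # instances present in the device status, e.g. (hypothetical)
        # "switch" -> ["switch:0", "switch:1"].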
for key in key_instances:
# Filter non-existing sensors
if description.sub_key not in wrapper.device.status[key]:
continue
            # Filter out and remove entities that, according to the device config, should not be created
if description.removal_condition and description.removal_condition(
wrapper.device.config, key
):
domain = sensor_class.__module__.split(".")[-1]
unique_id = f"{wrapper.mac}-{key}-{sensor_id}"
await async_remove_shelly_entity(hass, domain, unique_id)
else:
entities.append((key, sensor_id, description))
if not entities:
return
async_add_entities(
[
sensor_class(wrapper, key, sensor_id, description)
for key, sensor_id, description in entities
]
)
async def async_setup_entry_rest(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
sensors: dict[str, RestAttributeDescription],
sensor_class: Callable,
) -> None:
"""Set up entities for REST sensors."""
wrapper: ShellyDeviceRestWrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][
config_entry.entry_id
][REST]
entities = []
for sensor_id in sensors:
description = sensors.get(sensor_id)
if not wrapper.device.settings.get("sleep_mode"):
entities.append((sensor_id, description))
if not entities:
return
async_add_entities(
[
sensor_class(wrapper, sensor_id, description)
for sensor_id, description in entities
]
)
@dataclass
class BlockAttributeDescription:
"""Class to describe a sensor."""
name: str
    icon: str | None = None
    # unit is either a static string or a callable taking the block attribute info dict
    unit: None | str | Callable[[dict], str] = None
value: Callable[[Any], Any] = lambda val: val
device_class: str | None = None
state_class: str | None = None
default_enabled: bool = True
available: Callable[[Block], bool] | None = None
# Callable (settings, block), return true if entity should be removed
removal_condition: Callable[[dict, Block], bool] | None = None
extra_state_attributes: Callable[[Block], dict | None] | None = None
entity_category: EntityCategory | None = None
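# Illustrative only: a hypothetical description for a battery attribute,
# with invented values (field semantics follow the dataclass above):
#
#   BlockAttributeDescription(
#       name="Battery",
#       unit="%",
#       device_class="battery",
#       removal_condition=lambda settings, _block: settings.get("external_power") == 1,
#   )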
@dataclass
class RpcAttributeDescription:
"""Class to describe a RPC sensor."""
key: str
sub_key: str
name: str
icon: str | None = None
unit: str | None = None
value: Callable[[Any, Any], Any] | None = None
device_class: str | None = None
state_class: str | None = None
default_enabled: bool = True
available: Callable[[dict], bool] | None = None
removal_condition: Callable[[dict, str], bool] | None = None
extra_state_attributes: Callable[[dict, dict], dict | None] | None = None
entity_category: EntityCategory | None = None
@dataclass
class RestAttributeDescription:
"""Class to describe a REST sensor."""
name: str
icon: str | None = None
unit: str | None = None
value: Callable[[dict, Any], Any] | None = None
device_class: str | None = None
state_class: str | None = None
default_enabled: bool = True
extra_state_attributes: Callable[[dict], dict | None] | None = None
entity_category: EntityCategory | None = None
class ShellyBlockEntity(entity.Entity):
"""Helper class to represent a block entity."""
def __init__(self, wrapper: BlockDeviceWrapper, block: Block) -> None:
"""Initialize Shelly entity."""
self.wrapper = wrapper
self.block = block
self._name = get_block_entity_name(wrapper.device, block)
@property
def name(self) -> str:
"""Name of entity."""
return self._name
@property
def should_poll(self) -> bool:
"""If device should be polled."""
return False
@property
def device_info(self) -> DeviceInfo:
"""Device info."""
return {
"connections": {(device_registry.CONNECTION_NETWORK_MAC, self.wrapper.mac)}
}
@property
def available(self) -> bool:
"""Available."""
return self.wrapper.last_update_success
@property
def unique_id(self) -> str:
"""Return unique ID of entity."""
return f"{self.wrapper.mac}-{self.block.description}"
async def async_added_to_hass(self) -> None:
"""When entity is added to HASS."""
self.async_on_remove(self.wrapper.async_add_listener(self._update_callback))
async def async_update(self) -> None:
"""Update entity with latest info."""
await self.wrapper.async_request_refresh()
@callback
def _update_callback(self) -> None:
"""Handle device update."""
self.async_write_ha_state()
async def set_state(self, **kwargs: Any) -> Any:
"""Set block state (HTTP request)."""
_LOGGER.debug("Setting state for entity %s, state: %s", self.name, kwargs)
try:
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
return await self.block.set_state(**kwargs)
except (asyncio.TimeoutError, OSError) as err:
_LOGGER.error(
"Setting state for entity %s failed, state: %s, error: %s",
self.name,
kwargs,
repr(err),
)
self.wrapper.last_update_success = False
return None
class ShellyRpcEntity(entity.Entity):
"""Helper class to represent a rpc entity."""
def __init__(self, wrapper: RpcDeviceWrapper, key: str) -> None:
"""Initialize Shelly entity."""
self.wrapper = wrapper
self.key = key
self._attr_should_poll = False
self._attr_device_info = {
"connections": {(device_registry.CONNECTION_NETWORK_MAC, wrapper.mac)}
}
self._attr_unique_id = f"{wrapper.mac}-{key}"
self._attr_name = get_rpc_entity_name(wrapper.device, key)
@property
def available(self) -> bool:
"""Available."""
return self.wrapper.device.connected
async def async_added_to_hass(self) -> None:
"""When entity is added to HASS."""
self.async_on_remove(self.wrapper.async_add_listener(self._update_callback))
async def async_update(self) -> None:
"""Update entity with latest info."""
await self.wrapper.async_request_refresh()
@callback
def _update_callback(self) -> None:
"""Handle device update."""
self.async_write_ha_state()
async def call_rpc(self, method: str, params: Any) -> Any:
"""Call RPC method."""
_LOGGER.debug(
"Call RPC for entity %s, method: %s, params: %s",
self.name,
method,
params,
)
try:
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
return await self.wrapper.device.call_rpc(method, params)
except asyncio.TimeoutError as err:
_LOGGER.error(
"Call RPC for entity %s failed, method: %s, params: %s, error: %s",
self.name,
method,
params,
repr(err),
)
self.wrapper.last_update_success = False
return None
class ShellyBlockAttributeEntity(ShellyBlockEntity, entity.Entity):
"""Helper class to represent a block attribute."""
def __init__(
self,
wrapper: BlockDeviceWrapper,
block: Block,
attribute: str,
description: BlockAttributeDescription,
) -> None:
"""Initialize sensor."""
super().__init__(wrapper, block)
self.attribute = attribute
self.description = description
unit = self.description.unit
if callable(unit):
unit = unit(block.info(attribute))
self._unit: None | str | Callable[[dict], str] = unit
self._unique_id: str = f"{super().unique_id}-{self.attribute}"
self._name = get_block_entity_name(wrapper.device, block, self.description.name)
@property
def unique_id(self) -> str:
"""Return unique ID of entity."""
return self._unique_id
@property
def name(self) -> str:
"""Name of sensor."""
return self._name
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if it should be enabled by default."""
return self.description.default_enabled
@property
def attribute_value(self) -> StateType:
"""Value of sensor."""
if (value := getattr(self.block, self.attribute)) is None:
return None
return cast(StateType, self.description.value(value))
@property
def device_class(self) -> str | None:
"""Device class of sensor."""
return self.description.device_class
@property
def icon(self) -> str | None:
"""Icon of sensor."""
return self.description.icon
@property
def available(self) -> bool:
"""Available."""
available = super().available
if not available or not self.description.available:
return available
return self.description.available(self.block)
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return the state attributes."""
if self.description.extra_state_attributes is None:
return None
return self.description.extra_state_attributes(self.block)
@property
def entity_category(self) -> EntityCategory | None:
"""Return category of entity."""
return self.description.entity_category
class ShellyRestAttributeEntity(update_coordinator.CoordinatorEntity):
"""Class to load info from REST."""
def __init__(
self,
wrapper: BlockDeviceWrapper,
attribute: str,
description: RestAttributeDescription,
) -> None:
"""Initialize sensor."""
super().__init__(wrapper)
self.wrapper = wrapper
self.attribute = attribute
self.description = description
self._name = get_block_entity_name(wrapper.device, None, self.description.name)
self._last_value = None
@property
def name(self) -> str:
"""Name of sensor."""
return self._name
@property
def device_info(self) -> DeviceInfo:
"""Device info."""
return {
"connections": {(device_registry.CONNECTION_NETWORK_MAC, self.wrapper.mac)}
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if it should be enabled by default."""
return self.description.default_enabled
@property
def available(self) -> bool:
"""Available."""
return self.wrapper.last_update_success
@property
def attribute_value(self) -> StateType:
"""Value of sensor."""
if callable(self.description.value):
self._last_value = self.description.value(
self.wrapper.device.status, self._last_value
)
return self._last_value
@property
def device_class(self) -> str | None:
"""Device class of sensor."""
return self.description.device_class
@property
def icon(self) -> str | None:
"""Icon of sensor."""
return self.description.icon
@property
def unique_id(self) -> str:
"""Return unique ID of entity."""
return f"{self.wrapper.mac}-{self.attribute}"
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return the state attributes."""
if self.description.extra_state_attributes is None:
return None
return self.description.extra_state_attributes(self.wrapper.device.status)
@property
def entity_category(self) -> EntityCategory | None:
"""Return category of entity."""
return self.description.entity_category
class ShellyRpcAttributeEntity(ShellyRpcEntity, entity.Entity):
"""Helper class to represent a rpc attribute."""
def __init__(
self,
wrapper: RpcDeviceWrapper,
key: str,
attribute: str,
description: RpcAttributeDescription,
) -> None:
"""Initialize sensor."""
super().__init__(wrapper, key)
self.sub_key = description.sub_key
self.attribute = attribute
self.description = description
self._attr_unique_id = f"{super().unique_id}-{attribute}"
self._attr_name = get_rpc_entity_name(wrapper.device, key, description.name)
self._attr_entity_registry_enabled_default = description.default_enabled
self._attr_device_class = description.device_class
self._attr_icon = description.icon
self._last_value = None
@property
def attribute_value(self) -> StateType:
"""Value of sensor."""
if callable(self.description.value):
self._last_value = self.description.value(
self.wrapper.device.status[self.key][self.sub_key], self._last_value
)
else:
self._last_value = self.wrapper.device.status[self.key][self.sub_key]
return self._last_value
@property
def available(self) -> bool:
"""Available."""
available = super().available
if not available or not self.description.available:
return available
return self.description.available(
self.wrapper.device.status[self.key][self.sub_key]
)
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return the state attributes."""
if self.description.extra_state_attributes is None:
return None
assert self.wrapper.device.shelly
return self.description.extra_state_attributes(
self.wrapper.device.status[self.key][self.sub_key],
self.wrapper.device.shelly,
)
@property
def entity_category(self) -> EntityCategory | None:
"""Return category of entity."""
return self.description.entity_category
class ShellySleepingBlockAttributeEntity(ShellyBlockAttributeEntity, RestoreEntity):
"""Represent a shelly sleeping block attribute entity."""
# pylint: disable=super-init-not-called
def __init__(
self,
wrapper: BlockDeviceWrapper,
block: Block | None,
attribute: str,
description: BlockAttributeDescription,
entry: entity_registry.RegistryEntry | None = None,
sensors: dict[tuple[str, str], BlockAttributeDescription] | None = None,
) -> None:
"""Initialize the sleeping sensor."""
self.sensors = sensors
self.last_state: StateType = None
self.wrapper = wrapper
self.attribute = attribute
self.block: Block | None = block # type: ignore[assignment]
self.description = description
self._unit = self.description.unit
if block is not None:
if callable(self._unit):
self._unit = self._unit(block.info(attribute))
self._unique_id = f"{self.wrapper.mac}-{block.description}-{attribute}"
self._name = get_block_entity_name(
self.wrapper.device, block, self.description.name
)
elif entry is not None:
self._unique_id = entry.unique_id
self._name = cast(str, entry.original_name)
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
last_state = await self.async_get_last_state()
if last_state is not None:
self.last_state = last_state.state
self.description.state_class = last_state.attributes.get(ATTR_STATE_CLASS)
@callback
def _update_callback(self) -> None:
"""Handle device update."""
if (
self.block is not None
or not self.wrapper.device.initialized
or self.sensors is None
):
super()._update_callback()
return
_, entity_block, entity_sensor = self.unique_id.split("-")
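        # unique_id has the form "<mac>-<block description>-<attribute>",
        # e.g. (hypothetical) "AABBCCDDEEFF-sensor_0-battery" gives
        # entity_block="sensor_0" and entity_sensor="battery".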
assert self.wrapper.device.blocks
for block in self.wrapper.device.blocks:
if block.description != entity_block:
continue
for sensor_id in block.sensor_ids:
if sensor_id != entity_sensor:
continue
description = self.sensors.get((block.type, sensor_id))
if description is None:
continue
self.block = block
self.description = description
_LOGGER.debug("Entity %s attached to block", self.name)
super()._update_callback()
return
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.io.graph_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import os
import random
import tempfile
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import server_lib
_VALID_FILE_PATTERN = "VALID"
_VALID_FILE_PATTERN_2 = "VALID_2"
_FILE_NAMES = [b"abc", b"def", b"ghi", b"jkl"]
_FILE_NAMES_2 = [b"mno", b"pqr"]
_INVALID_FILE_PATTERN = "INVALID"
class GraphIOTest(test.TestCase):
def _mock_glob(self, pattern):
if _VALID_FILE_PATTERN == pattern:
return _FILE_NAMES
if _VALID_FILE_PATTERN_2 == pattern:
return _FILE_NAMES_2
self.assertEqual(_INVALID_FILE_PATTERN, pattern)
return []
def setUp(self):
super(GraphIOTest, self).setUp()
random.seed(42)
self._orig_glob = gfile.Glob
gfile.Glob = self._mock_glob
def tearDown(self):
gfile.Glob = self._orig_glob
super(GraphIOTest, self).tearDown()
def test_dequeue_batch_value_errors(self):
default_batch_size = 17
queue_capacity = 1234
num_threads = 3
name = "my_batch"
self.assertRaisesRegexp(
ValueError,
"No files match",
graph_io.read_batch_examples,
_INVALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=num_threads,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid batch_size",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
None,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=num_threads,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid batch_size",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
-1,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=num_threads,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid batch_size",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=default_batch_size,
num_threads=num_threads,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid queue_capacity",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=None,
num_threads=num_threads,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid num_threads",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=None,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid num_threads",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=-1,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid batch_size",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
queue_capacity + 1,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=1,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid num_epochs",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=-1,
queue_capacity=queue_capacity,
num_threads=1,
name=name)
self.assertRaisesRegexp(
ValueError,
"Invalid read_batch_size",
graph_io.read_batch_examples,
_VALID_FILE_PATTERN,
default_batch_size,
io_ops.TFRecordReader,
False,
num_epochs=None,
queue_capacity=queue_capacity,
num_threads=1,
read_batch_size=0,
name=name)
def test_batch_record_features(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
shape = (0,)
features = {
"feature":
parsing_ops.FixedLenFeature(
shape=shape, dtype=dtypes_lib.float32)
}
with ops.Graph().as_default() as g, self.test_session(graph=g) as sess:
features = graph_io.read_batch_record_features(
_VALID_FILE_PATTERN,
batch_size,
features,
randomize_input=False,
queue_capacity=queue_capacity,
reader_num_threads=2,
name=name)
self.assertTrue("feature" in features,
"'feature' missing from %s." % features.keys())
feature = features["feature"]
self.assertEqual("%s/fifo_queue_1_Dequeue:0" % name, feature.name)
self.assertAllEqual((batch_size,) + shape, feature.get_shape().as_list())
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/fifo_queue" % name
parse_example_queue_name = "%s/fifo_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueueV2",
"%s/read/TFRecordReaderV2" % name: "TFRecordReaderV2",
example_queue_name: "FIFOQueueV2",
parse_example_queue_name: "FIFOQueueV2",
name: "QueueDequeueManyV2"
}, g)
self.assertAllEqual(_FILE_NAMES, sess.run(["%s:0" % file_names_name])[0])
self.assertEqual(queue_capacity,
op_nodes[example_queue_name].attr["capacity"].i)
def test_one_epoch(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as sess:
inputs = graph_io.read_batch_examples(
_VALID_FILE_PATTERN,
batch_size,
reader=io_ops.TFRecordReader,
randomize_input=True,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), inputs.get_shape().as_list())
self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_name_queue_limit_name = ("%s/limit_epochs/epochs" %
file_name_queue_name)
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueueV2",
"%s/read/TFRecordReaderV2" % name: "TFRecordReaderV2",
example_queue_name: "RandomShuffleQueueV2",
name: "QueueDequeueUpToV2",
file_name_queue_limit_name: "VariableV2"
}, g)
self.assertEqual(
set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0]))
self.assertEqual(queue_capacity,
op_nodes[example_queue_name].attr["capacity"].i)
def test_batch_randomized_multiple_globs(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as sess:
inputs = graph_io.read_batch_examples(
[_VALID_FILE_PATTERN, _VALID_FILE_PATTERN_2],
batch_size,
reader=io_ops.TFRecordReader,
randomize_input=True,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((batch_size,), inputs.get_shape().as_list())
self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueueV2",
"%s/read/TFRecordReaderV2" % name: "TFRecordReaderV2",
example_queue_name: "RandomShuffleQueueV2",
name: "QueueDequeueManyV2"
}, g)
self.assertEqual(
set(_FILE_NAMES + _FILE_NAMES_2),
set(sess.run(["%s:0" % file_names_name])[0]))
self.assertEqual(queue_capacity,
op_nodes[example_queue_name].attr["capacity"].i)
def _create_temp_file(self, lines):
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "temp_file")
gfile.Open(filename, "w").write(lines)
return filename
def _create_sorted_temp_files(self, lines_list):
tempdir = tempfile.mkdtemp()
filenames = []
for i, lines in enumerate(lines_list):
filename = os.path.join(tempdir, "temp_file%05d" % i)
gfile.Open(filename, "w").write(lines)
filenames.append(filename)
return filenames
def test_read_text_lines(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file("ABC\nDEF\nGHK\n")
batch_size = 1
queue_capacity = 5
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = graph_io.read_batch_examples(
filename,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run(inputs), [b"ABC"])
self.assertAllEqual(session.run(inputs), [b"DEF"])
self.assertAllEqual(session.run(inputs), [b"GHK"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def _create_file_from_list_of_features(self, lines):
json_lines = [
"".join([
'{"features": { "feature": { "sequence": {',
'"bytes_list": { "value": ["', base64.b64encode(l).decode("ascii"),
'"]}}}}}\n'
]) for l in lines
]
return self._create_temp_file("".join(json_lines))
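  # For example, an input line b"abc" is base64-encoded to "YWJj", producing
  # the JSON record
  #   {"features": { "feature": { "sequence": {"bytes_list": { "value": ["YWJj"]}}}}}
  # which decode_json_example converts back to a serialized Example proto.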
def test_read_text_lines_large(self):
gfile.Glob = self._orig_glob
sequence_prefix = "abcdefghijklmnopqrstuvwxyz123456789"
num_records = 49999
lines = [
"".join([sequence_prefix, str(l)]).encode("ascii")
for l in xrange(num_records)
]
filename = self._create_file_from_list_of_features(lines)
batch_size = 10000
queue_capacity = 100000
name = "my_large_batch"
features = {"sequence": parsing_ops.FixedLenFeature([], dtypes_lib.string)}
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
keys, result = graph_io.read_keyed_batch_features(
filename,
batch_size,
features,
io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
num_enqueue_threads=2,
parse_fn=parsing_ops.decode_json_example,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertEqual(1, len(result))
self.assertAllEqual((None,), result["sequence"].get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
data = []
try:
while not coord.should_stop():
data.append(session.run(result))
except errors.OutOfRangeError:
pass
finally:
coord.request_stop()
coord.join(threads)
parsed_records = [
item for sublist in [d["sequence"] for d in data] for item in sublist
]
# Check that the number of records matches expected and all records
# are present.
self.assertEqual(len(parsed_records), num_records)
self.assertEqual(set(parsed_records), set(lines))
def test_read_batch_features_maintains_order(self):
"""Make sure that examples are read in the right order.
When randomize_input=False, num_enqueue_threads=1 and reader_num_threads=1
read_keyed_batch_features() should read the examples in the same order as
they appear in the file.
"""
gfile.Glob = self._orig_glob
num_records = 1000
lines = ["".join(str(l)).encode("ascii") for l in xrange(num_records)]
filename = self._create_file_from_list_of_features(lines)
batch_size = 10
queue_capacity = 1000
name = "my_large_batch"
features = {"sequence": parsing_ops.FixedLenFeature([], dtypes_lib.string)}
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
result = graph_io.read_batch_features(
filename,
batch_size,
features,
io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
reader_num_threads=1,
num_enqueue_threads=1,
parse_fn=parsing_ops.decode_json_example,
name=name)
self.assertEqual(1, len(result))
self.assertAllEqual((None,), result["sequence"].get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
data = []
try:
while not coord.should_stop():
data.append(session.run(result))
except errors.OutOfRangeError:
pass
finally:
coord.request_stop()
coord.join(threads)
parsed_records = [
item for sublist in [d["sequence"] for d in data] for item in sublist
]
# Check that the number of records matches expected and all records
# are present in the right order.
self.assertEqual(len(parsed_records), num_records)
self.assertEqual(parsed_records, lines)
def test_read_text_lines_multifile(self):
gfile.Glob = self._orig_glob
filenames = self._create_sorted_temp_files(["ABC\n", "DEF\nGHK\n"])
batch_size = 1
queue_capacity = 5
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = graph_io.read_batch_examples(
filenames,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/fifo_queue" % name
test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueueV2",
"%s/read/TextLineReaderV2" % name: "TextLineReaderV2",
example_queue_name: "FIFOQueueV2",
name: "QueueDequeueUpToV2"
}, g)
self.assertAllEqual(session.run(inputs), [b"ABC"])
self.assertAllEqual(session.run(inputs), [b"DEF"])
self.assertAllEqual(session.run(inputs), [b"GHK"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def test_read_text_lines_multifile_with_shared_queue(self):
gfile.Glob = self._orig_glob
filenames = self._create_sorted_temp_files(["ABC\n", "DEF\nGHK\n"])
batch_size = 1
queue_capacity = 5
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
keys, inputs = graph_io.read_keyed_batch_examples_shared_queue(
filenames,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
self.assertEqual("%s:1" % name, inputs.name)
example_queue_name = "%s/fifo_queue" % name
worker_file_name_queue_name = "%s/file_name_queue/fifo_queue" % name
test_util.assert_ops_in_graph({
"%s/read/TextLineReaderV2" % name: "TextLineReaderV2",
example_queue_name: "FIFOQueueV2",
worker_file_name_queue_name: "FIFOQueueV2",
name: "QueueDequeueUpToV2"
}, g)
self.assertAllEqual(session.run(inputs), [b"ABC"])
self.assertAllEqual(session.run(inputs), [b"DEF"])
self.assertAllEqual(session.run(inputs), [b"GHK"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def _get_qr(self, name):
for qr in ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
if qr.name == name:
return qr
def _run_queue(self, name, session):
qr = self._get_qr(name)
for op in qr.enqueue_ops:
session.run(op)
def test_multiple_workers_with_shared_queue(self):
gfile.Glob = self._orig_glob
filenames = self._create_sorted_temp_files([
"ABC\n", "DEF\n", "GHI\n", "JKL\n", "MNO\n", "PQR\n", "STU\n", "VWX\n",
"YZ\n"
])
batch_size = 1
queue_capacity = 5
name = "my_batch"
example_queue_name = "%s/fifo_queue" % name
worker_file_name_queue_name = "%s/file_name_queue/fifo_queue" % name
server = server_lib.Server.create_local_server()
with ops.Graph().as_default() as g1, session_lib.Session(
server.target, graph=g1) as session:
keys, inputs = graph_io.read_keyed_batch_examples_shared_queue(
filenames,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
# Run the two queues once manually.
self._run_queue(worker_file_name_queue_name, session)
self._run_queue(example_queue_name, session)
self.assertAllEqual(session.run(inputs), [b"ABC"])
# Run the worker and the example queue.
self._run_queue(worker_file_name_queue_name, session)
self._run_queue(example_queue_name, session)
self.assertAllEqual(session.run(inputs), [b"DEF"])
with ops.Graph().as_default() as g2, session_lib.Session(
server.target, graph=g2) as session:
keys, inputs = graph_io.read_keyed_batch_examples_shared_queue(
filenames,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertAllEqual((None,), inputs.get_shape().as_list())
# Run the worker and the example queue.
self._run_queue(worker_file_name_queue_name, session)
self._run_queue(example_queue_name, session)
self.assertAllEqual(session.run(inputs), [b"GHI"])
self.assertTrue(g1 is not g2)
def test_batch_text_lines(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file("A\nB\nC\nD\nE\n")
batch_size = 3
queue_capacity = 10
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = graph_io.read_batch_examples(
[filename],
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
read_batch_size=10,
name=name)
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run(inputs), [b"A", b"B", b"C"])
self.assertAllEqual(session.run(inputs), [b"D", b"E"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def test_keyed_read_text_lines(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file("ABC\nDEF\nGHK\n")
batch_size = 1
queue_capacity = 5
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
keys, inputs = graph_io.read_keyed_batch_examples(
filename,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
self.assertAllEqual(
session.run([keys, inputs]),
[[filename.encode("utf-8") + b":1"], [b"ABC"]])
self.assertAllEqual(
session.run([keys, inputs]),
[[filename.encode("utf-8") + b":2"], [b"DEF"]])
self.assertAllEqual(
session.run([keys, inputs]),
[[filename.encode("utf-8") + b":3"], [b"GHK"]])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def test_keyed_parse_json(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file(
'{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}\n'
'{"features": {"feature": {"age": {"int64_list": {"value": [1]}}}}}\n'
'{"features": {"feature": {"age": {"int64_list": {"value": [2]}}}}}\n')
batch_size = 1
queue_capacity = 5
name = "my_batch"
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
dtypes = {"age": parsing_ops.FixedLenFeature([1], dtypes_lib.int64)}
parse_fn = lambda example: parsing_ops.parse_single_example( # pylint: disable=g-long-lambda
parsing_ops.decode_json_example(example), dtypes)
keys, inputs = graph_io.read_keyed_batch_examples(
filename,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
queue_capacity=queue_capacity,
parse_fn=parse_fn,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertEqual(1, len(inputs))
self.assertAllEqual((None, 1), inputs["age"].get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
key, age = session.run([keys, inputs["age"]])
self.assertAllEqual(age, [[0]])
self.assertAllEqual(key, [filename.encode("utf-8") + b":1"])
key, age = session.run([keys, inputs["age"]])
self.assertAllEqual(age, [[1]])
self.assertAllEqual(key, [filename.encode("utf-8") + b":2"])
key, age = session.run([keys, inputs["age"]])
self.assertAllEqual(age, [[2]])
self.assertAllEqual(key, [filename.encode("utf-8") + b":3"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def test_keyed_features_filter(self):
gfile.Glob = self._orig_glob
lines = [
'{"features": {"feature": {"age": {"int64_list": {"value": [2]}}}}}',
'{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}',
'{"features": {"feature": {"age": {"int64_list": {"value": [1]}}}}}',
'{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}',
'{"features": {"feature": {"age": {"int64_list": {"value": [3]}}}}}',
'{"features": {"feature": {"age": {"int64_list": {"value": [5]}}}}}'
]
filename = self._create_temp_file("\n".join(lines))
batch_size = 2
queue_capacity = 4
name = "my_batch"
features = {"age": parsing_ops.FixedLenFeature([], dtypes_lib.int64)}
def filter_fn(keys, examples_json):
del keys
serialized = parsing_ops.decode_json_example(examples_json)
examples = parsing_ops.parse_example(serialized, features)
return math_ops.less(examples["age"], 2)
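    # With the ages above ([2, 0, 1, 0, 3, 5]), the filter keeps only
    # lines[1], lines[2] and lines[3] (age < 2), which is what the batch
    # assertions below check.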
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
keys, inputs = graph_io._read_keyed_batch_examples_helper(
filename,
batch_size,
reader=io_ops.TextLineReader,
randomize_input=False,
num_epochs=1,
read_batch_size=batch_size,
queue_capacity=queue_capacity,
filter_fn=filter_fn,
name=name)
self.assertAllEqual((None,), keys.get_shape().as_list())
self.assertAllEqual((None,), inputs.get_shape().as_list())
session.run(variables.local_variables_initializer())
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
# First batch of two filtered examples.
out_keys, out_vals = session.run((keys, inputs))
self.assertAllEqual(
[filename.encode("utf-8") + b":2", filename.encode("utf-8") + b":3"],
out_keys)
self.assertAllEqual([lines[1].encode("utf-8"), lines[2].encode("utf-8")],
out_vals)
# Second batch will only have one filtered example as that's the only
# remaining example that satisfies the filtering criterion.
out_keys, out_vals = session.run((keys, inputs))
self.assertAllEqual([filename.encode("utf-8") + b":4"], out_keys)
self.assertAllEqual([lines[3].encode("utf-8")], out_vals)
# Exhausted input.
with self.assertRaises(errors.OutOfRangeError):
session.run((keys, inputs))
coord.request_stop()
coord.join(threads)
def test_queue_parsed_features_single_tensor(self):
with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
features = {"test": constant_op.constant([1, 2, 3])}
_, queued_features = graph_io.queue_parsed_features(features)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
out_features = session.run(queued_features["test"])
self.assertAllEqual([1, 2, 3], out_features)
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
|
"""
Test lldb-vscode setBreakpoints request
"""
import unittest2
import vscode
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
import os
import shutil
import subprocess
import tempfile
import threading
import time
def spawn_and_wait(program, delay):
if delay:
time.sleep(delay)
process = subprocess.Popen([program],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
class TestVSCode_attach(lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def set_and_hit_breakpoint(self, continueToExit=True):
source = 'main.c'
breakpoint1_line = line_number(source, '// breakpoint 1')
lines = [breakpoint1_line]
# Set breakpoint in the thread function so we can step the threads
breakpoint_ids = self.set_source_breakpoints(source, lines)
self.assertEqual(len(breakpoint_ids), len(lines),
"expect correct number of breakpoints")
self.continue_to_breakpoints(breakpoint_ids)
if continueToExit:
self.continue_to_exit()
@skipIfWindows
@skipIfNetBSD # Hangs on NetBSD as well
@skipIfRemote
def test_by_pid(self):
'''
Tests attaching to a process by process ID.
'''
self.build_and_create_debug_adaptor()
program = self.getBuildArtifact("a.out")
self.process = subprocess.Popen([program],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.attach(pid=self.process.pid)
self.set_and_hit_breakpoint(continueToExit=True)
@skipIfWindows
@skipIfNetBSD # Hangs on NetBSD as well
@skipIfRemote
def test_by_name(self):
'''
Tests attaching to a process by process name.
'''
self.build_and_create_debug_adaptor()
orig_program = self.getBuildArtifact("a.out")
# Since we are going to attach by process name, we need a unique
# process name that has minimal chance to match a process that is
# already running. To do this we use tempfile.mktemp() to give us a
# full path to a location where we can copy our executable. We then
        # run this copy to ensure we don't get the error "more than one
        # process matches 'a.out'".
program = tempfile.mktemp()
shutil.copyfile(orig_program, program)
shutil.copymode(orig_program, program)
# Use a file as a synchronization point between test and inferior.
pid_file_path = lldbutil.append_to_process_working_directory(self,
"pid_file_%d" % (int(time.time())))
def cleanup():
if os.path.exists(program):
os.unlink(program)
self.run_platform_command("rm %s" % (pid_file_path))
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
popen = self.spawnSubprocess(program, [pid_file_path])
pid = lldbutil.wait_for_file_on_target(self, pid_file_path)
self.attach(program=program)
self.set_and_hit_breakpoint(continueToExit=True)
@skipUnlessDarwin
@skipIfDarwin
@skipIfNetBSD # Hangs on NetBSD as well
def test_by_name_waitFor(self):
'''
Tests attaching to a process by process name and waiting for the
        next instance of a process to be launched, ignoring all current
ones.
'''
self.build_and_create_debug_adaptor()
program = self.getBuildArtifact("a.out")
self.spawn_thread = threading.Thread(target=spawn_and_wait,
args=(program, 1.0,))
self.spawn_thread.start()
self.attach(program=program, waitFor=True)
self.set_and_hit_breakpoint(continueToExit=True)
@skipIfWindows
@skipIfDarwin
@skipIfNetBSD # Hangs on NetBSD as well
@skipIf(archs=["arm", "aarch64"]) # Example of a flaky run http://lab.llvm.org:8011/builders/lldb-aarch64-ubuntu/builds/5527/steps/test/logs/stdio
def test_commands(self):
'''
Tests the "initCommands", "preRunCommands", "stopCommands",
"exitCommands", "terminateCommands" and "attachCommands"
that can be passed during attach.
"initCommands" are a list of LLDB commands that get executed
        before the target is created.
"preRunCommands" are a list of LLDB commands that get executed
after the target has been created and before the launch.
"stopCommands" are a list of LLDB commands that get executed each
time the program stops.
"exitCommands" are a list of LLDB commands that get executed when
        the process exits.
"attachCommands" are a list of LLDB commands that get executed and
must have a valid process in the selected target in LLDB after
they are done executing. This allows custom commands to create any
kind of debug session.
"terminateCommands" are a list of LLDB commands that get executed when
the debugger session terminates.
'''
self.build_and_create_debug_adaptor()
program = self.getBuildArtifact("a.out")
# Here we just create a target and launch the process as a way to test
# if we are able to use attach commands to create any kind of a target
# and use it for debugging
attachCommands = [
'target create -d "%s"' % (program),
'process launch'
]
initCommands = ['target list', 'platform list']
preRunCommands = ['image list a.out', 'image dump sections a.out']
stopCommands = ['frame variable', 'bt']
exitCommands = ['expr 2+3', 'expr 3+4']
terminateCommands = ['expr 4+2']
self.attach(program=program,
attachCommands=attachCommands,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands,
terminateCommands=terminateCommands)
# Get output from the console. This should contain both the
# "initCommands" and the "preRunCommands".
output = self.get_console()
# Verify all "initCommands" were found in console output
self.verify_commands('initCommands', output, initCommands)
# Verify all "preRunCommands" were found in console output
self.verify_commands('preRunCommands', output, preRunCommands)
functions = ['main']
breakpoint_ids = self.set_function_breakpoints(functions)
self.assertEquals(len(breakpoint_ids), len(functions),
"expect one breakpoint")
self.continue_to_breakpoints(breakpoint_ids)
output = self.get_console(timeout=1.0)
self.verify_commands('stopCommands', output, stopCommands)
# Continue after launch and hit the "pause()" call and stop the target.
# Get output from the console. This should contain both the
# "stopCommands" that were run after we stop.
self.vscode.request_continue()
time.sleep(0.5)
self.vscode.request_pause()
self.vscode.wait_for_stopped()
output = self.get_console(timeout=1.0)
self.verify_commands('stopCommands', output, stopCommands)
# Continue until the program exits
self.continue_to_exit()
# Get output from the console. This should contain both the
# "exitCommands" that were run after the second breakpoint was hit
# and the "terminateCommands" due to the debugging session ending
output = self.collect_console(duration=1.0)
self.verify_commands('exitCommands', output, exitCommands)
self.verify_commands('terminateCommands', output, terminateCommands)
@skipIfWindows
@skipIfDarwin
@skipIfNetBSD # Hangs on NetBSD as well
@skipIf(archs=["arm", "aarch64"]) # Example of a flaky run http://lab.llvm.org:8011/builders/lldb-aarch64-ubuntu/builds/5517/steps/test/logs/stdio
def test_terminate_commands(self):
'''
Tests that the "terminateCommands", that can be passed during
attach, are run when the debugger is disconnected.
'''
self.build_and_create_debug_adaptor()
program = self.getBuildArtifact("a.out")
# Here we just create a target and launch the process as a way to test
# if we are able to use attach commands to create any kind of a target
# and use it for debugging
attachCommands = [
'target create -d "%s"' % (program),
'process launch'
]
terminateCommands = ['expr 4+2']
self.attach(program=program,
attachCommands=attachCommands,
terminateCommands=terminateCommands,
disconnectAutomatically=False)
self.get_console()
# Once it's disconnected the console should contain the
# "terminateCommands"
self.vscode.request_disconnect(terminateDebuggee=True)
output = self.collect_console(duration=1.0)
self.verify_commands('terminateCommands', output, terminateCommands)
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A cloudwatch log subscriber that records error messages into getsentry.com
Features
- For on premise sentry installations, also supports relaying through
sqs for final delivery.
 - For extant logs, supports replaying them through to sentry.
- Supports self-provisioning into lambda with minimal dependency set.
- Supports collecting errors from custodian policy lambda logs or on
ec2 instance policy logs.
- Can be run as cli against historical logs
- Auto creates projects in sentry
Todo:
- Finish lambda provision / sqs relay
Discussion:
For realtime indexing w/ custodian this is currently set up as a
lambda per account.
- We need one lambda in the spoke account for all lambda policies
executing in the spoke account.
- We need one lambda in the hub account for each spoke account
that has instance policies executing there.
OrgMode
- Can operate with a single lambda given a mapping of accounts
to sentry projects
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import base64
from datetime import datetime
from functools import partial
import json
import logging
import os
import time
import uuid
import zlib
# no third-party libs needed in lambda
import boto3
from botocore.exceptions import ClientError
from botocore.vendored import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from dateutil.parser import parse as parse_date
from six.moves.urllib.parse import urlparse
sqs = logs = config = None
VERSION = "0.1"
log = logging.getLogger("c7n-sentry")
def init():
""" Lambda globals cache.
"""
global sqs, logs, config
if config is None:
with open('config.json') as fh:
config = json.load(fh)
if sqs is None:
sqs = boto3.client('sqs')
if logs is None:
logs = boto3.client('logs')
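# init() expects config.json to provide at least the keys read in
# process_log_event below (sentry_dsn, sentry_sqs). An illustrative,
# entirely hypothetical example:
#
#   {
#       "sentry_dsn": "https://key:secret@sentry.example.com/1",
#       "sentry_sqs": ""
#   }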
def process_log_event(event, context):
"""Lambda Entrypoint - Log Subscriber
Format log events and relay to sentry (direct or sqs)
"""
init()
# Grab the actual error log payload
serialized = event['awslogs'].pop('data')
data = json.loads(zlib.decompress(
base64.b64decode(serialized), 16 + zlib.MAX_WBITS))
msg = get_sentry_message(config, data)
if msg is None:
return
if config['sentry_dsn']:
# Deliver directly to sentry
send_sentry_message(config['sentry_dsn'], msg)
elif config['sentry_sqs']:
        # Deliver indirectly via sqs
        sqs.send_message(
            QueueUrl=config['sentry_sqs'],
            MessageBody=json.dumps(msg))
def process_sqs(event, context):
"""Lambda Entrypoint - SQS
"""
init()
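    # SQS relay delivery is not implemented here; see the "Todo" note in the
    # module docstring.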
def process_log_group(config):
"""CLI - Replay / Index
"""
from c7n.credentials import SessionFactory
factory = SessionFactory(
config.region, config.profile, assume_role=config.role)
session = factory()
client = session.client('logs')
params = dict(logGroupName=config.log_group,
filterPattern='Traceback', interleaved=True)
if config.log_streams:
params['logStreamNames'] = config.log_streams
if config.start:
params['startTime'] = int(time.mktime(
parse_date(config.start).replace(
hour=0, minute=0, second=0, microsecond=0).timetuple()) * 1000)
if config.end:
params['endTime'] = int(time.mktime(
parse_date(config.end).replace(
hour=0, minute=0, second=0, microsecond=0).timetuple()) * 1000)
settings = dict(account_id=config.account_id,
account_name=config.account_name)
paginator = client.get_paginator('filter_log_events')
event_count = 0
log.debug("Querying log events with %s", params)
for p in paginator.paginate(**params):
# log.debug("Searched streams\n %s", ", ".join(
# [s['logStreamName'] for s in p['searchedLogStreams']]))
for e in p['events']:
event_count += 1
msg = get_sentry_message(
settings, {'logEvents': [e],
'logStream': e['logStreamName'],
'logGroup': config.log_group}, client)
if msg is None:
continue
send_sentry_message(config.sentry_dsn, msg)
if event_count > 0:
log.info("Processed %s %d error events", config.account_name, event_count)
def send_sentry_message(sentry_dsn, msg):
# reversed from raven.base along with raven docs
parsed = urlparse(sentry_dsn)
key, secret = parsed.netloc.split('@')[0].split(':')
project_id = parsed.path.strip('/')
msg['project'] = project_id
endpoint = "%s://%s/api/%s/store/" % (
parsed.scheme, parsed.netloc.split('@')[1], project_id)
client = 'custodian-python-%s' % VERSION
auth_header_keys = [
('sentry_timestamp', time.time()),
('sentry_client', client),
        ('sentry_version', '7'),  # sentry protocol version
('sentry_key', key),
('sentry_secret', secret)]
auth_header = "Sentry %s" % ', '.join(
"%s=%s" % (k, v) for k, v in auth_header_keys)
headers = {
'User-Agent': client,
'X-Sentry-Auth': auth_header,
'Content-Encoding': 'deflate',
'Content-Type': 'application/octet-stream'}
encoded = zlib.compress(json.dumps(msg).encode('utf8'))
result = requests.post(endpoint, data=encoded, headers=headers)
if result.status_code != 200:
log.info("Got status code %s" % result.status_code)
def get_sentry_message(config, data, log_client=None, is_lambda=True):
# Policy name extraction from log group and stream.
group = data['logGroup']
stream = data['logStream']
if group.startswith('/aws/lambda'):
policy = "-".join(group.split('/')[-1].split('-')[1:])
module_prefix = "/var/task"
else:
policy = stream
module_prefix = "site-package"
# Parse the stringified traceback to get a structured exception
# for sentry.
try:
error_msg, error = parse_traceback(
data['logEvents'][0]['message'], module_prefix)
except IndexError:
# error messages without a traceback .. skip
log.info("no traceback, %s" % data['logEvents'][0]['message'])
return None
# WARNING - highly log format dependent :-(
try:
_, level, logger, msg_frag = [s.strip() for s in error_msg[
error_msg.find(','):].split('-', 3)]
error_msg = " - ".join([level, logger, msg_frag])
    except Exception:
        level, logger = 'ERROR', None
    culprit = None
    for f in reversed(error['stacktrace']['frames']):
culprit = "%s.%s" % (f['module'], f['function'])
if f['module'].startswith('c7n'):
break
breadcrumbs = None
# Fetch additional logs for context (10s window)
# if 0:
# timestamps = [e['timestamp'] for e in data['logEvents']]
# start = min(timestamps) - 1000 * 10
# end = max(timestamps) + 1000
# breadcrumbs = log_client.get_log_events(
# logGroupName=data['logGroup'],
# logStreamName=data['logStream'],
# startTime=start,
# endTime=end,
# startFromHead=True)['events'][:5]
# if data['logEvents'][0] in breadcrumbs:
# breadcrumbs.remove(data['logEvents'][0])
# else:
sentry_msg = {
'event_id': uuid.uuid4().hex,
'timestamp': datetime.fromtimestamp(
data['logEvents'][0]['timestamp'] / 1000).isoformat(),
'user': {
'id': config['account_id'],
'username': config['account_name']},
'level': level.lower(),
'culprit': culprit,
'message': error_msg,
'platform': 'python',
'exception': {'values': [error]},
'tags': {
'policy': policy,
'stream': stream,
'group': group},
}
if logger:
sentry_msg['logger'] = logger
if breadcrumbs:
sentry_msg['breadcrumbs'] = [
{'category': 'policy',
'message': e['message'],
'timestamp': e['timestamp'] / 1000} for e in breadcrumbs]
return sentry_msg
def parse_traceback(msg, site_path="site-packages", in_app_prefix="c7n"):
"""Extract a sentry traceback structure,
From a python formatted traceback string per python stdlib
traceback.print_exc()
"""
data = {}
lines = list(filter(None, msg.split('\n')))
data['frames'] = []
err_ctx = None
for l in lines[1:-1]:
l = l.strip()
if l.startswith('Traceback'):
continue
elif l.startswith('File'):
abs_path, lineno, function = l.split(',', 3)
abs_path = abs_path[abs_path.find('"'):-1]
f_path = abs_path[abs_path.find(site_path) + len(site_path) + 1:]
module = f_path[:f_path.find('.')].replace('/', '.').strip('.')
lineno = int(lineno.strip().split()[1])
function = function.strip().split()[-1]
err_ctx = dict(lineno=lineno,
abs_path=abs_path,
function=function,
filename=f_path,
module=module)
if module.startswith(in_app_prefix):
err_ctx['in_app'] = True
elif err_ctx is not None:
err_ctx['context_line'] = l
data['frames'].append(err_ctx)
err_ctx = None
return lines[0], {
'type': lines[-1].strip().split(':')[0],
'value': lines[-1].strip().split(':', 1)[1].strip(),
'module': data['frames'][-1]['module'],
'stacktrace': data}
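# Illustrative usage (not part of the original module): the traceback text
# below is hypothetical but matches the stdlib format parse_traceback expects.
_EXAMPLE_TRACEBACK = (
    "2016-01-01 00:00:00,000 - ERROR - custodian.policy - error running policy\n"
    "Traceback (most recent call last):\n"
    '  File "/var/task/c7n/policy.py", line 10, in run\n'
    "    raise ValueError('boom')\n"
    "ValueError: boom")
# parse_traceback(_EXAMPLE_TRACEBACK, site_path="/var/task") returns the first
# log line plus a sentry exception dict with type 'ValueError', value 'boom'
# and a single frame for module 'c7n.policy'.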
def get_function(session_factory, name, handler, role,
log_groups,
project, account_name, account_id,
sentry_dsn,
pattern="Traceback"):
"""Lambda function provisioning.
Self contained within the component, to allow for easier reuse.
"""
# Lazy import to avoid runtime dependency
from c7n.mu import (
LambdaFunction, PythonPackageArchive, CloudWatchLogSubscription)
config = dict(
name=name,
handler=handler,
runtime='python2.7',
memory_size=512,
timeout=15,
role=role,
description='Custodian Sentry Relay',
events=[
CloudWatchLogSubscription(
session_factory, log_groups, pattern)])
archive = PythonPackageArchive('c7n_sentry')
archive.add_contents(
'config.json', json.dumps({
'project': project,
'account_name': account_name,
'account_id': account_id,
'sentry_dsn': sentry_dsn,
}))
archive.add_contents(
'handler.py',
'from c7n_sentry.c7nsentry import process_log_event'
)
archive.close()
return LambdaFunction(config, archive)
def orgreplay(options):
from .common import Bag, get_accounts
accounts = get_accounts(options)
auth_headers = {'Authorization': 'Bearer %s' % options.sentry_token}
sget = partial(requests.get, headers=auth_headers)
spost = partial(requests.post, headers=auth_headers)
dsn = urlparse(options.sentry_dsn)
endpoint = "%s://%s/api/0/" % (
dsn.scheme,
"@" in dsn.netloc and dsn.netloc.rsplit('@', 1)[1] or dsn.netloc)
log.info("sentry endpoint: %s", endpoint)
teams = set([t['slug'] for t in sget(
endpoint + "organizations/%s/teams/" % options.sentry_org).json()])
projects = {p['name']: p for p in sget(endpoint + "projects/").json()}
def process_account(a):
log.debug("processing %s", a['name'])
team_name = a['name'].rsplit('-', 1)[0]
if team_name not in teams:
log.info("creating org team %s", team_name)
spost(
endpoint + "organizations/%s/teams/" % options.sentry_org,
json={'name': team_name})
teams.add(team_name)
if a['name'] not in projects:
log.info("creating account project %s", a['name'])
spost(endpoint + "teams/%s/%s/projects/" % (
options.sentry_org, team_name),
json={'name': a['name']})
bagger = partial(
Bag,
profile=options.profile, role=None, log_streams=None,
start=options.start, end=options.end, sentry_dsn=options.sentry_dsn,
account_id=a['account_id'],
account_name=a['name'])
for r in options.regions:
log.debug("Fetching hub instance policy errors for %s", a['name'])
b = bagger(
region=r, log_group="/cloud-custodian/%s/%s" % (a['name'], r))
try:
process_log_group(b)
except ClientError as e:
log.warning("Could not process %s region %s error: %s",
a['name'], r, e)
log.debug("Fetching spoke lambda policy errors for %s", a['name'])
for fname, config in a['config_files'].items():
for p in config.get('policies', ()):
if not p.get('mode'):
continue
b = bagger(region=r, assume_role=a['role'],
log_group="/aws/lambda/custodian-%s" % p['name'])
try:
process_log_group(b)
except ClientError as e:
                        if e.response['Error']['Code'] == 'ResourceNotFoundException':
                            log.info("account: %s region: %s group: %s not found",
                                     a['name'], r, b.log_group)
                            continue
                        log.warning("Could not process %s region %s group %s error: %s",
                                    a['name'], r, b.log_group, e)
    # Note: accounts are currently processed serially; the thread pool below is
    # bypassed by this return and kept only as an alternative concurrent path.
    return [process_account(a) for a in accounts]
    with ThreadPoolExecutor(max_workers=3) as w:
        futures = {}
        for a in accounts:
            futures[w.submit(process_account, a)] = a
        for f in as_completed(futures):
            exc = f.exception()
            if exc:
                log.error("Error processing account %s: %r",
                          futures[f]['name'], exc)
def deploy(options):
from .common import get_accounts
for account in get_accounts(options):
for region_name in options.regions:
for fname, config in account['config_files'].items():
for policy in config.get('policies', ()):
if policy.get('mode'):
deploy_one(
region_name, account, policy, options.sentry_dsn)
def deploy_one(region_name, account, policy, sentry_dsn):
from c7n.mu import LambdaManager
def session_factory():
return boto3.Session(region_name=region_name)
log_group_name = '/aws/lambda/custodian-{}'.format(policy['name'])
arn = 'arn:aws:logs:{}:{}:log-group:{}:*'.format(
region_name, account['account_id'], log_group_name)
function = get_function(
session_factory=session_factory,
name='cloud-custodian-sentry',
handler='handler.process_log_event',
role=account['role'],
log_groups=[{'logGroupName': log_group_name, 'arn': arn}],
project=None,
account_name=account['name'],
account_id=account['account_id'],
sentry_dsn=sentry_dsn,
)
log.info("Deploying lambda for {} in {}".format(
log_group_name, region_name))
LambdaManager(session_factory).publish(function)
def setup_parser():
from .common import setup_parser as common_parser
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', default=False, action="store_true")
subs = parser.add_subparsers()
cmd_orgreplay = subs.add_parser('orgreplay')
common_parser(cmd_orgreplay)
cmd_orgreplay.set_defaults(command=orgreplay)
cmd_orgreplay.add_argument('--profile')
# cmd_orgreplay.add_argument('--role')
cmd_orgreplay.add_argument('--start')
cmd_orgreplay.add_argument('--end')
cmd_orgreplay.add_argument('--sentry-org', default="c7n")
cmd_orgreplay.add_argument('--sentry-dsn',
default=os.environ.get('SENTRY_DSN'))
cmd_orgreplay.add_argument('--sentry-token',
default=os.environ.get('SENTRY_TOKEN'))
cmd_deploy = subs.add_parser('deploy')
common_parser(cmd_deploy)
cmd_deploy.add_argument('--sentry-dsn',
default=os.environ.get('SENTRY_DSN'))
cmd_deploy.set_defaults(command=deploy)
return parser
def main():
parser = setup_parser()
options = parser.parse_args()
level = options.verbose and logging.DEBUG or logging.INFO
logging.basicConfig(
level=level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(logging.ERROR)
if not options.regions:
options.regions = ["us-east-1", "us-west-2"]
options.command(options)
if __name__ == '__main__':
try:
main()
except (SystemExit, KeyboardInterrupt):
raise
except:
import traceback, sys, pdb
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
|
|
"""current schema
Revision ID: e3a246e0dc1
Revises:
Create Date: 2015-08-18 16:35:00.883495
"""
# revision identifiers, used by Alembic.
revision = 'e3a246e0dc1'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy.engine.reflection import Inspector
from airflow import settings
def upgrade():
inspector = Inspector.from_engine(settings.engine)
tables = inspector.get_table_names()
if 'connection' not in tables:
op.create_table(
'connection',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('conn_id', sa.String(length=250), nullable=True),
sa.Column('conn_type', sa.String(length=500), nullable=True),
sa.Column('host', sa.String(length=500), nullable=True),
sa.Column('schema', sa.String(length=500), nullable=True),
sa.Column('login', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(length=500), nullable=True),
sa.Column('port', sa.Integer(), nullable=True),
sa.Column('extra', sa.String(length=5000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'dag' not in tables:
op.create_table(
'dag',
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('is_paused', sa.Boolean(), nullable=True),
sa.Column('is_subdag', sa.Boolean(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('last_scheduler_run', sa.DateTime(), nullable=True),
sa.Column('last_pickled', sa.DateTime(), nullable=True),
sa.Column('last_expired', sa.DateTime(), nullable=True),
sa.Column('scheduler_lock', sa.Boolean(), nullable=True),
sa.Column('pickle_id', sa.Integer(), nullable=True),
sa.Column('fileloc', sa.String(length=2000), nullable=True),
sa.Column('owners', sa.String(length=2000), nullable=True),
sa.PrimaryKeyConstraint('dag_id')
)
if 'dag_pickle' not in tables:
op.create_table(
'dag_pickle',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pickle', sa.PickleType(), nullable=True),
sa.Column('created_dttm', sa.DateTime(), nullable=True),
sa.Column('pickle_hash', sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'import_error' not in tables:
op.create_table(
'import_error',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('filename', sa.String(length=1024), nullable=True),
sa.Column('stacktrace', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'job' not in tables:
op.create_table(
'job',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=True),
sa.Column('state', sa.String(length=20), nullable=True),
sa.Column('job_type', sa.String(length=30), nullable=True),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('latest_heartbeat', sa.DateTime(), nullable=True),
sa.Column('executor_class', sa.String(length=500), nullable=True),
sa.Column('hostname', sa.String(length=500), nullable=True),
sa.Column('unixname', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(
'job_type_heart',
'job',
['job_type', 'latest_heartbeat'],
unique=False
)
if 'known_event_type' not in tables:
op.create_table(
'known_event_type',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('know_event_type', sa.String(length=200), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'log' not in tables:
op.create_table(
'log',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dttm', sa.DateTime(), nullable=True),
sa.Column('dag_id', sa.String(length=250), nullable=True),
sa.Column('task_id', sa.String(length=250), nullable=True),
sa.Column('event', sa.String(length=30), nullable=True),
sa.Column('execution_date', sa.DateTime(), nullable=True),
sa.Column('owner', sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint('id')
)
if 'sla_miss' not in tables:
op.create_table(
'sla_miss',
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('email_sent', sa.Boolean(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date')
)
if 'slot_pool' not in tables:
op.create_table(
'slot_pool',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pool', sa.String(length=50), nullable=True),
sa.Column('slots', sa.Integer(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('pool')
)
if 'task_instance' not in tables:
op.create_table(
'task_instance',
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('state', sa.String(length=20), nullable=True),
sa.Column('try_number', sa.Integer(), nullable=True),
sa.Column('hostname', sa.String(length=1000), nullable=True),
sa.Column('unixname', sa.String(length=1000), nullable=True),
sa.Column('job_id', sa.Integer(), nullable=True),
sa.Column('pool', sa.String(length=50), nullable=True),
sa.Column('queue', sa.String(length=50), nullable=True),
sa.Column('priority_weight', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date')
)
op.create_index(
'ti_dag_state',
'task_instance',
['dag_id', 'state'],
unique=False
)
op.create_index(
'ti_pool',
'task_instance',
['pool', 'state', 'priority_weight'],
unique=False
)
op.create_index(
'ti_state_lkp',
'task_instance',
['dag_id', 'task_id', 'execution_date', 'state'],
unique=False
)
if 'user' not in tables:
op.create_table(
'user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
if 'variable' not in tables:
op.create_table(
'variable',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('key', sa.String(length=250), nullable=True),
sa.Column('val', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('key')
)
if 'chart' not in tables:
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
if 'known_event' not in tables:
op.create_table(
'known_event',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('known_event_type_id', sa.Integer(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['known_event_type_id'],
['known_event_type.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
if 'xcom' not in tables:
op.create_table(
'xcom',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('key', sa.String(length=512), nullable=True),
sa.Column('value', sa.PickleType(), nullable=True),
sa.Column(
'timestamp',
sa.DateTime(),
default=func.now(),
nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.PrimaryKeyConstraint('id')
)
if 'task_fail' not in tables:
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('known_event')
op.drop_table('chart')
op.drop_table('variable')
op.drop_table('user')
op.drop_index('ti_state_lkp', table_name='task_instance')
op.drop_index('ti_pool', table_name='task_instance')
op.drop_index('ti_dag_state', table_name='task_instance')
op.drop_table('task_instance')
op.drop_table('slot_pool')
op.drop_table('sla_miss')
op.drop_table('log')
op.drop_table('known_event_type')
op.drop_index('job_type_heart', table_name='job')
op.drop_table('job')
op.drop_table('import_error')
op.drop_table('dag_pickle')
op.drop_table('dag')
op.drop_table('connection')
op.drop_table('xcom')
op.drop_table('task_fail')
|
|
# -*- coding: utf-8 -*-
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import os
import re
import shutil
import subprocess
import threading
import uuid
from azurelinuxagent.common import logger
from azurelinuxagent.common.cgroup import CpuCgroup
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.conf import get_agent_pid_file_path
from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes, ExtensionError, \
ExtensionOperationError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import systemd
from azurelinuxagent.common.utils import fileutil, shellutil
from azurelinuxagent.common.utils.extensionprocessutil import handle_process_completion, read_output, \
TELEMETRY_MESSAGE_MAX_LEN
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.version import get_distro
CGROUPS_FILE_SYSTEM_ROOT = '/sys/fs/cgroup'
CGROUP_CONTROLLERS = ["cpu", "memory"]
EXTENSION_SLICE_PREFIX = "azure-vmextensions"
class SystemdRunError(CGroupsException):
"""
Raised when systemd-run fails
"""
def __init__(self, msg=None):
super(SystemdRunError, self).__init__(msg)
class CGroupsApi(object):
@staticmethod
def cgroups_supported():
distro_info = get_distro()
distro_name = distro_info[0]
try:
distro_version = FlexibleVersion(distro_info[1])
except ValueError:
return False
return ((distro_name.lower() == 'ubuntu' and distro_version.major >= 16) or
(distro_name.lower() in ("centos", "redhat") and
((distro_version.major == 7 and distro_version.minor >= 8) or distro_version.major >= 8)))
@staticmethod
def track_cgroups(extension_cgroups):
try:
for cgroup in extension_cgroups:
CGroupsTelemetry.track_cgroup(cgroup)
except Exception as exception:
logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage will not be tracked. "
"Error: {1}".format(cgroup.path, ustr(exception)))
@staticmethod
def get_processes_in_cgroup(cgroup_path):
with open(os.path.join(cgroup_path, "cgroup.procs"), "r") as cgroup_procs:
return [int(pid) for pid in cgroup_procs.read().split()]
@staticmethod
def _foreach_legacy_cgroup(operation):
"""
Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent;
starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. Also,
when running under systemd, the PIDs should not be explicitly moved to the cgroup filesystem. The older daemons would
incorrectly do that under certain conditions.
This method checks for the existence of the legacy cgroups and, if the daemon's PID has been added to them, executes the
given operation on the cgroups. After this check, the method attempts to remove the legacy cgroups.
:param operation:
The function to execute on each legacy cgroup. It must take 2 arguments: the controller and the daemon's PID
"""
legacy_cgroups = []
for controller in ['cpu', 'memory']:
cgroup = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "WALinuxAgent", "WALinuxAgent")
if os.path.exists(cgroup):
logger.info('Found legacy cgroup {0}', cgroup)
legacy_cgroups.append((controller, cgroup))
try:
for controller, cgroup in legacy_cgroups:
procs_file = os.path.join(cgroup, "cgroup.procs")
if os.path.exists(procs_file):
procs_file_contents = fileutil.read_file(procs_file).strip()
daemon_pid = CGroupsApi.get_daemon_pid()
if ustr(daemon_pid) in procs_file_contents:
operation(controller, daemon_pid)
finally:
for _, cgroup in legacy_cgroups:
logger.info('Removing {0}', cgroup)
shutil.rmtree(cgroup, ignore_errors=True)
return len(legacy_cgroups)
@staticmethod
def get_daemon_pid():
return int(fileutil.read_file(get_agent_pid_file_path()).strip())
class SystemdCgroupsApi(CGroupsApi):
"""
Cgroups interface via systemd
"""
def __init__(self):
self._cgroup_mountpoints = None
self._agent_unit_name = None
self._systemd_run_commands = []
self._systemd_run_commands_lock = threading.RLock()
def get_systemd_run_commands(self):
"""
Returns a list of the systemd-run commands currently running (given as PIDs)
"""
with self._systemd_run_commands_lock:
return self._systemd_run_commands[:]
def get_cgroup_mount_points(self):
"""
Returns a tuple with the mount points for the cpu and memory controllers; the values can be None
if the corresponding controller is not mounted
"""
# the output of mount is similar to
# $ mount -t cgroup
# cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
# cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
# cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
# etc
#
if self._cgroup_mountpoints is None:
cpu = None
memory = None
for line in shellutil.run_command(['mount', '-t', 'cgroup']).splitlines():
match = re.search(r'on\s+(?P<path>/\S+(memory|cpuacct))\s', line)
if match is not None:
path = match.group('path')
if 'cpuacct' in path:
cpu = path
else:
memory = path
self._cgroup_mountpoints = {'cpu': cpu, 'memory': memory}
return self._cgroup_mountpoints['cpu'], self._cgroup_mountpoints['memory']
@staticmethod
def get_process_cgroup_relative_paths(process_id):
"""
Returns a tuple with the path of the cpu and memory cgroups for the given process (relative to the mount point of the corresponding
controller).
The 'process_id' can be a numeric PID or the string "self" for the current process.
The values returned can be None if the process is not in a cgroup for that controller (e.g. the controller is not mounted).
"""
# The contents of the file are similar to
# # cat /proc/1218/cgroup
# 10:memory:/system.slice/walinuxagent.service
# 3:cpu,cpuacct:/system.slice/walinuxagent.service
# etc
cpu_path = None
memory_path = None
for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines():
match = re.match(r'\d+:(?P<controller>(memory|.*cpuacct.*)):(?P<path>.+)', line)
if match is not None:
controller = match.group('controller')
path = match.group('path').lstrip('/') if match.group('path') != '/' else None
if controller == 'memory':
memory_path = path
else:
cpu_path = path
return cpu_path, memory_path
def get_process_cgroup_paths(self, process_id):
"""
Returns a tuple with the path of the cpu and memory cgroups for the given process. The 'process_id' can be a numeric PID or the string "self" for the current process.
The values returned can be None if the process is not in a cgroup for that controller (e.g. the controller is not mounted).
"""
cpu_cgroup_relative_path, memory_cgroup_relative_path = self.get_process_cgroup_relative_paths(process_id)
cpu_mount_point, memory_mount_point = self.get_cgroup_mount_points()
cpu_cgroup_path = os.path.join(cpu_mount_point, cpu_cgroup_relative_path) \
if cpu_mount_point is not None and cpu_cgroup_relative_path is not None else None
memory_cgroup_path = os.path.join(memory_mount_point, memory_cgroup_relative_path) \
if memory_mount_point is not None and memory_cgroup_relative_path is not None else None
return cpu_cgroup_path, memory_cgroup_path
def get_unit_cgroup_paths(self, unit_name):
"""
Returns a tuple with the path of the cpu and memory cgroups for the given unit.
The values returned can be None if the controller is not mounted.
Ex: ControlGroup=/azure.slice/walinuxagent.service
controlgroup_path[1:] = azure.slice/walinuxagent.service
"""
controlgroup_path = systemd.get_unit_property(unit_name, "ControlGroup")
cpu_mount_point, memory_mount_point = self.get_cgroup_mount_points()
cpu_cgroup_path = os.path.join(cpu_mount_point, controlgroup_path[1:]) \
if cpu_mount_point is not None else None
memory_cgroup_path = os.path.join(memory_mount_point, controlgroup_path[1:]) \
if memory_mount_point is not None else None
return cpu_cgroup_path, memory_cgroup_path
@staticmethod
def get_cgroup2_controllers():
"""
Returns a tuple with the mount point for the cgroups v2 controllers, and the currently mounted controllers;
either value can be None if cgroups v2 or its controllers are not mounted
"""
# the output of mount is similar to
# $ mount -t cgroup2
# cgroup2 on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime,nsdelegate)
#
for line in shellutil.run_command(['mount', '-t', 'cgroup2']).splitlines():
match = re.search(r'on\s+(?P<path>/\S+)\s', line)
if match is not None:
mount_point = match.group('path')
controllers = None
controllers_file = os.path.join(mount_point, 'cgroup.controllers')
if os.path.exists(controllers_file):
controllers = fileutil.read_file(controllers_file)
return mount_point, controllers
return None, None
@staticmethod
def _is_systemd_failure(scope_name, stderr):
stderr.seek(0)
stderr = ustr(stderr.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8', errors='backslashreplace')
unit_not_found = "Unit {0} not found.".format(scope_name)
return unit_not_found in stderr or scope_name not in stderr
@staticmethod
def get_extension_slice_name(extension_name):
# Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects.
return EXTENSION_SLICE_PREFIX + "-" + extension_name.replace('-', '_') + ".slice"
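    # Illustrative example (hypothetical extension name):
    #   get_extension_slice_name("Microsoft.OSTCExtensions.Custom-Script")
    #   -> "azure-vmextensions-Microsoft.OSTCExtensions.Custom_Script.slice"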
def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr,
error_code=ExtensionErrorCodes.PluginUnknownFailure):
scope = "{0}_{1}".format(cmd_name, uuid.uuid4())
extension_slice_name = self.get_extension_slice_name(extension_name)
with self._systemd_run_commands_lock:
process = subprocess.Popen( # pylint: disable=W1509
"systemd-run --unit={0} --scope --slice={1} {2}".format(scope, extension_slice_name, command),
shell=shell,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
preexec_fn=os.setsid)
# We start systemd-run with shell == True so process.pid is the shell's pid, not the pid for systemd-run
self._systemd_run_commands.append(process.pid)
scope_name = scope + '.scope'
logger.info("Started extension in unit '{0}'", scope_name)
try:
cgroup_relative_path = os.path.join('azure.slice/azure-vmextensions.slice', extension_slice_name)
cpu_cgroup_mountpoint, _ = self.get_cgroup_mount_points()
if cpu_cgroup_mountpoint is None:
logger.info("The CPU controller is not mounted; will not track resource usage")
else:
cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path)
CGroupsTelemetry.track_cgroup(CpuCgroup(extension_name, cpu_cgroup_path))
except IOError as e:
if e.errno == 2: # 'No such file or directory'
logger.info("The extension command already completed; will not track resource usage")
logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(e))
except Exception as e:
logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(e))
# Wait for process completion or timeout
try:
return handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout,
stderr=stderr, error_code=error_code)
except ExtensionError as e:
# The extension didn't terminate successfully. Determine whether it was due to systemd errors or
# extension errors.
if not self._is_systemd_failure(scope, stderr):
# There was an extension error; it either timed out or returned a non-zero exit code. Re-raise the error
raise
# There was an issue with systemd-run. We need to log it and retry the extension without systemd.
process_output = read_output(stdout, stderr)
# Reset the stdout and stderr
stdout.truncate(0)
stderr.truncate(0)
if isinstance(e, ExtensionOperationError):
# no-member: Instance of 'ExtensionError' has no 'exit_code' member (no-member) - Disabled: e is actually an ExtensionOperationError
err_msg = 'Systemd process exited with code %s and output %s' % (
e.exit_code, process_output) # pylint: disable=no-member
else:
err_msg = "Systemd timed-out, output: %s" % process_output
raise SystemdRunError(err_msg)
finally:
with self._systemd_run_commands_lock:
self._systemd_run_commands.remove(process.pid)
def cleanup_legacy_cgroups(self):
"""
Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent;
starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. If
we find that any of the legacy groups include the PID of the daemon then we need to disable data collection for this
instance (under systemd, moving PIDs across the cgroup file system can produce unpredictable results)
"""
return CGroupsApi._foreach_legacy_cgroup(lambda *_: None)
|
|
"""
webpack_manifest.py - https://github.com/markfinger/python-webpack-manifest
MIT License
Copyright (c) 2015-present, Mark Finger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import json
import time
from datetime import datetime, timedelta
__version__ = '2.1.1'
MANIFEST_CACHE = {}
BUILDING_STATUS = 'building'
BUILT_STATUS = 'built'
ERRORS_STATUS = 'errors'
def load(path, static_url, debug=False, timeout=60, read_retry=None, static_root=None):
# Enable failed reads to be retried after a delay of 1 second
if debug and read_retry is None:
read_retry = 1
if debug or path not in MANIFEST_CACHE:
manifest = build(path, static_url, debug, timeout, read_retry, static_root)
if not debug:
MANIFEST_CACHE[path] = manifest
return manifest
return MANIFEST_CACHE[path]
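# Illustrative usage (not part of the published API): the entry name 'main' and
# the manifest path are hypothetical.
def _example_render_entry(manifest_path, static_url='/static/'):
    manifest = load(manifest_path, static_url)
    # str() on a type entry concatenates the rendered <script>/<link> tags.
    return str(manifest.main.js), str(manifest.main.css)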
def build(path, static_url, debug, timeout, read_retry, static_root):
data = read(path, read_retry)
status = data.get('status', None)
if debug:
# Lock up the process and wait for webpack to finish building
max_timeout = datetime.utcnow() + timedelta(seconds=timeout)
while status == BUILDING_STATUS:
time.sleep(0.1)
if datetime.utcnow() > max_timeout:
raise WebpackManifestBuildingStatusTimeout(
'Timed out reading the webpack manifest at "{}"'.format(path)
)
data = read(path, read_retry)
status = data.get('status', None)
if status == ERRORS_STATUS:
raise WebpackError(
'Webpack errors: \n\n{}'.format(
'\n\n'.join(data['errors'])
)
)
if status != BUILT_STATUS:
raise WebpackManifestStatusError('Unknown webpack manifest status: "{}"'.format(status))
return WebpackManifest(path, data, static_url, static_root)
class WebpackManifest(object):
def __init__(self, path, data, static_url, static_root=None):
self._path = path
self._data = data
self._files = data['files']
self._static_url = static_url
self._static_root = static_root
self._manifest_entries = {}
def __getattr__(self, item):
if item in self._manifest_entries:
return self._manifest_entries[item]
if item in self._files:
manifest_entry = WebpackManifestEntry(self._files[item], self._static_url, self._static_root)
self._manifest_entries[item] = manifest_entry
return manifest_entry
raise WebpackErrorUnknownEntryError('Unknown entry "%s" in manifest "%s"' % (item, self._path))
class WebpackManifestTypeEntry(object):
def __init__(self, manifest, static_url, static_root=None):
self.manifest = manifest
self.static_url = static_url
self.static_root = static_root
self.rel_urls = []
self.output = ''
self._content = None
if self.static_root:
self.paths = []
def add_file(self, rel_path):
rel_url = '/'.join(rel_path.split(os.path.sep))
self.rel_urls.append(rel_url)
self.output += self.template.format(self.static_url + rel_url)
if self.static_root:
self.paths.append(os.path.join(self.static_root, rel_path))
def __str__(self):
return self.output
@property
def content(self):
if self._content is None:
if not self.static_root:
raise WebpackManifestConfigError("Provide static_root to access webpack entry content.")
buffer = []
for path in self.paths:
with open(path, 'r') as content_file:
buffer.append(content_file.read())
self._content = '\n'.join(buffer)
return self._content
@property
def inline(self):
content = self.content
return self.inline_template.format(content) if content else ''
class WebpackManifestJsEntry(WebpackManifestTypeEntry):
template = '<script src="{}"></script>'
inline_template = '<script>{}</script>'
class WebpackManifestCssEntry(WebpackManifestTypeEntry):
template = '<link rel="stylesheet" href="{}">'
inline_template = '<style>{}</style>'
class WebpackManifestEntry(object):
supported_extensions = {
'js': WebpackManifestJsEntry,
'css': WebpackManifestCssEntry,
}
def __init__(self, rel_paths, static_url, static_root=None):
# Frameworks tend to be inconsistent about what they
# allow with regards to static urls
if not static_url.endswith('/'):
static_url += '/'
self.rel_paths = rel_paths
self.static_url = static_url
self.static_root = static_root
for ext, ext_class in self.supported_extensions.items():
setattr(self, ext, ext_class(self, static_url, static_root))
# Build strings of elements that can be dumped into a template
for rel_path in rel_paths:
name, ext = os.path.splitext(rel_path)
ext = ext.lstrip('.').lower()
if ext in self.supported_extensions:
getattr(self, ext).add_file(rel_path)
# Backwards compatibility accessors
@property
def rel_js(self):
return self.js.rel_urls
@property
def rel_css(self):
return self.css.rel_urls
def read(path, read_retry):
if not os.path.isfile(path):
raise WebpackManifestFileError('Path "{}" is not a file or does not exist'.format(path))
if not os.path.isabs(path):
raise WebpackManifestFileError('Path "{}" is not an absolute path to a file'.format(path))
with open(path, 'r') as manifest_file:
content = manifest_file.read()
    # In certain conditions, the file's contents evaluate to an empty string, so
    # we provide a hook to perform a single retry after a delay.
    # While it's a difficult bug to pin down, it happens most commonly during
    # periods of high cpu load, so the suspicion is that it comes down to race
    # conditions between delays in the OS writing buffers and the fact that we
    # are handling two competing processes.
try:
return json.loads(content)
except ValueError:
if not read_retry:
raise
time.sleep(read_retry)
return read(path, 0)
class WebpackManifestFileError(Exception):
pass
class WebpackError(Exception):
pass
class WebpackManifestStatusError(Exception):
pass
class WebpackManifestBuildingStatusTimeout(Exception):
pass
class WebpackErrorUnknownEntryError(Exception):
pass
class WebpackManifestConfigError(Exception):
pass
|
|
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have received a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
import math, random
from reqs.twisted.internet import reactor
from core.plugins import ProtocolPlugin
from core.decorators import *
from core.constants import *
class ToolsPlugin(ProtocolPlugin):
commands = {
"dune": "commandDune",
"hill": "commandHill",
"hole": "commandHole",
"lake": "commandLake",
"mountain": "commandMountain",
"pit": "commandPit",
"tree": "commandTree",
}
hooks = {
"blockchange": "blockChanged",
"newworld": "newWorld",
}
def gotClient(self):
self.build_trees = False
self.trunk_height = 5, 9
self.fanout = 2, 4
def newWorld(self, world):
"Hook to reset dynamiting abilities in new worlds if not op."
if not self.client.isBuilderPlus():
self.build_trees = False
def blockChanged(self, x, y, z, block, selected_block, fromloc):
"Hook trigger for block changes."
tobuild = []
# Randomise the variables
trunk_height = random.randint(*self.trunk_height)
fanout = random.randint(*self.fanout)
if self.build_trees and block == BLOCK_PLANT:
# Build the main tree bit
for i in range(-fanout-1, fanout):
for j in range(-fanout-1, fanout):
for k in range(-fanout-1, fanout):
if not self.client.AllowedToBuild(x+i, y+j, z+k):
return
if (i**2 + j**2 + k**2)**0.5 < fanout:
tobuild.append((i, j+trunk_height, k, BLOCK_LEAVES))
# Build the trunk
for i in range(trunk_height):
tobuild.append((0, i, 0, BLOCK_LOG))
# OK, send the build changes
for dx, dy, dz, block in tobuild:
try:
self.client.world[x+dx, y+dy, z+dz] = chr(block)
self.client.sendBlock(x+dx, y+dy, z+dz, block)
self.client.factory.queue.put((self.client, TASK_BLOCKSET, (x+dx, y+dy, z+dz, block)))
except AssertionError:
pass
return True
@build_list
@op_only
@on_off_command
def commandTree(self, onoff, fromloc, overriderank):
"/tree on|off - Builder\nBuilds trees, save the earth!"
if onoff == "on":
self.build_trees = True
self.client.sendServerMessage("You are now building trees; place a plant!")
else:
self.build_trees = False
self.client.sendServerMessage("You are no longer building trees.")
@build_list
@op_only
def commandDune(self, parts, fromloc, overriderank):
"/dune - Op\nCreates a sand dune between the two blocks you touched last."
# Use the last two block places
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
x_range = x2 - x
z_range = z2 - z
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
for i in range(x, x2+1):
for k in range(z, z2+1):
# Work out the height at this place
dx = (x_range / 2.0) - abs((x_range / 2.0) - (i - x))
dz = (z_range / 2.0) - abs((z_range / 2.0) - (k - z))
dy = int((dx**2 * dz**2) ** 0.2)
for j in range(y, y+dy+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
                        block = BLOCK_SAND  # dunes are sand all the way through
try:
world[i, j, k] = chr(block)
except AssertionError:
pass
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world=world)
self.client.sendBlock(i, j, k, block)
yield
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your dune just completed.")
pass
do_step()
@build_list
@op_only
def commandHill(self, parts, fromloc, overriderank):
"/hill - Op\nCreates a hill between the two blocks you touched last."
# Use the last two block places
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
x_range = x2 - x
z_range = z2 - z
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
for i in range(x, x2+1):
for k in range(z, z2+1):
# Work out the height at this place
dx = (x_range / 2.0) - abs((x_range / 2.0) - (i - x))
dz = (z_range / 2.0) - abs((z_range / 2.0) - (k - z))
dy = int((dx**2 * dz**2) ** 0.2)
for j in range(y, y+dy+1):
                        if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
block = BLOCK_GRASS if j == y+dy else BLOCK_DIRT
try:
world[i, j, k] = chr(block)
except AssertionError:
pass
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world=world)
self.client.sendBlock(i, j, k, block)
yield
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your hill just completed.")
pass
do_step()
@build_list
@op_only
def commandHole(self, parts, fromloc, overriderank):
"/hole - Op\ncreates a hole between two blocks"
# Use the last two block places
try:
x1, y1, z1 = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet")
return
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if z1 > z2:
z1, z2 = z2, z1
x_range = x2 - x1
z_range = z2 - z1
block = BLOCK_AIR
world = self.client.world
def generate_changes():
for x in range(x1, x2+1):
for z in range(z1, z2+1):
# Work out the height at this place
dx = (x_range / 2.0) - abs((x_range / 2.0) - (x - x1))
dz = (z_range / 2.0) - abs((z_range / 2.0) - (z - z1))
dy = int((dx**2 * dz**2) ** 0.3)
for y in range(y1-dy-1, y1+1):
if not self.client.AllowedToBuild(x, y, z) and not overriderank:
return
if y < 0:
continue
try:
world[x, y, z] = chr(block)
except AssertionError:
pass
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world = world)
self.client.sendBlock(x, y, z, block)
yield
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your hole just completed.")
pass
do_step()
@build_list
@op_only
def commandLake(self, parts, fromloc, overriderank):
"/lake - Op\ncreates a lake between two blocks"
# Use the last two block places
try:
x1, y1, z1 = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet")
return
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if z1 > z2:
z1, z2 = z2, z1
x_range = x2 - x1
z_range = z2 - z1
block = BLOCK_WATER
world = self.client.world
def generate_changes():
for x in range(x1, x2+1):
for z in range(z1, z2+1):
# Work out the height at this place
dx = (x_range / 2.0) - abs((x_range / 2.0) - (x - x1))
dz = (z_range / 2.0) - abs((z_range / 2.0) - (z - z1))
dy = int((dx**2 * dz**2) ** 0.3)
for y in range(y1-dy-1, y1):
if not self.client.AllowedToBuild(x, y, z) and not overriderank:
return
try:
world[x, y, z] = chr(block)
except AssertionError:
pass
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world = world)
self.client.sendBlock(x, y, z, block)
yield
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your lake just completed.")
pass
do_step()
@build_list
@op_only
def commandMountain(self, parts, fromloc, overriderank):
"/mountain blockname - Op\nCreates a mountain between the two blocks you touched last."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a type.")
return
else:
block = self.client.GetBlockValue(parts[1])
if block == None:
return
# If they only provided the type argument, use the last two block places
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
x_range = x2 - x
z_range = z2 - z
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
for i in range(x, x2+1):
for k in range(z, z2+1):
# Work out the height at this place
dx = (x_range / 2.0) - abs((x_range / 2.0) - (i - x))
dz = (z_range / 2.0) - abs((z_range / 2.0) - (k - z))
dy = int((dx**2 * dz**2) ** 0.3)
for j in range(y, y+dy+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
try:
world[i, j, k] = block
except AssertionError:
pass
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world=world)
self.client.sendBlock(i, j, k, block)
yield
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your mountain just completed.")
pass
do_step()
@build_list
@op_only
def commandPit(self, parts, fromloc, overriderank):
"/pit - Op\ncreates a lava pit between two blocks"
# Use the last two block places
try:
x1, y1, z1 = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet")
return
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if z1 > z2:
z1, z2 = z2, z1
x_range = x2 - x1
z_range = z2 - z1
block = BLOCK_LAVA
world = self.client.world
def generate_changes():
for x in range(x1, x2+1):
for z in range(z1, z2+1):
# Work out the height at this place
dx = (x_range / 2.0) - abs((x_range / 2.0) - (x - x1))
dz = (z_range / 2.0) - abs((z_range / 2.0) - (z - z1))
dy = int((dx**2 * dz**2) ** 0.3)
for y in range(y1-dy-1, y1):
if not self.client.AllowedToBuild(x, y, z) and not overriderank:
return
try:
world[x, y, z] = chr(block)
except AssertionError:
pass
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world = world)
self.client.sendBlock(x, y, z, block)
yield
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your pit just completed.")
pass
do_step()
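# Minimal standalone sketch (not part of the plugin) of the throttling pattern
# used by the commands above: a generator yields after each block change and a
# reactor.callLater() loop drains it a few items per tick so the server stays
# responsive.
def _example_throttled_run(changes_iter, per_tick=10, delay=0.01):
    def do_step():
        try:
            for _ in range(per_tick):
                next(changes_iter)
            reactor.callLater(delay, do_step)
        except StopIteration:
            pass
    do_step()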
|
|
#!/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to create API keys for your AWS IAM user accounts.
It can be used for individual accounts or for all your accounts at once.
Note: we have a limit of 2 access keys per user, so by default this will delete the old ones
Usage:
For individual accounts:
aws_api_key_manager -p ded-stage-aws -p ded-int-aws -p <some-other-account>
For all accounts found in /etc/openshift_tools/aws_accounts.txt:
aws_api_key_manager --all
To manage keys for another user, use the '-u' option:
aws_api_key_manager -u <some-other-user> -p ded-stage-aws
"""
from __future__ import print_function
import argparse
import ConfigParser
import getpass
import os
import pwd
import re
import sys
import time
import yaml
# pylint: disable=import-error
import boto3
import botocore
from openshift_tools import saml_aws_creds
class ManageKeys(object):
""" Class to create and update IAM user account API keys. """
def __init__(self):
""" constructor """
self.response = None
@staticmethod
def check_arguments():
""" Ensure that an argument was passed in from the command line.
Returns:
Parsed argument(s), if provided
"""
parser = argparse.ArgumentParser(description='Create API keys for IAM accounts.')
parser.add_argument('-a', '--all',
help='Create API keys for every ops aws account.',
action='store_true')
parser.add_argument('-p', '--profile',
help='Create new API keys for the specified profile.',
action='append')
parser.add_argument('-u', '--user',
help='Specify a username for the account.')
args = parser.parse_args()
if not args.all and not args.profile:
print('Specify an account ID or profile name.\n'
'To generate the keys for all ops accounts, use "--all"\n'
'Usage:\n'
'example: {0} -p <account-name>\n'
'example: {0} -u <some-other-user> -p <account-name>\n'
'example: {0} --all'.format(parser.prog))
sys.exit(10)
if not args.user:
if getpass.getuser() != 'root' and os.getegid() < 1000:
args.user = getpass.getuser()
return args
@staticmethod
def check_accounts():
""" Retrieves a list of the config-managed ops AWS accounts.
Returns:
A list containing each of the lines found in the aws accounts file
Raises:
A ValueError if the path does not exist
"""
config_path = '/etc/openshift_tools/sso-config.yaml'
if os.path.isfile(config_path):
with open(config_path, 'r') as sso_config:
yaml_config = yaml.load(sso_config)
if yaml_config["aws_account_file"]:
path = yaml_config["aws_account_file"]
accounts_list = []
if os.path.isfile(path):
with open(path) as open_file:
stripped_line = list([line.rstrip() for line in open_file.readlines()])
for line in stripped_line:
if line is not None:
accounts_list.append(line)
return accounts_list
else:
raise ValueError(path + ' does not exist.')
def check_user(self, aws_account, user_name, client):
""" Check if the user exists locally and in aws. creates iam user if not found.
Returns:
True, after checking if the IAM user exists in the specified AWS account
and creating a user account for them if one does not already exist
"""
try:
client.get_user(UserName=user_name)
except botocore.exceptions.ClientError as client_exception:
if client_exception.response['Error']['Code'] == 'NoSuchEntity':
system_users = []
for user in pwd.getpwall():
system_users.append(user[0])
                if user_name in system_users and user_name != 'root' and os.getegid() < 1000:
                    print("User does not have an existing IAM account for %s, "
                          "creating new account for user %s" % (aws_account, user_name))
self.create_user(aws_account, user_name, client)
return True
@staticmethod
def create_user(aws_account, user_name, client):
""" Create an IAM user account. """
client.create_user(
UserName=user_name
)
client.add_user_to_group(GroupName='admin', UserName=user_name)
print("A new user account was added.\n"
"Use change_iam_password -p %s to set your password" % aws_account.split(':')[0])
return True
@staticmethod
def get_all_profiles():
""" If -a is specified, generate a list of all profiles found in ~/.aws/credentials.
        Returns:
            Each profile from the credentials file, stored in a list.
        Raises:
            A ValueError if the path does not exist.
"""
path = os.path.join(os.path.expanduser('~'), '.aws/credentials')
profile_list = []
if os.path.isfile(path):
with open(path) as open_file:
stripped_line = list([line.rstrip() for line in open_file.readlines()])
for line in stripped_line:
account = re.match(r"^\[([A-Za-z0-9_\-]+)\]", line)
if account is not None:
profile_list.append(account.group(1))
return profile_list
else:
raise ValueError(path + ' does not exist.')
@staticmethod
def get_keys(user_name, client):
""" Get the Access Key IDs of the user, and return them in a list.
Returns:
All access keys found for the IAM user, in a list.
List will be empty if the user has no keys.
"""
existing_keys = client.list_access_keys(
UserName=user_name)
all_keys = existing_keys['AccessKeyMetadata']
keys_list = []
if all_keys:
for ekey in all_keys:
keys_list.append(ekey['AccessKeyId'])
return keys_list
@staticmethod
def get_token(aws_account):
""" Generate temporary SSO access credentials.
Requires the config file containing the IDP hostname.
Returns:
A temporary boto3 client created with a session token provided by the IDP host.
Raises:
A ValueError if the config path can not be found.
"""
sso_config_path = '/etc/openshift_tools/sso-config.yaml'
if os.path.isfile(sso_config_path):
with open(sso_config_path, 'r') as sso_config:
                yaml_config = yaml.safe_load(sso_config)
if yaml_config["idp_host"]:
ops_idp_host = yaml_config["idp_host"]
try:
creds = saml_aws_creds.get_temp_credentials(
metadata_id='urn:amazon:webservices:%s' % aws_account,
idp_host=ops_idp_host
)
client = boto3.client(
'iam',
aws_access_key_id=creds['AccessKeyId'],
aws_secret_access_key=creds['SecretAccessKey'],
aws_session_token=creds['SessionToken']
)
return client
except ValueError as client_exception:
                    if 'Error retrieving SAML token' in str(client_exception) and \
                        'Metadata not found' in str(client_exception):
print(client_exception)
print('Metadata for %s missing or misconfigured, skipping' % aws_account)
else:
raise
else:
            raise ValueError(sso_config_path + ' does not exist.')
@staticmethod
def create_key(aws_account, user_name, client):
""" Change an API key for the specified account.
Returns:
A response object from boto3, which contains information about the new IAM key.
Their values can be accessed like:
['AccessKey']['AccessKeyId']
['AccessKey']['SecretAccessKey']
"""
response = client.create_access_key(
UserName=user_name
)
print('Key successfully created for:', aws_account)
return response
@staticmethod
def delete_key(aws_account, user_name, key, client):
""" Delete an API key for the specified account. """
client.delete_access_key(
UserName=user_name,
AccessKeyId=key
)
print('Key successfully deleted for:', aws_account)
return True
@staticmethod
def manage_timestamp(update=False):
""" Update the expiration file, or create it if it does not already exist. """
path = os.path.join(os.path.expanduser('~'), '.aws/credentials_expiration')
exp_date = str(int(time.time())+180*24*60*60)
if os.path.isfile(path) and update is True:
print('File exists, overwriting.')
with open(path, 'w') as open_file:
open_file.write(exp_date)
elif not os.path.isfile(path):
print('File does not exist, creating.')
with open(path, 'w') as open_file:
open_file.write(exp_date)
else:
print('Checked for stamp file and it exists. No write was called, nothing to do here.')
return True
@staticmethod
def write_credentials(aws_account, key_object):
""" Write the profile for the user account to the AWS credentials file.
        Raises:
A ValueError if the path to the credentials file does not exist.
"""
path = os.path.join(os.path.expanduser('~'), '.aws/credentials')
if os.path.isfile(path):
config = ConfigParser.RawConfigParser()
config.read(path)
try:
config.get(aws_account, 'aws_access_key_id')
except ConfigParser.NoSectionError:
config.add_section(aws_account)
config.set(aws_account, 'aws_access_key_id',\
key_object['AccessKey']['AccessKeyId'])
config.set(aws_account, 'aws_secret_access_key',\
key_object['AccessKey']['SecretAccessKey'])
with open(path, 'w') as configfile:
config.write(configfile)
else:
config.set(aws_account, 'aws_access_key_id',\
key_object['AccessKey']['AccessKeyId'])
config.set(aws_account, 'aws_secret_access_key',\
key_object['AccessKey']['SecretAccessKey'])
with open(path, 'w') as configfile:
config.write(configfile)
else:
raise ValueError(path + ' does not exist.')
def run_all(self, args, ops_accounts):
""" Loop through a list of every ops-managed AWS account and create API keys for each. """
for aws_account in ops_accounts:
matching = [s for s in ops_accounts if aws_account in s]
account_name = matching[0].split(':')[0]
account_number = matching[0].split(':')[1]
client = self.get_token(account_number)
if client:
self.check_user(aws_account, args.user, client)
current_accounts = self.get_all_profiles()
existing_keys = self.get_keys(args.user, client)
if existing_keys:
for key in existing_keys:
self.delete_key(aws_account, args.user, key, client)
if aws_account not in current_accounts:
key_object = self.create_key(aws_account, args.user, client)
self.write_credentials(account_name, key_object)
self.manage_timestamp(True)
def run_one(self, args, ops_accounts):
""" Create API keys for only the specified ops-managed AWS accounts. """
for aws_account in args.profile:
matching = [s for s in ops_accounts if aws_account in s]
account_name = matching[0].split(':')[0]
account_number = matching[0].split(':')[1]
client = self.get_token(account_number)
if client:
self.check_user(aws_account, args.user, client)
existing_keys = self.get_keys(args.user, client)
if existing_keys:
for key in existing_keys:
self.delete_key(aws_account, args.user, key, client)
key_object = self.create_key(aws_account, args.user, client)
self.write_credentials(account_name, key_object)
else:
key_object = self.create_key(aws_account, args.user, client)
self.write_credentials(account_name, key_object)
self.manage_timestamp()
def main(self):
""" Main function. """
args = self.check_arguments()
ops_accounts = self.check_accounts()
if args.profile and args.user:
self.run_one(args, ops_accounts)
elif args.all and args.user:
self.run_all(args, ops_accounts)
else:
raise ValueError('No suitable arguments provided.')
if __name__ == '__main__':
MANAGE = ManageKeys()
MANAGE.main()
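# A minimal usage sketch (the profile name and username below are placeholders,
# not real accounts); it walks the same path that main() drives:
#
#   manager = ManageKeys()
#   args = manager.check_arguments()          # e.g. invoked as "-p ops-example -u jdoe"
#   ops_accounts = manager.check_accounts()   # reads the account list referenced by sso-config.yaml
#   manager.run_one(args, ops_accounts)       # rotates API keys for the named profile only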
|
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import re
import sys
import datetime
from django.analysis.tracer import mark_sql_call, is_analysis_running, is_analysis_paused, taint, in_view
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.utils import parameterize_limit
from django.utils.safestring import SafeUnicode, SafeString
import django.htoken as htoken
try:
import psycopg2ct as Database
import psycopg2ct.extensions as extensions
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
extensions.register_type(extensions.UNICODE)
extensions.register_adapter(SafeString, extensions.QuotedString)
extensions.register_adapter(SafeUnicode, extensions.QuotedString)
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=()):
if is_analysis_running():
ana_call,ana_para = parameterize_limit(query, args)
ana_call = str(ana_call).strip()
self.cur_query = ana_call
mark_sql_call(ana_call, ana_para)
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
if is_analysis_running():
for params in args:
ana_call,ana_para = parameterize_limit(query, params)
ana_call = str(ana_call).strip()
self.cur_query = ana_call
mark_sql_call(ana_call, ana_para)
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
    def fetchmany(self, size=None):
        # Use the cursor's default chunk size when no size is given, rather
        # than passing a falsy value that would fetch zero rows.
        val = self.cursor.fetchmany(size) if size is not None else self.cursor.fetchmany()
if is_analysis_running() and in_view() and (not is_analysis_paused()):# and len(val) == 1:
tval = [taint(row) for row in val]
htoken.add_sql_value(self.cur_query, tval)
return tval
return val
def fetchall(self):
val = self.cursor.fetchall()
if is_analysis_running() and (not is_analysis_paused()) and \
in_view(): # and len(val) == 1:
tval = [taint(row) for row in val]
htoken.add_sql_value(self.cur_query, tval)
return tval
return val
def fetchone(self):
val = self.cursor.fetchone()
if is_analysis_running() and in_view() and not is_analysis_paused():
tval = taint(val)
htoken.add_sql_value(self.cur_query, [tval])
return tval
return val
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = False
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
class DatabaseOperations(PostgresqlDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With psycopg2, cursor objects have a "query" attribute that is the
# exact query sent to the database. See docs here:
# http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
return cursor.query
def return_insert_id(self):
return "RETURNING %s", ()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
self.connection.set_isolation_level(self.isolation_level)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = None
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
if self.features.uses_autocommit:
if self._version[0:2] < (8, 2):
# FIXME: Needs extra code to do reliable model insert
# handling, so we forbid it for now.
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You cannot use autocommit=True with PostgreSQL prior to 8.2 at the moment.")
else:
                    # FIXME: Eventually we'll enable this by default for
# versions that support it, but, right now, that's hard to
# do without breaking other things (#10509).
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
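# A minimal usage sketch (database name and query are placeholders): the
# wrapper behaves like a plain psycopg2 cursor, but records the parameterized
# SQL and taints fetched rows while the analysis tracer is active in a view.
#
#   raw_cursor = Database.connect(database='example_db').cursor()
#   cursor = CursorWrapper(raw_cursor)
#   cursor.execute("SELECT id, username FROM auth_user WHERE id = %s", [1])
#   row = cursor.fetchone()   # tainted tuple when analysis is running, plain tuple otherwise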
|
|
# Copyright 2016 - Nokia Networks.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_config import cfg
from oslo_log import log as logging
from osprofiler import profiler
from mistral.db.v2 import api as db_api
from mistral.engine import post_tx_queue
from mistral.engine import utils as engine_utils
from mistral.engine import workflow_handler as wf_handler
from mistral import exceptions as exc
from mistral.executors import base as exe
from mistral.lang import parser as spec_parser
from mistral.rpc import clients as rpc
from mistral.services import security
from mistral.utils import wf_trace
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral_lib import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Action(object, metaclass=abc.ABCMeta):
"""Action.
    Represents a workflow action and defines an interface that can be used by
    the Mistral engine or its components to manipulate actions.
"""
def __init__(self, action_desc, action_ex=None, task_ex=None,
task_ctx=None):
self.action_desc = action_desc
self.action_ex = action_ex
self.namespace = action_desc.namespace if action_desc else None
self.task_ex = action_ex.task_execution if action_ex else task_ex
self.task_ctx = task_ctx
@abc.abstractmethod
def complete(self, result):
"""Complete action and process its result.
:param result: Action result.
"""
raise NotImplementedError
def fail(self, msg):
assert self.action_ex
        # When we set an ERROR state we should set the output value safely,
        # truncating it so we don't hit exceptions due to field size limitations.
msg = utils.cut_by_kb(
msg,
cfg.CONF.engine.execution_field_size_limit_kb
)
self.action_ex.state = states.ERROR
self.action_ex.output = {'result': msg}
def update(self, state):
assert self.action_ex
# TODO(rakhmerov): Not sure we can do it for all actions.
action = self.action_desc.instantiate(self.action_ex.input, {})
if state == states.PAUSED and action.is_sync():
raise exc.InvalidStateTransitionException(
'Transition to the PAUSED state is only supported '
'for asynchronous action execution.'
)
if not states.is_valid_transition(self.action_ex.state, state):
raise exc.InvalidStateTransitionException(
'Invalid state transition from %s to %s.' %
(self.action_ex.state, state)
)
self.action_ex.state = state
@abc.abstractmethod
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
timeout=None):
"""Schedule action run.
        This method schedules the action run so that its result can be
        received later by the engine. From the engine's perspective the action
        runs asynchronously (not to be confused with executor asynchrony,
        where the executor doesn't immediately send a result).
:param timeout: a period of time in seconds after which execution of
action will be interrupted
:param input_dict: Action input.
:param target: Target (group of action executors).
:param index: Action execution index. Makes sense for some types.
:param desc: Action execution description.
:param safe_rerun: If true, action would be re-run if executor dies
during execution.
"""
raise NotImplementedError
@abc.abstractmethod
def run(self, input_dict, target, index=0, desc='', save=True,
safe_rerun=False, timeout=None):
"""Immediately run action.
        This method runs the action immediately, without scheduling it for a
        later time. From the engine's perspective the action is processed
        synchronously.
:param timeout: a period of time in seconds after which execution of
action will be interrupted
:param input_dict: Action input.
:param target: Target (group of action executors).
:param index: Action execution index. Makes sense for some types.
:param desc: Action execution description.
:param save: True if action execution object needs to be saved.
:param safe_rerun: If true, action would be re-run if executor dies
during execution.
:return: Action output.
"""
raise NotImplementedError
def _prepare_execution_context(self):
res = {}
if self.task_ex:
wf_ex = self.task_ex.workflow_execution
res['workflow_execution_id'] = wf_ex.id
res['task_execution_id'] = self.task_ex.id
res['workflow_name'] = wf_ex.name
if self.action_ex:
res['action_execution_id'] = self.action_ex.id
res['callback_url'] = (
'/v2/action_executions/%s' % self.action_ex.id
)
return res
def _create_action_execution(self, input_dict, runtime_ctx,
desc='', action_ex_id=None, is_sync=True):
action_ex_id = action_ex_id or utils.generate_unicode_uuid()
values = {
'id': action_ex_id,
'name': self.action_desc.name,
'state': states.RUNNING,
'input': input_dict,
'runtime_context': runtime_ctx,
'workflow_namespace': self.namespace,
'description': desc,
'is_sync': is_sync
}
if self.task_ex:
values.update({
'task_execution_id': self.task_ex.id,
'workflow_name': self.task_ex.workflow_name,
'workflow_namespace': self.task_ex.workflow_namespace,
'workflow_id': self.task_ex.workflow_id,
'project_id': self.task_ex.project_id,
})
else:
values.update({
'project_id': security.get_project_id(),
})
self.action_ex = db_api.create_action_execution(values)
if self.task_ex:
# Add to collection explicitly so that it's in a proper
# state within the current session.
self.task_ex.action_executions.append(self.action_ex)
@profiler.trace('action-log-result', hide_args=True)
def _log_result(self, prev_state, result):
state = self.action_ex.state
if prev_state != state:
wf_trace.info(
None,
"Action '%s' (%s)(task=%s) [%s -> %s, %s]" %
(self.action_ex.name,
self.action_ex.id,
self.task_ex.name if self.task_ex else None,
prev_state,
state,
result.cut_repr())
)
class RegularAction(Action):
"""Regular Python action."""
@profiler.trace('regular-action-complete', hide_args=True)
def complete(self, result):
assert self.action_ex
if states.is_completed(self.action_ex.state):
raise ValueError(
"Action {} is already completed".format(self.action_ex.id)
)
prev_state = self.action_ex.state
if result.is_success():
self.action_ex.state = states.SUCCESS
elif result.is_cancel():
self.action_ex.state = states.CANCELLED
else:
self.action_ex.state = states.ERROR
# Convert the result, if needed.
converted_result = self.action_desc.post_process_result(result)
self.action_ex.output = converted_result.to_dict()
self.action_ex.accepted = True
self._log_result(prev_state, result)
@profiler.trace('action-schedule', hide_args=True)
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
timeout=None):
assert not self.action_ex
self.action_desc.check_parameters(input_dict)
wf_ex = self.task_ex.workflow_execution if self.task_ex else None
wf_ctx = data_flow.ContextView(
self.task_ctx,
data_flow.get_workflow_environment_dict(wf_ex),
wf_ex.context if wf_ex else {}
)
try:
action = self.action_desc.instantiate(input_dict, wf_ctx)
except Exception:
raise exc.InvalidActionException(
'Failed to instantiate an action'
' [action_desc=%s, input_dict=%s]'
% (self.action_desc, input_dict)
)
# Assign the action execution ID here to minimize database calls.
# Otherwise, the input property of the action execution DB object needs
# to be updated with the action execution ID after the action execution
# DB object is created.
action_ex_id = utils.generate_unicode_uuid()
self._create_action_execution(
input_dict,
self._prepare_runtime_context(index, safe_rerun),
desc=desc,
action_ex_id=action_ex_id,
is_sync=action.is_sync()
)
def _run_action():
executor = exe.get_executor(cfg.CONF.executor.type)
return executor.run_action(
action,
self.action_ex.id if self.action_ex is not None else None,
safe_rerun,
self._prepare_execution_context(),
target=target,
timeout=timeout
)
# Register an asynchronous command to run the action
# on an executor outside of the main DB transaction.
post_tx_queue.register_operation(_run_action)
@profiler.trace('action-run', hide_args=True)
def run(self, input_dict, target, index=0, desc='', save=True,
safe_rerun=False, timeout=None):
assert not self.action_ex
self.action_desc.check_parameters(input_dict)
try:
action = self.action_desc.instantiate(input_dict, {})
except Exception:
raise exc.InvalidActionException(
'Failed to instantiate an action'
' [action_desc=%s, input_dict=%s]'
% (self.action_desc, input_dict)
)
# Assign the action execution ID here to minimize database calls.
# Otherwise, the input property of the action execution DB object needs
# to be updated with the action execution ID after the action execution
# DB object is created.
action_ex_id = utils.generate_unicode_uuid()
if save:
self._create_action_execution(
input_dict,
self._prepare_runtime_context(index, safe_rerun),
desc=desc,
action_ex_id=action_ex_id,
is_sync=action.is_sync()
)
executor = exe.get_executor(cfg.CONF.executor.type)
return executor.run_action(
action,
self.action_ex.id if self.action_ex is not None else None,
safe_rerun,
self._prepare_execution_context(),
target=target,
async_=False,
timeout=timeout
)
def _prepare_runtime_context(self, index, safe_rerun):
"""Template method to prepare action runtime context.
        A regular action inserts an index into its runtime context and a flag
        showing whether the action is safe to rerun (i.e. it's idempotent).
"""
return {'index': index, 'safe_rerun': safe_rerun}
class WorkflowAction(Action):
"""Workflow action."""
def __init__(self, wf_name, **kwargs):
super(WorkflowAction, self).__init__(None, **kwargs)
self.wf_name = wf_name
@profiler.trace('workflow-action-complete', hide_args=True)
def complete(self, result):
        # No-op because in the case of a workflow the result is already processed.
pass
@profiler.trace('workflow-action-schedule', hide_args=True)
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
timeout=None):
assert not self.action_ex
self.validate_input(input_dict)
parent_wf_ex = self.task_ex.workflow_execution
parent_wf_spec = spec_parser.get_workflow_spec_by_execution_id(
parent_wf_ex.id
)
wf_def = engine_utils.resolve_workflow_definition(
parent_wf_ex.workflow_name,
parent_wf_spec.get_name(),
namespace=parent_wf_ex.params['namespace'],
wf_spec_name=self.wf_name
)
wf_spec = spec_parser.get_workflow_spec_by_definition_id(
wf_def.id,
wf_def.updated_at
)
# If the parent has a root_execution_id, it must be a sub-workflow. So
# we should propagate that ID down. Otherwise the parent must be the
        # root execution and we should use the parent's ID.
root_execution_id = parent_wf_ex.root_execution_id or parent_wf_ex.id
wf_params = {
'root_execution_id': root_execution_id,
'task_execution_id': self.task_ex.id,
'index': index,
'namespace': parent_wf_ex.params['namespace']
}
if 'notify' in parent_wf_ex.params:
wf_params['notify'] = parent_wf_ex.params['notify']
for k, v in list(input_dict.items()):
if k not in wf_spec.get_input():
wf_params[k] = v
del input_dict[k]
if cfg.CONF.engine.start_subworkflows_via_rpc:
def _start_subworkflow():
rpc.get_engine_client().start_workflow(
wf_def.id,
wf_def.namespace,
None,
input_dict,
"sub-workflow execution",
async_=True,
**wf_params
)
post_tx_queue.register_operation(_start_subworkflow)
else:
wf_handler.start_workflow(
wf_def.id,
wf_def.namespace,
None,
input_dict,
"sub-workflow execution",
wf_params
)
@profiler.trace('workflow-action-run', hide_args=True)
def run(self, input_dict, target, index=0, desc='', save=True,
safe_rerun=True, timeout=None):
raise NotImplementedError('Does not apply to this WorkflowAction.')
def validate_input(self, input_dict):
# TODO(rakhmerov): Implement.
pass
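# A minimal sketch (not part of Mistral) of a concrete Action subclass that
# satisfies the abstract interface; the method bodies are illustrative
# placeholders only and do not reflect real engine behaviour.
class EchoAction(Action):
    """Toy action that simply records its input and echoes it back."""
    def complete(self, result):
        # A real implementation would post-process the result and update the
        # action execution state, as RegularAction.complete() does above.
        self.result = result
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
                 timeout=None):
        # A real implementation would create an action execution and hand the
        # work to an executor; this sketch only remembers the input.
        self.scheduled_input = input_dict
    def run(self, input_dict, target, index=0, desc='', save=True,
            safe_rerun=False, timeout=None):
        return input_dict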
|
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.google_api.run import google_api
from starthinker.task.url.run import url
from starthinker.task.vision_api.run import vision_api
from starthinker.task.bigquery.run import bigquery
def recipe_cm360_oculi(config, auth_cm, auth_bigquery, account, limit, recipe_slug):
"""Export CM360 Creatives into BigQuery, process them with the Vision API, and
generate a breakdown of each creative asset mapped back to its parent. Also
generate a series of views to flatten the data.
Args:
auth_cm (authentication) - CM360 read credentials.
auth_bigquery (authentication) - BigQuery read/ write credentials.
account (integer) - CM360 Account Identifier
limit (integer) - Number of creatives to pull.
recipe_slug (string) - name of dataset in BigQuery.
"""
dataset(config, {
'auth':auth_bigquery,
'dataset':recipe_slug
})
google_api(config, {
'__comment__':'Download all creatives, limit set to 20K for 4 hour processing time, and up to maximum 80K to prevent triggering 500 Error in API.',
'auth':auth_cm,
'api':'dfareporting',
'version':'v3.4',
'function':'creatives.list',
'kwargs':{
'accountId':account,
'sortField':'ID',
'sortOrder':'DESCENDING'
},
'iterate':True,
'limit':limit,
'results':{
'bigquery':{
'auth':auth_bigquery,
'dataset':recipe_slug,
'table':'CM_Creatives'
}
}
})
url(config, {
'auth':auth_bigquery,
'status':True,
'read':True,
'urls':{
'bigquery':{
'dataset':recipe_slug,
'query':'''WITH
URL_PARTS AS (
SELECT
C.id,
CAST(C.AdvertiserId AS STRING) AS AdvertiserId,
CA.assetIdentifier.name AS Name
FROM `CM_Creatives` AS C, UNNEST(creativeAssets) AS CA
WHERE REPLACE(RIGHT(CA.assetIdentifier.name, 4), '.', '') IN ('jpg', 'png', 'gif', 'jpeg','html','htm')
          AND CA.size.width > 1 AND CA.size.height > 1
)
SELECT FORMAT('https://s0.2mdn.net/%s/%s', AdvertiserId, REPLACE(Name, ' ', '%20')) AS URL, id AS URI FROM URL_PARTS
UNION ALL
SELECT FORMAT('https://s0.2mdn.net/sadbundle/%s', REPLACE(Name, ' ', '%20')) AS URL, id AS URI FROM URL_PARTS
UNION ALL
SELECT FORMAT('https://s0.2mdn.net/simgad/%s', REPLACE(Name, ' ', '%20')) AS URL, id AS URI FROM URL_PARTS ''',
'legacy':False
}
},
'to':{
'bigquery':{
'dataset':recipe_slug,
'table':'Creative_URLs'
}
}
})
vision_api(config, {
'auth':auth_cm,
'requests':{
'bigquery':{
'dataset':recipe_slug,
'query':'''
SELECT
STRUCT(
Read AS content,
STRUCT(
URI AS imageUri
) AS source
) AS image,
[
STRUCT(
'TEXT_DETECTION' AS type,
10 AS maxResults,
'builtin/stable' AS model
),
STRUCT(
'IMAGE_PROPERTIES' AS type,
10 AS maxResults,
'builtin/stable' AS model
),
STRUCT(
'SAFE_SEARCH_DETECTION' AS type,
10 AS maxResults,
'builtin/stable' AS model
),
STRUCT(
'LABEL_DETECTION' AS type,
10 AS maxResults,
'builtin/stable' AS model
),
STRUCT(
'LOGO_DETECTION' AS type,
10 AS maxResults,
'builtin/stable' AS model
),
STRUCT(
'FACE_DETECTION' AS type,
10 AS maxResults,
'builtin/stable' AS model
),
STRUCT(
'OBJECT_LOCALIZATION' AS type,
10 AS maxResults,
'builtin/stable' AS model
)
] AS features
FROM `Creative_URLs`
WHERE Status=200 ''',
'legacy':False
}
},
'responses':{
'bigquery':{
'auth':auth_bigquery,
'dataset':recipe_slug,
'table':'Vision_Creatives'
}
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'query':'''SELECT
C.*,
VC.*
FROM `{dataset}.CM_Creatives` AS C
LEFT JOIN `{dataset}.Vision_Creatives` AS VC
ON C.Id=CAST(VC.imageUri AS INT64) ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_Creatives'
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'query':'''SELECT
CAST(imageUri AS INT64) AS creativeID,
description AS label,
score FROM
`{dataset}.Vision_Creatives`, UNNEST( labelAnnotations) ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_labelAnnotations'
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'query':'''WITH Creative_Sizes AS (
SELECT
Id AS creativeId,
MAX(size.width) AS width,
MAX(size.height) AS height,
FROM `{dataset}.CM_Creatives`
GROUP BY 1 ) SELECT
CAST(VC.imageUri AS INT64) AS creativeId,
LOWER(T.description) AS word,
SAFE_DIVIDE(MAX(V.x) - MIN(v.x), ANY_VALUE(width)) * SAFE_DIVIDE(MAX(V.y) - MIN(v.y), ANY_VALUE(height)) AS area_fraction
FROM
`{dataset}.Vision_Creatives` AS VC
JOIN UNNEST(textAnnotations) AS T
JOIN UNNEST(boundingPoly.vertices) AS V
JOIN Creative_Sizes AS CS
ON CAST(VC.imageUri AS INT64) = CS.creativeId
WHERE
/* Exclude small and common words */
LENGTH(description) > 2
AND LOWER(description) NOT IN ('for', 'the')
GROUP BY 1,2 ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_textAnnotations'
}
})
bigquery(config, {
'auth':auth_bigquery,
'function':'RGB To HSV',
'to':{
'dataset':recipe_slug
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'query':'''SELECT
CAST(VC.imageUri AS INT64) AS creativeId,
LOWER(LO.name) AS name,
(MAX(V.x) - MIN(v.x)) * (MAX(V.y) - MIN(v.y)) AS areaFraction FROM `{dataset}.Vision_Creatives` AS VC
JOIN UNNEST(localizedObjectAnnotations) AS LO
JOIN UNNEST(boundingPoly.normalizedVertices) AS V GROUP BY 1,2 ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_localizedObjectAnnotations'
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'dataset':recipe_slug,
'query':'''WITH Vision_Colors AS (
SELECT
CAST(VC.imageUri AS INT64) AS creativeId,
STRUCT(
CAST(C.color.red AS INT64) AS r,
CAST(C.color.green AS INT64) AS g,
CAST(C.color.blue AS INT64) AS b
) as rgb,
STRUCT(
FORMAT('%02X', CAST(C.color.red AS INT64)) as r,
FORMAT('%02X', CAST(C.color.green AS INT64)) as g,
FORMAT('%02X', CAST(C.color.blue AS INT64)) as b
) as html,
`{dataset}`.rgb_to_hsv(C.color.red, C.color.green, C.color.blue) AS hsv,
(0.2126*C.color.red + 0.7152*C.color.green + 0.0722*C.color.blue) / 255.0 AS percievedBrightness,
C.score,
C.pixelFraction AS areaFraction
FROM
`{dataset}.Vision_Creatives` AS VC
JOIN UNNEST(imagePropertiesAnnotation.dominantColors.colors) AS C )
SELECT
*,
CASE
WHEN hsv.h < 90 THEN (90 - hsv.h) / 90
WHEN hsv.h < 270 THEN 0
ELSE (hsv.h - 270) / 90
END AS warmness,
CASE
WHEN hsv.h < 90 THEN 0
WHEN hsv.h < 270 THEN ( 90 - ABS(180 - hsv.h)) / 90
ELSE 0
END AS coldness FROM Vision_Colors; ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_imagePropertiesAnnotation'
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'query':'''
WITH Creative_Sizes AS (
SELECT
Id AS creativeId,
MAX(size.width) AS width,
MAX(size.height) AS height,
FROM `{dataset}.CM_Creatives`
GROUP BY 1
)
SELECT
CAST(VC.imageUri AS INT64) AS creativeId,
F.angerLikelihood,
F.headwearLikelihood,
F.surpriseLikelihood,
F.sorrowLikelihood,
F.joyLikelihood,
F.blurredLikelihood,
F.panAngle,
F.rollAngle,
F.tiltAngle,
detectionConfidence AS score,
SAFE_DIVIDE(MAX(v.x) - MIN(v.x), ANY_VALUE(width)) * SAFE_DIVIDE(MAX(v.y) - MIN(v.y), ANY_VALUE(height)) AS area_fraction
FROM
`{dataset}.Vision_Creatives` AS VC
JOIN UNNEST(faceAnnotations ) AS F
JOIN UNNEST(boundingPoly.vertices) AS V
JOIN Creative_Sizes AS CS
ON CAST(VC.imageUri AS INT64) = CS.creativeId
GROUP BY 1,2,3,4,5,6,7,8,9,10,11 ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_faceAnnotations'
}
})
bigquery(config, {
'auth':auth_bigquery,
'from':{
'query':'''
WITH Creative_Sizes AS (
SELECT
Id AS creativeId,
MAX(size.width) AS width,
MAX(size.height) AS height,
FROM `{dataset}.CM_Creatives`
GROUP BY 1
),
Creative_Faces AS (
SELECT
CAST(VC.imageUri AS INT64) AS creativeId,
description AS logo,
score,
SAFE_DIVIDE((MAX(v.x) + MIN(v.x)) / 2, ANY_VALUE(width)) AS x_fraction,
SAFE_DIVIDE((MAX(v.y) + MIN(v.y)) / 2, ANY_VALUE(height)) AS y_fraction,
SAFE_DIVIDE(MAX(v.x) - MIN(v.x), ANY_VALUE(width)) * SAFE_DIVIDE(MAX(v.y) - MIN(v.y), ANY_VALUE(height)) AS area_fraction
FROM
`{dataset}.Vision_Creatives` AS VC
JOIN UNNEST(logoAnnotations ) AS L
JOIN UNNEST(boundingPoly.vertices) AS V
JOIN Creative_Sizes AS CS
ON CAST(VC.imageUri AS INT64) = CS.creativeId
GROUP BY 1,2,3
)
SELECT
*,
score * area_fraction AS prominenceScore,
RANK() OVER (PARTITION BY creativeId ORDER BY score * area_fraction DESC) AS prominenceRank,
CASE
WHEN x_fraction < 0.33 AND y_fraction < 0.33 THEN 'TOP LEFT'
WHEN x_fraction > 0.66 AND y_fraction < 0.33 THEN 'TOP RIGHT'
WHEN x_fraction < 0.33 AND y_fraction > 0.66 THEN 'BOTTOM LEFT'
WHEN x_fraction > 0.66 AND y_fraction > 0.66 THEN 'BOTTOM RIGHT'
WHEN y_fraction < 0.33 THEN 'TOP CENTER'
WHEN y_fraction > 0.66 THEN 'BOTTOM CENTER'
WHEN X_fraction > 0.66 THEN 'RIGHT CENTER'
WHEN x_fraction < 0.33 THEN 'LEFT CENTER'
ELSE 'CENTER'
END AS position
FROM Creative_Faces; ''',
'parameters':{
'dataset':recipe_slug
},
'legacy':False
},
'to':{
'dataset':recipe_slug,
'view':'Oculi_logoAnnotations'
}
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Export CM360 Creatives into BigQuery, process them with the Vision API, and generate a breakdown of each creative asset mapped back to its parent. Also generate a series of views to flatten the data.
1. Wait for BigQuery->->->Oculi_... to be created.
2. Then use the data for analysis.
      3. Or give these instructions to the client.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth_cm", help="CM360 read credentials.", default='user')
parser.add_argument("-auth_bigquery", help="BigQuery read/ write credentials.", default='service')
parser.add_argument("-account", help="CM360 Account Identifier", default='')
parser.add_argument("-limit", help="Number of creatives to pull.", default=1000)
parser.add_argument("-recipe_slug", help="name of dataset in BigQuery.", default='')
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_cm360_oculi(config, args.auth_cm, args.auth_bigquery, args.account, args.limit, args.recipe_slug)
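# Example invocation (the script filename and all argument values below are
# placeholders, not real project or account identifiers):
#
#   python recipe_cm360_oculi.py \
#     -project example-cloud-project -user user_credentials.json \
#     -auth_cm user -auth_bigquery service \
#     -account 1234567 -limit 100 -recipe_slug oculi_example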
|