from baselines.common import Dataset, explained_variance, fmt_row, zipsame
from baselines import logger
import baselines.common.tf_util as U
import tensorflow as tf, numpy as np
import time
from baselines.common.mpi_adam import MpiAdam
from baselines.common.mpi_moments import mpi_moments
from mpi4py import MPI
from collections import deque
def traj_segment_generator(pi, env, horizon, stochastic):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
cur_ep_ret = 0 # return in current episode
cur_ep_len = 0 # len of current episode
ep_rets = [] # returns of completed episodes in this segment
ep_lens = [] # lengths of ...
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
rews = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
news = np.zeros(horizon, 'int32')
acs = np.array([ac for _ in range(horizon)])
prevacs = acs.copy()
while True:
prevac = ac
ac, vpred = pi.act(stochastic, ob)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
yield {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news,
"ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new),
"ep_rets" : ep_rets, "ep_lens" : ep_lens}
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_lens = []
i = t % horizon
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
ob, rew, new, _ = env.step(ac)
rews[i] = rew
cur_ep_ret += rew
cur_ep_len += 1
if new:
ep_rets.append(cur_ep_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_len = 0
ob = env.reset()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
def learn(env, policy_fn, *,
timesteps_per_actorbatch, # timesteps per actor per update
clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
gamma, lam, # advantage estimation
max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
callback=None, # you can do anything in the callback, since it takes locals(), globals()
adam_epsilon=1e-5,
schedule='constant' # annealing for stepsize parameters (epsilon and adam)
):
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_fn("pi", ob_space, ac_space) # Construct network for new policy
oldpi = policy_fn("oldpi", ob_space, ac_space) # Network for old policy
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule
clip_param = clip_param * lrmult # Annealed clipping parameter epsilon
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
pol_entpen = (-entcoeff) * meanent
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold
surr1 = ratio * atarg # surrogate from conservative policy iteration
surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg # clipped surrogate
pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)
vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
total_loss = pol_surr + pol_entpen + vf_loss
losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
var_list = pi.get_trainable_variables()
lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])
adam = MpiAdam(var_list, epsilon=adam_epsilon)
assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)
U.initialize()
adam.sync()
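# adam.sync() broadcasts the freshly initialized parameters from the rank-0 MPI
# worker to all other workers, so every actor starts optimizing from identical weights.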
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_actorbatch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted"
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
elif max_seconds and time.time() - tstart >= max_seconds:
break
if schedule == 'constant':
cur_lrmult = 1.0
elif schedule == 'linear':
cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
else:
raise NotImplementedError
logger.log("********** Iteration %i ************"%iters_so_far)
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)
optim_batchsize = optim_batchsize or ob.shape[0]
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
assign_old_eq_new() # set old parameter values to new parameter values
logger.log("Optimizing...")
logger.log(fmt_row(13, loss_names))
# Here we do a bunch of optimization epochs over the data
for _ in range(optim_epochs):
losses = [] # list of tuples, each of which gives the loss for a minibatch
for batch in d.iterate_once(optim_batchsize):
*newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
adam.update(g, optim_stepsize * cur_lrmult)
losses.append(newlosses)
logger.log(fmt_row(13, np.mean(losses, axis=0)))
logger.log("Evaluating losses...")
losses = []
for batch in d.iterate_once(optim_batchsize):
newlosses = compute_losses(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
losses.append(newlosses)
meanlosses,_,_ = mpi_moments(losses, axis=0)
logger.log(fmt_row(13, meanlosses))
for (lossval, name) in zipsame(meanlosses, loss_names):
logger.record_tabular("loss_"+name, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if MPI.COMM_WORLD.Get_rank()==0:
logger.dump_tabular()
return pi
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
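# --- Hedged usage sketch (not in the original module) ---
# Roughly how learn() is wired up by the baselines run scripts; the environment
# name and MlpPolicy hyperparameters below are illustrative assumptions only.
#
#     import gym
#     from baselines.ppo1 import mlp_policy
#
#     def policy_fn(name, ob_space, ac_space):
#         return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
#                                     hid_size=64, num_hid_layers=2)
#
#     learn(gym.make("CartPole-v1"), policy_fn,
#           timesteps_per_actorbatch=256, clip_param=0.2, entcoeff=0.01,
#           optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
#           gamma=0.99, lam=0.95, max_timesteps=100000, schedule='linear')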
#!/usr/bin/env python
from collections import OrderedDict
from datetime import date, datetime, timedelta
import json
import os
from time import sleep
import yaml
from apiclient import discovery
import httplib2
from jinja2 import Environment, PackageLoader
from oauth2client.file import Storage
from clan.utils import GLOBAL_ARGUMENTS, format_comma, format_duration, format_percent, load_field_definitions
class ReportCommand(object):
def __init__(self):
self.args = None
self.config = None
self.service = None
def __call__(self, args):
self.args = args
self.field_definitions = load_field_definitions()
if not self.args.auth:
home_path = os.path.expanduser('~/.clan_auth.dat')
if os.path.exists('analytics.dat'):
self.args.auth = 'analytics.dat'
elif os.path.exists(home_path):
self.args.auth = home_path
else:
raise Exception('Could not locate local authorization token (analytics.dat). Please specify it using --auth or run "clan auth".')
storage = Storage(self.args.auth)
credentials = storage.get()
if not credentials or credentials.invalid:
raise Exception('Invalid authentication. Please run "clan auth" to generate a new token.')
http = credentials.authorize(http=httplib2.Http())
self.service = discovery.build('analytics', 'v3', http=http)
input_format = os.path.splitext(self.args.input_path)[1]
if input_format == '.json':
with open(self.args.input_path) as f:
report = json.load(f, object_pairs_hook=OrderedDict)
elif input_format == '.yml' or input_format == '.yaml':
with open(self.args.input_path) as f:
self.config = yaml.load(f)
if 'property-id' not in self.config:
raise Exception('You must specify a property-id either in your YAML file or using the --property-id argument.')
report = self.report()
else:
raise Exception('Unsupported input format: %s. Must be .yml or .json.' % input_format)
output_format = os.path.splitext(self.args.output_path)[1]
if output_format == '.html':
with open(self.args.output_path, 'w') as f:
self.html(report, f)
elif output_format == '.json':
with open(self.args.output_path, 'w') as f:
json.dump(report, f, indent=4)
else:
raise Exception('Unsupported output format: %s. Must be .html or .json.' % output_format)
def add_argparser(self, root, parents):
"""
Add arguments for this command.
"""
parser = root.add_parser('report', parents=parents)
parser.set_defaults(func=self)
parser.add_argument(
'--auth',
dest='auth', action='store',
help='Path to the authorized credentials file (analytics.dat).'
)
parser.add_argument(
'--title',
dest='title', action='store',
help='User-friendly title for your report.'
)
parser.add_argument(
'--property-id',
dest='property-id', action='store',
help='Google Analytics ID of the property to query.'
)
parser.add_argument(
'--start-date',
dest='start-date', action='store',
help='Start date for the query in YYYY-MM-DD format.'
)
parser.add_argument(
'--end-date',
dest='end-date', action='store',
help='End date for the query in YYYY-MM-DD format. Supersedes --ndays.'
)
parser.add_argument(
'--ndays',
dest='ndays', action='store', type=int,
help='The number of days from the start-date to query. Requires start-date. Superseded by end-date.'
)
parser.add_argument(
'--domain',
dest='domain', action='store',
help='Restrict results to only urls with this domain.'
)
parser.add_argument(
'--prefix',
dest='prefix', action='store',
help='Restrict results to only urls with this prefix.'
)
parser.add_argument(
'input_path',
action='store',
help='Path to either a YAML configuration file or pre-reported JSON data.'
)
parser.add_argument(
'output_path',
action='store',
help='Path to output either an HTML report or a JSON data file.'
)
return parser
def _ndays(self, start_date, ndays):
"""
Compute an end date given a start date and a number of days.
"""
if not getattr(self.args, 'start-date') and not self.config.get('start-date', None):
raise Exception('start-date must be provided when ndays is used.')
d = date(*map(int, start_date.split('-')))
d += timedelta(days=ndays)
return d.strftime('%Y-%m-%d')
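# Hedged worked example (illustrative values only): with start_date '2014-05-01'
# and ndays=7, this returns '2014-05-08', i.e. the start date plus seven days.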
def query(self, start_date=None, end_date=None, ndays=None, metrics=[], dimensions=[], filters=None, segment=None, sort=[], start_index=1, max_results=10):
"""
Execute a query.
"""
if start_date:
start_date = start_date
elif getattr(self.args, 'start-date'):
start_date = getattr(self.args, 'start-date')
elif self.config.get('start-date', None):
start_date = self.config['start-date']
else:
start_date = '2005-01-01'
if end_date:
end_date = end_date
elif getattr(self.args, 'end-date'):
end_date = getattr(self.args, 'end-date')
elif self.config.get('end-date', None):
end_date = self.config['end-date']
elif ndays:
end_date = self._ndays(start_date, ndays)
elif self.args.ndays:
end_date = self._ndays(start_date, self.args.ndays)
elif self.config.get('ndays', None):
end_date = self._ndays(start_date, self.config['ndays'])
else:
end_date = 'today'
if self.args.domain:
domain = self.args.domain
elif self.config.get('domain', None):
domain = self.config['domain']
else:
domain = None
if domain:
domain_filter = 'ga:hostname==%s' % domain
if filters:
filters = '%s;%s' % (domain_filter, filters)
else:
filters = domain_filter
if self.args.prefix:
prefix = self.args.prefix
elif self.config.get('prefix', None):
prefix = self.config['prefix']
else:
prefix = None
if prefix:
prefix_filter = 'ga:pagePath=~^%s' % prefix
if filters:
filters = '%s;%s' % (prefix_filter, filters)
else:
filters = prefix_filter
return self.service.data().ga().get(
ids='ga:' + self.config['property-id'],
start_date=start_date,
end_date=end_date,
metrics=','.join(metrics) or None,
dimensions=','.join(dimensions) or None,
filters=filters,
segment=segment,
sort=','.join(sort) or None,
start_index=str(start_index),
max_results=str(max_results)
).execute()
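# Hedged usage sketch (hypothetical metric and dimension names): a typical call
# that fetches the ten most-viewed paths, relying on the date/domain/prefix
# handling above.
#
#     results = self.query(
#         metrics=['ga:pageviews'],
#         dimensions=['ga:pagePath'],
#         sort=['-ga:pageviews'],
#         max_results=10,
#     )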
def report(self):
"""
Query analytics and stash data in a format suitable for serializing.
"""
output = OrderedDict()
for arg in GLOBAL_ARGUMENTS:
output[arg] = getattr(self.args, arg) or self.config.get(arg, None)
output['title'] = getattr(self.args, 'title') or self.config.get('title', 'Untitled Report')
output['run_date'] = datetime.now().strftime('%Y-%m-%d')
output['queries'] = []
for analytic in self.config.get('queries', []):
print 'Querying "%s"' % analytic['name']
results = self.query(
metrics=analytic['metrics'],
dimensions=analytic.get('dimensions', []),
filters=analytic.get('filter', None),
segment=analytic.get('segment', None),
sort=analytic.get('sort', []),
start_index=analytic.get('start-index', 1),
max_results=analytic.get('max-results', 10)
)
dimensions_len = len(analytic.get('dimensions', []))
data = OrderedDict([
('config', analytic),
('sampled', results.get('containsSampledData', False)),
('sampleSize', int(results.get('sampleSize', 0))),
('sampleSpace', int(results.get('sampleSpace', 0))),
('data_types', OrderedDict()),
('data', OrderedDict())
])
for column in results['columnHeaders'][dimensions_len:]:
data['data_types'][column['name']] = column['dataType']
def cast_data_type(d, dt):
if dt == 'INTEGER':
return int(d)
elif dt in ['TIME', 'FLOAT', 'CURRENCY', 'PERCENT']:
return float(d)
else:
raise Exception('Unknown metric data type: %s' % dt)
for i, metric in enumerate(analytic['metrics']):
data['data'][metric] = OrderedDict()
data_type = data['data_types'][metric]
if dimensions_len:
for row in results.get('rows', []):
column = i + dimensions_len
label = ','.join(row[:dimensions_len])
value = cast_data_type(row[column], data_type)
data['data'][metric][label] = value
data['data'][metric]['total'] = cast_data_type(results['totalsForAllResults'][metric], data_type)
# Prevent rate-limiting
sleep(1)
output['queries'].append(data)
return output
def html(self, report, f):
"""
Write report data to an HTML file.
"""
env = Environment(loader=PackageLoader('clan', 'templates'))
template = env.get_template('report.html')
context = {
'report': report,
'GLOBAL_ARGUMENTS': GLOBAL_ARGUMENTS,
'field_definitions': self.field_definitions,
'format_comma': format_comma,
'format_duration': format_duration,
'format_percent': format_percent
}
f.write(template.render(**context).encode('utf-8'))
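# --- Hedged illustration (not part of this module) ---
# A minimal YAML configuration of the shape report() expects; every value below
# is a made-up example.
#
#     title: Example report
#     property-id: "12345678"
#     start-date: "2014-01-01"
#     ndays: 30
#     queries:
#         - name: Top pages
#           metrics:
#               - ga:pageviews
#           dimensions:
#               - ga:pagePath
#           sort:
#               - -ga:pageviews
#           max-results: 10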
"""
Support pre-0.12 series pickle compatibility.
"""
import copy
import pickle as pkl
import sys
import pandas # noqa
from pandas import Index
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except Exception as e:
# If we have a deprecated function,
# try to replace and try again.
msg = "_reconstruct: First argument must be a sub-type of ndarray"
if msg in str(e):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except TypeError:
pass
# try to re-encode the arguments
if getattr(self, "encoding", None) is not None:
args = tuple(
arg.encode(self.encoding) if isinstance(arg, str) else arg
for arg in args
)
try:
stack[-1] = func(*args)
return
except TypeError:
pass
# unknown exception, re-raise
if getattr(self, "is_verbose", None):
print(sys.exc_info())
print(func, args)
raise
# If classes are moved, provide compat here.
_class_locations_map = {
("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
# 15477
#
# TODO: When FrozenNDArray is removed, add
# the following lines for compat:
#
# ('pandas.core.base', 'FrozenNDArray'):
# ('numpy', 'ndarray'),
# ('pandas.core.indexes.frozen', 'FrozenNDArray'):
# ('numpy', 'ndarray'),
#
# Afterwards, remove the current entry
# for `pandas.core.base.FrozenNDArray`.
("pandas.core.base", "FrozenNDArray"): (
"pandas.core.indexes.frozen",
"FrozenNDArray",
),
("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
# 10890
("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
("pandas.sparse.series", "SparseTimeSeries"): (
"pandas.core.sparse.series",
"SparseSeries",
),
# 12588, extensions moving
("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
# 18543 moving period
("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
# 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
("pandas.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
("pandas._libs.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
# 15998 top-level dirs moving
("pandas.sparse.array", "SparseArray"): (
"pandas.core.arrays.sparse",
"SparseArray",
),
("pandas.sparse.series", "SparseSeries"): (
"pandas.core.sparse.series",
"SparseSeries",
),
("pandas.sparse.frame", "SparseDataFrame"): (
"pandas.core.sparse.frame",
"SparseDataFrame",
),
("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
("pandas.indexes.numeric", "Int64Index"): (
"pandas.core.indexes.numeric",
"Int64Index",
),
("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
("pandas.tseries.index", "_new_DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"_new_DatetimeIndex",
),
("pandas.tseries.index", "DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"DatetimeIndex",
),
("pandas.tseries.period", "PeriodIndex"): (
"pandas.core.indexes.period",
"PeriodIndex",
),
# 19269, arrays moving
("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
# 19939, add timedeltaindex, float64index compat from 15998 move
("pandas.tseries.tdi", "TimedeltaIndex"): (
"pandas.core.indexes.timedeltas",
"TimedeltaIndex",
),
("pandas.indexes.numeric", "Float64Index"): (
"pandas.core.indexes.numeric",
"Float64Index",
),
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat
class Unpickler(pkl._Unpickler): # type: ignore
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super().find_class(module, name)
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
pass
def load(fh, encoding=None, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
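# --- Hedged usage sketch (hypothetical file name) ---
# Reading a legacy pickle while remapping moved classes and re-encoding
# Python-2-era byte strings:
#
#     with open("old_frame.pkl", "rb") as fh:
#         df = load(fh, encoding="latin-1")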
#$Id$#
from books.model.Preference import Preference
from books.model.Organization import Organization
from books.model.Address import Address
from books.model.User import User
from books.model.Item import Item
from books.model.InvoiceSetting import InvoiceSetting
from books.model.NotesAndTerms import NotesAndTerms
from books.model.EstimateSetting import EstimateSetting
from books.model.CreditnoteSetting import CreditnoteSetting
from books.model.Currency import Currency
from books.model.ExchangeRate import ExchangeRate
from books.model.Tax import Tax
from books.model.OpeningBalance import OpeningBalance
from books.model.Account import Account
from books.model.Autoreminder import Autoreminder
from books.model.ManualReminder import ManualReminder
from books.model.TaxGroup import TaxGroup
from books.service.ZohoBooks import ZohoBooks
zoho_books = ZohoBooks("{auth_token}", "{organization_id}")
settings_api = zoho_books.get_settings_api()
organizations_api = zoho_books.get_organizations_api()
users_api = zoho_books.get_users_api()
items_api = zoho_books.get_items_api()
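# Note: "{auth_token}" and "{organization_id}" above are placeholders; replace
# them with a real Zoho Books auth token and organization ID before running.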
currency_id = settings_api.get_currencies().get_currencies()[0].get_currency_id()
#List preferences
print settings_api.list_preferences()
#Update preference
preference = Preference()
preference.set_convert_to_invoice(False)
preference.set_notify_me_on_online_payment(True)
preference.set_send_payment_receipt_acknowledgement("")
preference.set_auto_notify_recurring_invoice("")
preference.set_snail_mail_include_payment_stub("")
preference.set_is_show_powered_by(True)
preference.set_attach_expense_receipt_to_invoice("")
preference.set_allow_auto_categorize("")
print settings_api.update_preferences(preference)
# Create a unit
print settings_api.create_unit("m")
#Delete unit
unit_id = "71127000000179031"
print settings_api.delete_unit(unit_id)
#Organization
organization_id = organizations_api.get_organizations()[0].get_organization_id()
# List organizations.
print organizations_api.get_organizations()
#Get organization
print organizations_api.get(organization_id)
#Create organization
organization = Organization()
organization.set_name("Jony and co")
address = Address()
address.set_street_address1("2/65")
address.set_street_address2("vignesh plaza")
address.set_city("MDU")
address.set_state("TN")
address.set_country("India")
address.set_zip("322")
organization.set_address(address)
organization.set_industry_type("")
organization.set_industry_size("")
organization.set_fiscal_year_start_month("january")
organization.set_currency_code("USD")
organization.set_time_zone("Asia/Calcutta")
organization.set_date_format("dd MMM yyyy")
organization.set_field_separator("")
organization.set_language_code("en")
organization.set_tax_basis("accrual")
organization.set_tax_type("tax")
organization.set_org_address("")
organization.set_remit_to_address("")
print organizations_api.create(organization)
#Update organization
organization = Organization()
organization.set_name("Jony and co")
address = Address()
address.set_street_address1("2/65")
address.set_street_address2("vignesh plaza")
address.set_city("MDU")
address.set_state("TN")
address.set_country("India")
address.set_zip("322")
organization.set_address(address)
organization.set_industry_type("")
organization.set_industry_size("")
organization.set_fiscal_year_start_month("january")
organization.set_currency_code("INR")
organization.set_time_zone("Asia/Calcutta")
organization.set_date_format("dd MMM yyyy")
organization.set_field_separator("")
organization.set_language_code("en")
organization.set_tax_basis("accrual")
organization.set_tax_type("tax")
organization.set_org_address("")
organization.set_remit_to_address("")
print organizations_api.update(organization_id, organization)
# User
user_id = users_api.get_users().get_users()[0].get_user_id()
#List user
print users_api.get_users()
param = {'filter_by': 'Status.All'}
print users_api.get_users(param)
# Get user
print users_api.get(user_id)
# current user
print users_api.current_user()
#Create user
user = User()
user.set_name("karanya")
user.set_email("lek1000@d.com")
user.set_user_role("staff")
print users_api.create(user)
#update user
user = User()
user.set_name("vakaa")
user.set_email("lekha10@d.com")
user.set_user_role("staff")
print users_api.update(user_id, user)
#delete user
print users_api.delete(user_id)
#Invite user
print users_api.invite_user(user_id)
#Mark user as active
print users_api.mark_user_as_active(user_id)
#Mark user as inactive
print users_api.mark_user_as_inactive(user_id)
# Item
item_id = items_api.list_items().get_items()[0].get_item_id()
# List items.
print items_api.list_items()
# Get an item
print items_api.get(item_id)
# Create item
item = Item()
item.set_name("Item 2")
item.set_description("Item")
item.set_rate(10.0)
item.set_account_id("")
item.set_tax_id("")
print items_api.create(item)
#Update item
item = Item()
item.set_name("item 1")
item.set_description("Item")
item.set_rate(100.0)
item.set_account_id("")
item.set_tax_id("")
print items_api.update(item_id, item)
#Delete item
print items_api.delete_item(item_id)
#Mark item as active
print items_api.mark_item_as_active(item_id)
#Mark item as inactive
print items_api.mark_item_as_inactive(item_id)
#Invoice Settings
#Get invoice settings
print settings_api.get_invoice_settings()
#update invoice settings
invoice_settings = InvoiceSetting()
invoice_settings.set_auto_generate(True)
invoice_settings.set_prefix_string("INV")
invoice_settings.set_start_at(1)
invoice_settings.set_next_number("43")
invoice_settings.set_quantity_precision(2)
#invoice_settings.set_discount_enabled(False)
invoice_settings.set_reference_text("")
#invoice_settings.set_default_template_id("")
invoice_settings.set_notes("Hai")
invoice_settings.set_terms("")
invoice_settings.set_is_shipping_charge_required(True)
invoice_settings.set_is_adjustment_required(True)
invoice_settings.set_invoice_item_type("")
invoice_settings.set_discount_type("item_level")
invoice_settings.set_warn_convert_to_open(True)
invoice_settings.set_warn_create_creditnotes(True)
invoice_settings.set_is_open_invoice_editable(True)
invoice_settings.set_is_sales_person_required(True)
print settings_api.update_invoice_settings(invoice_settings)
#Get invoice notes and terms
print settings_api.get_invoice_notes_and_terms()
#Update invoice notes and terms
notes_and_terms = NotesAndTerms()
notes_and_terms.set_notes("Thanks")
notes_and_terms.set_terms("")
print settings_api.update_invoice_notes_and_terms(notes_and_terms)
"""
#Estimates
#Get estimates settings.
"""
print settings_api.get_estimate_settings()
#update estimate settings
estimate_settings = EstimateSetting()
estimate_settings.set_auto_generate(True)
estimate_settings.set_prefix_string("EST-")
estimate_settings.set_start_at(2)
estimate_settings.set_next_number("041")
estimate_settings.set_quantity_precision(2)
estimate_settings.set_discount_type("item_level")
estimate_settings.set_reference_text("")
estimate_settings.set_notes("Hai")
estimate_settings.set_terms("")
estimate_settings.set_terms_to_invoice(True)
estimate_settings.set_notes_to_invoice(True)
estimate_settings.set_warn_estimate_to_invoice(True)
estimate_settings.set_is_sales_person_required(True)
print settings_api.update_estimate_settings(estimate_settings)
#Get estimates notes and terms.
print settings_api.get_estimates_notes_and_terms()
#update estimate notes and terms
notes_and_terms = NotesAndTerms()
notes_and_terms.set_notes("Thanks")
notes_and_terms.set_terms("")
print settings_api.update_estimates_notes_and_terms(notes_and_terms)
"""
#Creditnotes
#List credit note
"""
print settings_api.list_creditnote_settings()
#Update creditnotes settings
creditnote_settings = CreditnoteSetting()
creditnote_settings.set_auto_generate(True)
creditnote_settings.set_prefix_string("CN-")
creditnote_settings.set_reference_text("")
creditnote_settings.set_next_number("0027")
creditnote_settings.set_notes("Thank you")
creditnote_settings.set_terms("Conditions Apply")
print settings_api.update_creditnote_settings(creditnote_settings)
#Get creditnote notes and terms
print settings_api.get_creditnote_notes_and_terms()
#update creditnote notes and terms
notes_and_terms = NotesAndTerms()
notes_and_terms.set_notes("Thanks")
notes_and_terms.set_terms("")
print settings_api.update_creditnote_notes_and_terms(notes_and_terms)
"""
#Currency and exchange rate
#List currencies
"""
print settings_api.get_currencies()
#Get a currency
print settings_api.get_currency(currency_id)
#Create a currency
currency = Currency()
currency.set_currency_code("NPR")
currency.set_currency_symbol("")
currency.set_price_precision(1)
currency.set_currency_format("1,234,567.89")
print settings_api.create_currency(currency)
#Update currency
currency = Currency()
currency.set_currency_code("NPR")
currency.set_currency_symbol("")
currency.set_price_precision(1)
currency.set_currency_format("1,234,567.89")
print settings_api.update_currency(currency_id , currency)
#Delete currency
print settings_api.delete_currency(currency_id)
"""
#List exchange rates
exchange_rate_id = settings_api.list_exchange_rates(currency_id).get_exchange_rates()[0].get_exchange_rate_id()
"""
print settings_api.list_exchange_rates(currency_id)
#Get exchange rate
print settings_api.get_exchange_rate(currency_id, exchange_rate_id)
#Create an exchange rate
exchange_rate = ExchangeRate()
exchange_rate.set_currency_id(currency_id)
exchange_rate.set_currency_code("NPR")
exchange_rate.set_effective_date("2014-05-08")
exchange_rate.set_rate(25.0)
print settings_api.create_exchange_rate(exchange_rate)
#Update an exchange rate
exchange_rate = ExchangeRate()
exchange_rate.set_exchange_rate_id(exchange_rate_id)
exchange_rate.set_currency_id(currency_id)
exchange_rate.set_currency_code("EUR")
exchange_rate.set_effective_date("2014-05-08")
exchange_rate.set_rate(25.0)
print settings_api.update_exchange_rate(exchange_rate)
#Delete an exchange rate
print settings_api.delete_exchange_rate(currency_id, exchange_rate_id)
"""
#Tax and Tax group
tax_id = settings_api.get_taxes().get_taxes()[0].get_tax_id()
tax_group_id = "71127000000184003"
#List taxes
"""
print settings_api.get_taxes()
#Get a tax
print settings_api.get_tax(tax_id)
#Create tax
tax = Tax()
tax.set_tax_name("tax-1")
tax.set_tax_percentage(10.5)
tax.set_tax_type("tax")
print settings_api.create_tax(tax)
#update tax
tax = Tax()
tax.set_tax_name("Shipping_tax1")
tax.set_tax_percentage(10.5)
tax.set_tax_type("tax")
print settings_api.update_tax(tax_id, tax)
#Delete tax
print settings_api.delete_tax(tax_id)
#Get tax group
print settings_api.get_tax_group(tax_group_id)
#Create tax group
tax_group = TaxGroup()
tax_group.set_tax_group_name("group_taxes")
taxes = "71127000000183009,71127000000191007"
tax_group.set_taxes(taxes)
print settings_api.create_tax_group(tax_group)
#update tax group
tax_group = TaxGroup()
tax_group.set_tax_group_name("group_taxes")
taxes = "71127000000185001,71127000000183007"
tax_group.set_taxes(taxes)
tax_group.set_tax_group_id(tax_group_id)
print settings_api.update_tax_group(tax_group)
#Delete tax group
tax_group_id = "711270"
print settings_api.delete_tax_group(tax_group_id)
"""
#Opening balance
#Get opening balance
"""
print settings_api.get_opening_balance()
#Create opening balance
account_id="71127000000170302"
opening_balance = OpeningBalance()
opening_balance.set_date('2014-05-09')
accounts = Account()
accounts.set_account_id(account_id)
accounts.set_debit_or_credit("debit")
accounts.set_exchange_rate(1.0)
accounts.set_currency_id(currency_id)
accounts.set_amount(200.0)
opening_balance.set_accounts(accounts)
print settings_api.create_opening_balance(opening_balance)
#Update opening balance
account_id="71127000000170302"
opening_balance = OpeningBalance()
opening_balance.set_opening_balance_id("71127000000186001")
opening_balance.set_date('2014-05-09')
accounts = Account()
accounts.set_account_id(account_id)
accounts.set_debit_or_credit("debit")
accounts.set_exchange_rate(1.0)
accounts.set_currency_id("71127000000000099")
accounts.set_amount(2000.0)
opening_balance.set_accounts(accounts)
print settings_api.update_opening_balance(opening_balance)
#Delete opening balance
print settings_api.delete_opening_balance()
"""
#Auto payment reminder
auto_payment_reminder_id = settings_api.list_auto_payment_reminder().get_auto_reminders()[0].get_autoreminder_id()
#List auto payment reminder
"""
print settings_api.list_auto_payment_reminder()
"""
#Get an auto payment reminder
print settings_api.get_auto_payment_reminder(auto_payment_reminder_id)
"""
#Update an auto reminder
autoreminder = Autoreminder()
autoreminder.set_is_enabled(True)
autoreminder.set_notification_type('days_after_due_date')
autoreminder.set_address_type('remind_me')
autoreminder.set_number_of_days(3)
autoreminder.set_subject('hai')
autoreminder.set_body('Reminder')
print settings_api.update_auto_reminder(reminder_id, autoreminder)
"""
#List manual reminders
reminder_id = settings_api.list_manual_reminders().get_manual_reminders()[0].get_manualreminder_id()
"""
print settings_api.list_manual_reminders()
"""
#Get a manual reminder
print settings_api.get_manual_reminder(reminder_id)
"""
#Update a manual reminder
manual_reminder = ManualReminder()
manual_reminder.set_subject("Hello")
manual_reminder.set_body("Manual reminder")
manual_reminder.set_cc_me(False)
print settings_api.update_manual_reminder(reminder_id, manual_reminder)
"""
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
import six
from django.core.exceptions import ValidationError # noqa
from django.core import urlresolvers
from django.forms import fields
from django.forms.utils import flatatt # noqa
from django.forms import widgets
from django.utils.encoding import force_text
from django.utils.functional import Promise # noqa
from django.utils import html
from django.utils.translation import ugettext_lazy as _
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
IPv4 = 1
IPv6 = 2
class IPField(fields.Field):
"""Form field for entering IP/range values, with validation.
Supports IPv4/IPv6 in the format:
.. xxx.xxx.xxx.xxx
.. xxx.xxx.xxx.xxx/zz
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
and all compressed forms. Also the short forms
are supported:
xxx/yy
xxx.xxx/yy
.. attribute:: version
Specifies which IP version to validate,
valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
both - 3 (fields.IPv4 | fields.IPv6).
Defaults to IPv4 (1)
.. attribute:: mask
Boolean flag to validate subnet masks along with IP address.
E.g: 10.0.0.1/32
.. attribute:: mask_range_from
Subnet range limitation, e.g. 16.
That means the input mask will be checked to be in the range
16:max_value. Useful to limit the subnet ranges
to A/B/C-class networks.
"""
invalid_format_message = _("Incorrect format for IP address")
invalid_version_message = _("Invalid version for IP address")
invalid_mask_message = _("Invalid subnet mask")
max_v4_mask = 32
max_v6_mask = 128
def __init__(self, *args, **kwargs):
self.mask = kwargs.pop("mask", None)
self.min_mask = kwargs.pop("mask_range_from", 0)
self.version = kwargs.pop('version', IPv4)
super(IPField, self).__init__(*args, **kwargs)
def validate(self, value):
super(IPField, self).validate(value)
if not value and not self.required:
return
try:
if self.mask:
self.ip = netaddr.IPNetwork(value)
else:
self.ip = netaddr.IPAddress(value)
except Exception:
raise ValidationError(self.invalid_format_message)
if not any([self.version & IPv4 > 0 and self.ip.version == 4,
self.version & IPv6 > 0 and self.ip.version == 6]):
raise ValidationError(self.invalid_version_message)
if self.mask:
if self.ip.version == 4 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
raise ValidationError(self.invalid_mask_message)
if self.ip.version == 6 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
raise ValidationError(self.invalid_mask_message)
def clean(self, value):
super(IPField, self).clean(value)
return str(getattr(self, "ip", ""))
class MultiIPField(IPField):
"""Extends IPField to allow comma-separated lists of addresses."""
def validate(self, value):
self.addresses = []
if value:
addresses = value.split(',')
for ip in addresses:
super(MultiIPField, self).validate(ip)
self.addresses.append(ip)
else:
super(MultiIPField, self).validate(value)
def clean(self, value):
super(MultiIPField, self).clean(value)
return str(','.join(getattr(self, "addresses", [])))
class SelectWidget(widgets.Select):
"""Customizable select widget, that allows to render
data-xxx attributes from choices. This widget also
allows user to specify additional html attributes
for choices.
.. attribute:: data_attrs
Specifies object properties to serialize as
data-xxx attribute. If passed ('id', ),
this will be rendered as:
<option data-id="123">option_value</option>
where 123 is the value of choice_value.id
.. attribute:: transform
A callable used to render the display value
from the option object.
.. attribute:: transform_html_attrs
A callable used to render additional HTML attributes
for the option object. It returns a dictionary
containing the html attributes and their values.
For example, to define a title attribute for the
choices::
helpText = { 'Apple': 'This is a fruit',
'Carrot': 'This is a vegetable' }
def get_title(data):
text = helpText.get(data, None)
if text:
return {'title': text}
else:
return {}
....
....
widget=forms.SelectWidget( attrs={'class': 'switchable',
'data-slug': 'source'},
transform_html_attrs=get_title )
self.fields[<field name>].choices =
([
('apple','Apple'),
('carrot','Carrot')
])
"""
def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None,
transform_html_attrs=None):
self.data_attrs = data_attrs
self.transform = transform
self.transform_html_attrs = transform_html_attrs
super(SelectWidget, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
other_html = (u' selected="selected"'
if option_value in selected_choices else '')
if callable(self.transform_html_attrs):
html_attrs = self.transform_html_attrs(option_label)
other_html += flatatt(html_attrs)
if not isinstance(option_label, (six.string_types, Promise)):
for data_attr in self.data_attrs:
data_value = html.conditional_escape(
force_text(getattr(option_label,
data_attr, "")))
other_html += ' data-%s="%s"' % (data_attr, data_value)
if callable(self.transform):
option_label = self.transform(option_label)
return u'<option value="%s"%s>%s</option>' % (
html.escape(option_value), other_html,
html.conditional_escape(force_text(option_label)))
class DynamicSelectWidget(widgets.Select):
"""A subclass of the ``Select`` widget which renders extra attributes for
use in callbacks to handle dynamic changes to the available choices.
"""
_data_add_url_attr = "data-add-item-url"
def render(self, *args, **kwargs):
add_item_url = self.get_add_item_url()
if add_item_url is not None:
self.attrs[self._data_add_url_attr] = add_item_url
return super(DynamicSelectWidget, self).render(*args, **kwargs)
def get_add_item_url(self):
if callable(self.add_item_link):
return self.add_item_link()
try:
if self.add_item_link_args:
return urlresolvers.reverse(self.add_item_link,
args=self.add_item_link_args)
else:
return urlresolvers.reverse(self.add_item_link)
except urlresolvers.NoReverseMatch:
return self.add_item_link
class DynamicChoiceField(fields.ChoiceField):
"""A subclass of ``ChoiceField`` with additional properties that make
dynamically updating its elements easier.
Notably, the field declaration takes an extra argument, ``add_item_link``
which may be a string or callable defining the URL that should be used
for the "add" link associated with the field.
"""
widget = DynamicSelectWidget
def __init__(self,
add_item_link=None,
add_item_link_args=None,
*args,
**kwargs):
super(DynamicChoiceField, self).__init__(*args, **kwargs)
self.widget.add_item_link = add_item_link
self.widget.add_item_link_args = add_item_link_args
class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField):
"""Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``."""
pass
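# --- Hedged usage sketch (hypothetical form) ---
# Declaring an IPField that accepts either protocol version and validates an
# optional CIDR mask, per the attributes documented on the class:
#
#     class SubnetForm(forms.Form):
#         cidr = IPField(label="Network Address",
#                        required=True,
#                        mask=True,
#                        mask_range_from=8,
#                        version=IPv4 | IPv6)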
import json
import os
import shutil
import tempfile
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.test.utils import override_settings
import mock
import pytest
from PIL import Image
from waffle.models import Switch
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.tests import addon_factory, TestCase, version_factory
from olympia.amo.helpers import user_media_path
from olympia.amo.tests.test_helpers import get_image_path, get_addon_file
from olympia.amo.utils import utc_millesecs_from_epoch
from olympia.applications.models import AppVersion
from olympia.constants.base import VALIDATOR_SKELETON_RESULTS
from olympia.devhub import tasks
from olympia.files.models import FileUpload
from olympia.versions.models import Version
pytestmark = pytest.mark.django_db
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 350
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 339
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 339, 350]
final_size = [(32, 12), (339, 128), (339, 128)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
src = tempfile.NamedTemporaryFile(mode='w+b', suffix=".png",
delete=False)
# resize_icon removes the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
assert src_image.size == original_size
if isinstance(final_size, list):
uploadto = user_media_path('addon_icons')
try:
os.makedirs(uploadto)
except OSError:
pass
for rsize, fsize in zip(resize_size, final_size):
dest_name = os.path.join(uploadto, '1234')
tasks.resize_icon(src.name, dest_name, resize_size, locally=True)
dest_image = Image.open(open('%s-%s.png' % (dest_name, rsize)))
assert dest_image.size == fsize
if os.path.exists(dest_image.filename):
os.remove(dest_image.filename)
assert not os.path.exists(dest_image.filename)
shutil.rmtree(uploadto)
else:
dest = tempfile.mktemp(suffix='.png')
tasks.resize_icon(src.name, dest, resize_size, locally=True)
dest_image = Image.open(dest)
assert dest_image.size == final_size
assert not os.path.exists(src.name)
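# In the list branch above, resize_icon is passed the full list of target sizes;
# the assertions then open '%s-%s.png' % (dest_name, rsize), i.e. one output
# file per requested size.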
class ValidatorTestCase(TestCase):
def setUp(self):
# 3.7a1pre is somehow required to exist by
# amo-validator.
# The other ones are app-versions we're using in our
# tests
self.create_appversion('firefox', '3.7a1pre')
self.create_appversion('firefox', '38.0a1')
# Required for WebExtensions
self.create_appversion('firefox', '*')
self.create_appversion('firefox', '42.0')
self.create_appversion('firefox', '43.0')
# Required for Thunderbird tests
self.create_appversion('thunderbird', '42.0')
self.create_appversion('thunderbird', '45.0')
def create_appversion(self, name, version):
return AppVersion.objects.create(
application=amo.APPS[name].id, version=version)
class TestValidator(ValidatorTestCase):
mock_sign_addon_warning = json.dumps({
"warnings": 1,
"errors": 0,
"messages": [
{"context": None,
"editors_only": False,
"description": "Add-ons which are already signed will be "
"re-signed when published on AMO. This will "
"replace any existing signatures on the add-on.",
"column": None,
"type": "warning",
"id": ["testcases_content", "signed_xpi"],
"file": "",
"tier": 2,
"for_appversions": None,
"message": "Package already signed",
"uid": "87326f8f699f447e90b3d5a66a78513e",
"line": None,
"compatibility_type": None},
]
})
def setUp(self):
super(TestValidator, self).setUp()
self.upload = FileUpload.objects.create(
path=get_addon_file('desktop.xpi'))
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('olympia.devhub.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validate(self.upload, listed=True)
assert self.get_upload().valid
@mock.patch('olympia.devhub.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validate(self.upload, listed=True)
assert not self.get_upload().valid
@mock.patch('validator.submain.test_package')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
self.upload.update(path=get_addon_file('desktop.xpi'))
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == ['validator',
'unexpected_exception']
assert not self.upload.valid
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_validation_error_webextension(self, _mock):
_mock.side_effect = Exception
self.upload.update(path=get_addon_file('valid_webextension.xpi'))
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == [
'validator', 'unexpected_exception']
assert 'WebExtension' in validation['messages'][0]['message']
assert not self.upload.valid
@override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=False)
@mock.patch('olympia.devhub.tasks.annotate_validation_results')
@mock.patch('olympia.devhub.tasks.run_validator')
def test_annotation_error(self, run_validator, annotate):
"""Test that an error that occurs during annotation is saved as an
error result."""
annotate.side_effect = Exception
run_validator.return_value = '{"errors": 0}'
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == ['validator',
'unexpected_exception']
assert not self.upload.valid
@override_settings(SIGNING_SERVER='http://full')
@mock.patch('olympia.devhub.tasks.run_validator')
def test_validation_signing_warning(self, _mock):
"""If we sign addons, warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
tasks.validate(self.upload, listed=True)
validation = json.loads(self.get_upload().validation)
assert validation['warnings'] == 1
assert len(validation['messages']) == 1
@override_settings(SIGNING_SERVER='')
@mock.patch('olympia.devhub.tasks.run_validator')
def test_validation_no_signing_warning(self, _mock):
"""If we're not signing addon don't warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
tasks.validate(self.upload, listed=True)
validation = json.loads(self.get_upload().validation)
assert validation['warnings'] == 0
assert len(validation['messages']) == 0
@mock.patch('olympia.devhub.tasks.run_validator')
def test_annotate_passed_auto_validation(self, _mock):
"""Set passed_auto_validation on reception of the results."""
result = {'signing_summary': {'trivial': 1, 'low': 0, 'medium': 0,
'high': 0},
'errors': 0}
_mock.return_value = json.dumps(result)
tasks.validate(self.upload, listed=True)
validation = json.loads(self.get_upload().validation)
assert validation['passed_auto_validation']
@mock.patch('olympia.devhub.tasks.run_validator')
def test_annotate_failed_auto_validation(self, _mock):
"""Set passed_auto_validation on reception of the results."""
result = {'signing_summary': {'trivial': 0, 'low': 1, 'medium': 0,
'high': 0},
'errors': 0}
_mock.return_value = json.dumps(result)
tasks.validate(self.upload, listed=True)
validation = json.loads(self.get_upload().validation)
assert not validation['passed_auto_validation']
@mock.patch('olympia.devhub.tasks.run_validator')
def test_annotate_passed_auto_validation_bogus_result(self, _mock):
"""Don't set passed_auto_validation, don't fail if results is bogus."""
_mock.return_value = '{"errors": 0}'
tasks.validate(self.upload, listed=True)
assert (json.loads(self.get_upload().validation) ==
{"passed_auto_validation": True, "errors": 0,
"signing_summary": {"high": 0, "medium": 0,
"low": 0, "trivial": 0}})
@mock.patch('validator.validate.validate')
@mock.patch('olympia.devhub.tasks.track_validation_stats')
def test_track_validation_stats(self, mock_track, mock_validate):
mock_validate.return_value = '{"errors": 0}'
tasks.validate(self.upload, listed=True)
mock_track.assert_called_with(mock_validate.return_value)
class TestMeasureValidationTime(TestValidator):
def setUp(self):
super(TestMeasureValidationTime, self).setUp()
# Set created time back (just for sanity) otherwise the delta
# would be in the microsecond range.
self.upload.update(created=datetime.now() - timedelta(days=1))
@contextmanager
def statsd_timing_mock(self):
statsd_calls = {}
def capture_timing_call(metric, value):
statsd_calls[metric] = value
with mock.patch('olympia.devhub.tasks.statsd.timing') as mock_timing:
mock_timing.side_effect = capture_timing_call
yield statsd_calls
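# The context manager above replaces statsd.timing with a recorder so each test
# can assert on the exact metric names and millisecond values that were emitted.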
def approximate_upload_time(self):
upload_start = utc_millesecs_from_epoch(self.upload.created)
now = utc_millesecs_from_epoch()
return now - upload_start
def assert_milleseconds_are_close(self, actual_ms, calculated_ms,
fuzz=None):
if fuzz is None:
fuzz = Decimal(300)
assert (actual_ms >= (calculated_ms - fuzz) and
actual_ms <= (calculated_ms + fuzz))
def handle_upload_validation_result(self,
channel=amo.RELEASE_CHANNEL_LISTED):
validation = amo.VALIDATOR_SKELETON_RESULTS.copy()
tasks.handle_upload_validation_result(validation, self.upload.pk,
channel)
def test_track_upload_validation_results_time(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls['devhub.validation_results_processed']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_track_upload_validation_results_with_file_size(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# This test makes sure storage.size() works on a real file.
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
# This value should not be scaled because this package is under 1MB.
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_scale_large_xpi_times_per_megabyte(self):
megabyte = Decimal(1024 * 1024)
file_size_in_mb = Decimal(5)
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = file_size_in_mb * megabyte
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# Validation times for files larger than 1MB should be scaled.
rough_delta = self.approximate_upload_time()
rough_scaled_delta = Decimal(rough_delta) / file_size_in_mb
actual_scaled_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
self.assert_milleseconds_are_close(actual_scaled_delta,
rough_scaled_delta)
def test_measure_small_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 500 # less than 1MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_under_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_measure_large_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = (1024 * 1024) * 5 # 5MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_over_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_do_not_calculate_scaled_time_for_empty_files(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 0
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
def test_ignore_missing_upload_paths_for_now(self):
with mock.patch('olympia.devhub.tasks.storage.exists') as mock_exists:
mock_exists.return_value = False
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed' in statsd_calls
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
assert ('devhub.validation_results_processed_under_1mb' not in
statsd_calls)
class TestTrackValidatorStats(TestCase):
def setUp(self):
super(TestTrackValidatorStats, self).setUp()
patch = mock.patch('olympia.devhub.tasks.statsd.incr')
self.mock_incr = patch.start()
self.addCleanup(patch.stop)
def result(self, **overrides):
result = VALIDATOR_SKELETON_RESULTS.copy()
result.update(overrides)
return json.dumps(result)
def test_count_all_successes(self):
tasks.track_validation_stats(self.result(errors=0))
self.mock_incr.assert_any_call(
'devhub.validator.results.all.success'
)
def test_count_all_errors(self):
tasks.track_validation_stats(self.result(errors=1))
self.mock_incr.assert_any_call(
'devhub.validator.results.all.failure'
)
def test_count_listed_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': True}))
self.mock_incr.assert_any_call(
'devhub.validator.results.listed.success'
)
def test_count_unlisted_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': False}))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.success'
)
def test_count_unsignable_addon_for_low_error(self):
tasks.track_validation_stats(self.result(
errors=1,
signing_summary={
'low': 1,
'medium': 0,
'high': 0,
},
metadata={
'listed': False,
},
))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.is_not_signable'
)
def test_count_unsignable_addon_for_medium_error(self):
tasks.track_validation_stats(self.result(
errors=1,
signing_summary={
'low': 0,
'medium': 1,
'high': 0,
},
metadata={
'listed': False,
},
))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.is_not_signable'
)
def test_count_unsignable_addon_for_high_error(self):
tasks.track_validation_stats(self.result(
errors=1,
signing_summary={
'low': 0,
'medium': 0,
'high': 1,
},
metadata={
'listed': False,
},
))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.is_not_signable'
)
def test_count_unlisted_signable_addons(self):
tasks.track_validation_stats(self.result(
signing_summary={
'low': 0,
'medium': 0,
'high': 0,
},
metadata={
'listed': False,
},
))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.is_signable'
)
def test_count_listed_signable_addons(self):
tasks.track_validation_stats(self.result(
signing_summary={
'low': 0,
'medium': 0,
'high': 0,
},
metadata={
'listed': True,
},
))
self.mock_incr.assert_any_call(
'devhub.validator.results.listed.is_signable'
)
class TestRunAddonsLinter(ValidatorTestCase):
def setUp(self):
super(TestRunAddonsLinter, self).setUp()
valid_path = get_addon_file('valid_webextension.xpi')
invalid_path = get_addon_file('invalid_webextension_invalid_id.xpi')
self.valid_upload = FileUpload.objects.create(path=valid_path)
self.invalid_upload = FileUpload.objects.create(path=invalid_path)
def get_upload(self, upload):
return FileUpload.objects.get(pk=upload.pk)
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_calls_run_linter(self, run_linter):
run_linter.return_value = '{"errors": 0}'
assert not self.valid_upload.valid
tasks.validate(self.valid_upload, listed=True)
upload = self.get_upload(self.valid_upload)
assert upload.valid, upload.validation
def test_run_linter_fail(self):
tasks.validate(self.invalid_upload, listed=True)
assert not self.get_upload(self.invalid_upload).valid
def test_run_linter_path_doesnt_exist(self):
with pytest.raises(ValueError) as exc:
tasks.run_addons_linter('doesntexist')
assert exc.value.message == (
'Path "doesntexist" is not a file or directory or '
'does not exist.')
def test_run_linter_use_temporary_file(self):
TemporaryFile = tempfile.TemporaryFile
with mock.patch('olympia.devhub.tasks.tempfile.TemporaryFile') as tmpf:
tmpf.side_effect = lambda *a, **kw: TemporaryFile(*a, **kw)
# This is a relatively small add-on (1.2M) but we are using
# a temporary file for all our linter output.
result = json.loads(tasks.run_addons_linter(
get_addon_file('typo-gecko.xpi')
))
assert tmpf.call_count == 2
assert result['success']
assert result['warnings'] == 11
assert not result['errors']
class TestValidateFilePath(ValidatorTestCase):
def test_amo_validator_success(self):
result = tasks.validate_file_path(
get_addon_file('valid_firefox_addon.xpi'),
hash_=None, listed=True)
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_amo_validator_fail_warning(self):
result = tasks.validate_file_path(
get_addon_file('invalid_firefox_addon_warning.xpi'),
hash_=None, listed=True)
assert not result['success']
assert not result['errors']
assert result['warnings']
def test_amo_validator_fail_error(self):
result = tasks.validate_file_path(
get_addon_file('invalid_firefox_addon_error.xpi'),
hash_=None, listed=True)
assert not result['success']
assert result['errors']
assert not result['warnings']
def test_amo_validator_addons_linter_success(self):
result = tasks.validate_file_path(
get_addon_file('valid_webextension.xpi'),
hash_=None, listed=True, is_webextension=True)
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_amo_validator_addons_linter_error(self):
        # This test assumes that `amo-validator` doesn't correctly
        # validate an invalid id in manifest.json.
result = tasks.validate_file_path(
get_addon_file('invalid_webextension_invalid_id.xpi'),
hash_=None, listed=True, is_webextension=True)
assert not result['success']
assert result['errors']
assert not result['warnings']
class TestWebextensionIncompatibilities(ValidatorTestCase):
fixtures = ['base/addon_3615']
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
# valid_webextension.xpi has version 1.0 so mock the original version
self.addon.update(guid='beastify@mozilla.org')
self.addon.current_version.update(version='0.9')
self.update_files(
version=self.addon.current_version,
filename='delicious_bookmarks-2.1.106-fx.xpi')
def update_files(self, **kw):
for version in self.addon.versions.all():
for file in version.files.all():
file.update(**kw)
def test_webextension_upgrade_is_annotated(self):
assert all(f.is_webextension is False
for f in self.addon.current_version.all_files)
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['is_upgrade_to_webextension']
expected = ['validation', 'messages', 'webext_upgrade']
assert upload.processed_validation['messages'][0]['id'] == expected
assert upload.processed_validation['warnings'] == 1
assert upload.valid
def test_new_webextension_is_not_annotated(self):
"""https://github.com/mozilla/addons-server/issues/3679"""
previous_file = self.addon.current_version.all_files[-1]
previous_file.is_webextension = True
previous_file.status = amo.STATUS_AWAITING_REVIEW
previous_file.save()
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
assert validation['warnings'] == 0
assert upload.valid
def test_webextension_webext_to_webext_not_annotated(self):
previous_file = self.addon.current_version.all_files[-1]
previous_file.is_webextension = True
previous_file.save()
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
assert validation['warnings'] == 0
assert upload.valid
def test_webextension_no_webext_no_warning(self):
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
def test_webextension_cannot_be_downgraded(self):
self.update_files(is_webextension=True)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_webextension_downgrade_only_warning_unlisted(self):
self.update_files(is_webextension=True)
self.make_addon_unlisted(self.addon)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=False)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'warning'
def test_webextension_cannot_be_downgraded_ignore_deleted_version(self):
"""Make sure there's no workaround the downgrade error."""
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_no_upgrade_annotation_no_version(self):
"""Make sure there's no workaround the downgrade error."""
self.addon.update(guid='guid@xpi')
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-no-version.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = FileUpload.objects.create(path=file_, addon=self.addon)
upload.addon.version = None
upload.addon.save()
upload.save(update_fields=('version',))
upload.refresh_from_db()
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = [u'testcases_installrdf', u'_test_rdf', u'missing_addon']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
class TestNewLegacyAddonRestrictions(ValidatorTestCase):
def setUp(self):
super(TestNewLegacyAddonRestrictions, self).setUp()
self.create_switch('restrict-new-legacy-submissions')
def test_submit_legacy_addon_restricted(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_extensions_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_extension_waffle_is_off(self):
switch = Switch.objects.get(name='restrict-new-legacy-submissions')
switch.active = False
switch.save()
file_ = get_addon_file('valid_firefox_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_extension_not_a_new_addon(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
    def test_submit_legacy_extension_targets_older_firefox_strictly(self):
file_ = get_addon_file('valid_firefox_addon_strict_compatibility.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_non_extension(self):
file_ = get_addon_file('searchgeek-20090701.xml')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_thunderbird_extension(self):
file_ = get_addon_file('valid_firefox_and_thunderbird_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
@mock.patch('olympia.devhub.tasks.send_html_mail_jinja')
def test_send_welcome_email(send_html_mail_jinja_mock):
tasks.send_welcome_email(3615, ['del@icio.us'], {'omg': 'yes'})
send_html_mail_jinja_mock.assert_called_with(
'Mozilla Add-ons: Thanks for submitting a Firefox Add-on!',
'devhub/email/submission.html',
'devhub/email/submission.txt',
{'omg': 'yes'},
recipient_list=['del@icio.us'],
from_email=settings.NOBODY_EMAIL,
use_deny_list=False,
perm_setting='individual_contact',
headers={'Reply-To': settings.EDITORS_EMAIL})
class TestSubmitFile(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestSubmitFile, self).setUp()
self.addon = Addon.objects.get(pk=3615)
patcher = mock.patch('olympia.devhub.tasks.create_version_for_upload')
self.create_version_for_upload = patcher.start()
self.addCleanup(patcher.stop)
def create_upload(self, version='1.0'):
return FileUpload.objects.create(
addon=self.addon, version=version, validation='{"errors":0}',
automated_signing=False)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations', True)
def test_file_passed_all_validations(self):
upload = self.create_upload()
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
self.create_version_for_upload.assert_called_with(
self.addon, upload, amo.RELEASE_CHANNEL_LISTED)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations',
False)
def test_file_not_passed_all_validations(self):
upload = self.create_upload()
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
assert not self.create_version_for_upload.called
class TestCreateVersionForUpload(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestCreateVersionForUpload, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.create_version_for_upload = (
tasks.create_version_for_upload.non_atomic)
patcher = mock.patch('olympia.devhub.tasks.Version.from_upload')
self.version__from_upload = patcher.start()
self.addCleanup(patcher.stop)
def create_upload(self, version='1.0'):
return FileUpload.objects.create(
addon=self.addon, version=version, validation='{"errors":0}',
automated_signing=False)
def test_file_passed_all_validations_not_most_recent(self):
upload = self.create_upload()
newer_upload = self.create_upload()
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# Check that the older file won't turn into a Version.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.version__from_upload.called
# But the newer one will.
self.create_version_for_upload(self.addon, newer_upload,
amo.RELEASE_CHANNEL_LISTED)
self.version__from_upload.assert_called_with(
newer_upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED, is_beta=False)
def test_file_passed_all_validations_version_exists(self):
upload = self.create_upload()
Version.objects.create(addon=upload.addon, version=upload.version)
# Check that the older file won't turn into a Version.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.version__from_upload.called
def test_file_passed_all_validations_most_recent_failed(self):
upload = self.create_upload()
newer_upload = self.create_upload()
newer_upload.update(created=datetime.today() + timedelta(hours=1),
valid=False,
validation=json.dumps({"errors": 5}))
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.version__from_upload.called
def test_file_passed_all_validations_most_recent(self):
upload = self.create_upload(version='1.0')
newer_upload = self.create_upload(version='0.5')
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# The Version is created because the newer upload is for a different
# version_string.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.version__from_upload.assert_called_with(
upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED, is_beta=False)
def test_file_passed_all_validations_beta(self):
upload = self.create_upload(version='1.0-beta1')
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.version__from_upload.assert_called_with(
upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED, is_beta=True)
def test_file_passed_all_validations_no_version(self):
upload = self.create_upload(version=None)
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.version__from_upload.assert_called_with(
upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED, is_beta=False)
|
|
from spynoza.nodes import EPI_file_selector
def _extend_motion_parameters(moco_par_file, tr, sg_args = {'window_length': 120, 'deriv':0, 'polyorder':3, 'mode':'nearest'}):
import os.path as op
import numpy as np
from sklearn import decomposition
from scipy.signal import savgol_filter
ext_out_file = moco_par_file[:-7] + 'ext_moco_pars.par'
new_out_file = moco_par_file[:-7] + 'new_moco_pars.par'
    # Work on a copy so the (mutable) default argument isn't changed across calls
    sg_args = dict(sg_args)
    sg_args['window_length'] = int(sg_args['window_length'] / tr)
    # Savitzky-Golay window length must be odd
    if sg_args['window_length'] % 2 == 0:
        sg_args['window_length'] += 1
moco_pars = np.loadtxt(moco_par_file)
moco_pars = moco_pars - savgol_filter(moco_pars, axis = 0, **sg_args)
dt_moco_pars = np.diff(np.vstack((np.ones((1,6)), moco_pars)), axis = 0)
ddt_moco_pars = np.diff(np.vstack((np.ones((1,6)), dt_moco_pars)), axis = 0)
ext_moco_pars = np.hstack((moco_pars, dt_moco_pars, ddt_moco_pars))
    # expand with squared derivatives, run PCA and keep the original number of 18 components
amp = np.hstack((moco_pars, dt_moco_pars, ddt_moco_pars, dt_moco_pars**2, ddt_moco_pars**2))
pca = decomposition.PCA(n_components = 18)
pca.fit(amp)
new_moco_pars = pca.transform(amp)
np.savetxt(new_out_file, new_moco_pars, fmt='%f', delimiter='\t')
np.savetxt(ext_out_file, ext_moco_pars, fmt='%f', delimiter='\t')
return new_out_file, ext_out_file
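# Illustrative usage (a sketch with a hypothetical path and TR; the workflow below
# calls this through a MapNode instead):
# new_par, ext_par = _extend_motion_parameters('/data/sub-01_run-1_mcf.par', tr=1.5)
# The '*ext_moco_pars.par' file holds the Savitzky-Golay detrended parameters plus
# their first and second temporal derivatives (18 columns); the '*new_moco_pars.par'
# file holds 18 PCA components computed on that set extended with squared derivatives.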
def select_target_T2(T2_file_list, target_session):
target_T2 = [T2 for T2 in T2_file_list if target_session in T2][0]
return target_T2
def select_target_epi(epi_file_list, T2_file_list, target_session, which_file):
from spynoza.nodes import EPI_file_selector
target_T2 = [T2 for T2 in T2_file_list if target_session in T2][0]
all_target_epis = [epi for epi in epi_file_list if target_session in epi]
target_epi = EPI_file_selector(which_file, all_target_epis)
print("XXXXX " + target_epi)
return target_epi
def select_T2_for_epi(epi_file, T2_file_list):
import os.path as op
epi_filename = op.split(epi_file)[-1]
T2_sessions = [op.split(T2)[-1].split('_inplaneT2')[0] for T2 in T2_file_list]
which_T2_file = [T2 for (T2f, T2) in zip(T2_sessions, T2_file_list) if T2f in epi_filename][0]
return which_T2_file
def find_all_epis_for_inplane_anats(epi_file_list, inplane_anats, inplane_anat_suffix = '_inplaneT2_brain.nii.gz'):
'''selects epi nifti files that correspond to the session of each of inplane_anats.
Parameters
----------
epi_file_list : list
list of nifti, or other filenames
inplane_anats : list
list of nifti filenames
    inplane_anat_suffix : string
        string that, when stripped from the inplane_anat's filename, leaves the session's label.

    Returns
    -------
    list of lists
        epi_file_list files distributed among len(inplane_anats) sublists.
    '''
import os.path as op
session_labels = [op.split(ipa)[-1].split(inplane_anat_suffix)[0] for ipa in inplane_anats]
output_lists = []
for sl in session_labels:
output_lists.append([epi for epi in epi_file_list if sl in epi])
    return output_lists
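# Illustrative example (hypothetical filenames): with inplane anats from two
# sessions, the epi files are grouped per session by matching the session label:
# find_all_epis_for_inplane_anats(
#     ['sub-01_ses-1_bold.nii.gz', 'sub-01_ses-2_bold.nii.gz'],
#     ['sub-01_ses-1_inplaneT2_brain.nii.gz', 'sub-01_ses-2_inplaneT2_brain.nii.gz'])
# # -> [['sub-01_ses-1_bold.nii.gz'], ['sub-01_ses-2_bold.nii.gz']]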
def create_motion_correction_workflow(analysis_info, name = 'moco'):
"""uses sub-workflows to perform different registration steps.
Requires fsl and freesurfer tools
Parameters
----------
name : string
name of workflow
Example
-------
    >>> motion_correction_workflow = create_motion_correction_workflow(analysis_info, name='motion_correction_workflow')
>>> motion_correction_workflow.inputs.inputspec.output_directory = '/data/project/raw/BIDS/sj_1/'
>>> motion_correction_workflow.inputs.inputspec.in_files = ['sub-001.nii.gz','sub-002.nii.gz']
>>> motion_correction_workflow.inputs.inputspec.which_file_is_EPI_space = 'middle'
    Inputs::
        inputspec.output_directory : directory in which to sink the result files
        inputspec.in_files : list of functional files
        inputspec.which_file_is_EPI_space : determines which file is the 'standard EPI space'
        inputspec.inplane_T2_files : list of in-plane T2 anatomical files, one per session
        inputspec.T2_files_reg_matrices : registration matrices corresponding to the T2 files
        inputspec.sub_id : subject id, used as the datasink container
        inputspec.tr : repetition time of the functional runs
    Outputs::
        outputspec.EPI_space_file : standard EPI space file, one timepoint
        outputspec.T2_space_file : target session T2 space file
        outputspec.motion_corrected_files : motion corrected files
        outputspec.motion_correction_plots : motion correction plots
        outputspec.motion_correction_parameters : motion correction parameters
        outputspec.extended_motion_correction_parameters : parameters extended with temporal derivatives
        outputspec.new_motion_correction_parameters : PCA-transformed extended parameters
"""
import os
import os.path as op
import nipype.pipeline as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.interfaces.utility import Function, IdentityInterface
import nipype.interfaces.utility as niu
########################################################################################
# nodes
########################################################################################
input_node = pe.Node(IdentityInterface(fields=[
'in_files',
'inplane_T2_files',
'T2_files_reg_matrices',
'output_directory',
'which_file_is_EPI_space',
'sub_id',
'tr']), name='inputspec')
output_node = pe.Node(IdentityInterface(fields=([
'motion_corrected_files',
'EPI_space_file',
'T2_space_file',
'motion_correction_plots',
'motion_correction_parameters',
'extended_motion_correction_parameters',
'new_motion_correction_parameters'])), name='outputspec')
EPI_file_selector_node = pe.Node(Function(input_names=['which_file', 'in_files'], output_names='raw_EPI_space_file',
function=EPI_file_selector), name='EPI_file_selector_node')
# motion_correct_EPI_space = pe.Node(interface=fsl.MCFLIRT(
# save_mats = True,
# stats_imgs = True,
# save_plots = True,
# save_rms = True,
# cost = 'normmi',
# interpolation = 'sinc',
# dof = 6,
# # ref_vol = 0
# ), name='realign_space')
# mean_bold = pe.Node(interface=fsl.maths.MeanImage(dimension='T'), name='mean_space')
# new approach, which should aid in the joint motion correction of
# multiple sessions together, by pre-registering each run.
# the strategy would be to, for each run, take the first TR
# and FLIRT-align (6dof) it to the EPI_space file.
# then we can use this as an --infile argument to mcflirt.
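    # A minimal sketch of that pre-registration idea (illustrative only, not wired
    # into this workflow; node names are assumptions): a 6-dof FLIRT per run whose
    # matrix would seed MCFLIRT via its 'init' input (cf. the commented
    # 'prereg_flirt_N' connection further down).
    # prereg_flirt_N = pe.MapNode(fsl.FLIRT(cost_func='normcorr', dof=6,
    #                                       interp='sinc', output_type='NIFTI_GZ'),
    #                             name='prereg_flirt_N', iterfield=['in_file'])
    # motion_correction_workflow.connect(bet_epi_node, 'out_file', prereg_flirt_N, 'in_file')
    # motion_correction_workflow.connect(select_target_T2_node, 'which_T2', prereg_flirt_N, 'reference')
    # motion_correction_workflow.connect(prereg_flirt_N, 'out_matrix_file', motion_correct_all, 'init')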
select_target_T2_node = pe.Node(Function(input_names=['T2_file_list', 'target_session'], output_names=['which_T2'],
function=select_target_T2), name='select_target_T2_node')
select_target_T2_node.inputs.target_session = analysis_info['target_session']
# select_target_epi_node = pe.Node(Function(input_names=['epi_file_list', 'T2_file_list', 'target_session', 'which_file'], output_names=['target_epi'],
# function=select_target_epi), name='select_target_epi_node')
# select_target_epi_node.inputs.target_session = analysis_info['target_session']
select_T2_for_epi_node = pe.MapNode(Function(input_names=['epi_file', 'T2_file_list'], output_names=['which_T2_file'],
function=select_T2_for_epi), name='select_T2_for_epi_node', iterfield = ['epi_file'])
select_T2_mat_for_epi_node = pe.MapNode(Function(input_names=['epi_file', 'T2_file_list'], output_names=['which_T2_file'],
function=select_T2_for_epi), name='select_T2_mat_for_epi_node', iterfield = ['epi_file'])
bet_T2_node = pe.MapNode(interface=
fsl.BET(frac = analysis_info['T2_bet_f_value'],
vertical_gradient = analysis_info['T2_bet_g_value'],
functional=False, mask = True), name='bet_T2', iterfield=['in_file'])
bet_epi_node = pe.MapNode(interface=
fsl.BET(frac = analysis_info['T2_bet_f_value'],
vertical_gradient = analysis_info['T2_bet_g_value'],
functional=True, mask = True), name='bet_epi', iterfield=['in_file'])
motion_correct_all = pe.MapNode(interface=fsl.MCFLIRT(
save_mats = True,
save_plots = True,
cost = 'normmi',
interpolation = 'sinc',
stats_imgs = True,
dof = 6
), name='realign_all',
iterfield = ['in_file', 'ref_file'])
plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
extend_motion_pars = pe.MapNode(Function(input_names=['moco_par_file', 'tr'], output_names=['new_out_file', 'ext_out_file'],
function=_extend_motion_parameters), name='extend_motion_pars', iterfield = ['moco_par_file'])
# registration node is set up for rigid-body within-modality reg
# reg_flirt_N = pe.MapNode(fsl.FLIRT(cost_func='normcorr', output_type = 'NIFTI_GZ',# dof = 6, schedule = op.abspath(op.join(os.environ['FSLDIR'], 'etc', 'flirtsch', 'sch2D_6dof')),
# interp = 'sinc', dof = 6),
# name = 'reg_flirt_N', iterfield = ['in_file'])
regapply_moco_node = pe.MapNode(interface=
fsl.ApplyXfm(interp = 'spline'), name='regapply_moco_node', iterfield=['in_file', 'in_matrix_file'])
resample_epis = pe.MapNode(fsl.maths.MathsCommand(args = ' -subsamp2offc '), name='resample_epis', iterfield = ['in_file'])
resample_target_T2 = pe.Node(fsl.maths.MathsCommand(args = ' -subsamp2offc '), name='resample_target_T2')
rename = pe.Node(niu.Rename(format_string='session_EPI_space',
keep_ext=True),
name='namer')
rename_T2 = pe.Node(niu.Rename(format_string='session_T2_space',
keep_ext=True),
name='namer_T2')
########################################################################################
# workflow
########################################################################################
motion_correction_workflow = pe.Workflow(name=name)
motion_correction_workflow.connect(input_node, 'in_files', bet_epi_node, 'in_file')
motion_correction_workflow.connect(input_node, 'inplane_T2_files', bet_T2_node, 'in_file')
# select example func data, and example T2 space
# motion_correction_workflow.connect(input_node, 'which_file_is_EPI_space', select_target_epi_node, 'which_file')
# motion_correction_workflow.connect(bet_epi_node, 'out_file', select_target_epi_node, 'epi_file_list')
# motion_correction_workflow.connect(bet_T2_node, 'out_file', select_target_epi_node, 'T2_file_list')
motion_correction_workflow.connect(bet_T2_node, 'out_file', select_target_T2_node, 'T2_file_list')
# motion correct and average the standard EPI file
# motion_correction_workflow.connect(select_target_epi_node, 'target_epi', motion_correct_EPI_space, 'in_file')
# motion_correction_workflow.connect(motion_correct_EPI_space, 'out_file', mean_bold, 'in_file')
# output node, for later saving
# motion_correction_workflow.connect(mean_bold, 'out_file', output_node, 'EPI_space_file')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', output_node, 'T2_space_file')
# find the relevant T2 files for each of the epi files
motion_correction_workflow.connect(bet_epi_node, 'out_file', select_T2_for_epi_node, 'epi_file')
motion_correction_workflow.connect(bet_T2_node, 'out_file', select_T2_for_epi_node, 'T2_file_list')
# find the relevant T2 registration file for each of the epi files
motion_correction_workflow.connect(bet_epi_node, 'out_file', select_T2_mat_for_epi_node, 'epi_file')
motion_correction_workflow.connect(input_node, 'T2_files_reg_matrices', select_T2_mat_for_epi_node, 'T2_file_list')
# motion correction across runs
# motion_correction_workflow.connect(prereg_flirt_N, 'out_matrix_file', motion_correct_all, 'init')
motion_correction_workflow.connect(bet_epi_node, 'out_file', motion_correct_all, 'in_file')
motion_correction_workflow.connect(select_T2_for_epi_node, 'which_T2_file', motion_correct_all, 'ref_file')
# motion_correction_workflow.connect(mean_bold, 'out_file', motion_correct_all, 'ref_file')
# the registration
# motion_correction_workflow.connect(select_T2_for_epi_node, 'which_T2_file', reg_flirt_N, 'in_file')
# motion_correction_workflow.connect(select_target_T2_node, 'which_T2', reg_flirt_N, 'reference')
# output of motion correction of all files
motion_correction_workflow.connect(motion_correct_all, 'par_file', output_node, 'motion_correction_parameters')
motion_correction_workflow.connect(motion_correct_all, 'out_file', regapply_moco_node, 'in_file')
# registration has already been done by hand. This registration matrix is in the datasource, and applied here.
motion_correction_workflow.connect(select_T2_mat_for_epi_node, 'which_T2_file', regapply_moco_node, 'in_matrix_file')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', regapply_moco_node, 'reference')
motion_correction_workflow.connect(regapply_moco_node, 'out_file', resample_epis, 'in_file')
motion_correction_workflow.connect(resample_epis, 'out_file', output_node, 'motion_corrected_files')
motion_correction_workflow.connect(motion_correct_all, 'par_file', extend_motion_pars, 'moco_par_file')
motion_correction_workflow.connect(input_node, 'tr', extend_motion_pars, 'tr')
motion_correction_workflow.connect(extend_motion_pars, 'ext_out_file', output_node, 'extended_motion_correction_parameters')
motion_correction_workflow.connect(extend_motion_pars, 'new_out_file', output_node, 'new_motion_correction_parameters')
motion_correction_workflow.connect(rename, 'out_file', output_node, 'EPI_space_file')
########################################################################################
# Plot the estimated motion parameters
########################################################################################
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
motion_correction_workflow.connect(motion_correct_all, 'par_file', plot_motion, 'in_file')
motion_correction_workflow.connect(plot_motion, 'out_file', output_node, 'motion_correction_plots')
########################################################################################
# outputs via datasink
########################################################################################
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.parameterization = False
# first link the workflow's output_directory into the datasink.
motion_correction_workflow.connect(input_node, 'output_directory', datasink, 'base_directory')
motion_correction_workflow.connect(input_node, 'sub_id', datasink, 'container')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', resample_target_T2, 'in_file')
motion_correction_workflow.connect(resample_target_T2, 'out_file', rename, 'in_file')
motion_correction_workflow.connect(rename, 'out_file', datasink, 'reg')
motion_correction_workflow.connect(select_target_T2_node, 'which_T2', rename_T2, 'in_file')
motion_correction_workflow.connect(rename_T2, 'out_file', datasink, 'reg.@T2')
# motion_correction_workflow.connect(regapply_moco_node, 'out_file', datasink, 'mcf.hr')
motion_correction_workflow.connect(resample_epis, 'out_file', datasink, 'mcf')
motion_correction_workflow.connect(motion_correct_all, 'par_file', datasink, 'mcf.motion_pars')
motion_correction_workflow.connect(plot_motion, 'out_file', datasink, 'mcf.motion_plots')
motion_correction_workflow.connect(extend_motion_pars, 'ext_out_file', datasink, 'mcf.ext_motion_pars')
motion_correction_workflow.connect(extend_motion_pars, 'new_out_file', datasink, 'mcf.new_motion_pars')
motion_correction_workflow.connect(bet_T2_node, 'out_file', datasink, 'mcf.T2s')
# motion_correction_workflow.connect(motion_correct_all, 'out_file', datasink, 'mcf.hr_per_session')
# motion_correction_workflow.connect(reg_flirt_N, 'out_file', datasink, 'mcf.T2_per_session')
return motion_correction_workflow
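# A minimal usage sketch (hypothetical paths and settings; the analysis_info keys
# follow the ones accessed above):
# if __name__ == '__main__':
#     analysis_info = {'target_session': 'ses-1',
#                      'T2_bet_f_value': 0.5,
#                      'T2_bet_g_value': 0.0}
#     wf = create_motion_correction_workflow(analysis_info, name='moco')
#     wf.inputs.inputspec.in_files = ['sub-01_ses-1_bold.nii.gz']
#     wf.inputs.inputspec.inplane_T2_files = ['sub-01_ses-1_inplaneT2_brain.nii.gz']
#     wf.inputs.inputspec.T2_files_reg_matrices = ['sub-01_ses-1_to_target.mat']
#     wf.inputs.inputspec.output_directory = '/data/derivatives'
#     wf.inputs.inputspec.sub_id = 'sub-01'
#     wf.inputs.inputspec.which_file_is_EPI_space = 'middle'
#     wf.inputs.inputspec.tr = 1.5
#     wf.run()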
|
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Acknowledgement to Carl Wallace for the test messages.
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5934
class TAMPStatusResponseTestCase(unittest.TestCase):
tsr_pem_text = """\
MIIU/QYJKoZIhvcNAQcCoIIU7jCCFOoCAQMxDTALBglghkgBZQMEAgEwgg/GBgpghkgBZQIB
Ak0CoIIPtgSCD7Iwgg+uMAiDAAIEXXp3f6GCD50wgg+ZooIFFTCCBREwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQDALMH2jTus/z881nG+uHQiB+xwQRX8q0DjB6rBw9if/tpM
Or8/yNgoe0s2AcCsRSXD0g4Kj4UYZBA9GhNwKm+O19yNk7NBDzghza2rwj0qBdNXETcNzYxR
+ZPjzEZJIY4UtM3LFD44zXIx7qsS8mXqNC5WXf/uY3XLbbqRNPye8/QtHL5QxELfWYj/arP6
qGw9y1ZxcQWWu5+A5YBFWWdBsOvDrWCkgHUGF5wO9EPgmQ4b+3/1s8yygYKx/TLBuL5BpGS1
YDpaUTCMzt5BLBlHXEkQZLl0qYdBr31uusG4ob9lMToEZ/m1u46SigBjuLHmjDhfg/9Q1Tui
XWuyEMxjAgMBAAEEFEl0uwxeunr+AlTve6DGlcYJgHCWMIID0TBbMQswCQYDVQQGEwJVUzEY
MBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEW
MBQGA1UEAxMNRG9EIFJvb3QgQ0EgMqCCA3AwggJYoAMCAQICAQUwDQYJKoZIhvcNAQEFBQAw
WzELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDEMMAoGA1UECxMDRG9E
MQwwCgYDVQQLEwNQS0kxFjAUBgNVBAMTDURvRCBSb290IENBIDIwHhcNMDQxMjEzMTUwMDEw
WhcNMjkxMjA1MTUwMDEwWjBbMQswCQYDVQQGEwJVUzEYMBYGA1UEChMPVS5TLiBHb3Zlcm5t
ZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEWMBQGA1UEAxMNRG9EIFJvb3QgQ0Eg
MjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMAswfaNO6z/PzzWcb64dCIH7HBB
FfyrQOMHqsHD2J/+2kw6vz/I2Ch7SzYBwKxFJcPSDgqPhRhkED0aE3Aqb47X3I2Ts0EPOCHN
ravCPSoF01cRNw3NjFH5k+PMRkkhjhS0zcsUPjjNcjHuqxLyZeo0LlZd/+5jdcttupE0/J7z
9C0cvlDEQt9ZiP9qs/qobD3LVnFxBZa7n4DlgEVZZ0Gw68OtYKSAdQYXnA70Q+CZDhv7f/Wz
zLKBgrH9MsG4vkGkZLVgOlpRMIzO3kEsGUdcSRBkuXSph0GvfW66wbihv2UxOgRn+bW7jpKK
AGO4seaMOF+D/1DVO6Jda7IQzGMCAwEAAaM/MD0wHQYDVR0OBBYEFEl0uwxeunr+AlTve6DG
lcYJgHCWMAsGA1UdDwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IB
AQCYkY0/ici79cBpcyk7Nay6swh2PXAJkumERCEBfRR2G+5RbB2NFTctezFp9JpEuK9GzDT6
I8sDJxnSgyF1K+fgG5km3IRAleio0sz2WFxm7z9KlxCCHboKot1bBiudp2RO6y4BNaS0PxOt
VeTVc6hpmxHxmPIxHm9A1Ph4n46RoG9wBJBmqgYrzuF6krV94eDRluehOi3MsZ0fBUTth5nT
TRpwOcEEDOV+2fGv1yAO8SJ6JaRzmcw/pAcnlqiile2CuRbTnguHwsHyiPVi32jfx7xpUe2x
XNxUVCkPCTmarAPB2wxNrm8KehZJ8b+R0jiU0/aVLLdsyUK2jcqQjYXZooIFGDCCBRQwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCp7BRyiuhLcKPaEAOEpvunNg0qOlIWvzAV
UoYFRyDPqqbNdcRkbu/xYCPLCmZArrTIaCoAUWhJN+lZMk2VvEMn6UCNOhDOFLxDGKH53szn
hXZzXhgaI1u9Px/y7Y0ZzAPRQKSPpyACTCdaeTb2ozchjgBaBhbK01WWbzEpu3IOy+JIUfLU
N6Q11m/uF7OxBqsLGYboI20xGyh4ZcXeYlK8wX3r7qBdVAT7sssrsiNUkYJM8L+6dEA7DARF
gGdcxeuiV8MafwotvX+53MGZsMgH5AyGNpQ6JS/yfeaXPBuUtJdZBsk65AvZ6un8O3M0b/3n
mOTzocKQXxz1Py7XGdN/AgMBAAEEFGyKlKJ3sYByHYF6Fqry3M5m7kXAMIID1DBbMQswCQYD
VQQGEwJVUzEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNV
BAsTA1BLSTEWMBQGA1UEAxMNRG9EIFJvb3QgQ0EgM6CCA3MwggJboAMCAQICAQEwDQYJKoZI
hvcNAQELBQAwWzELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDEMMAoG
A1UECxMDRG9EMQwwCgYDVQQLEwNQS0kxFjAUBgNVBAMTDURvRCBSb290IENBIDMwHhcNMTIw
MzIwMTg0NjQxWhcNMjkxMjMwMTg0NjQxWjBbMQswCQYDVQQGEwJVUzEYMBYGA1UEChMPVS5T
LiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEWMBQGA1UEAxMNRG9E
IFJvb3QgQ0EgMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKnsFHKK6Etwo9oQ
A4Sm+6c2DSo6Uha/MBVShgVHIM+qps11xGRu7/FgI8sKZkCutMhoKgBRaEk36VkyTZW8Qyfp
QI06EM4UvEMYofnezOeFdnNeGBojW70/H/LtjRnMA9FApI+nIAJMJ1p5NvajNyGOAFoGFsrT
VZZvMSm7cg7L4khR8tQ3pDXWb+4Xs7EGqwsZhugjbTEbKHhlxd5iUrzBfevuoF1UBPuyyyuy
I1SRgkzwv7p0QDsMBEWAZ1zF66JXwxp/Ci29f7ncwZmwyAfkDIY2lDolL/J95pc8G5S0l1kG
yTrkC9nq6fw7czRv/eeY5POhwpBfHPU/LtcZ038CAwEAAaNCMEAwHQYDVR0OBBYEFGyKlKJ3
sYByHYF6Fqry3M5m7kXAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQCfcaTAtpbSgEOgSOkfdgT5xTytZhhYY5vDtuhoioVaQmYStNLmi4h/
h/SY9ajGCckf8Cwf7IK49KVHOMEzK99Mfpq+Cwuxyw98UCgQz4qNoum6rIbX1LGTXyKPlgW0
Tgx1kX3T8ueUwpQUdk+PDKsQh1gyhQd1hhILXupTtArITISSH+voQYY8uvROQUrRbFhHQcOG
WvLu6fKYJ4LqLjbW+AZegvGgUpNECbrSqRlaWKOoXSBtT2T4MIcbkBNIgc3KkMcNwdSYP47y
DldoMxKOmQmx8OT2EPQ28km96qM4yFZBI4Oa36EbNXzrP0Gz9W9LOl6ub5N2mNLxmZ1FxI5y
ooIFYDCCBVwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ3HcYEBAYYEH753gQ
D/iEd3DvLW5VOxGmmVI/bfS9oZf6Nh5uREIRyFP+dYabXjcSiKJ92XEI1Ek1cc5Gz1vQWY5l
H+tCPcoO3EyQ2FRpz144siBg3YNRLt/b1Vs4kVotz5oztG+WkOV2FGJDaYQQz1RB+TXqntRa
l51eEFm94OTDWYnX3vJ5sIdrAsBZoSoAghVvaxERAFM0dD304cxWYqLkZegjsYMdWFMIsjMt
lr7lfTOeEFonc1PdXZjiSxFTWJGP6nIR7LuU8g0PUK3yFrUaACQx5RW9FwaQqiSxrN0MUh7w
i2qruPft32O0zpRov16W0ESW8fj0ejoKeRVTAgMBAAEEFKg8CZ1n9thHuqLQ/BhyVohAbZWV
MIID0jBTMQswCQYDVQQGEwJVUzEfMB0GA1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEj
MCEGA1UEAxMaVmFsaWQgRUUgQ2VydGlmaWNhdGUgVGVzdDGgggN5MIICYaADAgECAgEBMA0G
CSqGSIb3DQEBCwUAMEAxCzAJBgNVBAYTAlVTMR8wHQYDVQQKExZUZXN0IENlcnRpZmljYXRl
cyAyMDExMRAwDgYDVQQDEwdHb29kIENBMB4XDTEwMDEwMTA4MzAwMFoXDTMwMTIzMTA4MzAw
MFowUzELMAkGA1UEBhMCVVMxHzAdBgNVBAoTFlRlc3QgQ2VydGlmaWNhdGVzIDIwMTExIzAh
BgNVBAMTGlZhbGlkIEVFIENlcnRpZmljYXRlIFRlc3QxMIIBIjANBgkqhkiG9w0BAQEFAAOC
AQ8AMIIBCgKCAQEA2dx3GBAQGGBB++d4EA/4hHdw7y1uVTsRpplSP230vaGX+jYebkRCEchT
/nWGm143EoiifdlxCNRJNXHORs9b0FmOZR/rQj3KDtxMkNhUac9eOLIgYN2DUS7f29VbOJFa
Lc+aM7RvlpDldhRiQ2mEEM9UQfk16p7UWpedXhBZveDkw1mJ197yebCHawLAWaEqAIIVb2sR
EQBTNHQ99OHMVmKi5GXoI7GDHVhTCLIzLZa+5X0znhBaJ3NT3V2Y4ksRU1iRj+pyEey7lPIN
D1Ct8ha1GgAkMeUVvRcGkKoksazdDFIe8Itqq7j37d9jtM6UaL9eltBElvH49Ho6CnkVUwID
AQABo2swaTAfBgNVHSMEGDAWgBRYAYQkG7wrUpRKPaUQchRR9a86yTAdBgNVHQ4EFgQUqDwJ
nWf22Ee6otD8GHJWiEBtlZUwDgYDVR0PAQH/BAQDAgTwMBcGA1UdIAQQMA4wDAYKYIZIAWUD
AgEwATANBgkqhkiG9w0BAQsFAAOCAQEAHlrZD69ipblSvLzsDGGIEwGqCg8NR6OeqbIXG/ij
2SzSjTi+O7LP1DGIz85p9I7HuXAFUcAGh8aVtPZq+jGeLcQXs+3lehlhGG6M0eQO2pttbI0G
kO4s0XlY2ITNm0HTGOL+kcZfACcUZXsS+i+9qL80ji3PF0xYWzAPLmlmRSYmIZjT85CuKYda
Tsa96Ch+D6CU5v9ctVxP3YphWQ4F0v/FacDTiUrRwuXI9MgIw/0qI0+EAFwsRC2DisI9Isc8
YPKKeOMbRmXamY/4Y8HUeqBwpnqnEJudrH++FPBEI4dYrBAV6POgvx4lyzarAmlarv/AbrBD
ngieGTynMG6NwqFIMEYwRAYIKwYBBQUHARIBAf8ENTAzMA8GCmCGSAFlAgECTQMKAQEwDwYK
YIZIAWUCAQJNAQoBATAPBgpghkgBZQIBAk0CCgEBAQEAoIIDfTCCA3kwggJhoAMCAQICAQEw
DQYJKoZIhvcNAQELBQAwQDELMAkGA1UEBhMCVVMxHzAdBgNVBAoTFlRlc3QgQ2VydGlmaWNh
dGVzIDIwMTExEDAOBgNVBAMTB0dvb2QgQ0EwHhcNMTAwMTAxMDgzMDAwWhcNMzAxMjMxMDgz
MDAwWjBTMQswCQYDVQQGEwJVUzEfMB0GA1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEj
MCEGA1UEAxMaVmFsaWQgRUUgQ2VydGlmaWNhdGUgVGVzdDEwggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQDZ3HcYEBAYYEH753gQD/iEd3DvLW5VOxGmmVI/bfS9oZf6Nh5uREIR
yFP+dYabXjcSiKJ92XEI1Ek1cc5Gz1vQWY5lH+tCPcoO3EyQ2FRpz144siBg3YNRLt/b1Vs4
kVotz5oztG+WkOV2FGJDaYQQz1RB+TXqntRal51eEFm94OTDWYnX3vJ5sIdrAsBZoSoAghVv
axERAFM0dD304cxWYqLkZegjsYMdWFMIsjMtlr7lfTOeEFonc1PdXZjiSxFTWJGP6nIR7LuU
8g0PUK3yFrUaACQx5RW9FwaQqiSxrN0MUh7wi2qruPft32O0zpRov16W0ESW8fj0ejoKeRVT
AgMBAAGjazBpMB8GA1UdIwQYMBaAFFgBhCQbvCtSlEo9pRByFFH1rzrJMB0GA1UdDgQWBBSo
PAmdZ/bYR7qi0PwYclaIQG2VlTAOBgNVHQ8BAf8EBAMCBPAwFwYDVR0gBBAwDjAMBgpghkgB
ZQMCATABMA0GCSqGSIb3DQEBCwUAA4IBAQAeWtkPr2KluVK8vOwMYYgTAaoKDw1Ho56pshcb
+KPZLNKNOL47ss/UMYjPzmn0jse5cAVRwAaHxpW09mr6MZ4txBez7eV6GWEYbozR5A7am21s
jQaQ7izReVjYhM2bQdMY4v6Rxl8AJxRlexL6L72ovzSOLc8XTFhbMA8uaWZFJiYhmNPzkK4p
h1pOxr3oKH4PoJTm/1y1XE/dimFZDgXS/8VpwNOJStHC5cj0yAjD/SojT4QAXCxELYOKwj0i
xzxg8op44xtGZdqZj/hjwdR6oHCmeqcQm52sf74U8EQjh1isEBXo86C/HiXLNqsCaVqu/8Bu
sEOeCJ4ZPKcwbo3CMYIBiTCCAYUCAQOAFKg8CZ1n9thHuqLQ/BhyVohAbZWVMAsGCWCGSAFl
AwQCAaBMMBkGCSqGSIb3DQEJAzEMBgpghkgBZQIBAk0CMC8GCSqGSIb3DQEJBDEiBCAiPyBP
FFwHJbHgGmoz+54OEJ/ppMyfSoZmbS/nkWfxxjALBgkqhkiG9w0BAQsEggEAHllTg+TMT2ll
zVvrvRDwOwrzr6YIJSt96sLANqOXiqqnvrHDDWTdVMcRX/LccVbm9JP4sGSfGDdwbm3FqB+l
kgSBlejFgjWfF/YVK5OpaVcPGg4DB3oAOwxtn0GVQtKgGkiGQF0r5389mTHYlQzS6BVDG2Oi
sKIe4SBazrBGjnKANf9LEunpWPt15y6QCxiEKnJfPlAqiMuiIhHmXPIHi+d3sYkC+iu+5I68
2oeLdtBWCDcGh4+DdS6Qqzkpp14MpvzBMdfD3lKcI3NRmY+GmRYaGAiEalh83vggslF7N4SS
iPxQyqz7LIQe9/5ynJV5/CPUDBL9QK2vSCOQaihWCg==
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.tsr_pem_text)
layers = {
rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
rfc5652.id_signedData: rfc5652.SignedData(),
rfc5934.id_ct_TAMP_statusResponse: rfc5934.TAMPStatusResponse()
}
getNextLayer = {
rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
rfc5934.id_ct_TAMP_statusResponse: lambda x: None
}
getNextSubstrate = {
rfc5652.id_ct_contentInfo: lambda x: x['content'],
rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
rfc5934.id_ct_TAMP_statusResponse: lambda x: None
}
next_layer = rfc5652.id_ct_contentInfo
while next_layer:
asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
substrate = getNextSubstrate[next_layer](asn1Object)
next_layer = getNextLayer[next_layer](asn1Object)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.tsr_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
eci = asn1Object['content']['encapContentInfo']
self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
self.assertEqual(rfc5934.id_ct_TAMP_statusResponse, eci['eContentType'])
tsr, rest = der_decoder(
eci['eContent'],
asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(tsr.prettyPrint())
self.assertEqual(eci['eContent'], der_encoder(tsr))
self.assertEqual(2, tsr['version'])
self.assertEqual(univ.Null(""), tsr['query']['target'])
self.assertEqual(1568307071, tsr['query']['seqNum'])
self.assertFalse(tsr['usesApex'])
count = 0
for tai in tsr['response']['verboseResponse']['taInfo']:
count += 1
self.assertEqual(1, tai['taInfo']['version'])
self.assertEqual(3, count)
class TrustAnchorUpdateTestCase(unittest.TestCase):
tau_pem_text = """\
MIIGgwYJKoZIhvcNAQcCoIIGdDCCBnACAQMxDTALBglghkgBZQMEAgEwggFMBgpghkgBZQIB
Ak0DoIIBPASCATgwggE0MAiDAAIEXXp3kDCCASaiggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQDALMH2jTus/z881nG+uHQiB+xwQRX8q0DjB6rBw9if/tpMOr8/yNgoe0s2AcCs
RSXD0g4Kj4UYZBA9GhNwKm+O19yNk7NBDzghza2rwj0qBdNXETcNzYxR+ZPjzEZJIY4UtM3L
FD44zXIx7qsS8mXqNC5WXf/uY3XLbbqRNPye8/QtHL5QxELfWYj/arP6qGw9y1ZxcQWWu5+A
5YBFWWdBsOvDrWCkgHUGF5wO9EPgmQ4b+3/1s8yygYKx/TLBuL5BpGS1YDpaUTCMzt5BLBlH
XEkQZLl0qYdBr31uusG4ob9lMToEZ/m1u46SigBjuLHmjDhfg/9Q1TuiXWuyEMxjAgMBAAGg
ggN9MIIDeTCCAmGgAwIBAgIBATANBgkqhkiG9w0BAQsFADBAMQswCQYDVQQGEwJVUzEfMB0G
A1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEQMA4GA1UEAxMHR29vZCBDQTAeFw0xMDAx
MDEwODMwMDBaFw0zMDEyMzEwODMwMDBaMFMxCzAJBgNVBAYTAlVTMR8wHQYDVQQKExZUZXN0
IENlcnRpZmljYXRlcyAyMDExMSMwIQYDVQQDExpWYWxpZCBFRSBDZXJ0aWZpY2F0ZSBUZXN0
MTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANncdxgQEBhgQfvneBAP+IR3cO8t
blU7EaaZUj9t9L2hl/o2Hm5EQhHIU/51hpteNxKIon3ZcQjUSTVxzkbPW9BZjmUf60I9yg7c
TJDYVGnPXjiyIGDdg1Eu39vVWziRWi3PmjO0b5aQ5XYUYkNphBDPVEH5Neqe1FqXnV4QWb3g
5MNZidfe8nmwh2sCwFmhKgCCFW9rEREAUzR0PfThzFZiouRl6COxgx1YUwiyMy2WvuV9M54Q
WidzU91dmOJLEVNYkY/qchHsu5TyDQ9QrfIWtRoAJDHlFb0XBpCqJLGs3QxSHvCLaqu49+3f
Y7TOlGi/XpbQRJbx+PR6Ogp5FVMCAwEAAaNrMGkwHwYDVR0jBBgwFoAUWAGEJBu8K1KUSj2l
EHIUUfWvOskwHQYDVR0OBBYEFKg8CZ1n9thHuqLQ/BhyVohAbZWVMA4GA1UdDwEB/wQEAwIE
8DAXBgNVHSAEEDAOMAwGCmCGSAFlAwIBMAEwDQYJKoZIhvcNAQELBQADggEBAB5a2Q+vYqW5
Ury87AxhiBMBqgoPDUejnqmyFxv4o9ks0o04vjuyz9QxiM/OafSOx7lwBVHABofGlbT2avox
ni3EF7Pt5XoZYRhujNHkDtqbbWyNBpDuLNF5WNiEzZtB0xji/pHGXwAnFGV7Evovvai/NI4t
zxdMWFswDy5pZkUmJiGY0/OQrimHWk7Gvegofg+glOb/XLVcT92KYVkOBdL/xWnA04lK0cLl
yPTICMP9KiNPhABcLEQtg4rCPSLHPGDyinjjG0Zl2pmP+GPB1HqgcKZ6pxCbnax/vhTwRCOH
WKwQFejzoL8eJcs2qwJpWq7/wG6wQ54Inhk8pzBujcIxggGJMIIBhQIBA4AUqDwJnWf22Ee6
otD8GHJWiEBtlZUwCwYJYIZIAWUDBAIBoEwwGQYJKoZIhvcNAQkDMQwGCmCGSAFlAgECTQMw
LwYJKoZIhvcNAQkEMSIEINq+nldSoCoJuEe/lhrRhfx0ArygsPJ7mCMbOFrpr1dFMAsGCSqG
SIb3DQEBCwSCAQBTeRE1DzwF2dnv2yJAOYOxNnAtTs72ZG8mv5Ad4M/9n1+MPiAykLcBslW8
7D1KjBdwB3oxIT4sjwGh0kxKLe4G+VuvQuPwtT8MqMl3hounnFOM5nMSj1TSbfHVPs3dhEyk
Wu1gQ5g9gxLF3MpwEJGJKvhRtK17LGElJWvGPniRMChAJZJWoLjFBMe5JMzpqu2za50S1K3t
YtkTOx/2FQdVApkTY1qMQooljDiuvSvOuSDXcyAA15uIypQJvfrBNqe6Ush+j7yS5UQyTm0o
ZidB8vj4jIZT3S2gqWhtBLMUc11j+kWlXEZEigSL8WgCbAu7lqhItMwz2dy4C5aAWq8r"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.tau_pem_text)
layers = {
rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
rfc5652.id_signedData: rfc5652.SignedData(),
rfc5934.id_ct_TAMP_update: rfc5934.TAMPUpdate()
}
getNextLayer = {
rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
rfc5934.id_ct_TAMP_update: lambda x: None
}
getNextSubstrate = {
rfc5652.id_ct_contentInfo: lambda x: x['content'],
rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
rfc5934.id_ct_TAMP_update: lambda x: None
}
next_layer = rfc5652.id_ct_contentInfo
while next_layer:
asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
substrate = getNextSubstrate[next_layer](asn1Object)
next_layer = getNextLayer[next_layer](asn1Object)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.tau_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=rfc5652.ContentInfo(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
eci = asn1Object['content']['encapContentInfo']
self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
self.assertEqual(rfc5934.id_ct_TAMP_update, eci['eContentType'])
tau, rest = der_decoder(
eci['eContent'],
asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(tau.prettyPrint())
self.assertEqual(eci['eContent'], der_encoder(tau))
self.assertEqual(2, tau['version'])
self.assertEqual(univ.Null(""), tau['msgRef']['target'])
self.assertEqual(1568307088, tau['msgRef']['seqNum'])
self.assertEqual(1, len(tau['updates']))
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import textwrap
import pytest
from cryptography.exceptions import _Reasons
from cryptography.hazmat.backends.interfaces import (
EllipticCurveBackend, PEMSerializationBackend, PKCS8SerializationBackend,
TraditionalOpenSSLSerializationBackend
)
from cryptography.hazmat.primitives import interfaces
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import (
load_pem_pkcs8_private_key, load_pem_private_key, load_pem_public_key,
load_pem_traditional_openssl_private_key
)
from .test_ec import _skip_curve_unsupported
from .utils import _check_rsa_private_numbers, load_vectors_from_file
from ...utils import raises_unsupported_algorithm
@pytest.mark.requires_backend_interface(interface=PEMSerializationBackend)
class TestPEMSerialization(object):
def test_load_pem_rsa_private_key(self, backend):
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PEM_Serialization", "rsa_private_key.pem"),
lambda pemfile: load_pem_private_key(
pemfile.read().encode(), b"123456", backend
)
)
assert key
assert isinstance(key, interfaces.RSAPrivateKey)
if isinstance(key, interfaces.RSAPrivateKeyWithNumbers):
_check_rsa_private_numbers(key.private_numbers())
def test_load_dsa_private_key(self, backend):
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PEM_Serialization", "dsa_private_key.pem"),
lambda pemfile: load_pem_private_key(
pemfile.read().encode(), b"123456", backend
)
)
assert key
assert isinstance(key, interfaces.DSAPrivateKey)
@pytest.mark.parametrize(
("key_file", "password"),
[
("ec_private_key.pem", None),
("ec_private_key_encrypted.pem", b"123456"),
]
)
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
def test_load_pem_ec_private_key(self, key_file, password, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PEM_Serialization", key_file),
lambda pemfile: load_pem_private_key(
pemfile.read().encode(), password, backend
)
)
assert key
assert isinstance(key, interfaces.EllipticCurvePrivateKey)
@pytest.mark.parametrize(
("key_file"),
[
os.path.join("asymmetric", "PKCS8", "unenc-rsa-pkcs8.pub.pem"),
os.path.join(
"asymmetric", "PEM_Serialization", "rsa_public_key.pem"),
]
)
def test_load_pem_rsa_public_key(self, key_file, backend):
key = load_vectors_from_file(
key_file,
lambda pemfile: load_pem_public_key(
pemfile.read().encode(), backend
)
)
assert key
assert isinstance(key, interfaces.RSAPublicKey)
if isinstance(key, interfaces.RSAPublicKeyWithNumbers):
numbers = key.public_numbers()
assert numbers.e == 65537
@pytest.mark.parametrize(
("key_file"),
[
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pub.pem"),
os.path.join(
"asymmetric", "PEM_Serialization",
"dsa_public_key.pem"),
]
)
def test_load_pem_dsa_public_key(self, key_file, backend):
key = load_vectors_from_file(
key_file,
lambda pemfile: load_pem_public_key(
pemfile.read().encode(), backend
)
)
assert key
assert isinstance(key, interfaces.DSAPublicKey)
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
def test_load_ec_public_key(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PEM_Serialization",
"ec_public_key.pem"),
lambda pemfile: load_pem_public_key(
pemfile.read().encode(), backend
)
)
assert key
assert isinstance(key, interfaces.EllipticCurvePublicKey)
assert key.curve.name == "secp256r1"
assert key.curve.key_size == 256
@pytest.mark.requires_backend_interface(
interface=TraditionalOpenSSLSerializationBackend
)
class TestTraditionalOpenSSLSerialization(object):
@pytest.mark.parametrize(
("key_file", "password"),
[
("key1.pem", b"123456"),
("key2.pem", b"a123456"),
("testrsa.pem", None),
("testrsa-encrypted.pem", b"password"),
]
)
def test_load_pem_rsa_private_key(self, key_file, password, backend):
key = load_vectors_from_file(
os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization", key_file),
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
assert key
assert isinstance(key, interfaces.RSAPrivateKey)
if isinstance(key, interfaces.RSAPrivateKeyWithNumbers):
_check_rsa_private_numbers(key.private_numbers())
@pytest.mark.parametrize(
("key_file", "password"),
[
("dsa.1024.pem", None),
("dsa.2048.pem", None),
("dsa.3072.pem", None),
]
)
def test_load_pem_dsa_private_key(self, key_file, password, backend):
key = load_vectors_from_file(
os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization", key_file),
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
assert key
assert isinstance(key, interfaces.DSAPrivateKey)
def test_key1_pem_encrypted_values(self, backend):
pkey = load_vectors_from_file(
os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization", "key1.pem"),
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), b"123456", backend
)
)
assert pkey
numbers = pkey.private_numbers()
assert numbers.p == int(
"fb7d316fc51531b36d93adaefaf52db6ad5beb793d37c4cf9dfc1ddd17cfbafb",
16
)
assert numbers.q == int(
"df98264e646de9a0fbeab094e31caad5bc7adceaaae3c800ca0275dd4bb307f5",
16
)
assert numbers.d == int(
"db4848c36f478dd5d38f35ae519643b6b810d404bcb76c00e44015e56ca1cab0"
"7bb7ae91f6b4b43fcfc82a47d7ed55b8c575152116994c2ce5325ec24313b911",
16
)
assert numbers.dmp1 == int(
"ce997f967192c2bcc3853186f1559fd355c190c58ddc15cbf5de9b6df954c727",
16
)
assert numbers.dmq1 == int(
"b018a57ab20ffaa3862435445d863369b852cf70a67c55058213e3fe10e3848d",
16
)
assert numbers.iqmp == int(
"6a8d830616924f5cf2d1bc1973f97fde6b63e052222ac7be06aa2532d10bac76",
16
)
assert numbers.public_numbers.e == 65537
assert numbers.public_numbers.n == int(
"dba786074f2f0350ce1d99f5aed5b520cfe0deb5429ec8f2a88563763f566e77"
"9814b7c310e5326edae31198eed439b845dd2db99eaa60f5c16a43f4be6bcf37",
16
)
def test_unused_password(self, backend):
key_file = os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization", "testrsa.pem")
password = b"this password will not be used"
with pytest.raises(TypeError):
load_vectors_from_file(
key_file,
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
def test_wrong_password(self, backend):
key_file = os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"testrsa-encrypted.pem"
)
password = b"this password is wrong"
with pytest.raises(ValueError):
load_vectors_from_file(
key_file,
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
@pytest.mark.parametrize("password", [None, b""])
def test_missing_password(self, backend, password):
key_file = os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"testrsa-encrypted.pem"
)
with pytest.raises(TypeError):
load_vectors_from_file(
key_file,
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
def test_wrong_format(self, backend):
key_data = b"---- NOT A KEY ----\n"
with pytest.raises(ValueError):
load_pem_traditional_openssl_private_key(
key_data, None, backend
)
with pytest.raises(ValueError):
load_pem_traditional_openssl_private_key(
key_data, b"this password will not be used", backend
)
def test_corrupt_format(self, backend):
# privkey.pem with a bunch of data missing.
key_data = textwrap.dedent("""\
-----BEGIN RSA PRIVATE KEY-----
MIIBPAIBAAJBAKrbeqkuRk8VcRmWFmtP+LviMB3+6dizWW3DwaffznyHGAFwUJ/I
Tv0XtbsCyl3QoyKGhrOAy3RvPK5M38iuXT0CAwEAAQJAZ3cnzaHXM/bxGaR5CR1R
rD1qFBAVfoQFiOH9uPJgMaoAuoQEisPHVcZDKcOv4wEg6/TInAIXBnEigtqvRzuy
mvcpHZwQJdmdHHkGKAs37Dfxi67HbkUCIQCeZGliHXFa071Fp06ZeWlR2ADonTZz
rJBhdTe0v5pCeQIhAIZfkiGgGBX4cIuuckzEm43g9WMUjxP/0GlK39vIyihxAiEA
mymehFRT0MvqW5xAKAx7Pgkt8HVKwVhc2LwGKHE0DZM=
-----END RSA PRIVATE KEY-----
""").encode()
with pytest.raises(ValueError):
load_pem_traditional_openssl_private_key(
key_data, None, backend
)
with pytest.raises(ValueError):
load_pem_traditional_openssl_private_key(
key_data, b"this password will not be used", backend
)
def test_encrypted_corrupt_format(self, backend):
# privkey.pem with a single bit flipped
key_data = textwrap.dedent("""\
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: <,ENCRYPTED
DEK-Info: AES-128-CBC,5E22A2BD85A653FB7A3ED20DE84F54CD
hAqtb5ZkTMGcs4BBDQ1SKZzdQThWRDzEDxM3qBfjvYa35KxZ54aic013mW/lwj2I
v5bbpOjrHYHNAiZYZ7RNb+ztbF6F/g5PA5g7mFwEq+LFBY0InIplYBSv9QtE+lot
Dy4AlZa/+NzJwgdKDb+JVfk5SddyD4ywnyeORnMPy4xXKvjXwmW+iLibZVKsjIgw
H8hSxcD+FhWyJm9h9uLtmpuqhQo0jTUYpnTezZx2xeVPB53Ev7YCxR9Nsgj5GsVf
9Z/hqLB7IFgM3pa0z3PQeUIZF/cEf72fISWIOBwwkzVrPUkXWfbuWeJXQXSs3amE
5A295jD9BQp9CY0nNFSsy+qiXWToq2xT3y5zVNEStmN0SCGNaIlUnJzL9IHW+oMI
kPmXZMnAYBWeeCF1gf3J3aE5lZInegHNfEI0+J0LazC2aNU5Dg/BNqrmRqKWEIo/
-----END RSA PRIVATE KEY-----
""").encode()
password = b"this password is wrong"
with pytest.raises(ValueError):
load_pem_traditional_openssl_private_key(
key_data, None, backend
)
with pytest.raises(ValueError):
load_pem_traditional_openssl_private_key(
key_data, password, backend
)
def test_unsupported_key_encryption(self, backend):
key_data = textwrap.dedent("""\
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: FAKE-123,5E22A2BD85A653FB7A3ED20DE84F54CD
hAqtb5ZkTMGcs4BBDQ1SKZzdQThWRDzEDxM3qBfjvYa35KxZ54aic013mW/lwj2I
v5bbpOjrHYHNAiZYZ7RNb+ztbF6F/g5PA5g7mFwEq+LFBY0InIplYBSv9QtE+lot
Dy4AlZa/+NzJwgdKDb+JVfk5SddyD4ywnyeORnMPy4xXKvjXwmW+iLibZVKsjIgw
H8hSxcD+FhWyJm9h9uLtmpuqhQo0jTUYpnTezZx2xeVPB53Ev7YCxR9Nsgj5GsVf
9Z/hqLB7IFgM3pa0z3PQeUIZF/cEf72fISWIOBwwkzVrPUkXWfbuWeJXQXSs3amE
5A295jD9BQp9CY0nNFSsy+qiXWToq2xT3y5zVNEStmN0SCGNaIlUnJzL9IHW+oMI
kPmXZMnAYBWeeCF1gf3J3aE5lZInegHNfEI0+J0LazC2aNU5Dg/BNqrmRqKWEIo/
-----END RSA PRIVATE KEY-----
""").encode()
password = b"password"
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
load_pem_traditional_openssl_private_key(
key_data, password, backend
)
@pytest.mark.requires_backend_interface(interface=PKCS8SerializationBackend)
class TestPKCS8Serialization(object):
@pytest.mark.parametrize(
("key_file", "password"),
[
("unenc-rsa-pkcs8.pem", None),
("enc-rsa-pkcs8.pem", b"foobar"),
("enc2-rsa-pkcs8.pem", b"baz"),
("pkcs12_s2k_pem-X_9607.pem", b"123456"),
("pkcs12_s2k_pem-X_9671.pem", b"123456"),
("pkcs12_s2k_pem-X_9925.pem", b"123456"),
("pkcs12_s2k_pem-X_9926.pem", b"123456"),
("pkcs12_s2k_pem-X_9927.pem", b"123456"),
("pkcs12_s2k_pem-X_9928.pem", b"123456"),
("pkcs12_s2k_pem-X_9929.pem", b"123456"),
("pkcs12_s2k_pem-X_9930.pem", b"123456"),
("pkcs12_s2k_pem-X_9931.pem", b"123456"),
("pkcs12_s2k_pem-X_9932.pem", b"123456"),
]
)
def test_load_pem_rsa_private_key(self, key_file, password, backend):
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", key_file),
lambda pemfile: load_pem_pkcs8_private_key(
pemfile.read().encode(), password, backend
)
)
assert key
assert isinstance(key, interfaces.RSAPrivateKey)
if isinstance(key, interfaces.RSAPrivateKeyWithNumbers):
_check_rsa_private_numbers(key.private_numbers())
@pytest.mark.parametrize(
("key_file", "password"),
[
("ec_private_key.pem", None),
("ec_private_key_encrypted.pem", b"123456"),
]
)
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
def test_load_pem_ec_private_key(self, key_file, password, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", key_file),
lambda pemfile: load_pem_pkcs8_private_key(
pemfile.read().encode(), password, backend
)
)
assert key
assert isinstance(key, interfaces.EllipticCurvePrivateKey)
assert key.curve.name == "secp256r1"
assert key.curve.key_size == 256
def test_unused_password(self, backend):
key_file = os.path.join(
"asymmetric", "PKCS8", "unenc-rsa-pkcs8.pem")
password = b"this password will not be used"
with pytest.raises(TypeError):
load_vectors_from_file(
key_file,
lambda pemfile: load_pem_pkcs8_private_key(
pemfile.read().encode(), password, backend
)
)
def test_wrong_password(self, backend):
key_file = os.path.join(
"asymmetric", "PKCS8", "enc-rsa-pkcs8.pem")
password = b"this password is wrong"
with pytest.raises(ValueError):
load_vectors_from_file(
key_file,
lambda pemfile: load_pem_pkcs8_private_key(
pemfile.read().encode(), password, backend
)
)
@pytest.mark.parametrize("password", [None, b""])
def test_missing_password(self, backend, password):
key_file = os.path.join(
"asymmetric",
"PKCS8",
"enc-rsa-pkcs8.pem"
)
with pytest.raises(TypeError):
load_vectors_from_file(
key_file,
lambda pemfile: load_pem_pkcs8_private_key(
pemfile.read().encode(), password, backend
)
)
def test_wrong_format(self, backend):
key_data = b"---- NOT A KEY ----\n"
with pytest.raises(ValueError):
load_pem_pkcs8_private_key(
key_data, None, backend
)
with pytest.raises(ValueError):
load_pem_pkcs8_private_key(
key_data, b"this password will not be used", backend
)
def test_corrupt_format(self, backend):
# unenc-rsa-pkcs8.pem with a bunch of data missing.
key_data = textwrap.dedent("""\
-----BEGIN PRIVATE KEY-----
MIICdQIBADALBgkqhkiG9w0BAQEEggJhMIICXQIBAAKBgQC7JHoJfg6yNzLMOWet
8Z49a4KD0dCspMAYvo2YAMB7/wdEycocujbhJ2n/seONi+5XqTqqFkM5VBl8rmkk
FPZk/7x0xmdsTPECSWnHK+HhoaNDFPR3j8jQhVo1laxiqcEhAHegi5cwtFosuJAv
FiRC0Cgz+frQPFQEBsAV9RuasyQxqzxrR0Ow0qncBeGBWbYE6WZhqtcLAI895b+i
+F4lbB4iD7T9QeIDMU/aIMXA81UO4cns1z4qDAHKeyLLrPQrJ/B4X7XC+egUWm5+
hr1qmyAMusyXIBECQQDJWZ8piluf4yrYfsJAn6hF5T4RjTztbqvO0GVG2McHY7Uj
NPSffhzHx/ll0fQEQji+OgydCCX8o3HZrgw5YfSJAkEA7e+rqdU5nO5ZG//PSEQb
tjLnRiTzBH/elQhtdZ5nF7pcpNTi4k13zutmKcWW4GK75azcRGJUhu1kDM7QYAOd
SQJAVNkYcifkvna7GmooL5VYEsQsqLbM4v0NF2TIGNfG3z1MGp75KrC5LhL97MNR
we2p/bd2k0HYyCKUGnf2nMPDiQJBAI75pwittSoE240EobUGIDTSz8CJsXIxuDmL
z+KOpdpPRR5TQmbEMEspjsFpFymMiuYPgmihQbO2cJl1qScY5OkCQQCJ6m5tcN8l
Xxg/SNpjEIv+qAyUD96XVlOJlOIeLHQ8kYE0C6ZA+MsqYIzgAreJk88Yn0lU/X0/
mu/UpE/BRZmR
-----END PRIVATE KEY-----
""").encode()
with pytest.raises(ValueError):
load_pem_pkcs8_private_key(
key_data, None, backend
)
with pytest.raises(ValueError):
load_pem_pkcs8_private_key(
key_data, b"this password will not be used", backend
)
def test_encrypted_corrupt_format(self, backend):
# enc-rsa-pkcs8.pem with some bits flipped.
key_data = textwrap.dedent("""\
-----BEGIN ENCRYPTED PRIVATE KEY-----
MIICojAcBgoqhkiG9w0BDAEDMA4ECHK0M0+QuEL9AgIBIcSCAoDRq+KRY+0XP0tO
lwBTzViiXSXoyNnKAZKt5r5K/fGNntv22g/1s/ZNCetrqsJDC5eMUPPacz06jFq/
Ipsep4/OgjQ9UAOzXNrWEoNyrHnWDo7usgD3CW0mKyqER4+wG0adVMbt3N+CJHGB
85jzRmQTfkdx1rSWeSx+XyswHn8ER4+hQ+omKWMVm7AFkjjmP/KnhUnLT98J8rhU
ArQoFPHz/6HVkypFccNaPPNg6IA4aS2A+TU9vJYOaXSVfFB2yf99hfYYzC+ukmuU
5Lun0cysK5s/5uSwDueUmDQKspnaNyiaMGDxvw8hilJc7vg0fGObfnbIpizhxJwq
gKBfR7Zt0Hv8OYi1He4MehfMGdbHskztF+yQ40LplBGXQrvAqpU4zShga1BoQ98T
0ekbBmqj7hg47VFsppXR7DKhx7G7rpMmdKbFhAZVCjae7rRGpUtD52cpFdPhMyAX
huhMkoczwUW8B/rM4272lkHo6Br0yk/TQfTEGkvryflNVu6lniPTV151WV5U1M3o
3G3a44eDyt7Ln+WSOpWtbPQMTrpKhur6WXgJvrpa/m02oOGdvOlDsoOCgavgQMWg
7xKKL7620pHl7p7f/8tlE8q6vLXVvyNtAOgt/JAr2rgvrHaZSzDE0DwgCjBXEm+7
cVMVNkHod7bLQefVanVtWqPzbmr8f7gKeuGwWSG9oew/lN2hxcLEPJHAQlnLgx3P
0GdGjK9NvwA0EP2gYIeE4+UtSder7xQ7bVh25VB20R4TTIIs4aXXCVOoQPagnzaT
6JLgl8FrvdfjHwIvmSOO1YMNmILBq000Q8WDqyErBDs4hsvtO6VQ4LeqJj6gClX3
qeJNaJFu
-----END ENCRYPTED PRIVATE KEY-----
""").encode()
password = b"this password is wrong"
with pytest.raises(ValueError):
load_pem_pkcs8_private_key(
key_data, None, backend
)
with pytest.raises(ValueError):
load_pem_pkcs8_private_key(
key_data, password, backend
)
def test_key1_pem_encrypted_values(self, backend):
pkey = load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", "enc-rsa-pkcs8.pem"),
lambda pemfile: load_pem_pkcs8_private_key(
pemfile.read().encode(), b"foobar", backend
)
)
assert pkey
numbers = pkey.private_numbers()
assert numbers.public_numbers.n == int(
"00beec64d6db5760ac2fd4c971145641b9bd7f5c56558ece608795c79807"
"376a7fe5b19f95b35ca358ea5c8abd7ae051d49cd2f1e45969a1ae945460"
"3c14b278664a0e414ebc8913acb6203626985525e17a600611b028542dd0"
"562aad787fb4f1650aa318cdcff751e1b187cbf6785fbe164e9809491b95"
"dd68480567c99b1a57", 16
)
assert numbers.public_numbers.e == 65537
assert numbers.d == int(
"0cfe316e9dc6b8817f4fcfd5ae38a0886f68f773b8a6db4c9e6d8703c599"
"f3d9785c3a2c09e4c8090909fb3721e19a3009ec21221523a729265707a5"
"8f13063671c42a4096cad378ef2510cb59e23071489d8893ac4934dd149f"
"34f2d094bea57f1c8027c3a77248ac9b91218737d0c3c3dfa7d7829e6977"
"cf7d995688c86c81", 16
)
assert numbers.p == int(
"00db122ac857b2c0437d7616daa98e597bb75ca9ad3a47a70bec10c10036"
"03328794b225c8e3eee6ffd3fd6d2253d28e071fe27d629ab072faa14377"
"ce6118cb67", 16
)
assert numbers.q == int(
"00df1b8aa8506fcbbbb9d00257f2975e38b33d2698fd0f37e82d7ef38c56"
"f21b6ced63c825383782a7115cfcc093300987dbd2853b518d1c8f26382a"
"2d2586d391", 16
)
assert numbers.dmp1 == int(
"00be18aca13e60712fdf5daa85421eb10d86d654b269e1255656194fb0c4"
"2dd01a1070ea12c19f5c39e09587af02f7b1a1030d016a9ffabf3b36d699"
"ceaf38d9bf", 16
)
assert numbers.dmq1 == int(
"71aa8978f90a0c050744b77cf1263725b203ac9f730606d8ae1d289dce4a"
"28b8d534e9ea347aeb808c73107e583eb80c546d2bddadcdb3c82693a4c1"
"3d863451", 16
)
assert numbers.iqmp == int(
"136b7b1afac6e6279f71b24217b7083485a5e827d156024609dae39d48a6"
"bdb55af2f062cc4a3b077434e6fffad5faa29a2b5dba2bed3e4621e478c0"
"97ccfe7f", 16
)
@pytest.mark.parametrize(
("key_file", "password"),
[
("unenc-dsa-pkcs8.pem", None),
]
)
def test_load_pem_dsa_private_key(self, key_file, password, backend):
key = load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", key_file),
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
assert key
assert isinstance(key, interfaces.DSAPrivateKey)
params = key.parameters()
assert isinstance(params, interfaces.DSAParameters)
if isinstance(params, interfaces.DSAParametersWithNumbers):
num = key.private_numbers()
pub = num.public_numbers
parameter_numbers = pub.parameter_numbers
assert num.x == int("00a535a8e1d0d91beafc8bee1d9b2a3a8de3311203",
16)
assert pub.y == int(
"2b260ea97dc6a12ae932c640e7df3d8ff04a8a05a0324f8d5f1b23f15fa1"
"70ff3f42061124eff2586cb11b49a82dcdc1b90fc6a84fb10109cb67db5d"
"2da971aeaf17be5e37284563e4c64d9e5fc8480258b319f0de29d54d8350"
"70d9e287914d77df81491f4423b62da984eb3f45eb2a29fcea5dae525ac6"
"ab6bcce04bfdf5b6",
16
)
assert parameter_numbers.p == int(
"00aa0930cc145825221caffa28ac2894196a27833de5ec21270791689420"
"7774a2e7b238b0d36f1b2499a2c2585083eb01432924418d867faa212dd1"
"071d4dceb2782794ad393cc08a4d4ada7f68d6e839a5fcd34b4e402d82cb"
"8a8cb40fec31911bf9bd360b034caacb4c5e947992573c9e90099c1b0f05"
"940cabe5d2de49a167",
16
)
assert parameter_numbers.q == int(
"00adc0e869b36f0ac013a681fdf4d4899d69820451", 16)
assert parameter_numbers.g == int(
"008c6b4589afa53a4d1048bfc346d1f386ca75521ccf72ddaa251286880e"
"e13201ff48890bbfc33d79bacaec71e7a778507bd5f1a66422e39415be03"
"e71141ba324f5b93131929182c88a9fa4062836066cebe74b5c6690c7d10"
"1106c240ab7ebd54e4e3301fd086ce6adac922fb2713a2b0887cba13b9bc"
"68ce5cfff241cd3246",
16
)
@pytest.mark.parametrize(
("key_file", "password"),
[
("bad-oid-dsa-key.pem", None),
]
)
def test_load_bad_oid_key(self, key_file, password, backend):
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", key_file),
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
@pytest.mark.parametrize(
("key_file", "password"),
[
("bad-encryption-oid.pem", b"password"),
]
)
def test_load_bad_encryption_oid_key(self, key_file, password, backend):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
load_vectors_from_file(
os.path.join(
"asymmetric", "PKCS8", key_file),
lambda pemfile: load_pem_traditional_openssl_private_key(
pemfile.read().encode(), password, backend
)
)
|
|
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
import re
from cms.utils import get_cms_setting
from cms.exceptions import SubClassNeededError, Deprecated
from cms.models import CMSPlugin
import django
from django import forms
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import ModelForm
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
DJANGO_1_4 = LooseVersion(django.get_version()) < LooseVersion('1.5')
class CMSPluginBaseMetaclass(forms.MediaDefiningClass):
"""
Ensure the CMSPlugin subclasses have sane values and set some defaults if
they're not given.
"""
def __new__(cls, name, bases, attrs):
super_new = super(CMSPluginBaseMetaclass, cls).__new__
parents = [base for base in bases if isinstance(base, CMSPluginBaseMetaclass)]
if not parents:
# If this is CMSPluginBase itself, and not a subclass, don't do anything
return super_new(cls, name, bases, attrs)
new_plugin = super_new(cls, name, bases, attrs)
# validate model is actually a CMSPlugin subclass.
if not issubclass(new_plugin.model, CMSPlugin):
raise SubClassNeededError(
"The 'model' attribute on CMSPluginBase subclasses must be "
"either CMSPlugin or a subclass of CMSPlugin. %r on %r is not."
% (new_plugin.model, new_plugin)
)
# validate the template:
if not hasattr(new_plugin, 'render_template'):
raise ImproperlyConfigured(
"CMSPluginBase subclasses must have a render_template attribute"
)
# Set the default form
if not new_plugin.form:
form_meta_attrs = {
'model': new_plugin.model,
'exclude': ('position', 'placeholder', 'language', 'plugin_type')
}
form_attrs = {
'Meta': type('Meta', (object,), form_meta_attrs)
}
new_plugin.form = type('%sForm' % name, (ModelForm,), form_attrs)
# Set the default fieldsets
if not new_plugin.fieldsets:
basic_fields = []
advanced_fields = []
for f in new_plugin.model._meta.fields:
if not f.auto_created and f.editable:
if hasattr(f, 'advanced'):
advanced_fields.append(f.name)
                    else:
                        basic_fields.append(f.name)
if advanced_fields:
new_plugin.fieldsets = [
(
None,
{
'fields': basic_fields
}
),
(
_('Advanced options'),
{
'fields' : advanced_fields,
'classes' : ('collapse',)
}
)
]
# Set default name
if not new_plugin.name:
new_plugin.name = re.sub("([a-z])([A-Z])", "\g<1> \g<2>", name)
return new_plugin
class CMSPluginBase(admin.ModelAdmin):
__metaclass__ = CMSPluginBaseMetaclass
name = ""
form = None
change_form_template = "admin/cms/page/plugin/change_form.html"
frontend_edit_template = 'cms/toolbar/plugin.html'
# Should the plugin be rendered in the admin?
admin_preview = False
render_template = None
# Should the plugin be rendered at all, or doesn't it have any output?
render_plugin = True
model = CMSPlugin
text_enabled = False
page_only = False
allow_children = False
child_classes = None
opts = {}
    module = None  # tracks which module/application this plugin belongs to
def __init__(self, model=None, admin_site=None):
if admin_site:
super(CMSPluginBase, self).__init__(self.model, admin_site)
self.object_successfully_changed = False
        # these will be overwritten in edit_view, so they are required here
self.cms_plugin_instance = None
self.placeholder = None
self.page = None
def render(self, context, instance, placeholder):
context['instance'] = instance
context['placeholder'] = placeholder
return context
@property
def parent(self):
return self.cms_plugin_instance.parent
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
We just need the popup interface here
"""
context.update({
'preview': not "no_preview" in request.GET,
'is_popup': True,
'plugin': self.cms_plugin_instance,
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
})
return super(CMSPluginBase, self).render_change_form(request, context, add, change, form_url, obj)
def has_add_permission(self, request, *args, **kwargs):
"""Permission handling change - if user is allowed to change the page
he must be also allowed to add/change/delete plugins..
Not sure if there will be plugin permission requirement in future, but
if, then this must be changed.
"""
return self.cms_plugin_instance.has_change_permission(request)
has_delete_permission = has_change_permission = has_add_permission
def save_model(self, request, obj, form, change):
"""
        Override the original method and add some attributes to obj.
        This has to be done because a newly created object must know
        where it lives.
        Attributes from cms_plugin_instance have to be assigned to the object
        if the cms_plugin_instance attribute is available.
"""
if getattr(self, "cms_plugin_instance"):
# assign stuff to object
fields = self.cms_plugin_instance._meta.fields
for field in fields:
# assign all the fields - we can do this, because object is
# subclassing cms_plugin_instance (one to one relation)
value = getattr(self.cms_plugin_instance, field.name)
setattr(obj, field.name, value)
# remember the saved object
self.saved_object = obj
return super(CMSPluginBase, self).save_model(request, obj, form, change)
def response_change(self, request, obj):
"""
        Just set a flag so we know something was changed and can create a
        new version if reversion is installed.
        The new version will be created in admin.views.edit_plugin.
"""
self.object_successfully_changed = True
return super(CMSPluginBase, self).response_change(request, obj)
def response_add(self, request, obj, **kwargs):
"""
        Just set a flag so we know something was changed and can create a
        new version if reversion is installed.
        The new version will be created in admin.views.edit_plugin.
"""
self.object_successfully_changed = True
if not DJANGO_1_4:
post_url_continue = reverse('admin:cms_page_edit_plugin',
args=(obj._get_pk_val(),),
current_app=self.admin_site.name)
kwargs.setdefault('post_url_continue', post_url_continue)
return super(CMSPluginBase, self).response_add(request, obj, **kwargs)
def log_addition(self, request, object):
pass
def log_change(self, request, object, message):
pass
def log_deletion(self, request, object, object_repr):
pass
def icon_src(self, instance):
"""
Overwrite this if text_enabled = True
Return the URL for an image to be used for an icon for this
plugin instance in a text editor.
"""
return ""
def icon_alt(self, instance):
"""
        Overwrite this if necessary when text_enabled = True.
Return the 'alt' text to be used for an icon representing
the plugin object in a text editor.
"""
return "%s - %s" % (unicode(self.name), unicode(instance))
def get_child_classes(self, slot, page):
from cms.plugin_pool import plugin_pool
if self.child_classes:
return self.child_classes
else:
installed_plugins = plugin_pool.get_all_plugins(slot, page)
return [cls.__name__ for cls in installed_plugins]
def __repr__(self):
return smart_str(self.name)
def __unicode__(self):
return self.name
#===========================================================================
# Deprecated APIs
#===========================================================================
@property
def pluginmedia(self):
raise Deprecated(
"CMSPluginBase.pluginmedia is deprecated in favor of django-sekizai"
)
def get_plugin_media(self, request, context, plugin):
raise Deprecated(
"CMSPluginBase.get_plugin_media is deprecated in favor of django-sekizai"
)
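# Illustrative sketch: a minimal plugin built on the base class above.
# "MyText" and the template path are hypothetical names; the point is that
# CMSPluginBaseMetaclass will generate a default ModelForm, default fieldsets
# and a human-readable name ("My Text Plugin") because none are declared
# explicitly.
#
#     class MyTextPlugin(CMSPluginBase):
#         model = MyText                          # must subclass CMSPlugin
#         render_template = "my_text/plugin.html"
#
#         def render(self, context, instance, placeholder):
#             context['instance'] = instance
#             return context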
|
|
from __future__ import unicode_literals
import re
import six
from six.moves.urllib.parse import parse_qs, urlparse
import xmltodict
from moto.core.responses import _TemplateEnvironmentMixin
from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder
from .models import s3_backend
from .utils import bucket_name_from_url, metadata_from_headers
from xml.dom import minidom
REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com'
DEFAULT_REGION_NAME = 'us-east-1'
def parse_key_name(pth):
return pth.lstrip("/")
class ResponseObject(_TemplateEnvironmentMixin):
def __init__(self, backend, bucket_name_from_url, parse_key_name):
self.backend = backend
self.bucket_name_from_url = bucket_name_from_url
self.parse_key_name = parse_key_name
def all_buckets(self):
# No bucket specified. Listing all buckets
all_buckets = self.backend.get_all_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def bucket_response(self, request, full_url, headers):
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, headers, s3error.description
if isinstance(response, six.string_types):
return 200, headers, response.encode("utf-8")
else:
status_code, headers, response_content = response
return status_code, headers, response_content.encode("utf-8")
def _bucket_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
region_name = DEFAULT_REGION_NAME
region_match = re.search(REGION_URL_REGEX, full_url)
if region_match:
region_name = region_match.groups()[0]
bucket_name = self.bucket_name_from_url(full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
body = body.decode('utf-8')
if method == 'HEAD':
return self._bucket_response_head(bucket_name, headers)
elif method == 'GET':
return self._bucket_response_get(bucket_name, querystring, headers)
elif method == 'PUT':
return self._bucket_response_put(body, region_name, bucket_name, querystring, headers)
elif method == 'DELETE':
return self._bucket_response_delete(body, bucket_name, querystring, headers)
elif method == 'POST':
return self._bucket_response_post(request, bucket_name, headers)
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
self.backend.get_bucket(bucket_name)
return 200, headers, ""
def _bucket_response_get(self, bucket_name, querystring, headers):
if 'uploads' in querystring:
for unsup in ('delimiter', 'max-uploads'):
if unsup in querystring:
raise NotImplementedError("Listing multipart uploads with {} has not been implemented yet.".format(unsup))
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if 'prefix' in querystring:
prefix = querystring.get('prefix', [None])[0]
multiparts = [upload for upload in multiparts if upload.key_name.startswith(prefix)]
template = self.response_template(S3_ALL_MULTIPARTS)
return 200, headers, template.render(
bucket_name=bucket_name,
uploads=multiparts)
elif 'location' in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
return 200, headers, template.render(location=bucket.location)
elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules:
return 404, headers, "NoSuchLifecycleConfiguration"
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return 200, headers, template.render(rules=bucket.rules)
elif 'versioning' in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return 200, headers, template.render(status=versioning)
elif 'policy' in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, headers, template.render(bucket_name=bucket_name)
return 200, headers, policy
elif 'versions' in querystring:
delimiter = querystring.get('delimiter', [None])[0]
encoding_type = querystring.get('encoding-type', [None])[0]
key_marker = querystring.get('key-marker', [None])[0]
max_keys = querystring.get('max-keys', [None])[0]
prefix = querystring.get('prefix', [None])[0]
version_id_marker = querystring.get('version-id-marker', [None])[0]
bucket = self.backend.get_bucket(bucket_name)
versions = self.backend.get_bucket_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker
)
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return 200, headers, template.render(
key_list=versions,
bucket=bucket,
prefix='',
max_keys='',
delimiter='',
is_truncated='false',
)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get('prefix', [None])[0]
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return 200, headers, template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
def _bucket_response_put(self, body, region_name, bucket_name, querystring, headers):
if 'versioning' in querystring:
ver = re.search('<Status>([A-Za-z]+)</Status>', body)
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, headers, ""
elif 'lifecycle' in querystring:
rules = xmltodict.parse(body)['LifecycleConfiguration']['Rule']
if not isinstance(rules, list):
                # If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.set_bucket_lifecycle(bucket_name, rules)
return ""
elif 'policy' in querystring:
self.backend.set_bucket_policy(bucket_name, body)
return 'True'
else:
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
if region_name == DEFAULT_REGION_NAME:
# us-east-1 has different behavior
new_bucket = self.backend.get_bucket(bucket_name)
else:
raise
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, headers, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring, headers):
if 'policy' in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, headers, ""
elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name)
bucket.delete_lifecycle()
return 204, headers, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, headers, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, headers, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, bucket_name, headers):
if request.path == u'/?delete':
return self._bucket_response_delete_keys(request, bucket_name, headers)
# POST to bucket-url should create file from form
if hasattr(request, 'form'):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
form = {}
for kv in request.body.decode('utf-8').split('&'):
k, v = kv.split('=')
form[k] = v
key = form['key']
if 'file' in form:
f = form['file']
else:
f = request.files['file'].stream.read()
new_key = self.backend.set_key(bucket_name, key, f)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return 200, headers, ""
def _bucket_response_delete_keys(self, request, bucket_name, headers):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
keys = minidom.parseString(request.body.decode('utf-8')).getElementsByTagName('Key')
deleted_names = []
error_names = []
for k in keys:
try:
key_name = k.firstChild.nodeValue
self.backend.delete_key(bucket_name, key_name)
deleted_names.append(key_name)
except KeyError:
error_names.append(key_name)
return 200, headers, template.render(deleted=deleted_names, delete_errors=error_names)
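    # Illustrative note: the body parsed above is the standard S3 multi-object
    # delete payload, e.g.
    #   <Delete>
    #     <Object><Key>a.txt</Key></Object>
    #     <Object><Key>b.txt</Key></Object>
    #   </Delete>
    # Keys missing from the bucket raise KeyError in delete_key and end up in
    # the <Error> entries of the response template.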
def _handle_range_header(self, request, headers, response_content):
length = len(response_content)
last = length - 1
_, rspec = request.headers.get('range').split('=')
if ',' in rspec:
raise NotImplementedError(
"Multiple range specifiers not supported")
toint = lambda i: int(i) if i else None
begin, end = map(toint, rspec.split('-'))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, headers, ""
if begin < 0 or end > last or begin > min(end, last):
return 416, headers, ""
headers['content-range'] = "bytes {0}-{1}/{2}".format(
begin, end, length)
return 206, headers, response_content[begin:end + 1]
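    # Worked example: for a 10-byte body, "Range: bytes=2-5" gives begin=2,
    # end=5, "Content-Range: bytes 2-5/10" and returns bytes 2..5 inclusive;
    # the suffix form "Range: bytes=-3" gives begin=None, end=3, which is
    # converted to begin=7, end=9 (the last three bytes); an open range
    # "bytes=4-" is clamped to end=9.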
def key_response(self, request, full_url, headers):
try:
response = self._key_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, headers, s3error.description
if isinstance(response, six.string_types):
status_code = 200
response_content = response
else:
status_code, headers, response_content = response
if status_code == 200 and 'range' in request.headers:
return self._handle_range_header(request, headers, response_content)
return status_code, headers, response_content
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query)
method = request.method
key_name = self.parse_key_name(parsed_url.path)
bucket_name = self.bucket_name_from_url(full_url)
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
if method == 'GET':
return self._key_response_get(bucket_name, query, key_name, headers)
elif method == 'PUT':
return self._key_response_put(request, parsed_url, body, bucket_name, query, key_name, headers)
elif method == 'HEAD':
return self._key_response_head(bucket_name, key_name, headers)
elif method == 'DELETE':
return self._key_response_delete(bucket_name, query, key_name, headers)
elif method == 'POST':
return self._key_response_post(request, body, parsed_url, bucket_name, query, key_name, headers)
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _key_response_get(self, bucket_name, query, key_name, headers):
if 'uploadId' in query:
upload_id = query['uploadId'][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return 200, headers, template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts
)
version_id = query.get('versionId', [None])[0]
key = self.backend.get_key(
bucket_name, key_name, version_id=version_id)
if key:
headers.update(key.metadata)
return 200, headers, key.value
else:
return 404, headers, ""
def _key_response_put(self, request, parsed_url, body, bucket_name, query, key_name, headers):
if 'uploadId' in query and 'partNumber' in query:
upload_id = query['uploadId'][0]
part_number = int(query['partNumber'][0])
if 'x-amz-copy-source' in request.headers:
src = request.headers.get("x-amz-copy-source")
src_bucket, src_key = src.split("/", 1)
key = self.backend.copy_part(
bucket_name, upload_id, part_number, src_bucket,
src_key)
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.set_part(
bucket_name, upload_id, part_number, body)
response = ""
headers.update(key.response_dict)
return 200, headers, response
storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
if parsed_url.query == 'acl':
# We don't implement ACL yet, so just return
return 200, headers, ""
if 'x-amz-copy-source' in request.headers:
# Copy key
src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
storage=storage_class)
mdirective = request.headers.get('x-amz-metadata-directive')
if mdirective is not None and mdirective == 'REPLACE':
new_key = self.backend.get_key(bucket_name, key_name)
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
return template.render(key=src_key)
streaming_request = hasattr(request, 'streaming') and request.streaming
closing_connection = headers.get('connection') == 'close'
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_key(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.set_key(bucket_name, key_name, body,
storage=storage_class)
request.streaming = True
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata)
template = self.response_template(S3_OBJECT_RESPONSE)
headers.update(new_key.response_dict)
return 200, headers, template.render(key=new_key)
def _key_response_head(self, bucket_name, key_name, headers):
key = self.backend.get_key(bucket_name, key_name)
if key:
headers.update(key.metadata)
headers.update(key.response_dict)
return 200, headers, key.value
else:
return 404, headers, ""
def _key_response_delete(self, bucket_name, query, key_name, headers):
if 'uploadId' in query:
upload_id = query['uploadId'][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, headers, ""
try:
removed_key = self.backend.delete_key(bucket_name, key_name)
except KeyError:
return 404, headers, ""
template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
return 204, headers, template.render(bucket=removed_key)
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName('Part')
prev = 0
for p in ps:
pn = int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
            prev = pn
            yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
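    # Illustrative note: the generator above expects a standard
    # CompleteMultipartUpload body such as
    #   <CompleteMultipartUpload>
    #     <Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
    #     <Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
    #   </CompleteMultipartUpload>
    # and yields (1, '"etag1"'), (2, '"etag2"'), raising InvalidPartOrder when
    # the part numbers are not strictly increasing.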
def _key_response_post(self, request, body, parsed_url, bucket_name, query, key_name, headers):
if body == b'' and parsed_url.query == 'uploads':
metadata = metadata_from_headers(request.headers)
multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=multipart.id,
)
return 200, headers, response
if 'uploadId' in query:
body = self._complete_multipart_body(body)
upload_id = query['uploadId'][0]
key = self.backend.complete_multipart(bucket_name, upload_id, body)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
return template.render(
bucket_name=bucket_name,
key_name=key.name,
etag=key.etag,
)
elif parsed_url.query == 'restore':
es = minidom.parseString(body).getElementsByTagName('Days')
days = es[0].childNodes[0].wholeText
key = self.backend.get_key(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, headers, ""
else:
raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far")
S3ResponseInstance = ResponseObject(s3_backend, bucket_name_from_url, parse_key_name)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>1000</MaxKeys>
<Delimiter>{{ delimiter }}</Delimiter>
<IsTruncated>false</IsTruncated>
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{{ location }}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
<Prefix>{{ rule.prefix if rule.prefix != None }}</Prefix>
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
</Expiration>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key._version_id }}</VersionId>
<IsLatest>false</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k in deleted %}
<Deleted>
<Key>{{k}}</Key>
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_OBJECT_SUCCESS = """<DeleteObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteObjectResponse>
<Code>200</Code>
<Description>OK</Description>
</DeleteObjectResponse>
</DeleteObjectResponse>"""
S3_OBJECT_RESPONSE = """<PutObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<PutObjectResponse>
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</PutObjectResponse>
</PutObjectResponse>"""
S3_OBJECT_COPY_RESPONSE = """<CopyObjectResponse xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<CopyObjectResponse>
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResponse>
</CopyObjectResponse>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }} </NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>
<IsTruncated>false</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = """<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>False</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::111122223333:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>OwnerDisplayName</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
|
|
# (C) Datadog, Inc. 2010-2016
# (C) Luca Cipriani <luca@c9.io> 2013
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import time
# 3p
import psutil
# project
from checks import AgentCheck
from config import _is_affirmative
from utils.platform import Platform
DEFAULT_AD_CACHE_DURATION = 120
DEFAULT_PID_CACHE_DURATION = 120
ATTR_TO_METRIC = {
'thr': 'threads',
'cpu': 'cpu.pct',
'rss': 'mem.rss',
'vms': 'mem.vms',
'real': 'mem.real',
'open_fd': 'open_file_descriptors',
'open_handle': 'open_handles', # win32 only
    'r_count': 'ioread_count', # FIXME: namespace me correctly (6.x), io.r_count
    'w_count': 'iowrite_count', # FIXME: namespace me correctly (6.x), io.w_count
    'r_bytes': 'ioread_bytes', # FIXME: namespace me correctly (6.x), io.r_bytes
    'w_bytes': 'iowrite_bytes', # FIXME: namespace me correctly (6.x), io.w_bytes
'ctx_swtch_vol': 'voluntary_ctx_switches', # FIXME: namespace me correctly (6.x), ctx_swt.voluntary
'ctx_swtch_invol': 'involuntary_ctx_switches', # FIXME: namespace me correctly (6.x), ctx_swt.involuntary
'run_time': 'run_time',
'mem_pct': 'mem.pct'
}
ATTR_TO_METRIC_RATE = {
'minflt': 'mem.page_faults.minor_faults',
'cminflt': 'mem.page_faults.children_minor_faults',
'majflt': 'mem.page_faults.major_faults',
'cmajflt': 'mem.page_faults.children_major_faults'
}
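# Note: the keys in the two maps above are the internal accessor names
# collected per process in get_process_state(); the values are the metric
# suffixes reported under "system.processes.<suffix>" - gauges for
# ATTR_TO_METRIC (with .avg/.max/.min variants for run_time) and rates for
# ATTR_TO_METRIC_RATE.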
class ProcessCheck(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# ad stands for access denied
# We cache the PIDs getting this error and don't iterate on them
# more often than `access_denied_cache_duration`
# This cache is for all PIDs so it's global, but it should
# be refreshed by instance
self.last_ad_cache_ts = {}
self.ad_cache = set()
self.access_denied_cache_duration = int(
init_config.get(
'access_denied_cache_duration',
DEFAULT_AD_CACHE_DURATION
)
)
# By default cache the PID list for a while
# Sometimes it's not wanted b/c it can mess with no-data monitoring
# This cache is indexed per instance
self.last_pid_cache_ts = {}
self.pid_cache = {}
self.pid_cache_duration = int(
init_config.get(
'pid_cache_duration',
DEFAULT_PID_CACHE_DURATION
)
)
self._conflicting_procfs = False
self._deprecated_init_procfs = False
if Platform.is_linux():
procfs_path = init_config.get('procfs_path')
if procfs_path:
if 'procfs_path' in agentConfig and procfs_path != agentConfig.get('procfs_path').rstrip('/'):
self._conflicting_procfs = True
else:
self._deprecated_init_procfs = True
psutil.PROCFS_PATH = procfs_path
# Process cache, indexed by instance
self.process_cache = defaultdict(dict)
def should_refresh_ad_cache(self, name):
now = time.time()
return now - self.last_ad_cache_ts.get(name, 0) > self.access_denied_cache_duration
def should_refresh_pid_cache(self, name):
now = time.time()
return now - self.last_pid_cache_ts.get(name, 0) > self.pid_cache_duration
def find_pids(self, name, search_string, exact_match, ignore_ad=True):
"""
        Create a set of PIDs for the processes whose name or command line
        matches search_string.
"""
if not self.should_refresh_pid_cache(name):
return self.pid_cache[name]
ad_error_logger = self.log.debug
if not ignore_ad:
ad_error_logger = self.log.error
refresh_ad_cache = self.should_refresh_ad_cache(name)
matching_pids = set()
for proc in psutil.process_iter():
# Skip access denied processes
if not refresh_ad_cache and proc.pid in self.ad_cache:
continue
found = False
for string in search_string:
try:
# FIXME 6.x: All has been deprecated from the doc, should be removed
if string == 'All':
found = True
if exact_match:
if proc.name() == string:
found = True
else:
cmdline = proc.cmdline()
if string in ' '.join(cmdline):
found = True
except psutil.NoSuchProcess:
self.log.warning('Process disappeared while scanning')
except psutil.AccessDenied as e:
ad_error_logger('Access denied to process with PID %s', proc.pid)
ad_error_logger('Error: %s', e)
if refresh_ad_cache:
self.ad_cache.add(proc.pid)
if not ignore_ad:
raise
else:
if refresh_ad_cache:
self.ad_cache.discard(proc.pid)
if found:
matching_pids.add(proc.pid)
break
self.pid_cache[name] = matching_pids
self.last_pid_cache_ts[name] = time.time()
if refresh_ad_cache:
self.last_ad_cache_ts[name] = time.time()
return matching_pids
def psutil_wrapper(self, process, method, accessors, *args, **kwargs):
"""
        A psutil wrapper that calls either
        * process.method(*args, **kwargs) and returns the result, or
        * process.method(*args, **kwargs).accessor for each accessor given in
          a list, the results being collected in a dictionary keyed by
          accessor name.
"""
if accessors is None:
result = None
else:
result = {}
        # Ban certain methods that we know will fail
if method == 'memory_info_ex'\
and (Platform.is_win32() or Platform.is_solaris()):
return result
elif method == 'num_fds' and not Platform.is_unix():
return result
elif method == 'num_handles' and not Platform.is_win32():
return result
try:
res = getattr(process, method)(*args, **kwargs)
if accessors is None:
result = res
else:
for acc in accessors:
try:
result[acc] = getattr(res, acc)
except AttributeError:
self.log.debug("psutil.%s().%s attribute does not exist", method, acc)
except (NotImplementedError, AttributeError):
self.log.debug("psutil method %s not implemented", method)
except psutil.AccessDenied:
self.log.debug("psutil was denied acccess for method %s", method)
except psutil.NoSuchProcess:
self.warning("Process {0} disappeared while scanning".format(process.pid))
return result
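    # Illustrative usage: given a psutil.Process `p`,
    #   self.psutil_wrapper(p, 'memory_info', ['rss', 'vms'])
    # returns e.g. {'rss': 10485760, 'vms': 52428800}, while
    #   self.psutil_wrapper(p, 'num_threads', None)
    # returns the raw integer result, or None if the call is denied or fails.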
def get_process_state(self, name, pids):
st = defaultdict(list)
# Remove from cache the processes that are not in `pids`
cached_pids = set(self.process_cache[name].keys())
pids_to_remove = cached_pids - pids
for pid in pids_to_remove:
del self.process_cache[name][pid]
for pid in pids:
st['pids'].append(pid)
new_process = False
# If the pid's process is not cached, retrieve it
if pid not in self.process_cache[name] or not self.process_cache[name][pid].is_running():
new_process = True
try:
self.process_cache[name][pid] = psutil.Process(pid)
self.log.debug('New process in cache: %s' % pid)
# Skip processes dead in the meantime
except psutil.NoSuchProcess:
self.warning('Process %s disappeared while scanning' % pid)
# reset the PID cache now, something changed
self.last_pid_cache_ts[name] = 0
continue
p = self.process_cache[name][pid]
meminfo = self.psutil_wrapper(p, 'memory_info', ['rss', 'vms'])
st['rss'].append(meminfo.get('rss'))
st['vms'].append(meminfo.get('vms'))
mem_percent = self.psutil_wrapper(p, 'memory_percent', None)
st['mem_pct'].append(mem_percent)
# will fail on win32 and solaris
shared_mem = self.psutil_wrapper(p, 'memory_info_ex', ['shared']).get('shared')
if shared_mem is not None and meminfo.get('rss') is not None:
st['real'].append(meminfo['rss'] - shared_mem)
else:
st['real'].append(None)
ctxinfo = self.psutil_wrapper(p, 'num_ctx_switches', ['voluntary', 'involuntary'])
st['ctx_swtch_vol'].append(ctxinfo.get('voluntary'))
st['ctx_swtch_invol'].append(ctxinfo.get('involuntary'))
st['thr'].append(self.psutil_wrapper(p, 'num_threads', None))
cpu_percent = self.psutil_wrapper(p, 'cpu_percent', None)
if not new_process:
# psutil returns `0.` for `cpu_percent` the first time it's sampled on a process,
# so save the value only on non-new processes
st['cpu'].append(cpu_percent)
st['open_fd'].append(self.psutil_wrapper(p, 'num_fds', None))
st['open_handle'].append(self.psutil_wrapper(p, 'num_handles', None))
ioinfo = self.psutil_wrapper(p, 'io_counters', ['read_count', 'write_count', 'read_bytes', 'write_bytes'])
st['r_count'].append(ioinfo.get('read_count'))
st['w_count'].append(ioinfo.get('write_count'))
st['r_bytes'].append(ioinfo.get('read_bytes'))
st['w_bytes'].append(ioinfo.get('write_bytes'))
pagefault_stats = self.get_pagefault_stats(pid)
if pagefault_stats is not None:
(minflt, cminflt, majflt, cmajflt) = pagefault_stats
st['minflt'].append(minflt)
st['cminflt'].append(cminflt)
st['majflt'].append(majflt)
st['cmajflt'].append(cmajflt)
else:
st['minflt'].append(None)
st['cminflt'].append(None)
st['majflt'].append(None)
st['cmajflt'].append(None)
            # calculate process run time
create_time = self.psutil_wrapper(p, 'create_time', None)
if create_time is not None:
now = time.time()
run_time = now - create_time
st['run_time'].append(run_time)
return st
def get_pagefault_stats(self, pid):
if not Platform.is_linux():
return None
def file_to_string(path):
with open(path, 'r') as f:
res = f.read()
return res
# http://man7.org/linux/man-pages/man5/proc.5.html
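        # Fields 10-13 of /proc/<pid>/stat (the 0-indexed slice [9:13] used
        # below) are minflt, cminflt, majflt and cmajflt respectively.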
try:
data = file_to_string('/%s/%s/stat' % (psutil.PROCFS_PATH, pid))
except Exception:
            self.log.debug('error getting proc stats: file_to_string failed '
                           'for /%s/%s/stat' % (psutil.PROCFS_PATH, pid))
return None
return map(lambda i: int(i), data.split()[9:13])
def check(self, instance):
name = instance.get('name', None)
tags = instance.get('tags', [])
exact_match = _is_affirmative(instance.get('exact_match', True))
search_string = instance.get('search_string', None)
ignore_ad = _is_affirmative(instance.get('ignore_denied_access', True))
pid = instance.get('pid')
pid_file = instance.get('pid_file')
if self._conflicting_procfs:
self.warning('The `procfs_path` defined in `process.yaml` is different from the one defined in '
'`datadog.conf`. This is currently not supported by the Agent. Defaulting to the '
'value defined in `datadog.conf`: {}'.format(psutil.PROCFS_PATH))
elif self._deprecated_init_procfs:
self.warning('DEPRECATION NOTICE: Specifying `procfs_path` in `process.yaml` is deprecated. '
'Please specify it in `datadog.conf` instead')
if not isinstance(search_string, list) and pid is None and pid_file is None:
raise ValueError('"search_string" or "pid" or "pid_file" parameter is required')
# FIXME 6.x remove me
if search_string is not None:
if "All" in search_string:
self.warning('Deprecated: Having "All" in your search_string will'
'greatly reduce the performance of the check and '
'will be removed in a future version of the agent.')
if name is None:
raise KeyError('The "name" of process groups is mandatory')
if search_string is not None:
pids = self.find_pids(
name,
search_string,
exact_match,
ignore_ad=ignore_ad
)
elif pid is not None:
# we use Process(pid) as a means to search, if pid not found
# psutil.NoSuchProcess is raised.
pids = self._get_pid_set(pid)
elif pid_file is not None:
with open(pid_file, 'r') as file_pid:
pid_line = file_pid.readline().strip()
pids = self._get_pid_set(int(pid_line))
else:
raise ValueError('The "search_string" or "pid" options are required for process identification')
proc_state = self.get_process_state(name, pids)
# FIXME 6.x remove the `name` tag
tags.extend(['process_name:%s' % name, name])
self.log.debug('ProcessCheck: process %s analysed', name)
self.gauge('system.processes.number', len(pids), tags=tags)
for attr, mname in ATTR_TO_METRIC.iteritems():
vals = [x for x in proc_state[attr] if x is not None]
# skip []
if vals:
if attr == 'run_time':
self.gauge('system.processes.%s.avg' % mname, sum(vals)/len(vals), tags=tags)
self.gauge('system.processes.%s.max' % mname, max(vals), tags=tags)
self.gauge('system.processes.%s.min' % mname, min(vals), tags=tags)
# FIXME 6.x: change this prefix?
else:
self.gauge('system.processes.%s' % mname, sum(vals), tags=tags)
for attr, mname in ATTR_TO_METRIC_RATE.iteritems():
vals = [x for x in proc_state[attr] if x is not None]
if vals:
self.rate('system.processes.%s' % mname, sum(vals), tags=tags)
self._process_service_check(name, len(pids), instance.get('thresholds', None))
def _get_pid_set(self, pid):
try:
return {psutil.Process(pid).pid}
except psutil.NoSuchProcess:
return set()
def _process_service_check(self, name, nb_procs, bounds):
"""
        Report a service check for each process in search_string.
        Report OK if the process count is within the warning thresholds,
        WARNING if it is outside the warning thresholds, and
        CRITICAL if it is outside the critical thresholds.
"""
tag = ["process:%s" % name]
status = AgentCheck.OK
message_str = "PROCS %s: %s processes found for %s"
status_str = {
AgentCheck.OK: "OK",
AgentCheck.WARNING: "WARNING",
AgentCheck.CRITICAL: "CRITICAL"
}
if not bounds and nb_procs < 1:
status = AgentCheck.CRITICAL
elif bounds:
warning = bounds.get('warning', [1, float('inf')])
critical = bounds.get('critical', [1, float('inf')])
if warning[1] < nb_procs or nb_procs < warning[0]:
status = AgentCheck.WARNING
if critical[1] < nb_procs or nb_procs < critical[0]:
status = AgentCheck.CRITICAL
self.service_check(
"process.up",
status,
tags=tag,
message=message_str % (status_str[status], nb_procs, name)
)
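# Illustrative example: a minimal instance configuration as it might appear in
# process.yaml; the group name and search strings are hypothetical.
#
#   instances:
#     - name: ssh
#       search_string: ['ssh', 'sshd']
#       exact_match: True
#       thresholds:
#         critical: [1, 5]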
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .sub_resource_py3 import SubResource
from .backend_address_pool_py3 import BackendAddressPool
from .inbound_nat_rule_py3 import InboundNatRule
from .security_rule_py3 import SecurityRule
from .network_interface_dns_settings_py3 import NetworkInterfaceDnsSettings
from .network_interface_py3 import NetworkInterface
from .network_security_group_py3 import NetworkSecurityGroup
from .route_py3 import Route
from .route_table_py3 import RouteTable
from .service_endpoint_properties_format_py3 import ServiceEndpointPropertiesFormat
from .public_ip_address_sku_py3 import PublicIPAddressSku
from .public_ip_address_dns_settings_py3 import PublicIPAddressDnsSettings
from .public_ip_address_py3 import PublicIPAddress
from .ip_configuration_py3 import IPConfiguration
from .resource_navigation_link_py3 import ResourceNavigationLink
from .subnet_py3 import Subnet
from .network_interface_ip_configuration_py3 import NetworkInterfaceIPConfiguration
from .application_gateway_backend_address_py3 import ApplicationGatewayBackendAddress
from .application_gateway_backend_address_pool_py3 import ApplicationGatewayBackendAddressPool
from .application_gateway_connection_draining_py3 import ApplicationGatewayConnectionDraining
from .application_gateway_backend_http_settings_py3 import ApplicationGatewayBackendHttpSettings
from .application_gateway_backend_health_server_py3 import ApplicationGatewayBackendHealthServer
from .application_gateway_backend_health_http_settings_py3 import ApplicationGatewayBackendHealthHttpSettings
from .application_gateway_backend_health_pool_py3 import ApplicationGatewayBackendHealthPool
from .application_gateway_backend_health_py3 import ApplicationGatewayBackendHealth
from .application_gateway_sku_py3 import ApplicationGatewaySku
from .application_gateway_ssl_policy_py3 import ApplicationGatewaySslPolicy
from .application_gateway_ip_configuration_py3 import ApplicationGatewayIPConfiguration
from .application_gateway_authentication_certificate_py3 import ApplicationGatewayAuthenticationCertificate
from .application_gateway_ssl_certificate_py3 import ApplicationGatewaySslCertificate
from .application_gateway_frontend_ip_configuration_py3 import ApplicationGatewayFrontendIPConfiguration
from .application_gateway_frontend_port_py3 import ApplicationGatewayFrontendPort
from .application_gateway_http_listener_py3 import ApplicationGatewayHttpListener
from .application_gateway_path_rule_py3 import ApplicationGatewayPathRule
from .application_gateway_probe_health_response_match_py3 import ApplicationGatewayProbeHealthResponseMatch
from .application_gateway_probe_py3 import ApplicationGatewayProbe
from .application_gateway_request_routing_rule_py3 import ApplicationGatewayRequestRoutingRule
from .application_gateway_redirect_configuration_py3 import ApplicationGatewayRedirectConfiguration
from .application_gateway_url_path_map_py3 import ApplicationGatewayUrlPathMap
from .application_gateway_firewall_disabled_rule_group_py3 import ApplicationGatewayFirewallDisabledRuleGroup
from .application_gateway_web_application_firewall_configuration_py3 import ApplicationGatewayWebApplicationFirewallConfiguration
from .application_gateway_py3 import ApplicationGateway
from .application_gateway_firewall_rule_py3 import ApplicationGatewayFirewallRule
from .application_gateway_firewall_rule_group_py3 import ApplicationGatewayFirewallRuleGroup
from .application_gateway_firewall_rule_set_py3 import ApplicationGatewayFirewallRuleSet
from .application_gateway_available_waf_rule_sets_result_py3 import ApplicationGatewayAvailableWafRuleSetsResult
from .application_gateway_available_ssl_options_py3 import ApplicationGatewayAvailableSslOptions
from .application_gateway_ssl_predefined_policy_py3 import ApplicationGatewaySslPredefinedPolicy
from .resource_py3 import Resource
from .dns_name_availability_result_py3 import DnsNameAvailabilityResult
from .endpoint_service_result_py3 import EndpointServiceResult
from .express_route_circuit_authorization_py3 import ExpressRouteCircuitAuthorization
from .express_route_circuit_peering_config_py3 import ExpressRouteCircuitPeeringConfig
from .route_filter_rule_py3 import RouteFilterRule
from .express_route_circuit_stats_py3 import ExpressRouteCircuitStats
from .express_route_circuit_peering_py3 import ExpressRouteCircuitPeering
from .route_filter_py3 import RouteFilter
from .ipv6_express_route_circuit_peering_config_py3 import Ipv6ExpressRouteCircuitPeeringConfig
from .express_route_circuit_sku_py3 import ExpressRouteCircuitSku
from .express_route_circuit_service_provider_properties_py3 import ExpressRouteCircuitServiceProviderProperties
from .express_route_circuit_py3 import ExpressRouteCircuit
from .express_route_circuit_arp_table_py3 import ExpressRouteCircuitArpTable
from .express_route_circuits_arp_table_list_result_py3 import ExpressRouteCircuitsArpTableListResult
from .express_route_circuit_routes_table_py3 import ExpressRouteCircuitRoutesTable
from .express_route_circuits_routes_table_list_result_py3 import ExpressRouteCircuitsRoutesTableListResult
from .express_route_circuit_routes_table_summary_py3 import ExpressRouteCircuitRoutesTableSummary
from .express_route_circuits_routes_table_summary_list_result_py3 import ExpressRouteCircuitsRoutesTableSummaryListResult
from .express_route_service_provider_bandwidths_offered_py3 import ExpressRouteServiceProviderBandwidthsOffered
from .express_route_service_provider_py3 import ExpressRouteServiceProvider
from .load_balancer_sku_py3 import LoadBalancerSku
from .frontend_ip_configuration_py3 import FrontendIPConfiguration
from .load_balancing_rule_py3 import LoadBalancingRule
from .probe_py3 import Probe
from .inbound_nat_pool_py3 import InboundNatPool
from .outbound_nat_rule_py3 import OutboundNatRule
from .load_balancer_py3 import LoadBalancer
from .error_details_py3 import ErrorDetails
from .error_py3 import Error
from .azure_async_operation_result_py3 import AzureAsyncOperationResult
from .effective_network_security_group_association_py3 import EffectiveNetworkSecurityGroupAssociation
from .effective_network_security_rule_py3 import EffectiveNetworkSecurityRule
from .effective_network_security_group_py3 import EffectiveNetworkSecurityGroup
from .effective_network_security_group_list_result_py3 import EffectiveNetworkSecurityGroupListResult
from .effective_route_py3 import EffectiveRoute
from .effective_route_list_result_py3 import EffectiveRouteListResult
from .network_watcher_py3 import NetworkWatcher
from .topology_parameters_py3 import TopologyParameters
from .topology_association_py3 import TopologyAssociation
from .topology_resource_py3 import TopologyResource
from .topology_py3 import Topology
from .verification_ip_flow_parameters_py3 import VerificationIPFlowParameters
from .verification_ip_flow_result_py3 import VerificationIPFlowResult
from .next_hop_parameters_py3 import NextHopParameters
from .next_hop_result_py3 import NextHopResult
from .security_group_view_parameters_py3 import SecurityGroupViewParameters
from .network_interface_association_py3 import NetworkInterfaceAssociation
from .subnet_association_py3 import SubnetAssociation
from .security_rule_associations_py3 import SecurityRuleAssociations
from .security_group_network_interface_py3 import SecurityGroupNetworkInterface
from .security_group_view_result_py3 import SecurityGroupViewResult
from .packet_capture_storage_location_py3 import PacketCaptureStorageLocation
from .packet_capture_filter_py3 import PacketCaptureFilter
from .packet_capture_parameters_py3 import PacketCaptureParameters
from .packet_capture_py3 import PacketCapture
from .packet_capture_result_py3 import PacketCaptureResult
from .packet_capture_query_status_result_py3 import PacketCaptureQueryStatusResult
from .troubleshooting_parameters_py3 import TroubleshootingParameters
from .query_troubleshooting_parameters_py3 import QueryTroubleshootingParameters
from .troubleshooting_recommended_actions_py3 import TroubleshootingRecommendedActions
from .troubleshooting_details_py3 import TroubleshootingDetails
from .troubleshooting_result_py3 import TroubleshootingResult
from .retention_policy_parameters_py3 import RetentionPolicyParameters
from .flow_log_status_parameters_py3 import FlowLogStatusParameters
from .flow_log_information_py3 import FlowLogInformation
from .connectivity_source_py3 import ConnectivitySource
from .connectivity_destination_py3 import ConnectivityDestination
from .connectivity_parameters_py3 import ConnectivityParameters
from .connectivity_issue_py3 import ConnectivityIssue
from .connectivity_hop_py3 import ConnectivityHop
from .connectivity_information_py3 import ConnectivityInformation
from .patch_route_filter_rule_py3 import PatchRouteFilterRule
from .patch_route_filter_py3 import PatchRouteFilter
from .bgp_community_py3 import BGPCommunity
from .bgp_service_community_py3 import BgpServiceCommunity
from .usage_name_py3 import UsageName
from .usage_py3 import Usage
from .virtual_network_peering_py3 import VirtualNetworkPeering
from .address_space_py3 import AddressSpace
from .dhcp_options_py3 import DhcpOptions
from .virtual_network_py3 import VirtualNetwork
from .ip_address_availability_result_py3 import IPAddressAvailabilityResult
from .virtual_network_usage_name_py3 import VirtualNetworkUsageName
from .virtual_network_usage_py3 import VirtualNetworkUsage
from .virtual_network_gateway_ip_configuration_py3 import VirtualNetworkGatewayIPConfiguration
from .virtual_network_gateway_sku_py3 import VirtualNetworkGatewaySku
from .vpn_client_root_certificate_py3 import VpnClientRootCertificate
from .vpn_client_revoked_certificate_py3 import VpnClientRevokedCertificate
from .vpn_client_configuration_py3 import VpnClientConfiguration
from .bgp_settings_py3 import BgpSettings
from .bgp_peer_status_py3 import BgpPeerStatus
from .gateway_route_py3 import GatewayRoute
from .virtual_network_gateway_py3 import VirtualNetworkGateway
from .vpn_client_parameters_py3 import VpnClientParameters
from .bgp_peer_status_list_result_py3 import BgpPeerStatusListResult
from .gateway_route_list_result_py3 import GatewayRouteListResult
from .tunnel_connection_health_py3 import TunnelConnectionHealth
from .local_network_gateway_py3 import LocalNetworkGateway
from .ipsec_policy_py3 import IpsecPolicy
from .virtual_network_gateway_connection_py3 import VirtualNetworkGatewayConnection
from .connection_reset_shared_key_py3 import ConnectionResetSharedKey
from .connection_shared_key_py3 import ConnectionSharedKey
from .virtual_network_connection_gateway_reference_py3 import VirtualNetworkConnectionGatewayReference
from .virtual_network_gateway_connection_list_entity_py3 import VirtualNetworkGatewayConnectionListEntity
except (SyntaxError, ImportError):
from .sub_resource import SubResource
from .backend_address_pool import BackendAddressPool
from .inbound_nat_rule import InboundNatRule
from .security_rule import SecurityRule
from .network_interface_dns_settings import NetworkInterfaceDnsSettings
from .network_interface import NetworkInterface
from .network_security_group import NetworkSecurityGroup
from .route import Route
from .route_table import RouteTable
from .service_endpoint_properties_format import ServiceEndpointPropertiesFormat
from .public_ip_address_sku import PublicIPAddressSku
from .public_ip_address_dns_settings import PublicIPAddressDnsSettings
from .public_ip_address import PublicIPAddress
from .ip_configuration import IPConfiguration
from .resource_navigation_link import ResourceNavigationLink
from .subnet import Subnet
from .network_interface_ip_configuration import NetworkInterfaceIPConfiguration
from .application_gateway_backend_address import ApplicationGatewayBackendAddress
from .application_gateway_backend_address_pool import ApplicationGatewayBackendAddressPool
from .application_gateway_connection_draining import ApplicationGatewayConnectionDraining
from .application_gateway_backend_http_settings import ApplicationGatewayBackendHttpSettings
from .application_gateway_backend_health_server import ApplicationGatewayBackendHealthServer
from .application_gateway_backend_health_http_settings import ApplicationGatewayBackendHealthHttpSettings
from .application_gateway_backend_health_pool import ApplicationGatewayBackendHealthPool
from .application_gateway_backend_health import ApplicationGatewayBackendHealth
from .application_gateway_sku import ApplicationGatewaySku
from .application_gateway_ssl_policy import ApplicationGatewaySslPolicy
from .application_gateway_ip_configuration import ApplicationGatewayIPConfiguration
from .application_gateway_authentication_certificate import ApplicationGatewayAuthenticationCertificate
from .application_gateway_ssl_certificate import ApplicationGatewaySslCertificate
from .application_gateway_frontend_ip_configuration import ApplicationGatewayFrontendIPConfiguration
from .application_gateway_frontend_port import ApplicationGatewayFrontendPort
from .application_gateway_http_listener import ApplicationGatewayHttpListener
from .application_gateway_path_rule import ApplicationGatewayPathRule
from .application_gateway_probe_health_response_match import ApplicationGatewayProbeHealthResponseMatch
from .application_gateway_probe import ApplicationGatewayProbe
from .application_gateway_request_routing_rule import ApplicationGatewayRequestRoutingRule
from .application_gateway_redirect_configuration import ApplicationGatewayRedirectConfiguration
from .application_gateway_url_path_map import ApplicationGatewayUrlPathMap
from .application_gateway_firewall_disabled_rule_group import ApplicationGatewayFirewallDisabledRuleGroup
from .application_gateway_web_application_firewall_configuration import ApplicationGatewayWebApplicationFirewallConfiguration
from .application_gateway import ApplicationGateway
from .application_gateway_firewall_rule import ApplicationGatewayFirewallRule
from .application_gateway_firewall_rule_group import ApplicationGatewayFirewallRuleGroup
from .application_gateway_firewall_rule_set import ApplicationGatewayFirewallRuleSet
from .application_gateway_available_waf_rule_sets_result import ApplicationGatewayAvailableWafRuleSetsResult
from .application_gateway_available_ssl_options import ApplicationGatewayAvailableSslOptions
from .application_gateway_ssl_predefined_policy import ApplicationGatewaySslPredefinedPolicy
from .resource import Resource
from .dns_name_availability_result import DnsNameAvailabilityResult
from .endpoint_service_result import EndpointServiceResult
from .express_route_circuit_authorization import ExpressRouteCircuitAuthorization
from .express_route_circuit_peering_config import ExpressRouteCircuitPeeringConfig
from .route_filter_rule import RouteFilterRule
from .express_route_circuit_stats import ExpressRouteCircuitStats
from .express_route_circuit_peering import ExpressRouteCircuitPeering
from .route_filter import RouteFilter
from .ipv6_express_route_circuit_peering_config import Ipv6ExpressRouteCircuitPeeringConfig
from .express_route_circuit_sku import ExpressRouteCircuitSku
from .express_route_circuit_service_provider_properties import ExpressRouteCircuitServiceProviderProperties
from .express_route_circuit import ExpressRouteCircuit
from .express_route_circuit_arp_table import ExpressRouteCircuitArpTable
from .express_route_circuits_arp_table_list_result import ExpressRouteCircuitsArpTableListResult
from .express_route_circuit_routes_table import ExpressRouteCircuitRoutesTable
from .express_route_circuits_routes_table_list_result import ExpressRouteCircuitsRoutesTableListResult
from .express_route_circuit_routes_table_summary import ExpressRouteCircuitRoutesTableSummary
from .express_route_circuits_routes_table_summary_list_result import ExpressRouteCircuitsRoutesTableSummaryListResult
from .express_route_service_provider_bandwidths_offered import ExpressRouteServiceProviderBandwidthsOffered
from .express_route_service_provider import ExpressRouteServiceProvider
from .load_balancer_sku import LoadBalancerSku
from .frontend_ip_configuration import FrontendIPConfiguration
from .load_balancing_rule import LoadBalancingRule
from .probe import Probe
from .inbound_nat_pool import InboundNatPool
from .outbound_nat_rule import OutboundNatRule
from .load_balancer import LoadBalancer
from .error_details import ErrorDetails
from .error import Error
from .azure_async_operation_result import AzureAsyncOperationResult
from .effective_network_security_group_association import EffectiveNetworkSecurityGroupAssociation
from .effective_network_security_rule import EffectiveNetworkSecurityRule
from .effective_network_security_group import EffectiveNetworkSecurityGroup
from .effective_network_security_group_list_result import EffectiveNetworkSecurityGroupListResult
from .effective_route import EffectiveRoute
from .effective_route_list_result import EffectiveRouteListResult
from .network_watcher import NetworkWatcher
from .topology_parameters import TopologyParameters
from .topology_association import TopologyAssociation
from .topology_resource import TopologyResource
from .topology import Topology
from .verification_ip_flow_parameters import VerificationIPFlowParameters
from .verification_ip_flow_result import VerificationIPFlowResult
from .next_hop_parameters import NextHopParameters
from .next_hop_result import NextHopResult
from .security_group_view_parameters import SecurityGroupViewParameters
from .network_interface_association import NetworkInterfaceAssociation
from .subnet_association import SubnetAssociation
from .security_rule_associations import SecurityRuleAssociations
from .security_group_network_interface import SecurityGroupNetworkInterface
from .security_group_view_result import SecurityGroupViewResult
from .packet_capture_storage_location import PacketCaptureStorageLocation
from .packet_capture_filter import PacketCaptureFilter
from .packet_capture_parameters import PacketCaptureParameters
from .packet_capture import PacketCapture
from .packet_capture_result import PacketCaptureResult
from .packet_capture_query_status_result import PacketCaptureQueryStatusResult
from .troubleshooting_parameters import TroubleshootingParameters
from .query_troubleshooting_parameters import QueryTroubleshootingParameters
from .troubleshooting_recommended_actions import TroubleshootingRecommendedActions
from .troubleshooting_details import TroubleshootingDetails
from .troubleshooting_result import TroubleshootingResult
from .retention_policy_parameters import RetentionPolicyParameters
from .flow_log_status_parameters import FlowLogStatusParameters
from .flow_log_information import FlowLogInformation
from .connectivity_source import ConnectivitySource
from .connectivity_destination import ConnectivityDestination
from .connectivity_parameters import ConnectivityParameters
from .connectivity_issue import ConnectivityIssue
from .connectivity_hop import ConnectivityHop
from .connectivity_information import ConnectivityInformation
from .patch_route_filter_rule import PatchRouteFilterRule
from .patch_route_filter import PatchRouteFilter
from .bgp_community import BGPCommunity
from .bgp_service_community import BgpServiceCommunity
from .usage_name import UsageName
from .usage import Usage
from .virtual_network_peering import VirtualNetworkPeering
from .address_space import AddressSpace
from .dhcp_options import DhcpOptions
from .virtual_network import VirtualNetwork
from .ip_address_availability_result import IPAddressAvailabilityResult
from .virtual_network_usage_name import VirtualNetworkUsageName
from .virtual_network_usage import VirtualNetworkUsage
from .virtual_network_gateway_ip_configuration import VirtualNetworkGatewayIPConfiguration
from .virtual_network_gateway_sku import VirtualNetworkGatewaySku
from .vpn_client_root_certificate import VpnClientRootCertificate
from .vpn_client_revoked_certificate import VpnClientRevokedCertificate
from .vpn_client_configuration import VpnClientConfiguration
from .bgp_settings import BgpSettings
from .bgp_peer_status import BgpPeerStatus
from .gateway_route import GatewayRoute
from .virtual_network_gateway import VirtualNetworkGateway
from .vpn_client_parameters import VpnClientParameters
from .bgp_peer_status_list_result import BgpPeerStatusListResult
from .gateway_route_list_result import GatewayRouteListResult
from .tunnel_connection_health import TunnelConnectionHealth
from .local_network_gateway import LocalNetworkGateway
from .ipsec_policy import IpsecPolicy
from .virtual_network_gateway_connection import VirtualNetworkGatewayConnection
from .connection_reset_shared_key import ConnectionResetSharedKey
from .connection_shared_key import ConnectionSharedKey
from .virtual_network_connection_gateway_reference import VirtualNetworkConnectionGatewayReference
from .virtual_network_gateway_connection_list_entity import VirtualNetworkGatewayConnectionListEntity
from .application_gateway_paged import ApplicationGatewayPaged
from .application_gateway_ssl_predefined_policy_paged import ApplicationGatewaySslPredefinedPolicyPaged
from .endpoint_service_result_paged import EndpointServiceResultPaged
from .express_route_circuit_authorization_paged import ExpressRouteCircuitAuthorizationPaged
from .express_route_circuit_peering_paged import ExpressRouteCircuitPeeringPaged
from .express_route_circuit_paged import ExpressRouteCircuitPaged
from .express_route_service_provider_paged import ExpressRouteServiceProviderPaged
from .load_balancer_paged import LoadBalancerPaged
from .backend_address_pool_paged import BackendAddressPoolPaged
from .frontend_ip_configuration_paged import FrontendIPConfigurationPaged
from .inbound_nat_rule_paged import InboundNatRulePaged
from .load_balancing_rule_paged import LoadBalancingRulePaged
from .network_interface_paged import NetworkInterfacePaged
from .probe_paged import ProbePaged
from .network_interface_ip_configuration_paged import NetworkInterfaceIPConfigurationPaged
from .network_security_group_paged import NetworkSecurityGroupPaged
from .security_rule_paged import SecurityRulePaged
from .network_watcher_paged import NetworkWatcherPaged
from .packet_capture_result_paged import PacketCaptureResultPaged
from .public_ip_address_paged import PublicIPAddressPaged
from .route_filter_paged import RouteFilterPaged
from .route_filter_rule_paged import RouteFilterRulePaged
from .route_table_paged import RouteTablePaged
from .route_paged import RoutePaged
from .bgp_service_community_paged import BgpServiceCommunityPaged
from .usage_paged import UsagePaged
from .virtual_network_paged import VirtualNetworkPaged
from .virtual_network_usage_paged import VirtualNetworkUsagePaged
from .subnet_paged import SubnetPaged
from .virtual_network_peering_paged import VirtualNetworkPeeringPaged
from .virtual_network_gateway_paged import VirtualNetworkGatewayPaged
from .virtual_network_gateway_connection_list_entity_paged import VirtualNetworkGatewayConnectionListEntityPaged
from .virtual_network_gateway_connection_paged import VirtualNetworkGatewayConnectionPaged
from .local_network_gateway_paged import LocalNetworkGatewayPaged
from .network_management_client_enums import (
TransportProtocol,
IPAllocationMethod,
IPVersion,
SecurityRuleProtocol,
SecurityRuleAccess,
SecurityRuleDirection,
RouteNextHopType,
PublicIPAddressSkuName,
ApplicationGatewayProtocol,
ApplicationGatewayCookieBasedAffinity,
ApplicationGatewayBackendHealthServerHealth,
ApplicationGatewaySkuName,
ApplicationGatewayTier,
ApplicationGatewaySslProtocol,
ApplicationGatewaySslPolicyType,
ApplicationGatewaySslPolicyName,
ApplicationGatewaySslCipherSuite,
ApplicationGatewayRequestRoutingRuleType,
ApplicationGatewayRedirectType,
ApplicationGatewayOperationalState,
ApplicationGatewayFirewallMode,
AuthorizationUseStatus,
ExpressRouteCircuitPeeringAdvertisedPublicPrefixState,
Access,
ExpressRouteCircuitPeeringType,
ExpressRouteCircuitPeeringState,
ExpressRouteCircuitSkuTier,
ExpressRouteCircuitSkuFamily,
ServiceProviderProvisioningState,
LoadBalancerSkuName,
LoadDistribution,
ProbeProtocol,
NetworkOperationStatus,
EffectiveSecurityRuleProtocol,
EffectiveRouteSource,
EffectiveRouteState,
ProvisioningState,
AssociationType,
Direction,
Protocol,
NextHopType,
PcProtocol,
PcStatus,
PcError,
Origin,
Severity,
IssueType,
ConnectionStatus,
VirtualNetworkPeeringState,
VirtualNetworkGatewayType,
VpnType,
VirtualNetworkGatewaySkuName,
VirtualNetworkGatewaySkuTier,
VpnClientProtocol,
BgpPeerState,
ProcessorArchitecture,
AuthenticationMethod,
VirtualNetworkGatewayConnectionStatus,
VirtualNetworkGatewayConnectionType,
IpsecEncryption,
IpsecIntegrity,
IkeEncryption,
IkeIntegrity,
DhGroup,
PfsGroup,
)
__all__ = [
'SubResource',
'BackendAddressPool',
'InboundNatRule',
'SecurityRule',
'NetworkInterfaceDnsSettings',
'NetworkInterface',
'NetworkSecurityGroup',
'Route',
'RouteTable',
'ServiceEndpointPropertiesFormat',
'PublicIPAddressSku',
'PublicIPAddressDnsSettings',
'PublicIPAddress',
'IPConfiguration',
'ResourceNavigationLink',
'Subnet',
'NetworkInterfaceIPConfiguration',
'ApplicationGatewayBackendAddress',
'ApplicationGatewayBackendAddressPool',
'ApplicationGatewayConnectionDraining',
'ApplicationGatewayBackendHttpSettings',
'ApplicationGatewayBackendHealthServer',
'ApplicationGatewayBackendHealthHttpSettings',
'ApplicationGatewayBackendHealthPool',
'ApplicationGatewayBackendHealth',
'ApplicationGatewaySku',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayIPConfiguration',
'ApplicationGatewayAuthenticationCertificate',
'ApplicationGatewaySslCertificate',
'ApplicationGatewayFrontendIPConfiguration',
'ApplicationGatewayFrontendPort',
'ApplicationGatewayHttpListener',
'ApplicationGatewayPathRule',
'ApplicationGatewayProbeHealthResponseMatch',
'ApplicationGatewayProbe',
'ApplicationGatewayRequestRoutingRule',
'ApplicationGatewayRedirectConfiguration',
'ApplicationGatewayUrlPathMap',
'ApplicationGatewayFirewallDisabledRuleGroup',
'ApplicationGatewayWebApplicationFirewallConfiguration',
'ApplicationGateway',
'ApplicationGatewayFirewallRule',
'ApplicationGatewayFirewallRuleGroup',
'ApplicationGatewayFirewallRuleSet',
'ApplicationGatewayAvailableWafRuleSetsResult',
'ApplicationGatewayAvailableSslOptions',
'ApplicationGatewaySslPredefinedPolicy',
'Resource',
'DnsNameAvailabilityResult',
'EndpointServiceResult',
'ExpressRouteCircuitAuthorization',
'ExpressRouteCircuitPeeringConfig',
'RouteFilterRule',
'ExpressRouteCircuitStats',
'ExpressRouteCircuitPeering',
'RouteFilter',
'Ipv6ExpressRouteCircuitPeeringConfig',
'ExpressRouteCircuitSku',
'ExpressRouteCircuitServiceProviderProperties',
'ExpressRouteCircuit',
'ExpressRouteCircuitArpTable',
'ExpressRouteCircuitsArpTableListResult',
'ExpressRouteCircuitRoutesTable',
'ExpressRouteCircuitsRoutesTableListResult',
'ExpressRouteCircuitRoutesTableSummary',
'ExpressRouteCircuitsRoutesTableSummaryListResult',
'ExpressRouteServiceProviderBandwidthsOffered',
'ExpressRouteServiceProvider',
'LoadBalancerSku',
'FrontendIPConfiguration',
'LoadBalancingRule',
'Probe',
'InboundNatPool',
'OutboundNatRule',
'LoadBalancer',
'ErrorDetails',
'Error',
'AzureAsyncOperationResult',
'EffectiveNetworkSecurityGroupAssociation',
'EffectiveNetworkSecurityRule',
'EffectiveNetworkSecurityGroup',
'EffectiveNetworkSecurityGroupListResult',
'EffectiveRoute',
'EffectiveRouteListResult',
'NetworkWatcher',
'TopologyParameters',
'TopologyAssociation',
'TopologyResource',
'Topology',
'VerificationIPFlowParameters',
'VerificationIPFlowResult',
'NextHopParameters',
'NextHopResult',
'SecurityGroupViewParameters',
'NetworkInterfaceAssociation',
'SubnetAssociation',
'SecurityRuleAssociations',
'SecurityGroupNetworkInterface',
'SecurityGroupViewResult',
'PacketCaptureStorageLocation',
'PacketCaptureFilter',
'PacketCaptureParameters',
'PacketCapture',
'PacketCaptureResult',
'PacketCaptureQueryStatusResult',
'TroubleshootingParameters',
'QueryTroubleshootingParameters',
'TroubleshootingRecommendedActions',
'TroubleshootingDetails',
'TroubleshootingResult',
'RetentionPolicyParameters',
'FlowLogStatusParameters',
'FlowLogInformation',
'ConnectivitySource',
'ConnectivityDestination',
'ConnectivityParameters',
'ConnectivityIssue',
'ConnectivityHop',
'ConnectivityInformation',
'PatchRouteFilterRule',
'PatchRouteFilter',
'BGPCommunity',
'BgpServiceCommunity',
'UsageName',
'Usage',
'VirtualNetworkPeering',
'AddressSpace',
'DhcpOptions',
'VirtualNetwork',
'IPAddressAvailabilityResult',
'VirtualNetworkUsageName',
'VirtualNetworkUsage',
'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku',
'VpnClientRootCertificate',
'VpnClientRevokedCertificate',
'VpnClientConfiguration',
'BgpSettings',
'BgpPeerStatus',
'GatewayRoute',
'VirtualNetworkGateway',
'VpnClientParameters',
'BgpPeerStatusListResult',
'GatewayRouteListResult',
'TunnelConnectionHealth',
'LocalNetworkGateway',
'IpsecPolicy',
'VirtualNetworkGatewayConnection',
'ConnectionResetSharedKey',
'ConnectionSharedKey',
'VirtualNetworkConnectionGatewayReference',
'VirtualNetworkGatewayConnectionListEntity',
'ApplicationGatewayPaged',
'ApplicationGatewaySslPredefinedPolicyPaged',
'EndpointServiceResultPaged',
'ExpressRouteCircuitAuthorizationPaged',
'ExpressRouteCircuitPeeringPaged',
'ExpressRouteCircuitPaged',
'ExpressRouteServiceProviderPaged',
'LoadBalancerPaged',
'BackendAddressPoolPaged',
'FrontendIPConfigurationPaged',
'InboundNatRulePaged',
'LoadBalancingRulePaged',
'NetworkInterfacePaged',
'ProbePaged',
'NetworkInterfaceIPConfigurationPaged',
'NetworkSecurityGroupPaged',
'SecurityRulePaged',
'NetworkWatcherPaged',
'PacketCaptureResultPaged',
'PublicIPAddressPaged',
'RouteFilterPaged',
'RouteFilterRulePaged',
'RouteTablePaged',
'RoutePaged',
'BgpServiceCommunityPaged',
'UsagePaged',
'VirtualNetworkPaged',
'VirtualNetworkUsagePaged',
'SubnetPaged',
'VirtualNetworkPeeringPaged',
'VirtualNetworkGatewayPaged',
'VirtualNetworkGatewayConnectionListEntityPaged',
'VirtualNetworkGatewayConnectionPaged',
'LocalNetworkGatewayPaged',
'TransportProtocol',
'IPAllocationMethod',
'IPVersion',
'SecurityRuleProtocol',
'SecurityRuleAccess',
'SecurityRuleDirection',
'RouteNextHopType',
'PublicIPAddressSkuName',
'ApplicationGatewayProtocol',
'ApplicationGatewayCookieBasedAffinity',
'ApplicationGatewayBackendHealthServerHealth',
'ApplicationGatewaySkuName',
'ApplicationGatewayTier',
'ApplicationGatewaySslProtocol',
'ApplicationGatewaySslPolicyType',
'ApplicationGatewaySslPolicyName',
'ApplicationGatewaySslCipherSuite',
'ApplicationGatewayRequestRoutingRuleType',
'ApplicationGatewayRedirectType',
'ApplicationGatewayOperationalState',
'ApplicationGatewayFirewallMode',
'AuthorizationUseStatus',
'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState',
'Access',
'ExpressRouteCircuitPeeringType',
'ExpressRouteCircuitPeeringState',
'ExpressRouteCircuitSkuTier',
'ExpressRouteCircuitSkuFamily',
'ServiceProviderProvisioningState',
'LoadBalancerSkuName',
'LoadDistribution',
'ProbeProtocol',
'NetworkOperationStatus',
'EffectiveSecurityRuleProtocol',
'EffectiveRouteSource',
'EffectiveRouteState',
'ProvisioningState',
'AssociationType',
'Direction',
'Protocol',
'NextHopType',
'PcProtocol',
'PcStatus',
'PcError',
'Origin',
'Severity',
'IssueType',
'ConnectionStatus',
'VirtualNetworkPeeringState',
'VirtualNetworkGatewayType',
'VpnType',
'VirtualNetworkGatewaySkuName',
'VirtualNetworkGatewaySkuTier',
'VpnClientProtocol',
'BgpPeerState',
'ProcessorArchitecture',
'AuthenticationMethod',
'VirtualNetworkGatewayConnectionStatus',
'VirtualNetworkGatewayConnectionType',
'IpsecEncryption',
'IpsecIntegrity',
'IkeEncryption',
'IkeIntegrity',
'DhGroup',
'PfsGroup',
]
|
|
#!/usr/bin/python
usage = """pointed.py [--options] gps gps gps..."""
description="""a script that generates a pointed follow-up of possible auxiliary couplings"""
import numpy as np
import scipy.special ### scipy.special.erfinv is used below for the Gaussian CI approximation
from laldetchar.idq import idq
from laldetchar.idq import event
import greedyCI as gci
from optparse import OptionParser
from ConfigParser import SafeConfigParser
#=================================================
parser = OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option("", "--kwverbose", default=False, action="store_true", help="make the retrieve_kwtrigs() call verbose")
parser.add_option("", "--Omicronverbose", dest="overbose", default=False, action="store_true", help="make the retrieve_OmicronTrigs() call verbose")
parser.add_option("", "--OfflineOmicronverbose", dest="ooverbose", default=False, action="store_true", help="make the retrieve_OfflineOmicronTrigs() call verbose")
parser.add_option("-c", "--config", default="config.ini", type="string")
parser.add_option("-w", "--window", default=10, type="float")
parser.add_option("-e", "--exclude", default=0.0, type="float", help="exclude triggers falling closer than exclude when computing the rate. This is NOT applied when finding the closest trigger and computing a pvalue, only for the rate estimation.")
parser.add_option("-n", "--nmax", default=50, type="int", help="the number of events over which we fall back to a gaussian approx for CI")
parser.add_option("", "--pvalue-print-thr", default=1.0, type="float", help="only print channels/pvalues if they are smaller than this")
parser.add_option("-C", "--confidence-intervals", default=False, action="store_true", help="compute confidence intervals for rates and pvalues")
parser.add_option("-o", "--output-dir", default="./", type="string")
opts, args = parser.parse_args()
if not len(args):
if opts.verbose:
print "no gps times specified"
import sys
sys.exit(0)
else:
args = [float(arg) for arg in args]
if opts.exclude >= opts.window:
raise ValueError("--exclude is larger than --window. That doesn't make sense")
#=================================================
config = SafeConfigParser()
config.read(opts.config)
conf = config.getfloat('general', 'conf')
ifo = config.get('general', 'ifo')
#===========
kwgdsdir = config.get("kleinewelle", "gdsdir")
kwbasename = config.get("kleinewelle", "basename")
kwstride = config.getint("kleinewelle", "stride")
kwchannels = config.get("kleinewelle", "channels").split()
for chan in kwchannels:
if not config.has_section(chan):
raise ValueError("no section for channel=%s found in %s"%(chan, opts.config))
#===========
oogdsdir = config.get('OfflineOmicron', 'gdsdir')
oochannels = config.get('OfflineOmicron', 'channels').split()
for chan in oochannels:
if not config.has_section(chan):
raise ValueError('no section for channel=%s in %s'%(chan, opts.config))
#===========
ogdsdir = config.get('Omicron', 'gdsdir')
ostride = config.getint('Omicron', 'stride')
ochannels = config.get('Omicron', 'channels').split()
for chan in ochannels:
if not config.has_section(chan):
raise ValueError('no section for channel=%s in %s'%(chan, opts.config))
if oochannels and ochannels:
print "WARNING: you've specified both Omicron and OfflineOmicron channels. In the event of a conflict, the OfflineOmicron data will be preferred!"
#=================================================
### clean up channel list to get a unique set
channels = kwchannels + list(set(ochannels + oochannels))
#=================================================
for gps in args:
print "gps : %.9f"%(gps)
#=============================================
# KW triggers
#=============================================
if kwchannels:
### go find triggers
if opts.verbose:
print "\tdiscoverying KW triggers within [%.9f, %.9f]"%(gps-opts.window, gps+opts.window)
kwtrgdict = idq.retrieve_kwtrigs(kwgdsdir, kwbasename, int(np.floor(gps-opts.window)), 2*opts.window+1, kwstride, verbose=opts.kwverbose)
### keep only the relevant channels
if opts.verbose:
print "\tdownselecting to only the following channels:"
for chan in kwchannels:
print "\t\t%s"%chan
kwtrgdict.keep_channels(kwchannels)
### trim the edges
kwtrgdict.include([[gps-opts.window, gps+opts.window]], tcent=event.col_kw['tcent'])
### ensure we have entries for all requested channels and downselect as needed
if opts.verbose:
print "\tdownselecting triggers:"
for chan in kwchannels:
if kwtrgdict.has_key(chan): ### apply windows, thresholds
if opts.verbose:
print "\t\tchannel=%s, found %d triggers"%(chan, len(kwtrgdict[chan]))
signifmin = config.getfloat(chan, "signifmin")
signifmax = config.getfloat(chan, "signifmax")
kwtrgdict[chan] = [trg for trg in kwtrgdict[chan] if (trg[event.col_kw['signif']] >= signifmin) and (trg[event.col_kw['signif']] <= signifmax) ]
fmin = config.getfloat(chan, "fmin")
fmax = config.getfloat(chan, "fmax")
kwtrgdict[chan] = [trg for trg in kwtrgdict[chan] if (trg[event.col_kw['fcent']] >= fmin) and (trg[event.col_kw['fcent']] <= fmax) ]
durmin = config.getfloat(chan, "durmin")
durmax = config.getfloat(chan, "durmax")
kwtrgdict[chan] = [trg for trg in kwtrgdict[chan] if (trg[event.col_kw['tstop']]-trg[event.col_kw['tstart']] >= durmin) and (trg[event.col_kw['tstop']]-trg[event.col_kw['tstart']] <= durmax) ]
if opts.verbose:
print "\t\t\tsignifmin = %.3f\n\t\t\tsignifmax = %.3f\n\t\t\tfmin = %.3f\n\t\t\tfmax=%.3f\n\t\t\tdurmin=%.3e\n\t\t\tdurmax=%.3e"%(signifmin, signifmax, fmin, fmax, durmin, durmax)
else:
if opts.verbose:
print "\t\tWARNING: channel=%s not found, inserting an empty list"%chan
kwtrgdict[chan] = []
if opts.verbose:
print "\t\t\tchannel=%s -> %d triggers"%(chan, len(kwtrgdict[chan]))
else:
kwtrgdict = event.trigdict()
#=============================================
# Omicron triggers
#=============================================
if ochannels:
if opts.verbose:
print "\tdiscovering Omicron triggers within [%.9f, %.9f]"%(gps-opts.window, gps+opts.window)
otrgdict = idq.retrieve_OmicronTrigs(ogdsdir, ifo, int(np.floor(gps-opts.window)), 2*opts.window+1, ostride, ochannels, verbose=opts.overbose)
### trim edges
otrgdict.include([[gps-opts.window, gps+opts.window]], tcent=event.col_snglBurst['tcent'])
### downselect as needed
if opts.verbose:
print "\tdownselecting triggers"
for chan in ochannels:
if otrgdict.has_key(chan):
if opts.verbose:
print "\t\tchannel=%s, found %d triggers"%(chan, len(otrgdict[chan]))
snrmin = config.getfloat(chan, "snrmin")
snrmax = config.getfloat(chan, "snrmax")
otrgdict[chan] = [trg for trg in otrgdict[chan] if (trg[event.col_snglBurst['snr']] >= snrmin) and (trg[event.col_snglBurst['snr']] <= snrmax) ]
fmin = config.getfloat(chan, "fmin")
fmax = config.getfloat(chan, "fmax")
otrgdict[chan] = [trg for trg in otrgdict[chan] if (trg[event.col_snglBurst['fcent']] >= fmin) and (trg[event.col_snglBurst['fcent']] <= fmax) ]
durmin = config.getfloat(chan, "durmin")
durmax = config.getfloat(chan, "durmax")
otrgdict[chan] = [trg for trg in otrgdict[chan] if (trg[event.col_snglBurst['duration']] >= durmin) and (trg[event.col_snglBurst['duration']] <= durmax) ]
if opts.verbose:
print "\t\t\tsnrmin = %.3f\n\t\t\tsnrmax = %.3f\n\t\t\tfmin = %.3f\n\t\t\tfmax=%.3f\n\t\t\tdurmin=%.3e\n\t\t\tdurmax=%.3e"%(snrmin, snrmax, fmin, fmax, durmin, durmax)
else:
if opts.verbose:
print "\t\tWARNING: channel=%s not found, inserting an empty list"%chan
otrgdict[chan] = []
if opts.verbose:
print "\t\t\tchannel=%s -> %d triggers"%(chan, len(otrgdict[chan]))
else:
otrgdict = event.trigdict()
#=============================================
# OfflineOmicron triggers
#=============================================
if oochannels:
if opts.verbose:
print "\tdiscovering OfflineOmicron triggers within [%.9f, %.9f]"%(gps-opts.window, gps+opts.window)
ootrgdict = idq.retrieve_OfflineOmicronTrigs(oogdsdir, ifo, int(np.floor(gps-opts.window)), 2*opts.window+1, channels=oochannels, verbose=opts.ooverbose)
### trim edges
ootrgdict.include([[gps-opts.window, gps+opts.window]], tcent=event.col_snglBurst['tcent'])
### downselect as needed
if opts.verbose:
print "\tdownselecting triggers"
for chan in oochannels:
if ootrgdict.has_key(chan):
if opts.verbose:
print "\t\tchannel=%s, found %d triggers"%(chan, len(ootrgdict[chan]))
snrmin = config.getfloat(chan, "snrmin")
snrmax = config.getfloat(chan, "snrmax")
ootrgdict[chan] = [trg for trg in ootrgdict[chan] if (trg[event.col_snglBurst['snr']] >= snrmin) and (trg[event.col_snglBurst['snr']] <= snrmax) ]
fmin = config.getfloat(chan, "fmin")
fmax = config.getfloat(chan, "fmax")
ootrgdict[chan] = [trg for trg in ootrgdict[chan] if (trg[event.col_snglBurst['fcent']] >= fmin) and (trg[event.col_snglBurst['fcent']] <= fmax) ]
durmin = config.getfloat(chan, "durmin")
durmax = config.getfloat(chan, "durmax")
ootrgdict[chan] = [trg for trg in ootrgdict[chan] if (trg[event.col_snglBurst['duration']] >= durmin) and (trg[event.col_snglBurst['duration']] <= durmax) ]
if opts.verbose:
print "\t\t\tsnrmin = %.3f\n\t\t\tsnrmax = %.3f\n\t\t\tfmin = %.3f\n\t\t\tfmax=%.3f\n\t\t\tdurmin=%.3e\n\t\t\tdurmax=%.3e"%(snrmin, snrmax, fmin, fmax, durmin, durmax)
else:
if opts.verbose:
print "\t\tWARNING: channel=%s not found, inserting an empty list"%chan
ootrgdict[chan] = []
if opts.verbose:
print "\t\t\tchannel=%s -> %d triggers"%(chan, len(ootrgdict[chan]))
else:
ootrgdict = event.trigdict()
#=============================================
# combine all trgdicts
#=============================================
trgdict = event.trigdict()
### add kw triggers
trgdict.add( kwtrgdict )
### add Omicron triggers
trgdict.add( otrgdict )
### add OfflineOmicron triggers
trgdict.add( ootrgdict )
#=============================================
# cluster triggers?
#=============================================
# print "\n\n\tWARNING: you may want to cluster triggers!\n\n"
#=============================================
# generate statistics, plots
#=============================================
if opts.verbose:
print "\tcomputing statistics, generating plots"
for chan in channels:
if "_Omicron" in chan:
col = event.col_snglBurst
snrkey = 'snr'
else:
col = event.col_kw
snrkey = 'signif'
n = len([ 1.0 for trg in trgdict[chan] if abs(trg[col['tcent']]-gps) >= opts.exclude] ) ### number of triggers beyond exclude
r = 0.5*n/(opts.window-opts.exclude) ### point estimate of the rate
if n:
dt = np.array([trg[col['tcent']] for trg in trgdict[chan]]) - gps
arg = np.argmin(np.abs(dt))
min_dt = dt[arg]
else:
min_dt = opts.window
arg = None
absmin_dt = abs(min_dt)
if r > 0:
pvalue = 1 - np.exp(-r*2*absmin_dt) ### cumulative probability of observing a trigger at least as close as min_dt, given the estimated rate; the factor of 2 comes from looking on either side of the specified gps time
else:
pvalue = 1 ### limit is not great here...need to add CI
if opts.confidence_intervals:
if n < opts.nmax:
r_l, r_h = np.array( gci.poisson_bs(conf, n) ) * 0.5 / (opts.window-opts.exclude)
else:
s = n**0.5 * (scipy.special.erfinv( conf ) * 2**0.5) ### the size of the standard deviation times the number of standard deviations needed to cover conf
r_l = max( 0, (n - s) * 0.5 / (opts.window-opts.exclude) )
r_h = (n + s) * 0.5 / (opts.window-opts.exclude)
pvalue_l = 1 - np.exp(-r_l*2*absmin_dt)
pvalue_h = 1 - np.exp(-r_h*2*absmin_dt)
if (pvalue <= opts.pvalue_print_thr):
print "\n\tchannel=%s\n\t-> Ntrg=%d\n\t-> rate=%.9e Hz\n\t-> min_dt=%.9e sec"%(chan, n, r, min_dt)
if arg is not None:
trg = trgdict[chan][arg]
print "\t\t%s=%.5f\n\t\tfcnt=%.5f Hz\n\t\ttcnt=%.9f"%(snrkey, trg[col[snrkey]], trg[col['fcent']], trg[col['tcent']])
if "_Omicron" in chan:
print "\t\tbndw=%.5f Hz\n\t\tdur=%.5f sec"%(trg[col['bandwidth']], trg[col['duration']])
else:
print "\t\tdur=%.5f sec"%(trg[col['tstop']]-trg[col['tstart']])
print "\t-> pvalue=%.9e"%(pvalue)
if opts.confidence_intervals:
print "\t-> %.1f"%(conf*100) + " % confidence:"
print "\t\tlow rate =%.9e Hz\n\t\thigh rate=%.9e Hz\n\t\tlow pvalue =%.9e\n\t\thigh pvalue=%.9e"%(r_l, r_h, pvalue_l, pvalue_h)
### SHOULD BE ABLE TO TEST THAT THIS STATISTIC DOES WHAT WE THINK IT SHOULD BY CHECKING RANDOM TIMES AND THE DISTRIBUTION DERIVED THEREFROM
### DO SOMETHING
'''
plot histograms
TIME-DOMAIN ONLY
zero scale so t=0 corresponds to the gps time
FREQ-DOMAIN ONLY
TIME-FREQ DOMAIN ONLY
ALL
PLOT FOR EACH CHANNEL SEPARATELY, in some sort of stacked way
PLOT for each gps separately?
markers for each trigger separately
stack multiple gps times into a global histogram
compute and report basic statistics:
aux rate (need to estimate this over a large window... rely on the user to do this correctly?)
set CI on this rate
poisson probability of finding a trigger within "x" of the requested time. set "x" to be the closest time?
set CI on this statistic using CI on the rate
scatter aux SNR vs DARM SNR
--> possibly inform safety criteria that we can eventually add back into the downselection of events when defining a veto
histogram the dt between triggers in each channel -> confirm "poisson-ianity" of the triggers and the validity of our pvalue estimate
'''
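#=================================================
# A minimal, self-contained sketch (not part of the original script) of the
# statistic computed above, under the same assumptions: with n triggers in a
# window of half-width `window` (excluding +/-`exclude` around the gps time),
# the rate estimate is r = n / (2*(window - exclude)), the p-value of the
# closest trigger at |dt| is 1 - exp(-2*r*|dt|), and for large n the Poisson
# rate CI is approximated by a Gaussian, as in the nmax branch above.
def pointed_rate_and_pvalue(n, window, exclude, min_dt, conf=0.68):
    """Return (rate, pvalue, rate_low, rate_high) for a pointed follow-up."""
    livetime = 2.0*(window - exclude)
    r = n / livetime ### point estimate of the aux trigger rate
    pvalue = 1 - np.exp(-r*2*abs(min_dt)) if r > 0 else 1.0
    s = n**0.5 * scipy.special.erfinv(conf) * 2**0.5 ### Gaussian half-width covering conf
    r_l = max(0.0, (n - s) / livetime)
    r_h = (n + s) / livetime
    return r, pvalue, r_l, r_h
### example with hypothetical numbers: 12 triggers in a +/-10 s window, closest at 0.3 s
### pointed_rate_and_pvalue(12, 10.0, 0.0, 0.3) -> rate = 0.6 Hz, pvalue ~ 0.30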
|
|
'''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
import math
from scipy.interpolate import griddata
from numpy import linalg as LA
# System
import sys
import time
from tqdm import tqdm
# Get plotting packages
import matplotlib
import matplotlib.colors as colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.tri as tri
from matplotlib import rcParams
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.basemap import Basemap
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.patches import FancyArrowPatch
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com) "
print "---------------------------------------------------------------------------------"
print ""
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
# Start timer.
start_time = time.time( )
## Set up the figure
fig = plt.figure()
ax1 = fig.gca( projection = '3d' )
# Read data in csv file. data returned as a panda series.
data = pd.read_csv( '../data/singleRegolithEjectaURESolution.csv' )
x = data[ 'x' ].values
y = data[ 'y' ].values
z = data[ 'z' ].values
vx = data[ 'vx' ].values
vy = data[ 'vy' ].values
vz = data[ 'vz' ].values
t = data[ 't' ].values
## Plot the ellipsoidal shape of the asteroid
alpha = 20000.0
beta = 7000.0
gamma = 7000.0
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
ellipsoid_x = alpha * np.outer(np.cos(u), np.sin(v))
ellipsoid_y = beta * np.outer(np.sin(u), np.sin(v))
ellipsoid_z = gamma * np.outer(np.ones(np.size(u)), np.cos(v))
newColor = colors.cnames["slategray"]
surf = ax1.plot_surface( ellipsoid_x, ellipsoid_y, ellipsoid_z,
rstride=5, cstride=5, alpha=0.3 )
surf.set_facecolor( newColor )
surf.set_linewidth( 0.1 )
ax1.hold( True )
## draw the initial velocity vector
velocityVector = [ vx[0], vy[0], vz[0] ]
positionVector = [ x[0], y[0], z[0] ]
# draw the position vector
radius = np.sqrt( ( positionVector[ 0 ] - 0.0 )**2
+ ( positionVector[ 1 ] - 0.0 )**2
+ ( positionVector[ 2 ] - 0.0 )**2 )
ax1.quiver3D( 0.0, 0.0, 0.0,
positionVector[ 0 ], positionVector[ 1 ], positionVector[ 2 ],
length=radius, lw = 1, pivot='tail', arrow_length_ratio=0.2,
color=colors.cnames["black"], linestyles='solid' )
# draw the north pole vector
ax1.quiver3D( 0.0, 0.0, 0.0,
0.0, 0.0, gamma,
length=gamma+2000.0, lw=1, pivot='tail', arrow_length_ratio=0.2,
color=colors.cnames["black"], linestyles='solid' )
# get the surface unit normal vector (body fixed frame) and plot it
# this is also the z basis vector for the surface frame in body fixed coordinates
normalVector = [ 2.0 * positionVector[ 0 ] / alpha**2,
2.0 * positionVector[ 1 ] / beta**2,
2.0 * positionVector[ 2 ] / gamma**2 ]
normalVectorMagnitude = LA.norm( normalVector )
unitNormalVector = normalVector / normalVectorMagnitude
ax1.quiver3D( positionVector[ 0 ], positionVector[ 1 ], positionVector[ 2 ],
unitNormalVector[ 0 ], unitNormalVector[ 1 ], unitNormalVector[ 2 ],
length=3000, lw=1, pivot='tail', arrow_length_ratio=0.2,
color=colors.cnames["darkorange"], linestyles='solid', label='Unit Normal/Z axis' )
# get the intermediate RTN frame
unitR = positionVector / LA.norm( positionVector )
bodyFramePrincipalZAxisUnitVector = [ 0.0, 0.0, 1.0 ]
unitT = np.cross( unitR, bodyFramePrincipalZAxisUnitVector ) / LA.norm( np.cross( unitR, bodyFramePrincipalZAxisUnitVector ) )
unitN = np.cross( unitT, unitR ) / LA.norm( np.cross( unitT, unitR ) )
# check for if the position vector is pointing to the poles
unitPositionVector = positionVector / LA.norm( positionVector )
positionDotPrincipalZ = np.dot( unitPositionVector, [ 0.0, 0.0, 1.0 ] )
positionDotNegativePrincipalZ = np.dot( unitPositionVector, [ 0.0, 0.0, -1.0 ] )
# cross product of normal and unitT
unitX = np.cross( unitT, unitNormalVector ) / LA.norm( np.cross( unitT, unitNormalVector ) )
# cross product of unitX and normal
unitY = np.cross( unitNormalVector, unitX ) / LA.norm( np.cross( unitNormalVector, unitX ) )
if positionDotPrincipalZ == 1.0:
unitX = [ 1.0, 0.0, 0.0 ]
unitY = [ 0.0, 1.0, 0.0 ]
elif positionDotNegativePrincipalZ == 1.0:
unitX = [ -1.0, 0.0, 0.0 ]
unitY = [ 0.0, 1.0, 0.0 ]
#plot the x and y basis vectors
ax1.quiver3D( positionVector[ 0 ], positionVector[ 1 ], positionVector[ 2 ],
unitX[ 0 ], unitX[ 1 ], unitX[ 2 ],
length=3000, lw=1, pivot='tail', arrow_length_ratio=0.2,
color=colors.cnames["magenta"], linestyles='solid', label='X axis' )
ax1.quiver3D( positionVector[ 0 ], positionVector[ 1 ], positionVector[ 2 ],
unitY[ 0 ], unitY[ 1 ], unitY[ 2 ],
length=3000, lw=1, pivot='tail', arrow_length_ratio=0.2,
color=colors.cnames["blue"], linestyles='solid', label='Y axis' )
# plot the velocity vector now
ax1.quiver3D( positionVector[ 0 ], positionVector[ 1 ], positionVector[ 2 ],
velocityVector[ 0 ], velocityVector[ 1 ], velocityVector[ 2 ],
length=3000, lw=1, pivot='tail', arrow_length_ratio=0.2,
color=colors.cnames["green"], linestyles='solid', label='Velocity' )
# vel_start = [ positionVector[0], positionVector[1], positionVector[2] ]
# vel_end = [ positionVector[0] + 500 * velocityVector[0],
# positionVector[1] + 500 * velocityVector[1],
# positionVector[2] + 500 * velocityVector[2] ]
# vel_vecs = list(zip(vel_start, vel_end))
# vel_arrow = Arrow3D(vel_vecs[0],vel_vecs[1],vel_vecs[2], mutation_scale=20, lw=1, arrowstyle="-|>", color="g")
# ax1.add_artist(vel_arrow)
## format axis and title
ax1.set_xlim( [ -20000, 20000 ] )
ax1.set_ylim( [ -20000, 20000 ] )
ax1.set_zlim( [ -20000, 20000 ] )
ax1.set_xlabel('x [m]')
ax1.set_ylabel('y [m]')
ax1.set_zlabel('z [m]')
ax1.ticklabel_format(style='sci', axis='both', scilimits=(0,0), useOffset=False)
## Show the plot
plt.tight_layout( )
plt.grid( )
plt.legend( )
plt.show( )
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
|
"""Python implementation of MySensors API."""
import logging
from distutils.version import LooseVersion as parse_ver
from pathlib import Path
import voluptuous as vol
from voluptuous.humanize import humanize_error
from .const import SYSTEM_CHILD_ID, get_const
from .message import Message
from .sensor import Sensor
from .task import AsyncTasks, SyncTasks
from .validation import safe_is_version
_LOGGER = logging.getLogger(__name__)
__version__ = (Path(__file__).parent / "VERSION").read_text(encoding="utf-8").strip()
class Gateway:
"""Base implementation for a MySensors Gateway."""
# pylint: disable=too-many-instance-attributes
def __init__(self, event_callback=None, protocol_version="1.4"):
"""Set up Gateway."""
protocol_version = safe_is_version(protocol_version)
self.const = get_const(protocol_version)
self.event_callback = event_callback
self.metric = True # if true - use metric, if false - use imperial
handlers = self.const.get_handler_registry()
# Copy to allow safe modification.
self.handlers = dict(handlers)
self.can_log = False
self.on_conn_made = None
self.on_conn_lost = None
self.protocol_version = protocol_version
self.sensors = {}
self.tasks = None
def __repr__(self):
"""Return the representation."""
return f"<{self.__class__.__name__}>"
def logic(self, data):
"""Parse the data and respond to it appropriately.
The response is returned to the caller and has to be sent back
over the transport as a mysensors command string.
"""
try:
msg = Message(data)
except ValueError as exc:
_LOGGER.warning("Not a valid message: %s", exc)
return None
try:
msg.validate(self.protocol_version)
except vol.Invalid as exc:
_LOGGER.warning("Invalid %s: %s", msg, humanize_error(msg.__dict__, exc))
return None
msg.gateway = self
message_type = self.const.MessageType(msg.type)
handler = message_type.get_handler(self.handlers)
reply = handler(msg)
reply = self._route_message(reply)
return reply.encode() if reply else None
def alert(self, msg):
"""Tell anyone who wants to know that a sensor was updated."""
if self.event_callback is not None:
try:
self.event_callback(msg)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception(exception)
if self.tasks.persistence:
self.tasks.persistence.need_save = True
def _get_next_id(self):
"""Return the next available sensor id."""
if self.sensors:
next_id = max(self.sensors.keys()) + 1
else:
next_id = 1
if next_id <= self.const.MAX_NODE_ID:
return next_id
return None
def add_sensor(self, sensorid=None):
"""Add a sensor to the gateway."""
if sensorid is None:
sensorid = self._get_next_id()
if sensorid is not None and sensorid not in self.sensors:
self.sensors[sensorid] = Sensor(sensorid)
return sensorid if sensorid in self.sensors else None
def create_message_to_set_sensor_value(
self, sensor, child_id, value_type, value, **kwargs
):
"""Create a message to set specified sensor child value."""
msg_type = kwargs.get("msg_type", self.const.MessageType.set)
ack = kwargs.get("ack", 0)
try:
value_type = int(value_type)
except (ValueError, TypeError) as exc:
raise ValueError(f"Invalid value_type provided: {value_type}") from exc
value = str(value)
msg = Message(
node_id=sensor.sensor_id,
child_id=child_id,
type=msg_type,
ack=ack,
sub_type=value_type,
payload=value,
)
msg_string = msg.encode()
if msg_string is None:
raise ValueError(
f"Unable to encode message: node {sensor.sensor_id}, child {child_id}, "
"type {msg_type}, ack {ack}, sub_type {value_type}, payload {value}"
)
msg.validate(self.protocol_version)
return msg
def is_sensor(self, sensorid, child_id=None):
"""Return True if a sensor and its child exist."""
ret = sensorid in self.sensors
if not ret:
_LOGGER.warning("Node %s is unknown", sensorid)
if ret and child_id is not None:
ret = child_id in self.sensors[sensorid].children
if not ret:
_LOGGER.warning("Child %s is unknown", child_id)
if not ret and parse_ver(self.protocol_version) >= parse_ver("2.0"):
_LOGGER.info("Requesting new presentation for node %s", sensorid)
msg = Message(gateway=self).modify(
node_id=sensorid,
child_id=SYSTEM_CHILD_ID,
type=self.const.MessageType.internal,
sub_type=self.const.Internal.I_PRESENTATION,
)
if self._route_message(msg):
self.tasks.add_job(msg.encode)
return ret
def _route_message(self, msg):
if (
not isinstance(msg, Message)
or msg.type == self.const.MessageType.presentation
):
return None
if (
msg.node_id not in self.sensors
or msg.type == self.const.MessageType.stream
or not self.sensors[msg.node_id].is_smart_sleep_node
):
return msg
self.sensors[msg.node_id].queue.append(msg.encode())
return None
def set_child_value(self, sensor_id, child_id, value_type, value, **kwargs):
"""Add a command to set a sensor value, to the queue.
A queued command will be sent to the sensor when the gateway
thread has sent all previously queued commands.
If the sensor is a smart sleep node, the command will be
buffered in a queue on the sensor, and only the internal sensor state
will be updated. When a smartsleep message is received, the internal
state will be pushed to the sensor, via _handle_smartsleep method.
"""
if not self.is_sensor(sensor_id, child_id):
return
sensor = self.sensors[sensor_id]
if sensor.is_smart_sleep_node:
sensor.set_child_desired_state(child_id, value_type, value)
return
msg_to_send = self.create_message_to_set_sensor_value(
sensor, child_id, value_type, value, **kwargs
)
self.tasks.add_job(msg_to_send.encode)
def send(self, message):
"""Write a message to the arduino gateway."""
self.tasks.transport.send(message)
class BaseSyncGateway(Gateway):
"""MySensors base sync gateway."""
def __init__(
self,
transport,
*args,
persistence=False,
persistence_file="mysensors.pickle",
**kwargs,
):
"""Set up gateway."""
super().__init__(*args, **kwargs)
self.tasks = SyncTasks(
self.const, persistence, persistence_file, self.sensors, transport
)
def start(self):
"""Start the gateway and task allow tasks to be scheduled."""
self.tasks.start()
def stop(self):
"""Stop the gateway and stop allowing tasks for the scheduler."""
self.tasks.stop()
def start_persistence(self):
"""Load persistence file and schedule saving of persistence file."""
self.tasks.start_persistence()
def update_fw(self, nids, fw_type, fw_ver, fw_path=None):
"""Update firmware of all node_ids in nids."""
self.tasks.update_fw(nids, fw_type, fw_ver, fw_path=fw_path)
class BaseAsyncGateway(Gateway):
"""MySensors base async gateway."""
def __init__(
self,
transport,
*args,
loop=None,
persistence=False,
persistence_file="mysensors.pickle",
**kwargs,
):
"""Set up gateway."""
super().__init__(*args, **kwargs)
self.tasks = AsyncTasks(
self.const,
persistence,
persistence_file,
self.sensors,
transport,
loop=loop,
)
async def start(self):
"""Start the gateway and task allow tasks to be scheduled."""
await self.tasks.start()
async def stop(self):
"""Stop the gateway and stop allowing tasks for the scheduler."""
await self.tasks.stop()
async def start_persistence(self):
"""Load persistence file and schedule saving of persistence file."""
await self.tasks.start_persistence()
async def update_fw(self, nids, fw_type, fw_ver, fw_path=None):
"""Update firmware of all node_ids in nids."""
await self.tasks.update_fw(nids, fw_type, fw_ver, fw_path=fw_path)
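if __name__ == "__main__":
    # A minimal usage sketch (illustration only, not part of the library API):
    # it exercises only methods defined above on the base Gateway and runs only
    # when this module is executed directly, not on import.
    _gateway = Gateway(protocol_version="1.4")
    _node_id = _gateway.add_sensor()      # allocates the next free node id (1)
    print(_gateway.is_sensor(_node_id))   # True once the node has been added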
|
|
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as sa_orm_exc
from webob import exc as web_exc
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.extensions import networkgw
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
NETWORK_ID = 'network_id'
SEGMENTATION_TYPE = 'segmentation_type'
SEGMENTATION_ID = 'segmentation_id'
ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
SEGMENTATION_TYPE,
SEGMENTATION_ID))
# Constants for gateway device operational status
STATUS_UNKNOWN = "UNKNOWN"
STATUS_ERROR = "ERROR"
STATUS_ACTIVE = "ACTIVE"
STATUS_DOWN = "DOWN"
class GatewayInUse(exceptions.InUse):
message = _("Network Gateway '%(gateway_id)s' still has active mappings "
"with one or more neutron networks.")
class GatewayNotFound(exceptions.NotFound):
message = _("Network Gateway %(gateway_id)s could not be found")
class GatewayDeviceInUse(exceptions.InUse):
message = _("Network Gateway Device '%(device_id)s' is still used by "
"one or more network gateways.")
class GatewayDeviceNotFound(exceptions.NotFound):
message = _("Network Gateway Device %(device_id)s could not be found.")
class NetworkGatewayPortInUse(exceptions.InUse):
message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
"therefore cannot be deleted directly via the port API.")
class GatewayConnectionInUse(exceptions.InUse):
message = _("The specified mapping '%(mapping)s' is already in use on "
"network gateway '%(gateway_id)s'.")
class MultipleGatewayConnections(exceptions.NeutronException):
message = _("Multiple network connections found on '%(gateway_id)s' "
"with provided criteria.")
class GatewayConnectionNotFound(exceptions.NotFound):
message = _("The connection %(network_mapping_info)s was not found on the "
"network gateway '%(network_gateway_id)s'")
class NetworkGatewayUnchangeable(exceptions.InUse):
message = _("The network gateway %(gateway_id)s "
"cannot be updated or deleted")
# Add exceptions to HTTP Faults mappings
base.FAULT_MAP.update({GatewayInUse: web_exc.HTTPConflict,
NetworkGatewayPortInUse: web_exc.HTTPConflict,
GatewayConnectionInUse: web_exc.HTTPConflict,
GatewayConnectionNotFound: web_exc.HTTPNotFound,
MultipleGatewayConnections: web_exc.HTTPConflict})
class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
"""Defines a connection between a network gateway and a network."""
# We use port_id as the primary key as one can connect a gateway
    # to a network in multiple ways (and we cannot use the same port for
# more than a single gateway)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'))
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete='CASCADE'))
segmentation_type = sa.Column(
sa.Enum('flat', 'vlan',
name='networkconnections_segmentation_type'))
segmentation_id = sa.Column(sa.Integer)
__table_args__ = (sa.UniqueConstraint(network_gateway_id,
segmentation_type,
segmentation_id),)
    # Also, storing the port id is useful when disconnecting a network
# from a gateway
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
primary_key=True)
class NetworkGatewayDeviceReference(model_base.BASEV2):
id = sa.Column(sa.String(36), primary_key=True)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'))
interface_name = sa.Column(sa.String(64))
class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
nsx_id = sa.Column(sa.String(36))
# Optional name for the gateway device
name = sa.Column(sa.String(255))
# Transport connector type. Not using enum as range of
# connector types might vary with backend version
connector_type = sa.Column(sa.String(10))
# Transport connector IP Address
connector_ip = sa.Column(sa.String(64))
# operational status
status = sa.Column(sa.String(16))
class NetworkGateway(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Defines the data model for a network gateway."""
name = sa.Column(sa.String(255))
# Tenant id is nullable for this resource
tenant_id = sa.Column(sa.String(36))
default = sa.Column(sa.Boolean())
devices = orm.relationship(NetworkGatewayDeviceReference,
backref='networkgateways',
cascade='all,delete')
network_connections = orm.relationship(NetworkConnection, lazy='joined')
class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
device_resource = networkgw.DEVICE_RESOURCE_NAME
def _get_network_gateway(self, context, gw_id):
try:
gw = self._get_by_id(context, NetworkGateway, gw_id)
except sa_orm_exc.NoResultFound:
raise GatewayNotFound(gateway_id=gw_id)
return gw
def _make_gw_connection_dict(self, gw_conn):
return {'port_id': gw_conn['port_id'],
'segmentation_type': gw_conn['segmentation_type'],
'segmentation_id': gw_conn['segmentation_id']}
def _make_network_gateway_dict(self, network_gateway, fields=None):
device_list = []
for d in network_gateway['devices']:
device_list.append({'id': d['id'],
'interface_name': d['interface_name']})
res = {'id': network_gateway['id'],
'name': network_gateway['name'],
'default': network_gateway['default'],
'devices': device_list,
'tenant_id': network_gateway['tenant_id']}
# Query gateway connections only if needed
if (fields and 'ports' in fields) or not fields:
res['ports'] = [self._make_gw_connection_dict(conn)
for conn in network_gateway.network_connections]
return self._fields(res, fields)
def _set_mapping_info_defaults(self, mapping_info):
if not mapping_info.get('segmentation_type'):
mapping_info['segmentation_type'] = 'flat'
if not mapping_info.get('segmentation_id'):
mapping_info['segmentation_id'] = 0
def _validate_network_mapping_info(self, network_mapping_info):
self._set_mapping_info_defaults(network_mapping_info)
network_id = network_mapping_info.get(NETWORK_ID)
if not network_id:
raise exceptions.InvalidInput(
error_message=_("A network identifier must be specified "
"when connecting a network to a network "
"gateway. Unable to complete operation"))
connection_attrs = set(network_mapping_info.keys())
if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
raise exceptions.InvalidInput(
error_message=(_("Invalid keys found among the ones provided "
"in request body: %(connection_attrs)s."),
connection_attrs))
seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
seg_id = network_mapping_info.get(SEGMENTATION_ID)
if not seg_type and seg_id:
msg = _("In order to specify a segmentation id the "
"segmentation type must be specified as well")
raise exceptions.InvalidInput(error_message=msg)
elif seg_type and seg_type.lower() == 'flat' and seg_id:
msg = _("Cannot specify a segmentation id when "
"the segmentation type is flat")
raise exceptions.InvalidInput(error_message=msg)
return network_id
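    # Illustrative mapping_info payloads for the validation above (NET_ID is
    # a hypothetical network uuid):
    #   {'network_id': NET_ID}                        -> defaults to flat/0, accepted
    #   {'network_id': NET_ID, 'segmentation_type': 'vlan',
    #    'segmentation_id': 555}                      -> accepted
    #   {'network_id': NET_ID, 'segmentation_type': 'flat',
    #    'segmentation_id': 555}                      -> rejected (id not allowed with flat)
    #   {'segmentation_type': 'vlan'}                 -> rejected (network id missing)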
def _retrieve_gateway_connections(self, context, gateway_id,
mapping_info={}, only_one=False):
filters = {'network_gateway_id': [gateway_id]}
for k, v in mapping_info.iteritems():
if v and k != NETWORK_ID:
filters[k] = [v]
query = self._get_collection_query(context,
NetworkConnection,
filters)
return only_one and query.one() or query.all()
def _unset_default_network_gateways(self, context):
with context.session.begin(subtransactions=True):
context.session.query(NetworkGateway).update(
{NetworkGateway.default: False})
def _set_default_network_gateway(self, context, gw_id):
with context.session.begin(subtransactions=True):
gw = (context.session.query(NetworkGateway).
filter_by(id=gw_id).one())
gw['default'] = True
def prevent_network_gateway_port_deletion(self, context, port):
"""Pre-deletion check.
        Ensures a port will not be deleted if it is being used by a network
gateway. In that case an exception will be raised.
"""
if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
raise NetworkGatewayPortInUse(port_id=port['id'],
device_owner=port['device_owner'])
def create_network_gateway(self, context, network_gateway):
gw_data = network_gateway[self.gateway_resource]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
with context.session.begin(subtransactions=True):
gw_db = NetworkGateway(
id=gw_data.get('id', uuidutils.generate_uuid()),
tenant_id=tenant_id,
name=gw_data.get('name'))
# Device list is guaranteed to be a valid list
# TODO(salv-orlando): Enforce that gateway device identifiers
# in this list are among the tenant's NSX network gateway devices
            # to avoid the risk of a tenant 'guessing' other tenants' devices
gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
for device in gw_data['devices']])
context.session.add(gw_db)
LOG.debug(_("Created network gateway with id:%s"), gw_db['id'])
return self._make_network_gateway_dict(gw_db)
def update_network_gateway(self, context, id, network_gateway):
gw_data = network_gateway[self.gateway_resource]
with context.session.begin(subtransactions=True):
gw_db = self._get_network_gateway(context, id)
if gw_db.default:
raise NetworkGatewayUnchangeable(gateway_id=id)
# Ensure there is something to update before doing it
if any([gw_db[k] != gw_data[k] for k in gw_data]):
gw_db.update(gw_data)
LOG.debug(_("Updated network gateway with id:%s"), id)
return self._make_network_gateway_dict(gw_db)
def get_network_gateway(self, context, id, fields=None):
gw_db = self._get_network_gateway(context, id)
return self._make_network_gateway_dict(gw_db, fields)
def delete_network_gateway(self, context, id):
with context.session.begin(subtransactions=True):
gw_db = self._get_network_gateway(context, id)
if gw_db.network_connections:
raise GatewayInUse(gateway_id=id)
if gw_db.default:
raise NetworkGatewayUnchangeable(gateway_id=id)
context.session.delete(gw_db)
LOG.debug(_("Network gateway '%s' was destroyed."), id)
def get_network_gateways(self, context, filters=None, fields=None):
return self._get_collection(context, NetworkGateway,
self._make_network_gateway_dict,
filters=filters, fields=fields)
def connect_network(self, context, network_gateway_id,
network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info)
LOG.debug(_("Connecting network '%(network_id)s' to gateway "
"'%(network_gateway_id)s'"),
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True):
gw_db = self._get_network_gateway(context, network_gateway_id)
tenant_id = self._get_tenant_id_for_create(context, gw_db)
# TODO(salvatore-orlando): Leverage unique constraint instead
# of performing another query!
if self._retrieve_gateway_connections(context,
network_gateway_id,
network_mapping_info):
raise GatewayConnectionInUse(mapping=network_mapping_info,
gateway_id=network_gateway_id)
# TODO(salvatore-orlando): Creating a port will give it an IP,
# but we actually do not need any. Instead of wasting an IP we
# should have a way to say a port shall not be associated with
# any subnet
try:
# We pass the segmentation type and id too - the plugin
# might find them useful as the network connection object
# does not exist yet.
# NOTE: they're not extended attributes, rather extra data
# passed in the port structure to the plugin
# TODO(salvatore-orlando): Verify optimal solution for
# ownership of the gateway port
port = self.create_port(context, {
'port':
{'tenant_id': tenant_id,
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'fixed_ips': [],
'device_id': network_gateway_id,
'device_owner': DEVICE_OWNER_NET_GW_INTF,
'name': '',
'gw:segmentation_type':
network_mapping_info.get('segmentation_type'),
'gw:segmentation_id':
network_mapping_info.get('segmentation_id')}})
except exceptions.NetworkNotFound:
err_msg = (_("Requested network '%(network_id)s' not found."
"Unable to create network connection on "
"gateway '%(network_gateway_id)s") %
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
LOG.error(err_msg)
raise exceptions.InvalidInput(error_message=err_msg)
port_id = port['id']
LOG.debug(_("Gateway port for '%(network_gateway_id)s' "
"created on network '%(network_id)s':%(port_id)s"),
{'network_gateway_id': network_gateway_id,
'network_id': network_id,
'port_id': port_id})
# Create NetworkConnection record
network_mapping_info['port_id'] = port_id
network_mapping_info['tenant_id'] = tenant_id
gw_db.network_connections.append(
NetworkConnection(**network_mapping_info))
port_id = port['id']
# now deallocate and recycle ip from the port
for fixed_ip in port.get('fixed_ips', []):
self._delete_ip_allocation(context, network_id,
fixed_ip['subnet_id'],
fixed_ip['ip_address'])
LOG.debug(_("Ensured no Ip addresses are configured on port %s"),
port_id)
return {'connection_info':
{'network_gateway_id': network_gateway_id,
'network_id': network_id,
'port_id': port_id}}
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info)
LOG.debug(_("Disconnecting network '%(network_id)s' from gateway "
"'%(network_gateway_id)s'"),
{'network_id': network_id,
'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True):
# Uniquely identify connection, otherwise raise
try:
net_connection = self._retrieve_gateway_connections(
context, network_gateway_id,
network_mapping_info, only_one=True)
except sa_orm_exc.NoResultFound:
raise GatewayConnectionNotFound(
network_mapping_info=network_mapping_info,
network_gateway_id=network_gateway_id)
except sa_orm_exc.MultipleResultsFound:
raise MultipleGatewayConnections(
gateway_id=network_gateway_id)
# Remove gateway port from network
# FIXME(salvatore-orlando): Ensure state of port in NVP is
# consistent with outcome of transaction
self.delete_port(context, net_connection['port_id'],
nw_gw_port_check=False)
# Remove NetworkConnection record
context.session.delete(net_connection)
def _make_gateway_device_dict(self, gateway_device, fields=None,
include_nsx_id=False):
res = {'id': gateway_device['id'],
'name': gateway_device['name'],
'status': gateway_device['status'],
'connector_type': gateway_device['connector_type'],
'connector_ip': gateway_device['connector_ip'],
'tenant_id': gateway_device['tenant_id']}
if include_nsx_id:
# Return the NSX mapping as well. This attribute will not be
# returned in the API response anyway. Ensure it will not be
# filtered out in field selection.
if fields:
fields.append('nsx_id')
res['nsx_id'] = gateway_device['nsx_id']
return self._fields(res, fields)
def _get_gateway_device(self, context, device_id):
try:
return self._get_by_id(context, NetworkGatewayDevice, device_id)
except sa_orm_exc.NoResultFound:
raise GatewayDeviceNotFound(device_id=device_id)
def _is_device_in_use(self, context, device_id):
query = self._get_collection_query(
context, NetworkGatewayDeviceReference, {'id': [device_id]})
return query.first()
def get_gateway_device(self, context, device_id, fields=None,
include_nsx_id=False):
return self._make_gateway_device_dict(
self._get_gateway_device(context, device_id),
fields, include_nsx_id)
def get_gateway_devices(self, context, filters=None, fields=None,
include_nsx_id=False):
query = self._get_collection_query(context,
NetworkGatewayDevice,
filters=filters)
return [self._make_gateway_device_dict(row, fields, include_nsx_id)
for row in query]
def create_gateway_device(self, context, gateway_device,
initial_status=STATUS_UNKNOWN):
device_data = gateway_device[self.device_resource]
tenant_id = self._get_tenant_id_for_create(context, device_data)
with context.session.begin(subtransactions=True):
device_db = NetworkGatewayDevice(
id=device_data.get('id', uuidutils.generate_uuid()),
tenant_id=tenant_id,
name=device_data.get('name'),
connector_type=device_data['connector_type'],
connector_ip=device_data['connector_ip'],
status=initial_status)
context.session.add(device_db)
LOG.debug(_("Created network gateway device: %s"), device_db['id'])
return self._make_gateway_device_dict(device_db)
def update_gateway_device(self, context, gateway_device_id,
gateway_device, include_nsx_id=False):
device_data = gateway_device[self.device_resource]
with context.session.begin(subtransactions=True):
device_db = self._get_gateway_device(context, gateway_device_id)
# Ensure there is something to update before doing it
if any([device_db[k] != device_data[k] for k in device_data]):
device_db.update(device_data)
LOG.debug(_("Updated network gateway device: %s"),
gateway_device_id)
return self._make_gateway_device_dict(
device_db, include_nsx_id=include_nsx_id)
def delete_gateway_device(self, context, device_id):
with context.session.begin(subtransactions=True):
# A gateway device should not be deleted
# if it is used in any network gateway service
if self._is_device_in_use(context, device_id):
raise GatewayDeviceInUse(device_id=device_id)
device_db = self._get_gateway_device(context, device_id)
context.session.delete(device_db)
LOG.debug(_("Deleted network gateway device: %s."), device_id)
|
|
# BLS wrapper
import pyfits
import numpy
import scipy
import scipy.ndimage.filters
import scipy.interpolate
import pylab as p
from math import *
import bls
import filter
from numpy.random import normal
import atpy
import medium_filter
print 'Dan rocks! x2'
gap_days = 0.02043365 # Kepler long-cadence sampling interval in days (~29.4 min)
#ascii_DIR = '/Users/angusr/angusr/data2/SAMSI/ascii_inj'
ascii_DIR = '/Users/angusr/angusr/data2/SAMSI'
ascii_file = 'kplr000892203-2009131105131_llc.txt'
#ascii_file = 'KIC_005383248_long.dat'
#ascii_file = 'k7372635.dat'
ascii_DIR = '/Users/angusr/angusr/data2/SAMSI/ascii_inj/detrended'
ascii_file = 'k2161400.dat'
#ascii_file = 'nopl_k2161400.dat'
fits_DIR = '/Users/angusr/.kplr/data/old'
fits_file = 'kplr006448890-2009259160929_llc.fits'
def run_BLS(Baines = False, type = 'ascii'):
print 'Loading lc...'
time, lc = load_data(type)
p.close(6)
p.figure(6)
p.plot(time,lc)
print 'Median normalise...'
lc = lc/numpy.median(lc)
print 'Median filter...'
lc = medium_filter.detrend_using_mf(lc)
''' Define parameters '''
min_period = 300
max_period = 450
freq_step = 0.000001 #0.0001
min_duration_hours = 10
max_duration_hours = 15
print '1st pass bls...'
ingresses, egresses, t_number, epoch, periods, bper, bpow, depth, qtran, \
duration, f_1, convolved_bls, approx_duration = compute_bls(time, \
lc, df = freq_step, \
nf = (1./min_period)/freq_step, nb = 1400, \
qmi = float(min_duration_hours)/24./450., \
qma = float(max_duration_hours)/24./300., \
fmin = (1./(float(max_period)*1.1)))
p.close(1)
p.figure(1)
p.subplot(2,1,1)
p.plot(time,lc,".k")
p.ylabel('Flux')
p.xlabel('Time')
for i in range(len(ingresses)):
p.axvline(ingresses[i], color = 'c')
p.axvline(egresses[i], color = 'c')
p.subplot(2,1,2)
p.plot(f_1, convolved_bls)
p.axvline(bper, color = 'r')
for i in range(2, 10):
p.axvline(i*bper, color = 'y')
p.axvline(bper/2., color = 'y')
p.axvline(3*bper/2., color = 'y')
print 'period = ', bper
p.xlim(min(f_1), max(f_1))
p.xlabel('Period')
if Baines == False:
p.close(2)
'''Folding lc ...'''
Fold(time, lc, bper, ingresses, egresses, plot_time = numpy.zeros(len(time)), \
plot_lc = numpy.zeros(len(time)), figure = 2)
print 'Cutting out 1st planet transits...'
time, lc, plot_time, plot_lc = cut_out_transits(time, lc, ingresses, egresses)
'''Folding lc ...'''
Fold(time, lc, bper, ingresses, egresses, plot_time, plot_lc, figure = 3)
print 'Interpolating...'
time, lc = interpolate(time, lc, approx_duration)
'''Folding lc ...'''
Fold(time, lc, bper, ingresses, egresses, plot_time, plot_lc, figure = 4)
freq_step = 0.000001
print '2nd pass bls...'
# ingresses, egresses, t_number, epoch, periods, bper, bpow, depth, qtran, \
# duration, f_1, convolved_bls = compute_bls(time, lc)
ingresses, egresses, t_number, epoch, periods, bper, bpow, depth, qtran, \
duration, f_1, convolved_bls, approx_duration = compute_bls(time, \
lc, df = freq_step, nf = (1./min_period)/freq_step,\
nb = 1400, qmi = float(min_duration_hours)/24./450., \
qma = float(max_duration_hours)/24./300., \
fmin = (1./(float(max_period)*1.1)))
p.close(5)
p.figure(5)
p.subplot(2,1,1)
p.plot(time,lc,".k")
p.ylabel('Flux')
p.xlabel('Time')
for i in range(len(ingresses)):
p.axvline(ingresses[i], color = 'c')
p.axvline(egresses[i], color = 'c')
p.subplot(2,1,2)
p.plot(f_1, convolved_bls)
p.axvline(bper, color = '0.5')
print 'period measurements = ', periods, bper
p.xlim(min(f_1), max(f_1))
p.xlabel('Period')
p.close(3)
'''Folding lc ...'''
Fold(time, lc, bper, ingresses, egresses, plot_time = numpy.zeros(len(time)), \
plot_lc = numpy.zeros(len(time)), figure = 2)
print 'Cutting out 2nd planet transits...'
time, lc, plot_time, plot_lc = cut_out_transits(time, lc, ingresses, egresses)
'''Folding lc ...'''
Fold(time, lc, bper, ingresses, egresses, plot_time, plot_lc, figure = 3)
print 'Interpolating...'
time, lc = interpolate(time, lc, approx_duration)
'''Folding lc ...'''
Fold(time, lc, bper, ingresses, egresses, plot_time, plot_lc, figure = 4)
return
def load_data(type):
if type == 'fits':
file = '%s/%s' %(fits_DIR, fits_file)
hdulist = pyfits.open(file)
tbdata = hdulist[1].data
'''Remove NANs'''
x = numpy.where(numpy.isfinite(tbdata['TIME']))
time = tbdata['TIME'][x]; lc = tbdata['PDCSAP_FLUX'][x]
x = numpy.where(numpy.isfinite(tbdata['PDCSAP_FLUX']))
time = tbdata['TIME'][x]; lc = tbdata['PDCSAP_FLUX'][x]
elif type == 'ascii':
f = open('%s/%s' %(ascii_DIR, ascii_file), 'r')
time = []
lc = []
for line in f:
line = line.strip()
columns = line.split()
time.append(float(columns[0][0:-1]))
lc.append(float(columns[1][0:-1]))
time = numpy.array(time); lc = numpy.array(lc)
x = numpy.where(numpy.isfinite(time))
time = time[x[0]]; lc = lc[x[0]]
x = numpy.where(numpy.isfinite(lc))
time = time[x[0]]; lc = lc[x[0]]
return numpy.array(time), numpy.array(lc)
# def medfilt (x, k):
# """Apply a length-k median filter to a 1D array x.
# Boundaries are extended by repeating endpoints.
# """
# assert k % 2 == 1, "Median filter length must be odd."
# assert x.ndim == 1, "Input must be one-dimensional."
# k2 = (k - 1) // 2
# y = numpy.zeros ((len (x), k), dtype=x.dtype)
# y[:,k2] = x
# for i in range (k2):
# j = k2 - i
# y[j:,i] = x[:-j]
# y[:j,i] = x[0]
# y[:-j,-(i+1)] = x[j:]
# y[-j:,-(i+1)] = x[-1]
# return numpy.median (y, axis=1)
def compute_bls(time, lc, df, nf, nb, qmi, qma, fmin):
'''Calculate BLS'''
bls, f_1, nb = BLS(time, lc, df, nf, nb, qmi, qma, fmin)
print 'Complete'
bper = bls[1]
bpow = bls[2]
depth = bls[3]
qtran = bls[4]
duration = bper*qtran
in1 = bls[5]
in2 = bls[6]
phase1 = in1/float(nb)
phase2 = in2/float(nb)
'''Convolve with gaussian'''
    # The smoothing width (sigma = 2 bins) may need tuning; it is related to df.
    convolved_bls = scipy.ndimage.filters.gaussian_filter(bls[0], 2.0)
'''Locate all peaks'''
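    # A bin is flagged as a peak when it is higher than both of its
    # neighbours: the first shifted comparison marks bins higher than the
    # previous one, the second marks bins higher than the next one, and the
    # end points are padded with True so they can also qualify.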
peak = numpy.r_[True, convolved_bls[1:] > convolved_bls[:-1]] & \
numpy.r_[convolved_bls[:-1] > convolved_bls[1:], True]
'''Sort peaks'''
sel_peaks = numpy.sort(convolved_bls[peak])
sel_peaks = sel_peaks[-1:]
'''locate highest peak'''
periods = f_1[numpy.where(convolved_bls == sel_peaks)]
'''calculate number of transits, epoch, ingress and egress times'''
t_number = int((max(time) - min(time)) / bper)
epoch = time[0] + phase1*bper
ingresses = numpy.zeros(t_number)
egresses = numpy.zeros(t_number)
for n in range(0,t_number):
ingresses[n] = (epoch + bper*n) - 0.2
egresses[n] = epoch + bper*n + duration + 0.2 # add a margin each side
approx_duration = egresses[0] - ingresses[0]
return ingresses, egresses, t_number, epoch, periods, bper, bpow, depth, qtran, \
duration, f_1, convolved_bls, approx_duration
#---------------------------------------------------------------------------------------
''' df = frequency step,
nf = number of frequencies,
nb = number of bins,
qmi = minimum fractional transit duration,
qma = maximum fractional transit duration,
fmin = minimum frequency '''
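# Worked example with the values used in run_BLS (illustrative): with
# min_period = 300 d, max_period = 450 d and freq_step = 1e-6 d^-1 the grid
# starts at fmin = 1/(450*1.1) ~= 2.02e-3 d^-1 and takes
# nf = (1/300)/1e-6 ~= 3333 steps of df = 1e-6, while the duration limits are
# qmi = 10/24/450 ~= 9.3e-4 and qma = 15/24/300 ~= 2.1e-3, i.e. fractions of
# the longest and shortest trial periods respectively.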
def BLS(time, lc, df = 0.0001, nf = 500, nb = 200, qmi = 0.01,\
qma = 0.8, fmin = (1./(400.0*1.1))):
diffs = time[1:] - time[:-1]
u = numpy.ones(len(time))
v = numpy.ones(len(time))
    results = bls.eebls(time, lc, u, v, nf, fmin, df, nb, qmi, qma)
    f = fmin + (numpy.arange(len(results[0])))*df
    return results, 1/f, nb
#---------------------------------------------------------------------------------------
def cut_out_transits(time, lc, ingresses, egresses):
new_time = list(time); new_lc = list(lc)
plot_time = []; plot_lc = []
for j in range(0, len(ingresses)):
for i in range(0,len(time)):
if ingresses[j] < time[i] < egresses[j]:
new_time.remove(time[i])
new_lc.remove(lc[i])
plot_time.append(time[i])
plot_lc.append(lc[i])
time = numpy.array(new_time)
lc = numpy.array(new_lc)
# p.close(2)
# p.figure(2)
# p.plot(time,lc, 'k.')
# for i in range(len(ingresses)):
# p.axvline(ingresses[i], color = 'c')
# p.axvline(egresses[i], color = 'c')
return time, lc, plot_time, plot_lc
#---------------------------------------------------------------------------------------
def Fold(time, lc, bper, ingresses, \
egresses, plot_time, plot_lc, figure ):
''' Fold '''
#phases = (time-time[0]) % bper
ingress = (ingresses[0] - time[0]) / bper
egress = (egresses[0] - time[0]) / bper
plot_phase = (plot_time - plot_time[0]) / bper
for i in range(0,len(plot_time)):
plot_phase[i] -= int(plot_phase[i])
phases = (time - time[0]) / bper
for i in range(0, len(phases)):
phases[i] -= int(phases[i])
# p.close(figure)
# p.figure(figure)
#p.close(2)
p.figure(2)
#p.subplot(2,1,1)
p.subplot(3,1,figure-1)
p.plot(phases, lc, 'k.')
if figure == 3:
p.plot(plot_phase, plot_lc, 'r.')
p.axvline(ingress, color = 'c')
p.axvline(egress, color = 'c')
#p.xlabel('Phase')
p.ylabel('Flux')
p.xlim(ingress - (egress-ingress)*2, egress + (egress - ingress)*2)
# #p.subplot(2,1,2)
# p.plot(time, lc, 'k.')
# p.xlabel('Time (days)')
# p.ylabel('Flux')
# for i in range(len(ingresses)):
# p.axvline(ingresses[i], color = 'c')
# p.axvline(egresses[i], color = 'c')
# if figure == 3:
# p.plot(plot_time, plot_lc, 'r.')
# p.xlim(min(time), max(time))
return
#---------------------------------------------------------------------------------------
def interpolate(time, flux, approx_duration):
min_gap = approx_duration/gap_days
# p.close(5)
# p.figure(5)
# p.subplot(2,1,1)
# p.plot(time, flux, 'k.')
''' Calculate noise properties '''
flux_filt = filter.filt1d(flux, 10, 5)
med_noise, sig_noise = filter.medsig(flux-flux_filt)
    '''find gaps longer than min_gap cadences (~the transit duration) and fill them by linear interpolation, adding noise'''
diff1 = time[1:] - time[:-1]
diff1 = scipy.append(diff1, gap_days)
gap_find = diff1 > min_gap*gap_days*0.9 #1.1*gap_days
gap_times = time[gap_find]
time_index = scipy.r_[0:len(time):1]
fill_arr_t = scipy.zeros(1)
fill_arr_f = scipy.zeros(1)
#fill_arr_nan = scipy.zeros(1)
print 'Filling gaps...'
for m in scipy.arange(len(gap_times)):
time_start = time[time_index[gap_find][m]]
flux_start = flux[time_index[gap_find][m]]
time_end = time[time_index[gap_find][m] + 1]
flux_end = flux[time_index[gap_find][m] + 1]
span = time_end - time_start
if span < 2.0*gap_days:
fill = scipy.array([time_start, time_start+gap_days, time_end])
else: fill = scipy.r_[time_start: time_end: gap_days]
fill = fill[1:-1]
if time_end - fill.max() > 1.1*gap_days: #1.1*gap_days*min_gap:
fill = scipy.append(fill, fill.max()+gap_days)
f = scipy.interpolate.interp1d([time_start, time_end], [flux_start, flux_end])
gap_new = f(fill)
if sig_noise > 0: gap_new += normal(0,sig_noise,len(fill))
fill_arr_t = scipy.append(fill_arr_t, fill)
fill_arr_f = scipy.append(fill_arr_f, gap_new)
#fill_arr_nan = scipy.append(fill_arr_nan, scipy.ones(len(fill))*scipy.nan)
# combine time and flux arrays with their filled sections
fill_arr_t = fill_arr_t[1:]
fill_arr_f = fill_arr_f[1:]
#fill_arr_nan = fill_arr_nan[1:]
fill_arr_t = scipy.append(fill_arr_t, time)
fill_arr_f = scipy.append(fill_arr_f, flux)
#fill_arr_nan = scipy.append(fill_arr_nan, flux_base)
if len(fill_arr_t) == 0:
print '*************** empty time array ***************'
return False, atpy.Table(), 0, 0
# put in table and sort
tf = atpy.Table()
tf.add_column('time', fill_arr_t)
tf.add_column('flux', fill_arr_f)
# tf.add_column('flux_pdc', fill_arr_nan)
tf.sort('time')
# for i in range(0, len(gap_times)):
# p.axvline(gap_times[i], color = 'y')
# p.subplot(2,1,2)
# p.plot(fill_arr_t, fill_arr_f, 'k.')
return tf.time, tf.flux
if __name__ == '__main__':
    run_BLS()
|
|
"""The tests for the Rfxtrx sensor platform."""
import unittest
import pytest
from homeassistant.bootstrap import setup_component
from homeassistant.components import rfxtrx as rfxtrx_core
from homeassistant.const import TEMP_CELSIUS
from tests.common import get_test_home_assistant
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestSensorRfxtrx(unittest.TestCase):
"""Test the Rfxtrx sensor platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
self.hass.config.components = ['rfxtrx']
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
if rfxtrx_core.RFXOBJECT:
rfxtrx_core.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test with 0 sensor."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{}}}))
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_old_config_sensor(self):
"""Test with 1 sensor."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'sensor_0502': {
'name': 'Test',
'packetid': '0a52080705020095220269',
'data_type': 'Temperature'}}}}))
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(None, entity.state)
def test_one_sensor(self):
"""Test with 1 sensor."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'}}}}))
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(None, entity.state)
def test_one_sensor_no_datatype(self):
"""Test with 1 sensor."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test'}}}}))
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(None, entity.state)
entity_id = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']\
.entity_id
entity = self.hass.states.get(entity_id)
self.assertEqual('Test', entity.name)
self.assertEqual('unknown', entity.state)
def test_several_sensors(self):
"""Test with 3 sensors."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'},
'0a520802060100ff0e0269': {
'name': 'Bath',
'data_type': ['Temperature', 'Humidity']
}}}}))
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(None, _entity_hum.state)
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(None, entity.state)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
def test_discover_sensor(self):
"""Test with discovery of sensor."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature']
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 18.4,
'Rssi numeric': 7, 'Humidity': 27,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('0a520801070100b81b0279',
entity.__str__())
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 7, 'Humidity': 36,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('0a52080405020095240279',
entity.__str__())
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature']
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 17.9,
'Rssi numeric': 7, 'Humidity': 27,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('0a520801070100b81b0279',
entity.__str__())
# trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100cd0213c7f210010f70')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
def test_discover_sensor_noautoadd(self):
"""Test with discover of sensor when auto add is False."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': False,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_update_of_sensors(self):
"""Test with 3 sensors."""
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'},
'0a520802060100ff0e0269': {
'name': 'Bath',
'data_type': ['Temperature', 'Humidity']
}}}}))
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(None, _entity_temp.state)
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(None, entity.state)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
event = rfxtrx_core.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
event = rfxtrx_core.get_rfx_object('0a52080705020085220269')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual(15, _entity_hum.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_hum.device_state_attributes)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual(51.1, _entity_temp.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_temp.device_state_attributes)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(13.3, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 13.3,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Sqlite coverage data."""
# TODO: factor out dataop debugging to a wrapper class?
# TODO: make sure all dataop debugging is in place somehow
import collections
import datetime
import functools
import glob
import itertools
import os
import random
import re
import socket
import sqlite3
import sys
import threading
import zlib
from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr
from coverage.exceptions import CoverageException, DataError
from coverage.files import PathAliases
from coverage.misc import contract, file_be_gone, isolate_module
from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits
from coverage.version import __version__
os = isolate_module(os)
# If you change the schema, increment the SCHEMA_VERSION, and update the
# docs in docs/dbschema.rst also.
SCHEMA_VERSION = 7
# Schema versions:
# 1: Released in 5.0a2
# 2: Added contexts in 5.0a3.
# 3: Replaced line table with line_map table.
# 4: Changed line_map.bitmap to line_map.numbits.
# 5: Added foreign key declarations.
# 6: Key-value in meta.
# 7: line_map -> line_bits
SCHEMA = """\
CREATE TABLE coverage_schema (
-- One row, to record the version of the schema in this db.
version integer
);
CREATE TABLE meta (
-- Key-value pairs, to record metadata about the data
key text,
value text,
unique (key)
-- Keys:
-- 'has_arcs' boolean -- Is this data recording branches?
-- 'sys_argv' text -- The coverage command line that recorded the data.
-- 'version' text -- The version of coverage.py that made the file.
-- 'when' text -- Datetime when the file was created.
);
CREATE TABLE file (
-- A row per file measured.
id integer primary key,
path text,
unique (path)
);
CREATE TABLE context (
-- A row per context measured.
id integer primary key,
context text,
unique (context)
);
CREATE TABLE line_bits (
-- If recording lines, a row per context per file executed.
-- All of the line numbers for that file/context are in one numbits.
file_id integer, -- foreign key to `file`.
context_id integer, -- foreign key to `context`.
numbits blob, -- see the numbits functions in coverage.numbits
foreign key (file_id) references file (id),
foreign key (context_id) references context (id),
unique (file_id, context_id)
);
CREATE TABLE arc (
-- If recording branches, a row per context per from/to line transition executed.
file_id integer, -- foreign key to `file`.
context_id integer, -- foreign key to `context`.
fromno integer, -- line number jumped from.
tono integer, -- line number jumped to.
foreign key (file_id) references file (id),
foreign key (context_id) references context (id),
unique (file_id, context_id, fromno, tono)
);
CREATE TABLE tracer (
-- A row per file indicating the tracer used for that file.
file_id integer primary key,
tracer text,
foreign key (file_id) references file (id)
);
"""
class CoverageData(SimpleReprMixin):
"""Manages collected coverage data, including file storage.
This class is the public supported API to the data that coverage.py
collects during program execution. It includes information about what code
was executed. It does not include information from the analysis phase, to
determine what lines could have been executed, or what lines were not
executed.
.. note::
The data file is currently a SQLite database file, with a
:ref:`documented schema <dbschema>`. The schema is subject to change
though, so be careful about querying it directly. Use this API if you
can to isolate yourself from changes.
There are a number of kinds of data that can be collected:
* **lines**: the line numbers of source lines that were executed.
These are always available.
* **arcs**: pairs of source and destination line numbers for transitions
between source lines. These are only available if branch coverage was
used.
* **file tracer names**: the module names of the file tracer plugins that
handled each file in the data.
Lines, arcs, and file tracer names are stored for each source file. File
names in this API are case-sensitive, even on platforms with
case-insensitive file systems.
    A data file stores either lines or arcs, but not both.
A data file is associated with the data when the :class:`CoverageData`
is created, using the parameters `basename`, `suffix`, and `no_disk`. The
base name can be queried with :meth:`base_filename`, and the actual file
name being used is available from :meth:`data_filename`.
To read an existing coverage.py data file, use :meth:`read`. You can then
access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
or :meth:`file_tracer`.
The :meth:`has_arcs` method indicates whether arc data is available. You
can get a set of the files in the data with :meth:`measured_files`. As
with most Python containers, you can determine if there is any data at all
by using this object as a boolean value.
The contexts for each line in a file can be read with
:meth:`contexts_by_lineno`.
To limit querying to certain contexts, use :meth:`set_query_context` or
:meth:`set_query_contexts`. These will narrow the focus of subsequent
:meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set
of all measured context names can be retrieved with
:meth:`measured_contexts`.
Most data files will be created by coverage.py itself, but you can use
methods here to create data files if you like. The :meth:`add_lines`,
:meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
that are convenient for coverage.py.
To record data for contexts, use :meth:`set_context` to set a context to
be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls.
To add a source file without any measured data, use :meth:`touch_file`,
or :meth:`touch_files` for a list of such files.
Write the data to its file with :meth:`write`.
You can clear the data in memory with :meth:`erase`. Two data collections
can be combined by using :meth:`update` on one :class:`CoverageData`,
passing it the other.
Data in a :class:`CoverageData` can be serialized and deserialized with
:meth:`dumps` and :meth:`loads`.
The methods used during the coverage.py collection phase
(:meth:`add_lines`, :meth:`add_arcs`, :meth:`set_context`, and
:meth:`add_file_tracers`) are thread-safe. Other methods may not be.
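    A minimal usage sketch (illustrative; the data file name is hypothetical)::

        from coverage import CoverageData

        covdata = CoverageData(basename="example.coverage")
        covdata.read()
        for fname in covdata.measured_files():
            print(fname, covdata.lines(fname))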
"""
def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None):
"""Create a :class:`CoverageData` object to hold coverage-measured data.
Arguments:
basename (str): the base name of the data file, defaulting to
".coverage". This can be a path to a file in another directory.
suffix (str or bool): has the same meaning as the `data_suffix`
argument to :class:`coverage.Coverage`.
no_disk (bool): if True, keep all data in memory, and don't
write any disk file.
warn: a warning callback function, accepting a warning message
argument.
debug: a `DebugControl` object (optional)
"""
self._no_disk = no_disk
self._basename = os.path.abspath(basename or ".coverage")
self._suffix = suffix
self._warn = warn
self._debug = debug or NoDebugging()
self._choose_filename()
self._file_map = {}
# Maps thread ids to SqliteDb objects.
self._dbs = {}
self._pid = os.getpid()
# Synchronize the operations used during collection.
self._lock = threading.RLock()
# Are we in sync with the data file?
self._have_used = False
self._has_lines = False
self._has_arcs = False
self._current_context = None
self._current_context_id = None
self._query_context_ids = None
def _locked(method): # pylint: disable=no-self-argument
"""A decorator for methods that should hold self._lock."""
@functools.wraps(method)
def _wrapped(self, *args, **kwargs):
if self._debug.should("lock"):
self._debug.write(f"Locking {self._lock!r} for {method.__name__}")
with self._lock:
if self._debug.should("lock"):
self._debug.write(f"Locked {self._lock!r} for {method.__name__}")
# pylint: disable=not-callable
return method(self, *args, **kwargs)
return _wrapped
def _choose_filename(self):
"""Set self._filename based on inited attributes."""
if self._no_disk:
self._filename = ":memory:"
else:
self._filename = self._basename
suffix = filename_suffix(self._suffix)
if suffix:
self._filename += "." + suffix
def _reset(self):
"""Reset our attributes."""
if self._dbs:
for db in self._dbs.values():
db.close()
self._dbs = {}
self._file_map = {}
self._have_used = False
self._current_context_id = None
def _open_db(self):
"""Open an existing db file, and read its metadata."""
if self._debug.should("dataio"):
self._debug.write(f"Opening data file {self._filename!r}")
self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug)
self._read_db()
def _read_db(self):
"""Read the metadata from a database so that we are ready to use it."""
with self._dbs[threading.get_ident()] as db:
try:
schema_version, = db.execute_one("select version from coverage_schema")
except Exception as exc:
if "no such table: coverage_schema" in str(exc):
self._init_db(db)
else:
raise DataError(
"Data file {!r} doesn't seem to be a coverage data file: {}".format(
self._filename, exc
)
) from exc
else:
if schema_version != SCHEMA_VERSION:
raise DataError(
"Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
self._filename, schema_version, SCHEMA_VERSION
)
)
for row in db.execute("select value from meta where key = 'has_arcs'"):
self._has_arcs = bool(int(row[0]))
self._has_lines = not self._has_arcs
for path, file_id in db.execute("select path, id from file"):
self._file_map[path] = file_id
def _init_db(self, db):
"""Write the initial contents of the database."""
if self._debug.should("dataio"):
self._debug.write(f"Initing data file {self._filename!r}")
db.executescript(SCHEMA)
db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
db.executemany(
"insert or ignore into meta (key, value) values (?, ?)",
[
("sys_argv", str(getattr(sys, "argv", None))),
("version", __version__),
("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
]
)
def _connect(self):
"""Get the SqliteDb object to use."""
if threading.get_ident() not in self._dbs:
self._open_db()
return self._dbs[threading.get_ident()]
def __bool__(self):
if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)):
return False
try:
with self._connect() as con:
rows = con.execute("select * from file limit 1")
return bool(list(rows))
except CoverageException:
return False
@contract(returns="bytes")
def dumps(self):
"""Serialize the current data to a byte string.
The format of the serialized data is not documented. It is only
suitable for use with :meth:`loads` in the same version of
coverage.py.
Note that this serialization is not what gets stored in coverage data
files. This method is meant to produce bytes that can be transmitted
elsewhere and then deserialized with :meth:`loads`.
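        A sketch of the round trip with :meth:`loads` (illustrative)::

            data1 = CoverageData()
            data1.read()
            blob = data1.dumps()
            data2 = CoverageData(no_disk=True)
            data2.loads(blob)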
Returns:
A byte string of serialized data.
.. versionadded:: 5.0
"""
if self._debug.should("dataio"):
self._debug.write(f"Dumping data from data file {self._filename!r}")
with self._connect() as con:
script = con.dump()
return b"z" + zlib.compress(script.encode("utf-8"))
@contract(data="bytes")
def loads(self, data):
"""Deserialize data from :meth:`dumps`.
Use with a newly-created empty :class:`CoverageData` object. It's
undefined what happens if the object already has data in it.
Note that this is not for reading data from a coverage data file. It
is only for use on data you produced with :meth:`dumps`.
Arguments:
data: A byte string of serialized data produced by :meth:`dumps`.
.. versionadded:: 5.0
"""
if self._debug.should("dataio"):
self._debug.write(f"Loading data into data file {self._filename!r}")
if data[:1] != b"z":
raise DataError(
f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)"
)
script = zlib.decompress(data[1:]).decode("utf-8")
self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug)
with db:
db.executescript(script)
self._read_db()
self._have_used = True
def _file_id(self, filename, add=False):
"""Get the file id for `filename`.
If filename is not in the database yet, add it if `add` is True.
If `add` is not True, return None.
"""
if filename not in self._file_map:
if add:
with self._connect() as con:
cur = con.execute("insert or replace into file (path) values (?)", (filename,))
self._file_map[filename] = cur.lastrowid
return self._file_map.get(filename)
def _context_id(self, context):
"""Get the id for a context."""
assert context is not None
self._start_using()
with self._connect() as con:
row = con.execute_one("select id from context where context = ?", (context,))
if row is not None:
return row[0]
else:
return None
@_locked
def set_context(self, context):
"""Set the current context for future :meth:`add_lines` etc.
`context` is a str, the name of the context to use for the next data
additions. The context persists until the next :meth:`set_context`.
.. versionadded:: 5.0
"""
if self._debug.should("dataop"):
self._debug.write(f"Setting context: {context!r}")
self._current_context = context
self._current_context_id = None
def _set_context_id(self):
"""Use the _current_context to set _current_context_id."""
context = self._current_context or ""
context_id = self._context_id(context)
if context_id is not None:
self._current_context_id = context_id
else:
with self._connect() as con:
cur = con.execute("insert into context (context) values (?)", (context,))
self._current_context_id = cur.lastrowid
def base_filename(self):
"""The base filename for storing data.
.. versionadded:: 5.0
"""
return self._basename
def data_filename(self):
"""Where is the data stored?
.. versionadded:: 5.0
"""
return self._filename
@_locked
def add_lines(self, line_data):
"""Add measured line data.
`line_data` is a dictionary mapping file names to iterables of ints::
{ filename: { line1, line2, ... }, ...}
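        For example, { "main.py": {1, 2, 3}, "util.py": {17} } records lines
        1-3 of main.py and line 17 of util.py (the names are illustrative).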
"""
if self._debug.should("dataop"):
self._debug.write("Adding lines: %d files, %d lines total" % (
len(line_data), sum(len(lines) for lines in line_data.values())
))
self._start_using()
self._choose_lines_or_arcs(lines=True)
if not line_data:
return
with self._connect() as con:
self._set_context_id()
for filename, linenos in line_data.items():
linemap = nums_to_numbits(linenos)
file_id = self._file_id(filename, add=True)
query = "select numbits from line_bits where file_id = ? and context_id = ?"
existing = list(con.execute(query, (file_id, self._current_context_id)))
if existing:
linemap = numbits_union(linemap, existing[0][0])
con.execute(
"insert or replace into line_bits " +
" (file_id, context_id, numbits) values (?, ?, ?)",
(file_id, self._current_context_id, linemap),
)
@_locked
def add_arcs(self, arc_data):
"""Add measured arc data.
`arc_data` is a dictionary mapping file names to iterables of pairs of
ints::
{ filename: { (l1,l2), (l1,l2), ... }, ...}
"""
if self._debug.should("dataop"):
self._debug.write("Adding arcs: %d files, %d arcs total" % (
len(arc_data), sum(len(arcs) for arcs in arc_data.values())
))
self._start_using()
self._choose_lines_or_arcs(arcs=True)
if not arc_data:
return
with self._connect() as con:
self._set_context_id()
for filename, arcs in arc_data.items():
file_id = self._file_id(filename, add=True)
from coverage import env
if env.PYVERSION == (3, 11, 0, "alpha", 4, 0):
arcs = [(a, b) for a, b in arcs if a is not None and b is not None]
data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
con.executemany(
"insert or ignore into arc " +
"(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
data,
)
def _choose_lines_or_arcs(self, lines=False, arcs=False):
"""Force the data file to choose between lines and arcs."""
assert lines or arcs
assert not (lines and arcs)
if lines and self._has_arcs:
if self._debug.should("dataop"):
self._debug.write("Error: Can't add line measurements to existing branch data")
raise DataError("Can't add line measurements to existing branch data")
if arcs and self._has_lines:
if self._debug.should("dataop"):
self._debug.write("Error: Can't add branch measurements to existing line data")
raise DataError("Can't add branch measurements to existing line data")
if not self._has_arcs and not self._has_lines:
self._has_lines = lines
self._has_arcs = arcs
with self._connect() as con:
con.execute(
"insert or ignore into meta (key, value) values (?, ?)",
("has_arcs", str(int(arcs)))
)
@_locked
def add_file_tracers(self, file_tracers):
"""Add per-file plugin information.
`file_tracers` is { filename: plugin_name, ... }
"""
if self._debug.should("dataop"):
self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
if not file_tracers:
return
self._start_using()
with self._connect() as con:
for filename, plugin_name in file_tracers.items():
file_id = self._file_id(filename)
if file_id is None:
raise DataError(
f"Can't add file tracer data for unmeasured file '{filename}'"
)
existing_plugin = self.file_tracer(filename)
if existing_plugin:
if existing_plugin != plugin_name:
raise DataError(
"Conflicting file tracer name for '{}': {!r} vs {!r}".format(
filename, existing_plugin, plugin_name,
)
)
elif plugin_name:
con.execute(
"insert into tracer (file_id, tracer) values (?, ?)",
(file_id, plugin_name)
)
def touch_file(self, filename, plugin_name=""):
"""Ensure that `filename` appears in the data, empty if needed.
`plugin_name` is the name of the plugin responsible for this file. It is used
to associate the right filereporter, etc.
"""
self.touch_files([filename], plugin_name)
def touch_files(self, filenames, plugin_name=""):
"""Ensure that `filenames` appear in the data, empty if needed.
`plugin_name` is the name of the plugin responsible for these files. It is used
to associate the right filereporter, etc.
"""
if self._debug.should("dataop"):
self._debug.write(f"Touching {filenames!r}")
self._start_using()
with self._connect(): # Use this to get one transaction.
if not self._has_arcs and not self._has_lines:
raise DataError("Can't touch files in an empty CoverageData")
for filename in filenames:
self._file_id(filename, add=True)
if plugin_name:
# Set the tracer for this file
self.add_file_tracers({filename: plugin_name})
def update(self, other_data, aliases=None):
"""Update this data with data from several other :class:`CoverageData` instances.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
if self._debug.should("dataop"):
self._debug.write("Updating with data from {!r}".format(
getattr(other_data, "_filename", "???"),
))
if self._has_lines and other_data._has_arcs:
raise DataError("Can't combine arc data with line data")
if self._has_arcs and other_data._has_lines:
raise DataError("Can't combine line data with arc data")
aliases = aliases or PathAliases()
        # Make sure the database we're writing to exists before we start
        # nesting contexts.
self._start_using()
# Collector for all arcs, lines and tracers
other_data.read()
with other_data._connect() as conn:
# Get files data.
cur = conn.execute("select path from file")
files = {path: aliases.map(path) for (path,) in cur}
cur.close()
# Get contexts data.
cur = conn.execute("select context from context")
contexts = [context for (context,) in cur]
cur.close()
# Get arc data.
cur = conn.execute(
"select file.path, context.context, arc.fromno, arc.tono " +
"from arc " +
"inner join file on file.id = arc.file_id " +
"inner join context on context.id = arc.context_id"
)
arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
cur.close()
# Get line data.
cur = conn.execute(
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
"inner join context on context.id = line_bits.context_id"
)
lines = {(files[path], context): numbits for (path, context, numbits) in cur}
cur.close()
# Get tracer data.
cur = conn.execute(
"select file.path, tracer " +
"from tracer " +
"inner join file on file.id = tracer.file_id"
)
tracers = {files[path]: tracer for (path, tracer) in cur}
cur.close()
with self._connect() as conn:
conn.con.isolation_level = "IMMEDIATE"
# Get all tracers in the DB. Files not in the tracers are assumed
# to have an empty string tracer. Since Sqlite does not support
# full outer joins, we have to make two queries to fill the
# dictionary.
this_tracers = {path: "" for path, in conn.execute("select path from file")}
this_tracers.update({
aliases.map(path): tracer
for path, tracer in conn.execute(
"select file.path, tracer from tracer " +
"inner join file on file.id = tracer.file_id"
)
})
# Create all file and context rows in the DB.
conn.executemany(
"insert or ignore into file (path) values (?)",
((file,) for file in files.values())
)
file_ids = {
path: id
for id, path in conn.execute("select id, path from file")
}
conn.executemany(
"insert or ignore into context (context) values (?)",
((context,) for context in contexts)
)
context_ids = {
context: id
for id, context in conn.execute("select id, context from context")
}
            # Prepare tracers, failing if a conflict is found. tracer_map
            # tracks the tracers to be inserted.
tracer_map = {}
for path in files.values():
this_tracer = this_tracers.get(path)
other_tracer = tracers.get(path, "")
# If there is no tracer, there is always the None tracer.
if this_tracer is not None and this_tracer != other_tracer:
raise DataError(
"Conflicting file tracer name for '{}': {!r} vs {!r}".format(
path, this_tracer, other_tracer
)
)
tracer_map[path] = other_tracer
# Prepare arc and line rows to be inserted by converting the file
# and context strings with integer ids. Then use the efficient
# `executemany()` to insert all rows at once.
arc_rows = (
(file_ids[file], context_ids[context], fromno, tono)
for file, context, fromno, tono in arcs
)
# Get line data.
cur = conn.execute(
"select file.path, context.context, line_bits.numbits " +
"from line_bits " +
"inner join file on file.id = line_bits.file_id " +
"inner join context on context.id = line_bits.context_id"
)
for path, context, numbits in cur:
key = (aliases.map(path), context)
if key in lines:
numbits = numbits_union(lines[key], numbits)
lines[key] = numbits
cur.close()
if arcs:
self._choose_lines_or_arcs(arcs=True)
# Write the combined data.
conn.executemany(
"insert or ignore into arc " +
"(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
arc_rows
)
if lines:
self._choose_lines_or_arcs(lines=True)
conn.execute("delete from line_bits")
conn.executemany(
"insert into line_bits " +
"(file_id, context_id, numbits) values (?, ?, ?)",
[
(file_ids[file], context_ids[context], numbits)
for (file, context), numbits in lines.items()
]
)
conn.executemany(
"insert or ignore into tracer (file_id, tracer) values (?, ?)",
((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
)
# Update all internal cache data.
self._reset()
self.read()
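    # Usage sketch (added, illustrative): merging a parallel data file into this
    # one while remapping CI paths to local ones, assuming the PathAliases API
    # used above:
    #
    #     aliases = PathAliases()
    #     aliases.add("/ci/build/project", "/home/dev/project")
    #     combined.update(other_data, aliases=aliases)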
def erase(self, parallel=False):
"""Erase the data in this object.
If `parallel` is true, then also deletes data files created from the
basename by parallel-mode.
"""
self._reset()
if self._no_disk:
return
if self._debug.should("dataio"):
self._debug.write(f"Erasing data file {self._filename!r}")
file_be_gone(self._filename)
if parallel:
data_dir, local = os.path.split(self._filename)
localdot = local + ".*"
pattern = os.path.join(os.path.abspath(data_dir), localdot)
for filename in glob.glob(pattern):
if self._debug.should("dataio"):
self._debug.write(f"Erasing parallel data file {filename!r}")
file_be_gone(filename)
def read(self):
"""Start using an existing data file."""
with self._connect(): # TODO: doesn't look right
self._have_used = True
def write(self):
"""Ensure the data is written to the data file."""
pass
def _start_using(self):
"""Call this before using the database at all."""
if self._pid != os.getpid():
# Looks like we forked! Have to start a new data file.
self._reset()
self._choose_filename()
self._pid = os.getpid()
if not self._have_used:
self.erase()
self._have_used = True
def has_arcs(self):
"""Does the database have arcs (True) or lines (False)."""
return bool(self._has_arcs)
def measured_files(self):
"""A set of all files that had been measured."""
return set(self._file_map)
def measured_contexts(self):
"""A set of all contexts that have been measured.
.. versionadded:: 5.0
"""
self._start_using()
with self._connect() as con:
contexts = {row[0] for row in con.execute("select distinct(context) from context")}
return contexts
def file_tracer(self, filename):
"""Get the plugin name of the file tracer for a file.
Returns the name of the plugin that handles this file. If the file was
measured, but didn't use a plugin, then "" is returned. If the file
was not measured, then None is returned.
"""
self._start_using()
with self._connect() as con:
file_id = self._file_id(filename)
if file_id is None:
return None
row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,))
if row is not None:
return row[0] or ""
return "" # File was measured, but no tracer associated.
def set_query_context(self, context):
"""Set a context for subsequent querying.
The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
calls will be limited to only one context. `context` is a string which
must match a context exactly. If it does not, no exception is raised,
but queries will return no data.
.. versionadded:: 5.0
"""
self._start_using()
with self._connect() as con:
cur = con.execute("select id from context where context = ?", (context,))
self._query_context_ids = [row[0] for row in cur.fetchall()]
def set_query_contexts(self, contexts):
"""Set a number of contexts for subsequent querying.
The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
calls will be limited to the specified contexts. `contexts` is a list
of Python regular expressions. Contexts will be matched using
:func:`re.search <python:re.search>`. Data will be included in query
results if they are part of any of the contexts matched.
.. versionadded:: 5.0
"""
self._start_using()
if contexts:
with self._connect() as con:
context_clause = " or ".join(["context regexp ?"] * len(contexts))
cur = con.execute("select id from context where " + context_clause, contexts)
self._query_context_ids = [row[0] for row in cur.fetchall()]
else:
self._query_context_ids = None
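    # Example (added, illustrative): restrict subsequent queries to contexts
    # recorded for tests, given that contexts are matched with re.search:
    #
    #     covdata.set_query_contexts([r"^test_"])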
def lines(self, filename):
"""Get the list of lines executed for a source file.
If the file was not measured, returns None. A file might be measured,
and have no lines executed, in which case an empty list is returned.
If the file was executed, returns a list of integers, the line numbers
executed in the file. The list is in no particular order.
"""
self._start_using()
if self.has_arcs():
arcs = self.arcs(filename)
if arcs is not None:
all_lines = itertools.chain.from_iterable(arcs)
return list({l for l in all_lines if l > 0})
with self._connect() as con:
file_id = self._file_id(filename)
if file_id is None:
return None
else:
query = "select numbits from line_bits where file_id = ?"
data = [file_id]
if self._query_context_ids is not None:
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and context_id in (" + ids_array + ")"
data += self._query_context_ids
bitmaps = list(con.execute(query, data))
nums = set()
for row in bitmaps:
nums.update(numbits_to_nums(row[0]))
return list(nums)
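    # Note (added): executed line numbers are stored as packed "numbits"
    # bitmaps; the numbits_to_nums/numbits_union helpers used here convert
    # between those bitmaps and plain integer line numbers.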
def arcs(self, filename):
"""Get the list of arcs executed for a file.
If the file was not measured, returns None. A file might be measured,
and have no arcs executed, in which case an empty list is returned.
If the file was executed, returns a list of 2-tuples of integers. Each
pair is a starting line number and an ending line number for a
transition from one line to another. The list is in no particular
order.
Negative numbers have special meaning. If the starting line number is
-N, it represents an entry to the code object that starts at line N.
        If the ending line number is -N, it's an exit from the code object that
starts at line N.
"""
self._start_using()
with self._connect() as con:
file_id = self._file_id(filename)
if file_id is None:
return None
else:
query = "select distinct fromno, tono from arc where file_id = ?"
data = [file_id]
if self._query_context_ids is not None:
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and context_id in (" + ids_array + ")"
data += self._query_context_ids
arcs = con.execute(query, data)
return list(arcs)
def contexts_by_lineno(self, filename):
"""Get the contexts for each line in a file.
Returns:
A dict mapping line numbers to a list of context names.
.. versionadded:: 5.0
"""
self._start_using()
with self._connect() as con:
file_id = self._file_id(filename)
if file_id is None:
return {}
lineno_contexts_map = collections.defaultdict(set)
if self.has_arcs():
query = (
"select arc.fromno, arc.tono, context.context " +
"from arc, context " +
"where arc.file_id = ? and arc.context_id = context.id"
)
data = [file_id]
if self._query_context_ids is not None:
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and arc.context_id in (" + ids_array + ")"
data += self._query_context_ids
for fromno, tono, context in con.execute(query, data):
if fromno > 0:
lineno_contexts_map[fromno].add(context)
if tono > 0:
lineno_contexts_map[tono].add(context)
else:
query = (
"select l.numbits, c.context from line_bits l, context c " +
"where l.context_id = c.id " +
"and file_id = ?"
)
data = [file_id]
if self._query_context_ids is not None:
ids_array = ", ".join("?" * len(self._query_context_ids))
query += " and l.context_id in (" + ids_array + ")"
data += self._query_context_ids
for numbits, context in con.execute(query, data):
for lineno in numbits_to_nums(numbits):
lineno_contexts_map[lineno].add(context)
return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()}
@classmethod
def sys_info(cls):
"""Our information for `Coverage.sys_info`.
Returns a list of (key, value) pairs.
"""
with SqliteDb(":memory:", debug=NoDebugging()) as db:
temp_store = [row[0] for row in db.execute("pragma temp_store")]
copts = [row[0] for row in db.execute("pragma compile_options")]
# Yes, this is overkill. I don't like the long list of options
# at the end of "debug sys", but I don't want to omit information.
copts = ["; ".join(copts[i:i + 3]) for i in range(0, len(copts), 3)]
return [
("sqlite3_version", sqlite3.version),
("sqlite3_sqlite_version", sqlite3.sqlite_version),
("sqlite3_temp_store", temp_store),
("sqlite3_compile_options", copts),
]
def filename_suffix(suffix):
"""Compute a filename suffix for a data file.
If `suffix` is a string or None, simply return it. If `suffix` is True,
then build a suffix incorporating the hostname, process id, and a random
number.
Returns a string or None.
"""
if suffix is True:
        # If data_suffix was a simple true value, then make a suffix with
        # plenty of distinguishing information. We compute it at the last
        # minute so that the pid will be correct even if the process forks.
dice = random.Random(os.urandom(8)).randint(0, 999999)
suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
return suffix
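# Illustrative sketch (added): what a truthy `suffix` expands to. The combined
# data file name shown in the comment is an assumption about how callers append
# the suffix to their basename.
def _example_filename_suffix():
    # e.g. "myhost.12345.678901", used by callers as ".coverage.myhost.12345.678901"
    return filename_suffix(True)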
class SqliteDb(SimpleReprMixin):
"""A simple abstraction over a SQLite database.
Use as a context manager, then you can use it like a
:class:`python:sqlite3.Connection` object::
with SqliteDb(filename, debug_control) as db:
db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,))
"""
def __init__(self, filename, debug):
self.debug = debug if debug.should("sql") else None
self.filename = filename
self.nest = 0
self.con = None
def _connect(self):
"""Connect to the db and do universal initialization."""
if self.con is not None:
return
# It can happen that Python switches threads while the tracer writes
# data. The second thread will also try to write to the data,
# effectively causing a nested context. However, given the idempotent
# nature of the tracer operations, sharing a connection among threads
# is not a problem.
if self.debug:
self.debug.write(f"Connecting to {self.filename!r}")
try:
self.con = sqlite3.connect(self.filename, check_same_thread=False)
except sqlite3.Error as exc:
raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc
self.con.create_function("REGEXP", 2, _regexp)
# This pragma makes writing faster. It disables rollbacks, but we never need them.
# PyPy needs the .close() calls here, or sqlite gets twisted up:
# https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
self.execute("pragma journal_mode=off").close()
# This pragma makes writing faster.
self.execute("pragma synchronous=off").close()
def close(self):
"""If needed, close the connection."""
if self.con is not None and self.filename != ":memory:":
self.con.close()
self.con = None
def __enter__(self):
if self.nest == 0:
self._connect()
self.con.__enter__()
self.nest += 1
return self
def __exit__(self, exc_type, exc_value, traceback):
self.nest -= 1
if self.nest == 0:
try:
self.con.__exit__(exc_type, exc_value, traceback)
self.close()
except Exception as exc:
if self.debug:
self.debug.write(f"EXCEPTION from __exit__: {exc}")
raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc
def execute(self, sql, parameters=()):
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug:
tail = f" with {parameters!r}" if parameters else ""
self.debug.write(f"Executing {sql!r}{tail}")
try:
try:
return self.con.execute(sql, parameters)
except Exception:
# In some cases, an error might happen that isn't really an
# error. Try again immediately.
# https://github.com/nedbat/coveragepy/issues/1010
return self.con.execute(sql, parameters)
except sqlite3.Error as exc:
msg = str(exc)
try:
# `execute` is the first thing we do with the database, so try
# hard to provide useful hints if something goes wrong now.
with open(self.filename, "rb") as bad_file:
cov4_sig = b"!coverage.py: This is a private format"
if bad_file.read(len(cov4_sig)) == cov4_sig:
msg = (
"Looks like a coverage 4.x data file. " +
"Are you mixing versions of coverage?"
)
except Exception: # pragma: cant happen
pass
if self.debug:
self.debug.write(f"EXCEPTION from execute: {msg}")
raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc
def execute_one(self, sql, parameters=()):
"""Execute a statement and return the one row that results.
This is like execute(sql, parameters).fetchone(), except it is
correct in reading the entire result set. This will raise an
exception if more than one row results.
Returns a row, or None if there were no rows.
"""
rows = list(self.execute(sql, parameters))
if len(rows) == 0:
return None
elif len(rows) == 1:
return rows[0]
else:
raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")
def executemany(self, sql, data):
"""Same as :meth:`python:sqlite3.Connection.executemany`."""
if self.debug:
data = list(data)
self.debug.write(f"Executing many {sql!r} with {len(data)} rows")
try:
return self.con.executemany(sql, data)
except Exception: # pragma: cant happen
# In some cases, an error might happen that isn't really an
# error. Try again immediately.
# https://github.com/nedbat/coveragepy/issues/1010
return self.con.executemany(sql, data)
def executescript(self, script):
"""Same as :meth:`python:sqlite3.Connection.executescript`."""
if self.debug:
self.debug.write("Executing script with {} chars: {}".format(
len(script), clipped_repr(script, 100),
))
self.con.executescript(script)
def dump(self):
"""Return a multi-line string, the SQL dump of the database."""
return "\n".join(self.con.iterdump())
def _regexp(text, pattern):
"""A regexp function for SQLite."""
return re.search(text, pattern) is not None
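# Minimal usage sketch (added, illustrative): reading an existing data file and
# listing executed lines per measured file. Assumes the surrounding class is
# coverage's CoverageData and that it accepts a `basename`, as in coverage 5.x.
def _example_dump_lines(path=".coverage"):
    data = CoverageData(basename=path)
    data.read()
    for filename in sorted(data.measured_files()):
        print(filename, sorted(data.lines(filename) or []))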
|
|
import threading
from ctypes import POINTER, Structure, byref, c_byte, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import (
GEOM_PTR, GEOSFuncFactory, geos_version_tuple,
)
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_byte])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_byte)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_byte])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
ptr_type = WKT_READ_PTR
destructor = wkt_reader_destroy
def read(self, wkt):
if not isinstance(wkt, (bytes, str)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
ptr_type = WKB_READ_PTR
destructor = wkb_reader_destroy
def read(self, wkb):
"Return a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, str)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
ptr_type = WKT_WRITE_PTR
destructor = wkt_writer_destroy
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super().__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Return the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, self._trim)
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
geos_version = geos_version_tuple()
def __init__(self, dim=2):
super().__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Return the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if self.geos_version < (3, 6, 1) and isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return memoryview(wkb)
def write_hex(self, geom):
"Return the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if self.geos_version < (3, 6, 1) and isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(wkb_writer_get_include_srid(self.ptr))
@srid.setter
def srid(self, include):
wkb_writer_set_include_srid(self.ptr, bool(include))
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
thread_context.wkt_r = thread_context.wkt_r or _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
thread_context.wkb_r = thread_context.wkb_r or _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
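# Minimal usage sketch (added, illustrative): round-tripping a geometry through
# the thread-local WKT reader/writer defined above. GEOSGeometry is imported
# lazily to avoid a circular import, mirroring the lazy imports used elsewhere
# in this module.
def _example_wkt_roundtrip():
    from django.contrib.gis.geos import GEOSGeometry
    geom = GEOSGeometry('POINT (5 23)')
    wkt = wkt_w(dim=2, trim=True).write(geom)  # bytes, e.g. b'POINT (5 23)'
    return wkt_r().read(wkt)                   # pointer to a new GEOS geometry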
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from unittest import TestCase
import jinja2
from alluratest.tools import assert_equal, assert_raises
import mock
from tg import config
from allura.lib.package_path_loader import PackagePathLoader
class TestPackagePathLoader(TestCase):
@mock.patch('pkg_resources.resource_filename')
@mock.patch('pkg_resources.iter_entry_points')
def test_load_paths(self, iter_entry_points, resource_filename):
eps = iter_entry_points.return_value.__iter__.return_value = [
mock.Mock(ep_name='ep0', module_name='eps.ep0'),
mock.Mock(ep_name='ep1', module_name='eps.ep1'),
mock.Mock(ep_name='ep2', module_name='eps.ep2'),
]
for ep in eps:
ep.name = ep.ep_name
resource_filename.side_effect = lambda m, r: 'path:' + m
paths = PackagePathLoader()._load_paths()
assert_equal(paths, [
['site-theme', None],
['ep0', 'path:eps.ep0'],
['ep1', 'path:eps.ep1'],
['ep2', 'path:eps.ep2'],
['allura', '/'],
])
assert_equal(type(paths[0]), list)
assert_equal(resource_filename.call_args_list, [
mock.call('eps.ep0', ''),
mock.call('eps.ep1', ''),
mock.call('eps.ep2', ''),
])
@mock.patch('pkg_resources.iter_entry_points')
def test_load_rules(self, iter_entry_points):
eps = iter_entry_points.return_value.__iter__.return_value = [
mock.Mock(ep_name='ep0', rules=[('>', 'allura')]),
mock.Mock(ep_name='ep1', rules=[('=', 'allura')]),
mock.Mock(ep_name='ep2', rules=[('<', 'allura')]),
]
for ep in eps:
ep.name = ep.ep_name
ep.load.return_value.template_path_rules = ep.rules
order_rules, replacement_rules = PackagePathLoader()._load_rules()
assert_equal(order_rules, [('ep0', 'allura'), ('allura', 'ep2')])
assert_equal(replacement_rules, {'allura': 'ep1'})
eps = iter_entry_points.return_value.__iter__.return_value = [
mock.Mock(ep_name='ep0', rules=[('?', 'allura')]),
]
for ep in eps:
ep.name = ep.ep_name
ep.load.return_value.template_path_rules = ep.rules
assert_raises(jinja2.TemplateError, PackagePathLoader()._load_rules)
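    # Note (added): as exercised above, template_path_rules entries use
    # ('>', name) to order a plugin's templates before `name`, ('<', name) to
    # order them after it, and ('=', name) to replace `name`'s path outright;
    # an unknown operator such as '?' raises jinja2.TemplateError.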
def test_replace_signposts(self):
ppl = PackagePathLoader()
ppl._replace_signpost = mock.Mock()
paths = [
['site-theme', None],
['ep0', '/ep0'],
['ep1', '/ep1'],
['ep2', '/ep2'],
['allura', '/'],
]
rules = OrderedDict([
('allura', 'ep2'),
('site-theme', 'ep1'),
('foo', 'ep1'),
('ep0', 'bar'),
])
ppl._replace_signposts(paths, rules)
assert_equal(paths, [
['site-theme', '/ep1'],
['ep0', '/ep0'],
['allura', '/ep2'],
])
def test_sort_paths(self):
paths = [
['site-theme', None],
['ep0', '/ep0'],
['ep1', '/ep1'],
['ep2', '/ep2'],
['ep3', '/ep3'],
['allura', '/'],
]
rules = [
('allura', 'ep0'),
('ep3', 'ep1'),
('ep2', 'ep1'),
('ep4', 'ep1'), # rules referencing missing paths
('ep2', 'ep5'),
]
PackagePathLoader()._sort_paths(paths, rules)
assert_equal(paths, [
['site-theme', None],
['ep2', '/ep2'],
['ep3', '/ep3'],
['ep1', '/ep1'],
['allura', '/'],
['ep0', '/ep0'],
])
def test_init_paths(self):
paths = [
['root', '/'],
['none', None],
['tail', '/tail'],
]
ppl = PackagePathLoader()
ppl._load_paths = mock.Mock(return_value=paths)
ppl._load_rules = mock.Mock(return_value=('order_rules', 'repl_rules'))
ppl._replace_signposts = mock.Mock()
ppl._sort_paths = mock.Mock()
output = ppl.init_paths()
ppl._load_paths.assert_called_once_with()
ppl._load_rules.assert_called_once_with()
ppl._sort_paths.assert_called_once_with(paths, 'order_rules')
ppl._replace_signposts.assert_called_once_with(paths, 'repl_rules')
assert_equal(output, ['/', '/tail'])
@mock.patch('jinja2.FileSystemLoader')
def test_fs_loader(self, FileSystemLoader):
ppl = PackagePathLoader()
ppl.init_paths = mock.Mock(return_value=['path1', 'path2'])
FileSystemLoader.return_value = 'fs_loader'
output1 = ppl.fs_loader
output2 = ppl.fs_loader
ppl.init_paths.assert_called_once_with()
FileSystemLoader.assert_called_once_with(['path1', 'path2'])
assert_equal(output1, 'fs_loader')
assert output1 is output2
@mock.patch.dict(config, {'disable_template_overrides': False})
@mock.patch('jinja2.FileSystemLoader')
def test_get_source(self, fs_loader):
ppl = PackagePathLoader()
ppl.init_paths = mock.Mock()
fs_loader().get_source.return_value = 'fs_load'
# override exists
output = ppl.get_source('env', 'allura.ext.admin:templates/audit.html')
assert_equal(output, 'fs_load')
fs_loader().get_source.assert_called_once_with(
'env', 'override/allura/ext/admin/templates/audit.html')
fs_loader().get_source.reset_mock()
fs_loader().get_source.side_effect = [
jinja2.TemplateNotFound('test'), 'fs_load']
with mock.patch('pkg_resources.resource_filename') as rf:
rf.return_value = 'resource'
# no override, ':' in template
output = ppl.get_source(
'env', 'allura.ext.admin:templates/audit.html')
rf.assert_called_once_with(
'allura.ext.admin', 'templates/audit.html')
assert_equal(output, 'fs_load')
assert_equal(fs_loader().get_source.call_count, 2)
fs_loader().get_source.assert_called_with('env', 'resource')
fs_loader().get_source.reset_mock()
fs_loader().get_source.side_effect = [
jinja2.TemplateNotFound('test'), 'fs_load']
# no override, ':' not in template
output = ppl.get_source('env', 'templates/audit.html')
assert_equal(output, 'fs_load')
assert_equal(fs_loader().get_source.call_count, 2)
fs_loader().get_source.assert_called_with(
'env', 'templates/audit.html')
@mock.patch('jinja2.FileSystemLoader')
def test_override_disable(self, fs_loader):
ppl = PackagePathLoader()
ppl.init_paths = mock.Mock()
fs_loader().get_source.side_effect = jinja2.TemplateNotFound('test')
assert_raises(
jinja2.TemplateError,
ppl.get_source, 'env', 'allura.ext.admin:templates/audit.html')
assert_equal(fs_loader().get_source.call_count, 1)
fs_loader().get_source.reset_mock()
with mock.patch.dict(config, {'disable_template_overrides': False}):
assert_raises(
jinja2.TemplateError,
ppl.get_source, 'env', 'allura.ext.admin:templates/audit.html')
assert_equal(fs_loader().get_source.call_count, 2)
|
|
# Copyright (c) 2014 X-IO Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_log import log as logging
from jacket import context
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit import utils
from jacket.storage.volume.drivers import xio
from jacket.storage.volume import qos_specs
from jacket.storage.volume import volume_types
LOG = logging.getLogger("storage.volume.driver")
ISE_IP1 = '10.12.12.1'
ISE_IP2 = '10.11.12.2'
ISE_ISCSI_IP1 = '1.2.3.4'
ISE_ISCSI_IP2 = '1.2.3.5'
ISE_GID = 'isegid'
ISE_IQN = ISE_GID
ISE_WWN1 = ISE_GID + '1'
ISE_WWN2 = ISE_GID + '2'
ISE_WWN3 = ISE_GID + '3'
ISE_WWN4 = ISE_GID + '4'
ISE_TARGETS = [ISE_WWN1, ISE_WWN2, ISE_WWN3, ISE_WWN4]
ISE_INIT_TARGET_MAP = {'init_wwn1': ISE_TARGETS,
'init_wwn2': ISE_TARGETS}
VOLUME_SIZE = 10
NEW_VOLUME_SIZE = 20
VOLUME1 = {'id': '1', 'name': 'volume1',
'size': VOLUME_SIZE, 'volume_type_id': 'type1'}
VOLUME2 = {'id': '2', 'name': 'volume2',
'size': VOLUME_SIZE, 'volume_type_id': 'type2',
'provider_auth': 'CHAP abc abc'}
VOLUME3 = {'id': '3', 'name': 'volume3',
'size': VOLUME_SIZE, 'volume_type_id': None}
SNAPSHOT1 = {'name': 'snapshot1',
'volume_name': VOLUME1['name'],
'volume_type_id': 'type3'}
CLONE1 = {'id': '3', 'name': 'clone1',
'size': VOLUME_SIZE, 'volume_type_id': 'type4'}
HOST1 = 'host1'
HOST2 = 'host2'
ISCSI_CONN1 = {'initiator': 'init_iqn1',
'host': HOST1}
ISCSI_CONN2 = {'initiator': 'init_iqn2',
'host': HOST2}
FC_CONN1 = {'wwpns': ['init_wwn1', 'init_wwn2'],
'host': HOST1}
FC_CONN2 = {'wwpns': ['init_wwn3', 'init_wwn4'],
'host': HOST2}
ISE_HTTP_IP = 'http://' + ISE_IP1
ISE_HOST_LOCATION = '/storage/hosts/1'
ISE_HOST_LOCATION_URL = ISE_HTTP_IP + ISE_HOST_LOCATION
ISE_VOLUME1_LOCATION = '/storage/volumes/volume1'
ISE_VOLUME1_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME1_LOCATION
ISE_VOLUME2_LOCATION = '/storage/volumes/volume2'
ISE_VOLUME2_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME2_LOCATION
ISE_VOLUME3_LOCATION = '/storage/volumes/volume3'
ISE_VOLUME3_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME3_LOCATION
ISE_SNAPSHOT_LOCATION = '/storage/volumes/snapshot1'
ISE_SNAPSHOT_LOCATION_URL = ISE_HTTP_IP + ISE_SNAPSHOT_LOCATION
ISE_CLONE_LOCATION = '/storage/volumes/clone1'
ISE_CLONE_LOCATION_URL = ISE_HTTP_IP + ISE_CLONE_LOCATION
ISE_ALLOCATION_LOCATION = '/storage/allocations/a1'
ISE_ALLOCATION_LOCATION_URL = ISE_HTTP_IP + ISE_ALLOCATION_LOCATION
ISE_GET_QUERY_XML =\
"""<array>
<globalid>ABC12345</globalid>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
<capability value="49006" string="Clones" type="source"/>
</capabilities>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_XML.split())}
ISE_GET_QUERY_NO_CAP_XML =\
"""<array>
<globalid>ABC12345</globalid>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_NO_CAP_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_CAP_XML.split())}
ISE_GET_QUERY_NO_CTRL_XML =\
"""<array>
<globalid>ABC12345</globalid>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
<capability value="49006" string="Clones" type="source"/>
<capability value="49007" string="Thin clones" type="source"/>
<capability value="49007" string="Thin clones" type="source"/>
</capabilities>
</array>"""
ISE_GET_QUERY_NO_CTRL_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_CTRL_XML.split())}
ISE_GET_QUERY_NO_IP_XML =\
"""<array>
<globalid>ABC12345</globalid>
<capabilities>
<test value="1"/>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
<capability value="49006" string="Clones" type="source"/>
<capability value="49007" string="Thin clones" type="source"/>
<capability value="49007" string="Thin clones" type="source"/>
</capabilities>
<controllers>
<test value="2"/>
<controller>
<rank value="1"/>
</controller>
<controller>
<rank value="0"/>
</controller>
</controllers>
</array>"""
ISE_GET_QUERY_NO_IP_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_IP_XML.split())}
ISE_GET_QUERY_NO_GID_XML =\
"""<array>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
<capability value="49006" string="Clones" type="source"/>
</capabilities>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_NO_GID_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_GID_XML.split())}
ISE_GET_QUERY_NO_CLONE_XML =\
"""<array>
<globalid>ABC12345</globalid>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
</capabilities>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_NO_CLONE_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_CLONE_XML.split())}
ISE_GET_STORAGE_POOLS_XML =\
"""
<pools>
<pool>
<name>Pool 1</name>
<id>1</id>
<status value="0" string="Operational">
<details value="0x00000000">
<detail>None</detail>
</details>
</status>
<available total="60">
<byredundancy>
<raid-0>60</raid-0>
<raid-1>30</raid-1>
<raid-5>45</raid-5>
</byredundancy>
</available>
<used total="40">
<byredundancy>
<raid-0>0</raid-0>
<raid-1>40</raid-1>
<raid-5>0</raid-5>
</byredundancy>
</used>
<media>
<medium>
<health>100</health>
<tier value="4" string="Hybrid"/>
</medium>
</media>
<volumes>
<volume>
<globalid>volgid</globalid>
</volume>
<volume>
<globalid>volgid2</globalid>
</volume>
</volumes>
</pool>
</pools>
"""
ISE_GET_STORAGE_POOLS_RESP =\
{'status': 200,
'location': 'Pool location',
'content': " ".join(ISE_GET_STORAGE_POOLS_XML.split())}
ISE_GET_VOL_STATUS_NO_VOL_NODE_XML =\
"""<volumes></volumes>"""
ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_GET_VOL_STATUS_NO_VOL_NODE_XML.split())}
ISE_GET_VOL_STATUS_NO_STATUS_XML =\
"""<volumes>
<volume self="%s">
</volume>
</volumes>""" % (ISE_VOLUME1_LOCATION_URL)
ISE_GET_VOL_STATUS_NO_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_GET_VOL_STATUS_NO_STATUS_XML.split())}
ISE_GET_VOL1_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
<size>10</size>
</volume>
</volumes>""" % (ISE_VOLUME1_LOCATION_URL)
ISE_GET_VOL1_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_GET_VOL1_STATUS_XML.split())}
ISE_GET_VOL2_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_VOLUME2_LOCATION_URL)
ISE_GET_VOL2_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME2_LOCATION_URL,
'content': " ".join(ISE_GET_VOL2_STATUS_XML.split())}
ISE_GET_VOL3_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_VOLUME3_LOCATION_URL)
ISE_GET_VOL3_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME3_LOCATION_URL,
'content': " ".join(ISE_GET_VOL3_STATUS_XML.split())}
ISE_GET_SNAP1_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_SNAPSHOT_LOCATION_URL)
ISE_GET_SNAP1_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_SNAPSHOT_LOCATION_URL,
'content': " ".join(ISE_GET_SNAP1_STATUS_XML.split())}
ISE_GET_CLONE1_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_CLONE_LOCATION_URL)
ISE_GET_CLONE1_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_CLONE_LOCATION_URL,
'content': " ".join(ISE_GET_CLONE1_STATUS_XML.split())}
ISE_CREATE_VOLUME_XML = """<volume/>"""
ISE_CREATE_VOLUME_RESP =\
{'status': 201,
'location': ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_CREATE_VOLUME_XML.split())}
ISE_GET_IONETWORKS_XML =\
"""<chap>
<chapin value="0" string="disabled">
<username/>
<password/>
</chapin>
<chapout value="0" string="disabled">
<username/>
<password/>
</chapout>
</chap>"""
ISE_GET_IONETWORKS_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_IONETWORKS_XML.split())}
ISE_GET_IONETWORKS_CHAP_XML =\
"""<chap>
<chapin value="1" string="disabled">
<username>abc</username>
<password>abc</password>
</chapin>
<chapout value="0" string="disabled">
<username/>
<password/>
</chapout>
</chap>"""
ISE_GET_IONETWORKS_CHAP_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_IONETWORKS_CHAP_XML.split())}
ISE_DELETE_VOLUME_XML = """<volumes/>"""
ISE_DELETE_VOLUME_RESP =\
{'status': 204,
'location': '',
'content': " ".join(ISE_DELETE_VOLUME_XML.split())}
ISE_GET_ALLOC_WITH_EP_XML =\
"""<allocations>
<allocation self="%s">
<volume>
<volumename>%s</volumename>
</volume>
<endpoints>
<hostname>%s</hostname>
</endpoints>
<lun>1</lun>
</allocation>
</allocations>""" %\
(ISE_ALLOCATION_LOCATION_URL, VOLUME1['name'], HOST1)
ISE_GET_ALLOC_WITH_EP_RESP =\
{'status': 200,
'location': ISE_ALLOCATION_LOCATION_URL,
'content': " ".join(ISE_GET_ALLOC_WITH_EP_XML.split())}
ISE_GET_ALLOC_WITH_NO_ALLOC_XML =\
"""<allocations self="%s"/>""" % ISE_ALLOCATION_LOCATION_URL
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP =\
{'status': 200,
'location': ISE_ALLOCATION_LOCATION_URL,
'content': " ".join(ISE_GET_ALLOC_WITH_NO_ALLOC_XML.split())}
ISE_DELETE_ALLOC_XML = """<allocations/>"""
ISE_DELETE_ALLOC_RESP =\
{'status': 204,
'location': '',
'content': " ".join(ISE_DELETE_ALLOC_XML.split())}
ISE_GET_HOSTS_NOHOST_XML =\
"""<hosts self="http://ip/storage/hosts"/>"""
ISE_GET_HOSTS_NOHOST_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_NOHOST_XML.split())}
ISE_GET_HOSTS_HOST1_XML =\
"""<hosts self="http://ip/storage/hosts">
<host self="http://ip/storage/hosts/1">
<type>"OPENSTACK"</type>
<name>%s</name>
<id>1</id>
<endpoints self="http://ip/storage/endpoints">
<endpoint self="http://ip/storage/endpoints/ep1">
<globalid>init_wwn1</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep2">
<globalid>init_wwn2</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep1">
<globalid>init_iqn1</globalid>
</endpoint>
</endpoints>
</host>
</hosts>""" % HOST1
ISE_GET_HOSTS_HOST1_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_HOST1_XML.split())}
ISE_GET_HOSTS_HOST1_HOST_TYPE_XML =\
"""<hosts self="http://ip/storage/hosts">
<host self="http://ip/storage/hosts/1">
<type>"WINDOWS"</type>
<name>%s</name>
<id>1</id>
<endpoints self="http://ip/storage/endpoints">
<endpoint self="http://ip/storage/endpoints/ep1">
<globalid>init_wwn1</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep2">
<globalid>init_wwn2</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep1">
<globalid>init_iqn1</globalid>
</endpoint>
</endpoints>
</host>
</hosts>""" % HOST1
ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_HOST1_HOST_TYPE_XML.split())}
ISE_GET_HOSTS_HOST2_XML =\
"""<hosts self="http://ip/storage/hosts">
<host self="http://ip/storage/hosts/2">
<name>%s</name>
<id>2</id>
<endpoints self="http://ip/storage/endpoints">
<endpoint self="http://ip/storage/endpoints/ep3">
<globalid>init_wwn3</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep4">
<globalid>init_wwn4</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep3">
<globalid>init_iqn2</globalid>
</endpoint>
</endpoints>
</host>
</hosts>""" % HOST2
ISE_GET_HOSTS_HOST2_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_HOST2_XML.split())}
ISE_CREATE_HOST_XML =\
"""<hosts self="http://ip/storage/hosts"/>"""
ISE_CREATE_HOST_RESP =\
{'status': 201,
'location': 'http://ip/storage/hosts/host1',
'content': " ".join(ISE_CREATE_HOST_XML.split())}
ISE_CREATE_ALLOC_XML =\
"""<allocations self="http://ip/storage/allocations"/>"""
ISE_CREATE_ALLOC_RESP =\
{'status': 201,
'location': ISE_ALLOCATION_LOCATION_URL,
'content': " ".join(ISE_CREATE_ALLOC_XML.split())}
ISE_GET_ENDPOINTS_XML =\
"""<endpoints self="http://ip/storage/endpoints">
<endpoint type="array" self="http://ip/storage/endpoints/isegid">
<globalid>isegid</globalid>
<protocol>iSCSI</protocol>
<array self="http://ip/storage/arrays/ise1">
<globalid>ise1</globalid>
</array>
<host/>
<allocations self="http://ip/storage/allocations">
<allocation self="%s">
<globalid>
a1
</globalid>
</allocation>
</allocations>
</endpoint>
<endpoint type="array" self="http://ip/storage/endpoints/isegid">
<globalid>isegid</globalid>
<protocol>Fibre Channel</protocol>
<array self="http://ip/storage/arrays/ise1">
<globalid>ise1</globalid>
</array>
<host/>
<allocations self="http://ip/storage/allocations">
<allocation self="%s">
<globalid>
a1
</globalid>
</allocation>
</allocations>
</endpoint>
</endpoints>""" % (ISE_ALLOCATION_LOCATION_URL,
ISE_ALLOCATION_LOCATION_URL)
ISE_GET_ENDPOINTS_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_ENDPOINTS_XML.split())}
ISE_GET_CONTROLLERS_XML =\
"""<controllers self="http://ip/storage/arrays/controllers">
<controller>
<status/>
<ioports>
<ioport>
<ipaddresses>
<ipaddress>%s</ipaddress>
</ipaddresses>
<endpoint>
<globalid>isegid</globalid>
</endpoint>
</ioport>
</ioports>
<fcports>
<fcport>
<wwn>%s</wwn>
</fcport>
<fcport>
<wwn>%s</wwn>
</fcport>
</fcports>
</controller>
<controller>
<status/>
<ioports>
<ioport>
<ipaddresses>
<ipaddress>%s</ipaddress>
</ipaddresses>
<endpoint>
<globalid>isegid</globalid>
</endpoint>
</ioport>
</ioports>
<fcports>
<fcport>
<wwn>%s</wwn>
</fcport>
<fcport>
<wwn>%s</wwn>
</fcport>
</fcports>
</controller>
</controllers>""" % (ISE_ISCSI_IP1, ISE_WWN1, ISE_WWN2,
ISE_ISCSI_IP2, ISE_WWN3, ISE_WWN4)
ISE_GET_CONTROLLERS_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_CONTROLLERS_XML.split())}
ISE_CREATE_SNAPSHOT_XML = """<snapshot/>"""
ISE_CREATE_SNAPSHOT_RESP =\
{'status': 201,
'location': ISE_SNAPSHOT_LOCATION_URL,
'content': " ".join(ISE_CREATE_SNAPSHOT_XML.split())}
ISE_PREP_SNAPSHOT_XML = """<snapshot/>"""
ISE_PREP_SNAPSHOT_RESP =\
{'status': 202,
'location': ISE_SNAPSHOT_LOCATION_URL,
'content': " ".join(ISE_PREP_SNAPSHOT_XML.split())}
ISE_MODIFY_VOLUME_XML = """<volume/>"""
ISE_MODIFY_VOLUME_RESP =\
{'status': 201,
'location': ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_MODIFY_VOLUME_XML.split())}
ISE_MODIFY_HOST_XML = """<host/>"""
ISE_MODIFY_HOST_RESP =\
{'status': 201,
'location': ISE_HOST_LOCATION_URL,
'content': " ".join(ISE_MODIFY_HOST_XML.split())}
ISE_BAD_CONNECTION_RESP =\
{'status': 0,
'location': '',
'content': " "}
ISE_400_RESP =\
{'status': 400,
'location': '',
'content': ""}
ISE_GET_VOL_STATUS_404_XML = \
"""<response value="404" index="3">VOLUME not found.</response>"""
ISE_GET_VOL_STATUS_404_RESP =\
{'status': 404,
'location': '',
'content': " ".join(ISE_GET_VOL_STATUS_404_XML.split())}
ISE_400_INVALID_STATE_XML = \
"""<response value="400">Not in a valid state.</response>"""
ISE_400_INVALID_STATE_RESP =\
{'status': 400,
'location': '',
'content': " ".join(ISE_400_INVALID_STATE_XML.split())}
ISE_409_CONFLICT_XML = \
"""<response value="409">Conflict</response>"""
ISE_409_CONFLICT_RESP =\
{'status': 409,
'location': '',
'content': " ".join(ISE_409_CONFLICT_XML.split())}
DRIVER = "storage.volume.drivers.xio.XIOISEDriver"
@mock.patch(DRIVER + "._opener", autospec=True)
class XIOISEDriverTestCase(object):
# Test cases for X-IO volume driver
def setUp(self):
super(XIOISEDriverTestCase, self).setUp()
# set good default values
self.configuration = mock.Mock()
self.configuration.san_ip = ISE_IP1
self.configuration.san_user = 'fakeuser'
self.configuration.san_password = 'fakepass'
self.configuration.iscsi_ip_address = ISE_ISCSI_IP1
self.configuration.driver_use_ssl = False
self.configuration.ise_completion_retries = 30
self.configuration.ise_connection_retries = 5
self.configuration.ise_retry_interval = 1
self.configuration.volume_backend_name = 'ise1'
self.driver = None
self.protocol = ''
self.connector = None
self.connection_failures = 0
self.hostgid = ''
self.use_response_table = 1
def setup_test(self, protocol):
self.protocol = protocol
# set good default values
if self.protocol == 'iscsi':
self.configuration.ise_protocol = protocol
self.connector = ISCSI_CONN1
self.hostgid = self.connector['initiator']
elif self.protocol == 'fibre_channel':
self.configuration.ise_protocol = protocol
self.connector = FC_CONN1
self.hostgid = self.connector['wwpns'][0]
def setup_driver(self):
# this setups up driver object with previously set configuration values
if self.configuration.ise_protocol == 'iscsi':
self.driver =\
xio.XIOISEISCSIDriver(configuration=self.configuration)
elif self.configuration.ise_protocol == 'fibre_channel':
self.driver =\
xio.XIOISEFCDriver(configuration=self.configuration)
elif self.configuration.ise_protocol == 'test_prot':
# if test_prot specified override with correct protocol
# used to bypass protocol specific driver
self.configuration.ise_protocol = self.protocol
self.driver = xio.XIOISEDriver(configuration=self.configuration)
else:
# Invalid protocol type
raise exception.Invalid()
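    # Note (added): each test below primes mock_req.side_effect with an ordered
    # list of the canned ISE_*_RESP dictionaries; the patched _opener then hands
    # them back one per REST call the driver is expected to make.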
#################################
# UNIT TESTS #
#################################
def test_do_setup(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.driver.do_setup(None)
def test_negative_do_setup_no_clone_support(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_CLONE_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.do_setup, None)
def test_negative_do_setup_no_capabilities(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_CAP_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.do_setup, None)
def test_negative_do_setup_no_ctrl(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_CTRL_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.do_setup, None)
def test_negative_do_setup_no_ipaddress(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_IP_RESP])
self.driver.do_setup(None)
def test_negative_do_setup_bad_globalid_none(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_GID_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.do_setup, None)
def test_check_for_setup_error(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.driver.check_for_setup_error()
def test_negative_do_setup_bad_ip(self, mock_req):
# set san_ip to bad value
self.configuration.san_ip = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.check_for_setup_error)
def test_negative_do_setup_bad_user_blank(self, mock_req):
        # set san_login to a bad (blank) value
        self.configuration.san_login = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.check_for_setup_error)
def test_negative_do_setup_bad_password_blank(self, mock_req):
# set san_password to bad value
self.configuration.san_password = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.check_for_setup_error)
def test_get_volume_stats(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_STORAGE_POOLS_RESP])
backend_name = self.configuration.volume_backend_name
if self.configuration.ise_protocol == 'iscsi':
protocol = 'iSCSI'
else:
protocol = 'fibre_channel'
exp_result = {'vendor_name': "X-IO",
'driver_version': "1.1.4",
'volume_backend_name': backend_name,
'reserved_percentage': 0,
'total_capacity_gb': 100,
'free_capacity_gb': 60,
'QoS_support': True,
'affinity': True,
'thin': False,
'pools': [{'pool_ise_name': "Pool 1",
'pool_name': "1",
'status': "Operational",
'status_details': "None",
'free_capacity_gb': 60,
'free_capacity_gb_raid_0': 60,
'free_capacity_gb_raid_1': 30,
'free_capacity_gb_raid_5': 45,
'allocated_capacity_gb': 40,
'allocated_capacity_gb_raid_0': 0,
'allocated_capacity_gb_raid_1': 40,
'allocated_capacity_gb_raid_5': 0,
'health': 100,
'media': "Hybrid",
'total_capacity_gb': 100,
'QoS_support': True,
'reserved_percentage': 0}],
'active_volumes': 2,
'storage_protocol': protocol}
act_result = self.driver.get_volume_stats(True)
self.assertDictMatch(exp_result, act_result)
def test_get_volume_stats_ssl(self, mock_req):
self.configuration.driver_use_ssl = True
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_STORAGE_POOLS_RESP])
self.driver.get_volume_stats(True)
def test_negative_get_volume_stats_bad_primary(self, mock_req):
self.configuration.ise_connection_retries = 1
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_BAD_CONNECTION_RESP,
ISE_GET_STORAGE_POOLS_RESP])
self.driver.get_volume_stats(True)
def test_create_volume(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_IONETWORKS_RESP])
            exp_result = {"provider_auth": ""}
act_result = self.driver.create_volume(VOLUME1)
self.assertDictMatch(exp_result, act_result)
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.create_volume(VOLUME1)
def test_create_volume_chap(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_IONETWORKS_CHAP_RESP])
exp_result = {"provider_auth": "CHAP abc abc"}
act_result = self.driver.create_volume(VOLUME1)
self.assertDictMatch(exp_result, act_result)
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.create_volume(VOLUME1)
def test_create_volume_type_none(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_IONETWORKS_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.create_volume(VOLUME3)
def test_delete_volume(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_DELETE_VOLUME_RESP,
ISE_GET_VOL_STATUS_404_RESP])
self.setup_driver()
self.driver.delete_volume(VOLUME1)
def test_delete_volume_delayed(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_DELETE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_VOL_STATUS_404_RESP])
self.setup_driver()
self.driver.delete_volume(VOLUME1)
def test_delete_volume_timeout(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_DELETE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.configuration.ise_completion_retries = 3
self.setup_driver()
self.driver.delete_volume(VOLUME1)
def test_delete_volume_non_existing(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.setup_driver()
self.driver.delete_volume(VOLUME2)
def test_initialize_connection_positive(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST2_RESP,
ISE_CREATE_HOST_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_CREATE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP])
self.setup_driver()
exp_result = {}
if self.configuration.ise_protocol == 'iscsi':
exp_result = {"driver_volume_type": "iscsi",
"data": {"target_lun": 1,
"volume_id": '1',
"target_discovered": False,
"target_iqn": ISE_IQN,
"target_portal": ISE_ISCSI_IP1 + ":3260"}}
elif self.configuration.ise_protocol == 'fibre_channel':
exp_result = {"driver_volume_type": "fibre_channel",
"data": {"target_lun": 1,
"volume_id": '1',
"target_discovered": True,
"initiator_target_map": ISE_INIT_TARGET_MAP,
"target_wwn": ISE_TARGETS}}
act_result =\
self.driver.initialize_connection(VOLUME1, self.connector)
self.assertDictMatch(exp_result, act_result)
def test_initialize_connection_positive_host_type(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP,
ISE_MODIFY_HOST_RESP,
ISE_CREATE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP])
self.setup_driver()
exp_result = {}
if self.configuration.ise_protocol == 'iscsi':
exp_result = {"driver_volume_type": "iscsi",
"data": {"target_lun": 1,
"volume_id": '1',
"target_discovered": False,
"target_iqn": ISE_IQN,
"target_portal": ISE_ISCSI_IP1 + ":3260"}}
elif self.configuration.ise_protocol == 'fibre_channel':
exp_result = {"driver_volume_type": "fibre_channel",
"data": {"target_lun": 1,
"volume_id": '1',
"target_discovered": True,
"initiator_target_map": ISE_INIT_TARGET_MAP,
"target_wwn": ISE_TARGETS}}
act_result =\
self.driver.initialize_connection(VOLUME1, self.connector)
self.assertDictMatch(exp_result, act_result)
def test_initialize_connection_positive_chap(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST2_RESP,
ISE_CREATE_HOST_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_CREATE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP])
self.setup_driver()
exp_result = {}
if self.configuration.ise_protocol == 'iscsi':
exp_result = {"driver_volume_type": "iscsi",
"data": {"target_lun": 1,
"volume_id": '2',
"target_discovered": False,
"target_iqn": ISE_IQN,
"target_portal": ISE_ISCSI_IP1 + ":3260",
'auth_method': 'CHAP',
'auth_username': 'abc',
'auth_password': 'abc'}}
elif self.configuration.ise_protocol == 'fibre_channel':
exp_result = {"driver_volume_type": "fibre_channel",
"data": {"target_lun": 1,
"volume_id": '2',
"target_discovered": True,
"initiator_target_map": ISE_INIT_TARGET_MAP,
"target_wwn": ISE_TARGETS}}
act_result =\
self.driver.initialize_connection(VOLUME2, self.connector)
self.assertDictMatch(exp_result, act_result)
def test_initialize_connection_negative_no_host(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST2_RESP,
ISE_CREATE_HOST_RESP,
ISE_GET_HOSTS_HOST2_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.initialize_connection,
VOLUME2, self.connector)
def test_initialize_connection_negative_host_type(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP,
ISE_400_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.initialize_connection,
VOLUME2, self.connector)
def test_terminate_connection_positive(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_DELETE_ALLOC_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_DELETE_ALLOC_RESP])
self.driver.terminate_connection(VOLUME1, self.connector)
def test_terminate_connection_positive_noalloc(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_DELETE_ALLOC_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
ISE_GET_CONTROLLERS_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_DELETE_ALLOC_RESP])
self.driver.terminate_connection(VOLUME1, self.connector)
def test_negative_terminate_connection_bad_host(self, mock_req):
self.setup_driver()
test_connector = {}
if self.configuration.ise_protocol == 'iscsi':
test_connector['initiator'] = 'bad_iqn'
test_connector['host'] = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
test_connector['wwpns'] = 'bad_wwn'
test_connector['host'] = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_CONTROLLERS_RESP])
self.driver.terminate_connection(VOLUME1, test_connector)
def test_create_snapshot(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP])
self.setup_driver()
self.driver.create_snapshot(SNAPSHOT1)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_negative_create_snapshot_invalid_state_recover(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_400_INVALID_STATE_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP])
self.setup_driver()
self.driver.create_snapshot(SNAPSHOT1)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_negative_create_snapshot_invalid_state_norecover(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_400_INVALID_STATE_RESP,
ISE_400_INVALID_STATE_RESP,
ISE_400_INVALID_STATE_RESP,
ISE_400_INVALID_STATE_RESP,
ISE_400_INVALID_STATE_RESP])
self.configuration.ise_completion_retries = 5
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.create_snapshot, SNAPSHOT1)
def test_negative_create_snapshot_conflict(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_409_CONFLICT_RESP])
self.configuration.ise_completion_retries = 1
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.create_snapshot, SNAPSHOT1)
def test_delete_snapshot(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_DELETE_VOLUME_RESP])
self.setup_driver()
self.driver.delete_snapshot(SNAPSHOT1)
def test_clone_volume(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP])
self.setup_driver()
self.driver.create_cloned_volume(CLONE1, VOLUME1)
def test_extend_volume(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.setup_driver()
self.driver.extend_volume(VOLUME1, NEW_VOLUME_SIZE)
def test_retype_volume(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
# New volume type
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "5",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT2', extra_specs)
specs = {'qos:minIOPS': '30',
'qos:maxIOPS': '3000',
'qos:burstIOPS': '10000'}
qos = qos_specs.create(ctxt, 'fake-qos2', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.setup_driver()
self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0)
def test_create_volume_from_snapshot(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.setup_driver()
self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1)
def test_manage_existing(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP,
ISE_GET_IONETWORKS_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'})
def test_manage_existing_no_source_name(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP,
ISE_GET_IONETWORKS_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.manage_existing, VOLUME1, {})
def test_manage_existing_get_size(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP])
exp_result = 10
act_result = \
self.driver.manage_existing_get_size(VOLUME1,
{'source-name': 'a'})
self.assertEqual(exp_result, act_result)
def test_manage_existing_get_size_no_source_name(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.manage_existing_get_size, VOLUME1, {})
def test_unmanage(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.unmanage(VOLUME1)
def test_negative_unmanage_no_volume_status_xml(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL_STATUS_NO_STATUS_RESP])
self.driver.unmanage(VOLUME1)
def test_negative_unmanage_no_volume_xml(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.unmanage, VOLUME1)
def test_negative_unmanage_non_existing_volume(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL_STATUS_404_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.unmanage, VOLUME1)
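# The shared XIOISEDriverTestCase logic above is run once per transport
# protocol by the concrete test case classes below; each subclass only selects
# 'iscsi' or 'fibre_channel' via setup_test().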
class XIOISEISCSIDriverTestCase(XIOISEDriverTestCase, test.TestCase):
def setUp(self):
super(XIOISEISCSIDriverTestCase, self).setUp()
self.setup_test('iscsi')
class XIOISEFCDriverTestCase(XIOISEDriverTestCase, test.TestCase):
def setUp(self):
super(XIOISEFCDriverTestCase, self).setUp()
self.setup_test('fibre_channel')
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import multiprocessing
import android.adb.commands
from swift_build_support.swift_build_support import host
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
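# Illustrative usage sketch of the wrapper above (never called; the real
# parser is assembled in create_argument_parser() below). The '--jobs'
# option and its cpu_count() default are hypothetical examples.
def _example_apply_defaults_usage():
    def fill_in_defaults(namespace):
        # Derive a default only after parsing, based on other runtime state.
        if namespace.jobs is None:
            namespace.jobs = multiprocessing.cpu_count()

    parser = _ApplyDefaultsArgumentParser(apply_defaults=fill_in_defaults)
    parser.add_argument('--jobs', type=int, default=None)
    # parse_args() delegates to the overridden parse_known_args(), so
    # fill_in_defaults() runs on the namespace before it is returned.
    return parser.parse_args([])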
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if args.cmark_build_variant is not None:
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '1'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# Propagate global --skip-build
if args.skip_build:
args.build_linux = False
args.build_freebsd = False
args.build_cygwin = False
args.build_osx = False
args.build_ios = False
args.build_tvos = False
args.build_watchos = False
args.build_android = False
args.build_benchmarks = False
args.build_external_benchmarks = False
args.build_lldb = False
args.build_llbuild = False
args.build_swiftpm = False
args.build_xctest = False
args.build_foundation = False
args.build_libdispatch = False
args.build_libicu = False
args.build_playgroundsupport = False
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --validation-test implies --test.
if args.validation_test:
args.test = True
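# For example, passing only --validation-test keeps the per-platform test_*
# flags enabled further below, because args.test has just been set to True.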
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
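# The DSL below is a thin layer over argparse: each option(...) call registers
# one or more flags with an action (store, store_true, toggle_true, ...) and an
# optional explicit destination, and builder.build() at the end of this
# function materializes the configured parser. (Summary inferred from how the
# builder is used here, not from its implementation.)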
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--no-legacy-impl', store_false('legacy_impl'),
help='avoid legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require a '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
default_max_lto_link_job_counts = host.max_lto_link_job_counts()
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=default_max_lto_link_job_counts['llvm'],
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=default_max_lto_link_job_counts['swift'],
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--enable-sil-ownership', store_true,
help='Enable the SIL ownership model')
option('--enable-guaranteed-normal-arguments', store_true,
help='Enable guaranteed normal arguments')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', append,
type=argparse.ShellSplitType(),
default=None,
help='list of targets to compile or cross-compile the Swift '
'standard library for. %(default)s by default.')
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option(['-l', '--lldb'], store_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], store_true('build_llbuild'),
help='build llbuild')
option(['-p', '--swiftpm'], store_true('build_swiftpm'),
help='build swiftpm')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', store_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
'SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option('--assertions', store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option('--no-assertions', store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' causes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='build the external Swift Benchmark Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to a directory containing libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to a directory containing libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers for libicui18n')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects. Cloning LLDB
is only required if you intend to use the -l, -L, --lldb, or --debug-lldb
options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install" \\
install_symroot="/tmp/symroot" \\
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test \
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
|
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
# This file was created automatically by SWIG 1.3.27.
# Don't modify this file, modify the SWIG interface instead.
import _TetGen
# This file is compatible with both classic and new-style classes.
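# The helpers below route attribute reads and writes through the
# __swig_setmethods__ / __swig_getmethods__ tables that SWIG generates for
# each proxy class, so Python attribute access reaches the wrapped C++ members.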
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name) or (name == "thisown"):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class tetgenio(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, tetgenio, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, tetgenio, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
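# Each wrapped C++ member below is exposed twice: through the get/set tables
# consulted by _swig_getattr/_swig_setattr for classic classes, and as a
# property when new-style classes are available (_newclass).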
FILENAMESIZE = _TetGen.tetgenio_FILENAMESIZE
INPUTLINESIZE = _TetGen.tetgenio_INPUTLINESIZE
__swig_getmethods__["init"] = lambda x: _TetGen.tetgenio_init
if _newclass:init = staticmethod(_TetGen.tetgenio_init)
__swig_setmethods__["firstnumber"] = _TetGen.tetgenio_firstnumber_set
__swig_getmethods__["firstnumber"] = _TetGen.tetgenio_firstnumber_get
if _newclass:firstnumber = property(_TetGen.tetgenio_firstnumber_get, _TetGen.tetgenio_firstnumber_set)
__swig_setmethods__["mesh_dim"] = _TetGen.tetgenio_mesh_dim_set
__swig_getmethods__["mesh_dim"] = _TetGen.tetgenio_mesh_dim_get
if _newclass:mesh_dim = property(_TetGen.tetgenio_mesh_dim_get, _TetGen.tetgenio_mesh_dim_set)
__swig_setmethods__["pointlist"] = _TetGen.tetgenio_pointlist_set
__swig_getmethods__["pointlist"] = _TetGen.tetgenio_pointlist_get
if _newclass:pointlist = property(_TetGen.tetgenio_pointlist_get, _TetGen.tetgenio_pointlist_set)
__swig_setmethods__["pointattributelist"] = _TetGen.tetgenio_pointattributelist_set
__swig_getmethods__["pointattributelist"] = _TetGen.tetgenio_pointattributelist_get
if _newclass:pointattributelist = property(_TetGen.tetgenio_pointattributelist_get, _TetGen.tetgenio_pointattributelist_set)
__swig_setmethods__["addpointlist"] = _TetGen.tetgenio_addpointlist_set
__swig_getmethods__["addpointlist"] = _TetGen.tetgenio_addpointlist_get
if _newclass:addpointlist = property(_TetGen.tetgenio_addpointlist_get, _TetGen.tetgenio_addpointlist_set)
__swig_setmethods__["addpointattributelist"] = _TetGen.tetgenio_addpointattributelist_set
__swig_getmethods__["addpointattributelist"] = _TetGen.tetgenio_addpointattributelist_get
if _newclass:addpointattributelist = property(_TetGen.tetgenio_addpointattributelist_get, _TetGen.tetgenio_addpointattributelist_set)
__swig_setmethods__["pointmarkerlist"] = _TetGen.tetgenio_pointmarkerlist_set
__swig_getmethods__["pointmarkerlist"] = _TetGen.tetgenio_pointmarkerlist_get
if _newclass:pointmarkerlist = property(_TetGen.tetgenio_pointmarkerlist_get, _TetGen.tetgenio_pointmarkerlist_set)
__swig_setmethods__["numberofpoints"] = _TetGen.tetgenio_numberofpoints_set
__swig_getmethods__["numberofpoints"] = _TetGen.tetgenio_numberofpoints_get
if _newclass:numberofpoints = property(_TetGen.tetgenio_numberofpoints_get, _TetGen.tetgenio_numberofpoints_set)
__swig_setmethods__["numberofpointattributes"] = _TetGen.tetgenio_numberofpointattributes_set
__swig_getmethods__["numberofpointattributes"] = _TetGen.tetgenio_numberofpointattributes_get
if _newclass:numberofpointattributes = property(_TetGen.tetgenio_numberofpointattributes_get, _TetGen.tetgenio_numberofpointattributes_set)
__swig_setmethods__["numberofaddpoints"] = _TetGen.tetgenio_numberofaddpoints_set
__swig_getmethods__["numberofaddpoints"] = _TetGen.tetgenio_numberofaddpoints_get
if _newclass:numberofaddpoints = property(_TetGen.tetgenio_numberofaddpoints_get, _TetGen.tetgenio_numberofaddpoints_set)
__swig_setmethods__["tetrahedronlist"] = _TetGen.tetgenio_tetrahedronlist_set
__swig_getmethods__["tetrahedronlist"] = _TetGen.tetgenio_tetrahedronlist_get
if _newclass:tetrahedronlist = property(_TetGen.tetgenio_tetrahedronlist_get, _TetGen.tetgenio_tetrahedronlist_set)
__swig_setmethods__["tetrahedronattributelist"] = _TetGen.tetgenio_tetrahedronattributelist_set
__swig_getmethods__["tetrahedronattributelist"] = _TetGen.tetgenio_tetrahedronattributelist_get
if _newclass:tetrahedronattributelist = property(_TetGen.tetgenio_tetrahedronattributelist_get, _TetGen.tetgenio_tetrahedronattributelist_set)
__swig_setmethods__["tetrahedronvolumelist"] = _TetGen.tetgenio_tetrahedronvolumelist_set
__swig_getmethods__["tetrahedronvolumelist"] = _TetGen.tetgenio_tetrahedronvolumelist_get
if _newclass:tetrahedronvolumelist = property(_TetGen.tetgenio_tetrahedronvolumelist_get, _TetGen.tetgenio_tetrahedronvolumelist_set)
__swig_setmethods__["neighborlist"] = _TetGen.tetgenio_neighborlist_set
__swig_getmethods__["neighborlist"] = _TetGen.tetgenio_neighborlist_get
if _newclass:neighborlist = property(_TetGen.tetgenio_neighborlist_get, _TetGen.tetgenio_neighborlist_set)
__swig_setmethods__["numberoftetrahedra"] = _TetGen.tetgenio_numberoftetrahedra_set
__swig_getmethods__["numberoftetrahedra"] = _TetGen.tetgenio_numberoftetrahedra_get
if _newclass:numberoftetrahedra = property(_TetGen.tetgenio_numberoftetrahedra_get, _TetGen.tetgenio_numberoftetrahedra_set)
__swig_setmethods__["numberofcorners"] = _TetGen.tetgenio_numberofcorners_set
__swig_getmethods__["numberofcorners"] = _TetGen.tetgenio_numberofcorners_get
if _newclass:numberofcorners = property(_TetGen.tetgenio_numberofcorners_get, _TetGen.tetgenio_numberofcorners_set)
__swig_setmethods__["numberoftetrahedronattributes"] = _TetGen.tetgenio_numberoftetrahedronattributes_set
__swig_getmethods__["numberoftetrahedronattributes"] = _TetGen.tetgenio_numberoftetrahedronattributes_get
if _newclass:numberoftetrahedronattributes = property(_TetGen.tetgenio_numberoftetrahedronattributes_get, _TetGen.tetgenio_numberoftetrahedronattributes_set)
__swig_setmethods__["facetlist"] = _TetGen.tetgenio_facetlist_set
__swig_getmethods__["facetlist"] = _TetGen.tetgenio_facetlist_get
if _newclass:facetlist = property(_TetGen.tetgenio_facetlist_get, _TetGen.tetgenio_facetlist_set)
__swig_setmethods__["facetmarkerlist"] = _TetGen.tetgenio_facetmarkerlist_set
__swig_getmethods__["facetmarkerlist"] = _TetGen.tetgenio_facetmarkerlist_get
if _newclass:facetmarkerlist = property(_TetGen.tetgenio_facetmarkerlist_get, _TetGen.tetgenio_facetmarkerlist_set)
__swig_setmethods__["numberoffacets"] = _TetGen.tetgenio_numberoffacets_set
__swig_getmethods__["numberoffacets"] = _TetGen.tetgenio_numberoffacets_get
if _newclass:numberoffacets = property(_TetGen.tetgenio_numberoffacets_get, _TetGen.tetgenio_numberoffacets_set)
__swig_setmethods__["holelist"] = _TetGen.tetgenio_holelist_set
__swig_getmethods__["holelist"] = _TetGen.tetgenio_holelist_get
if _newclass:holelist = property(_TetGen.tetgenio_holelist_get, _TetGen.tetgenio_holelist_set)
__swig_setmethods__["numberofholes"] = _TetGen.tetgenio_numberofholes_set
__swig_getmethods__["numberofholes"] = _TetGen.tetgenio_numberofholes_get
if _newclass:numberofholes = property(_TetGen.tetgenio_numberofholes_get, _TetGen.tetgenio_numberofholes_set)
__swig_setmethods__["regionlist"] = _TetGen.tetgenio_regionlist_set
__swig_getmethods__["regionlist"] = _TetGen.tetgenio_regionlist_get
if _newclass:regionlist = property(_TetGen.tetgenio_regionlist_get, _TetGen.tetgenio_regionlist_set)
__swig_setmethods__["numberofregions"] = _TetGen.tetgenio_numberofregions_set
__swig_getmethods__["numberofregions"] = _TetGen.tetgenio_numberofregions_get
if _newclass:numberofregions = property(_TetGen.tetgenio_numberofregions_get, _TetGen.tetgenio_numberofregions_set)
__swig_setmethods__["facetconstraintlist"] = _TetGen.tetgenio_facetconstraintlist_set
__swig_getmethods__["facetconstraintlist"] = _TetGen.tetgenio_facetconstraintlist_get
if _newclass:facetconstraintlist = property(_TetGen.tetgenio_facetconstraintlist_get, _TetGen.tetgenio_facetconstraintlist_set)
__swig_setmethods__["numberoffacetconstraints"] = _TetGen.tetgenio_numberoffacetconstraints_set
__swig_getmethods__["numberoffacetconstraints"] = _TetGen.tetgenio_numberoffacetconstraints_get
if _newclass:numberoffacetconstraints = property(_TetGen.tetgenio_numberoffacetconstraints_get, _TetGen.tetgenio_numberoffacetconstraints_set)
__swig_setmethods__["segmentconstraintlist"] = _TetGen.tetgenio_segmentconstraintlist_set
__swig_getmethods__["segmentconstraintlist"] = _TetGen.tetgenio_segmentconstraintlist_get
if _newclass:segmentconstraintlist = property(_TetGen.tetgenio_segmentconstraintlist_get, _TetGen.tetgenio_segmentconstraintlist_set)
__swig_setmethods__["numberofsegmentconstraints"] = _TetGen.tetgenio_numberofsegmentconstraints_set
__swig_getmethods__["numberofsegmentconstraints"] = _TetGen.tetgenio_numberofsegmentconstraints_get
if _newclass:numberofsegmentconstraints = property(_TetGen.tetgenio_numberofsegmentconstraints_get, _TetGen.tetgenio_numberofsegmentconstraints_set)
__swig_setmethods__["nodeconstraintlist"] = _TetGen.tetgenio_nodeconstraintlist_set
__swig_getmethods__["nodeconstraintlist"] = _TetGen.tetgenio_nodeconstraintlist_get
if _newclass:nodeconstraintlist = property(_TetGen.tetgenio_nodeconstraintlist_get, _TetGen.tetgenio_nodeconstraintlist_set)
__swig_setmethods__["numberofnodeconstraints"] = _TetGen.tetgenio_numberofnodeconstraints_set
__swig_getmethods__["numberofnodeconstraints"] = _TetGen.tetgenio_numberofnodeconstraints_get
if _newclass:numberofnodeconstraints = property(_TetGen.tetgenio_numberofnodeconstraints_get, _TetGen.tetgenio_numberofnodeconstraints_set)
__swig_setmethods__["pbcgrouplist"] = _TetGen.tetgenio_pbcgrouplist_set
__swig_getmethods__["pbcgrouplist"] = _TetGen.tetgenio_pbcgrouplist_get
if _newclass:pbcgrouplist = property(_TetGen.tetgenio_pbcgrouplist_get, _TetGen.tetgenio_pbcgrouplist_set)
__swig_setmethods__["numberofpbcgroups"] = _TetGen.tetgenio_numberofpbcgroups_set
__swig_getmethods__["numberofpbcgroups"] = _TetGen.tetgenio_numberofpbcgroups_get
if _newclass:numberofpbcgroups = property(_TetGen.tetgenio_numberofpbcgroups_get, _TetGen.tetgenio_numberofpbcgroups_set)
__swig_setmethods__["trifacelist"] = _TetGen.tetgenio_trifacelist_set
__swig_getmethods__["trifacelist"] = _TetGen.tetgenio_trifacelist_get
if _newclass:trifacelist = property(_TetGen.tetgenio_trifacelist_get, _TetGen.tetgenio_trifacelist_set)
__swig_setmethods__["adjtetlist"] = _TetGen.tetgenio_adjtetlist_set
__swig_getmethods__["adjtetlist"] = _TetGen.tetgenio_adjtetlist_get
if _newclass:adjtetlist = property(_TetGen.tetgenio_adjtetlist_get, _TetGen.tetgenio_adjtetlist_set)
__swig_setmethods__["trifacemarkerlist"] = _TetGen.tetgenio_trifacemarkerlist_set
__swig_getmethods__["trifacemarkerlist"] = _TetGen.tetgenio_trifacemarkerlist_get
if _newclass:trifacemarkerlist = property(_TetGen.tetgenio_trifacemarkerlist_get, _TetGen.tetgenio_trifacemarkerlist_set)
__swig_setmethods__["numberoftrifaces"] = _TetGen.tetgenio_numberoftrifaces_set
__swig_getmethods__["numberoftrifaces"] = _TetGen.tetgenio_numberoftrifaces_get
if _newclass:numberoftrifaces = property(_TetGen.tetgenio_numberoftrifaces_get, _TetGen.tetgenio_numberoftrifaces_set)
__swig_setmethods__["edgelist"] = _TetGen.tetgenio_edgelist_set
__swig_getmethods__["edgelist"] = _TetGen.tetgenio_edgelist_get
if _newclass:edgelist = property(_TetGen.tetgenio_edgelist_get, _TetGen.tetgenio_edgelist_set)
__swig_setmethods__["edgemarkerlist"] = _TetGen.tetgenio_edgemarkerlist_set
__swig_getmethods__["edgemarkerlist"] = _TetGen.tetgenio_edgemarkerlist_get
if _newclass:edgemarkerlist = property(_TetGen.tetgenio_edgemarkerlist_get, _TetGen.tetgenio_edgemarkerlist_set)
__swig_setmethods__["numberofedges"] = _TetGen.tetgenio_numberofedges_set
__swig_getmethods__["numberofedges"] = _TetGen.tetgenio_numberofedges_get
if _newclass:numberofedges = property(_TetGen.tetgenio_numberofedges_get, _TetGen.tetgenio_numberofedges_set)
def initialize(*args): return _TetGen.tetgenio_initialize(*args)
def deinitialize(*args): return _TetGen.tetgenio_deinitialize(*args)
def load_node_call(*args): return _TetGen.tetgenio_load_node_call(*args)
def load_node(*args): return _TetGen.tetgenio_load_node(*args)
def load_addnodes(*args): return _TetGen.tetgenio_load_addnodes(*args)
def load_pbc(*args): return _TetGen.tetgenio_load_pbc(*args)
def load_var(*args): return _TetGen.tetgenio_load_var(*args)
def load_mtr(*args): return _TetGen.tetgenio_load_mtr(*args)
def load_poly(*args): return _TetGen.tetgenio_load_poly(*args)
def load_off(*args): return _TetGen.tetgenio_load_off(*args)
def load_ply(*args): return _TetGen.tetgenio_load_ply(*args)
def load_stl(*args): return _TetGen.tetgenio_load_stl(*args)
def load_medit(*args): return _TetGen.tetgenio_load_medit(*args)
def load_plc(*args): return _TetGen.tetgenio_load_plc(*args)
def load_tetmesh(*args): return _TetGen.tetgenio_load_tetmesh(*args)
def save_nodes(*args): return _TetGen.tetgenio_save_nodes(*args)
def save_elements(*args): return _TetGen.tetgenio_save_elements(*args)
def save_faces(*args): return _TetGen.tetgenio_save_faces(*args)
def save_edges(*args): return _TetGen.tetgenio_save_edges(*args)
def save_neighbors(*args): return _TetGen.tetgenio_save_neighbors(*args)
def save_poly(*args): return _TetGen.tetgenio_save_poly(*args)
def readline(*args): return _TetGen.tetgenio_readline(*args)
def findnextfield(*args): return _TetGen.tetgenio_findnextfield(*args)
def readnumberline(*args): return _TetGen.tetgenio_readnumberline(*args)
def findnextnumber(*args): return _TetGen.tetgenio_findnextnumber(*args)
def __init__(self, *args):
_swig_setattr(self, tetgenio, 'this', _TetGen.new_tetgenio(*args))
_swig_setattr(self, tetgenio, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_tetgenio):
try:
if self.thisown: destroy(self)
except: pass
class tetgenioPtr(tetgenio):
def __init__(self, this):
_swig_setattr(self, tetgenio, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, tetgenio, 'thisown', 0)
self.__class__ = tetgenio
_TetGen.tetgenio_swigregister(tetgenioPtr)
tetgenio_init = _TetGen.tetgenio_init
class polygon(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, polygon, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, polygon, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio::polygon instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args):
_swig_setattr(self, polygon, 'this', _TetGen.new_polygon(*args))
_swig_setattr(self, polygon, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_polygon):
try:
if self.thisown: destroy(self)
except: pass
class polygonPtr(polygon):
def __init__(self, this):
_swig_setattr(self, polygon, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, polygon, 'thisown', 0)
self.__class__ = polygon
_TetGen.polygon_swigregister(polygonPtr)
class facet(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, facet, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, facet, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio::facet instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args):
_swig_setattr(self, facet, 'this', _TetGen.new_facet(*args))
_swig_setattr(self, facet, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_facet):
try:
if self.thisown: destroy(self)
except: pass
class facetPtr(facet):
def __init__(self, this):
_swig_setattr(self, facet, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, facet, 'thisown', 0)
self.__class__ = facet
_TetGen.facet_swigregister(facetPtr)
class pbcgroup(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, pbcgroup, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, pbcgroup, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio::pbcgroup instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args):
_swig_setattr(self, pbcgroup, 'this', _TetGen.new_pbcgroup(*args))
_swig_setattr(self, pbcgroup, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_pbcgroup):
try:
if self.thisown: destroy(self)
except: pass
class pbcgroupPtr(pbcgroup):
def __init__(self, this):
_swig_setattr(self, pbcgroup, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, pbcgroup, 'thisown', 0)
self.__class__ = pbcgroup
_TetGen.pbcgroup_swigregister(pbcgroupPtr)
tetrahedralize = _TetGen.tetrahedralize
allocate_array = _TetGen.allocate_array
set_val = _TetGen.set_val
get_val = _TetGen.get_val
allocate_facet_array = _TetGen.allocate_facet_array
add_tri = _TetGen.add_tri
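# A minimal usage sketch (illustrative only, not part of the generated wrapper). It assumes the
# load_*/save_* proxies forward a file base name to the underlying C++ tetgenio methods, and that
# tetrahedralize() mirrors the documented C++ signature tetrahedralize(switches, in, out):
#
#   inp = tetgenio()
#   inp.load_poly("example")                  # reads example.poly (assumed argument form)
#   out = tetgenio()
#   tetrahedralize("pq1.414a0.1", inp, out)   # switches string as documented in the TetGen manual
#   out.save_nodes("example.1")
#   out.save_elements("example.1")
#   out.save_faces("example.1")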
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
#!python -u
import sys
import os
import os.path
import win32com.client
import gen_dir_wxi
from gen_dir_wxi import add_wix_to_path
import gen_analysis_tool_wxi
import glob
import subprocess
import xml.etree.ElementTree as ET
prefs = { 'verbose': True }
this_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(this_dir)
os.environ['PATH'] = os.environ['PATH'].replace('"', '')
def system(args, dirname=None):
"""
Executes a system command (throws an exception on error)
params
args : [command, arg1, arg2, ...]
dirname : if set, execute the command within this directory
"""
import subprocess
#print args
with open(os.devnull, "w") as nulfp:
# n.b. stderr=subprocess.STDOUT fails mysteriously
import sys
subprocess.check_call(args, stdout=(sys.stdout if prefs['verbose'] else nulfp), stderr=subprocess.STDOUT, shell=False, cwd=dirname)
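# Illustrative usage (hypothetical invocation): run a tool from this directory, raising on failure.
#   system(['candle', '-help'], dirname=this_dir)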
def get_nuget_packages():
import svn_info
#branch = svn_info.get_branch_name()
branch = 'trunk'
packages = None
from xml.etree import ElementTree
cad_packages = ElementTree.parse(r'CAD_Installs\packages.config')
destination_files = [ r'CAD_Installs\Proe ISIS Extensions\bin\CADCreoParametricCreateAssembly.exe',
r'CAD_Installs\Proe ISIS Extensions\0Readme - CreateAssembly.txt',
r'MDL2MGACyber.exe',
r'CAD_Installs\Proe ISIS Extensions\bin\ExtractACM-XMLfromCreoModels.exe',
r'CAD_Installs\Proe ISIS Extensions\bin\CADCreoParametricMetaLink.exe',
]
for filename in destination_files:
if os.path.isfile(filename):
os.unlink(filename)
for package in cad_packages.findall('package'):
version = package.get('version')
print "NuGet install " + package.get('id') + " " + version
system([r'..\src\.nuget\nuget.exe', 'install', '-ConfigFile', r'..\NuGet.config', '-PreRelease', '-Version', version, package.get('id')], os.path.join(this_dir, 'CAD_Installs'))
package_dir = r'CAD_Installs\%s.%s' % (package.get('id'), version)
for filename in glob.glob(package_dir + '/*'):
#if os.path.basename(filename) == 'svnversion':
# with open(os.path.join(this_dir, filename), 'rb') as svnversion:
# print filename + ': ' + svnversion.read()
destination_file = [fn for fn in destination_files if os.path.basename(fn) == os.path.basename(filename)]
if not destination_file:
continue
destination_file = destination_file[0]
from win32file import CreateHardLink
print "Linking %s to %s" % (os.path.join(this_dir, filename), os.path.join(this_dir, destination_file))
CreateHardLink(os.path.join(this_dir, destination_file), os.path.join(this_dir, filename))
destination_files.remove(destination_file)
if destination_files:
raise Exception('Could not find files %s in NuGet packages' % repr(destination_files))
def build_msi():
get_nuget_packages()
add_wix_to_path()
def get_wixobj(file):
return os.path.splitext(file)[0] + ".wixobj"
def adjacent_file(file):
return os.path.join(os.path.dirname(__file__), file)
gen_analysis_tool_wxi.main(r"..\analysis_tools", diskId='5')
# gen_dir_from_vc: "explicit is better than implicit"
# consider: generated files are left on disk after an svn switch, and get included in an installer that shouldn't have them
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\PCC\PCC",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\isis_meta\isis_meta",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\material_library\MaterialLibraryInterface",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\meta_nrmm\meta_nrmm",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\py_modelica\py_modelica",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\py_modelica_exporter\py_modelica_exporter",)
gen_dir_wxi.gen_dir_from_vc(r"..\meta\DesignDataPackage\lib\python", "DesignDataPackage_python.wxi", "DesignDataPackage_python")
gen_dir_wxi.main(r"CAD_Installs\Proe ISIS Extensions", "Proe_ISIS_Extensions_x64.wxi", "Proe_ISIS_Extensions_x64", diskId='4') # do not call gen_dir_from_vc, it would exclude CADCreoCreateAssembly.exe
gen_dir_wxi.gen_dir_from_vc(r"..\WebGME",)
gen_dir_wxi.gen_dir_from_vc(r"..\meta\CyPhyML\icons",)
gen_dir_wxi.gen_dir_from_vc(r"..\models\MassSpringDamper",)
gen_dir_wxi.gen_dir_from_vc(r"..\bin", diskId='3')
gen_dir_wxi.gen_dir_from_vc(r"..\ModelicaWrapperTemplates",)
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\chipfit_display",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\TestBenchExecutor",)
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\SpiceVisualizer")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\spice_viewer")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\cam2gerber")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\get_bom_with_eagle_xref")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\runCentroidUlp")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\runEagleUlp")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\runDrc")
gen_dir_wxi.gen_dir_from_vc(r"..\..\tonka\src\Android")
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\PCC\PCC",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\isis_meta\isis_meta",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\meta_nrmm\meta_nrmm",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\py_modelica\py_modelica",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\Python27Packages\py_modelica_exporter\py_modelica_exporter",)
gen_dir_wxi.gen_dir_from_vc(r"..\src\CADAssembler\Python", "CADPython")
gen_dir_wxi.gen_dir_from_vc(r"..\meta\DesignDataPackage\lib\python", "DesignDataPackage_python.wxi", "DesignDataPackage_python")
def get_svnversion():
p = subprocess.Popen("git rev-list HEAD --count".split(), stdout=subprocess.PIPE)
out, err = p.communicate()
return out.strip() or '22950'
#import subprocess
#p = subprocess.Popen(['svnversion', '-n', adjacent_file('..')], stdout=subprocess.PIPE)
#out, err = p.communicate()
#if p.returncode:
# raise subprocess.CalledProcessError(p.returncode, 'svnversion')
#return out
svnversion = get_svnversion()
print "SVN version: " + str(get_svnversion())
sourcedir = adjacent_file('')
def get_gitversion():
p = subprocess.Popen("git rev-parse --short HEAD".split(), stdout=subprocess.PIPE)
out, err = p.communicate()
#if p.returncode:
# raise subprocess.CalledProcessError(p.returncode, 'svnversion')
return out.strip() or 'unknown'
gitversion = get_gitversion()
import glob
if len(sys.argv[1:]) > 0:
source_wxs = sys.argv[1]
else:
source_wxs = 'META_x64.wxs'
sources_all = glob.glob(sourcedir + '*.wxi') + glob.glob(sourcedir + source_wxs)
sources = []
include_wxis = []
# For each ComponentGroupRef in "source_wxs" and "analysis_tools.wxi",
# add its corresponding file to "include_wxis"
for wxs in glob.glob(sourcedir + source_wxs) + glob.glob(sourcedir + 'analysis_tools.wxi'):
print 'Processing WXS: ' + wxs
tree = ET.parse(wxs)
root = tree.getroot()
#print root
all_nodes = root.findall('.//')
for node in all_nodes:
if node.tag == '{http://schemas.microsoft.com/wix/2006/wi}ComponentGroupRef':
include_wxis.append(node.attrib['Id'] + '.wxi')
include_wxis.append(node.attrib['Id'] + '_x64.wxi')
if 'Proe' in node.attrib['Id'] + '_x64.wxi':
print node.attrib['Id'] + '_x64.wxi'
if node.tag == '{http://schemas.microsoft.com/wix/2006/wi}ComponentRef':
include_wxis.append(node.attrib['Id'].rsplit( ".", 1 )[ 0 ] + '.wxi')
include_wxis.append(node.attrib['Id'].rsplit( ".", 1 )[ 0 ] + '_x64.wxi')
# For each file in include_wxis, check for ComponentGroupRef and ComponentRef.
# Add any that you find
index = 0
while index < len(include_wxis):
wxi = include_wxis[index]
index += 1
if not os.path.exists(wxi):
continue
tree = ET.parse(wxi)
root = tree.getroot()
all_nodes = root.findall('.//')
for node in all_nodes:
if node.tag == '{http://schemas.microsoft.com/wix/2006/wi}ComponentGroupRef':
include_wxis.append(node.attrib['Id'] + '.wxi')
include_wxis.append(node.attrib['Id'] + '_x64.wxi')
if node.tag == '{http://schemas.microsoft.com/wix/2006/wi}ComponentRef':
include_wxis.append(node.attrib['Id'].rsplit( ".", 1 )[ 0 ] + '.wxi')
include_wxis.append(node.attrib['Id'].rsplit( ".", 1 )[ 0 ] + '_x64.wxi')
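# Illustration (hypothetical Id): a <ComponentGroupRef Id="CyPhyMLIcons"/> reference pulls in
# "CyPhyMLIcons.wxi" and "CyPhyMLIcons_x64.wxi" (when such files exist), and those files are then
# scanned in turn for further ComponentGroupRef/ComponentRef entries.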
sources = [source for source in sources_all if (os.path.basename(source) in include_wxis)]
sources.append(source_wxs)
if len(sources) == 0:
raise Exception("0 sources found in " + sourcedir)
defines = [ ('InterpreterBin', '../src/bin') ]
def get_mta_versions(mta_file):
import uuid
metaproject = win32com.client.Dispatch("MGA.MgaMetaProject")
metaproject.Open('MGA=' + mta_file)
try:
return ("{" + str(uuid.UUID(bytes_le=metaproject.GUID)).upper() + "}", metaproject.Version)
finally:
metaproject.Close()
cyphy_versions = get_mta_versions(adjacent_file('../generated/CyPhyML/models/CyPhyML.mta'))
defines.append(('GUIDSTRCYPHYML', cyphy_versions[0]))
defines.append(('VERSIONSTRCYPHYML', cyphy_versions[1]))
version = '14.10.'
if 'M' in svnversion:
### METAMORPH HACK: SVN calls are bad. This is just checking for local modifications.
#if 'JENKINS_URL' in os.environ:
# try:
# system('svn status -q'.split(), os.path.join(this_dir, '..'))
# finally:
# raise Exception('Versioned files have modifications. The build must not modify versioned files.')
version = version + '1'
else:
# this will crash for switched or sparse checkouts
version = version + str(int(svnversion))
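# For example, an svnversion of "22950" produces installer version "14.10.22950", while a locally
# modified checkout (svnversion containing "M") falls back to "14.10.1".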
print 'Installer version: ' + version
defines.append(('VERSIONSTR', version))
defines.append(('SVNVERSION', svnversion))
defines.append(('GITVERSION', gitversion))
from multiprocessing.pool import ThreadPool
pool = ThreadPool()
pool_exceptions = []
def candle(source):
try:
arch = [ '-arch', ('x86' if source.find('x64') == -1 else 'x64') ]
system(['candle', '-ext', 'WiXUtilExtension'] + ['-d' + d[0] + '=' + d[1] for d in defines ] + arch + [ '-out', get_wixobj(source), source] + ['-nologo'])
except Exception as e:
pool_exceptions.append(e)
raise
candle_results = pool.map_async(candle, sources, chunksize=1)
pool.close()
pool.join()
if pool_exceptions:
raise pool_exceptions[0]
assert candle_results.successful()
#ignore warning 1055, ICE82 from VC10 merge modules
# ICE69: Mismatched component reference. Entry 'reg491FAFEB7F990D99C4A4D719B2A95253' of the Registry table belongs to component 'CyPhySoT.dll'. However, the formatted string in column 'Value' references file 'CyPhySoT.ico' which belongs to component 'CyPhySoT.ico'
# ICE60: The file fil_5b64d789d9ad5473bc580ea7258a0fac is not a Font, and its version is not a companion file reference. It should have a language specified in the Language column.
if source_wxs.startswith("META"):
import datetime
starttime = datetime.datetime.now()
system(['light', '-sw1055', '-sice:ICE82', '-sice:ICE57', '-sice:ICE60', '-sice:ICE69', '-ext', 'WixNetFxExtension', '-ext', 'WixUIExtension', '-ext', 'WixUtilExtension',
'-cc', os.path.join(this_dir, 'cab_cache'), '-reusecab',
'-o', os.path.splitext(source_wxs)[0] + ".msi"] + [ get_wixobj(file) for file in sources ])
print "elapsed time: %d seconds" % (datetime.datetime.now() - starttime).seconds
else:
msm_output = os.path.splitext(source_wxs)[0] + ".msm"
system(['light', '-ext', 'WixUtilExtension', '-o', msm_output] + [ get_wixobj(file) for file in sources ])
class MSBuildErrorWriter(object):
def write(self, d):
sys.stderr.write("error: ")
sys.stderr.write(d)
if __name__ == '__main__':
os.chdir(this_dir)
import traceback
try:
build_msi()
except:
traceback.print_exc(None, MSBuildErrorWriter())
sys.exit(2)
# election_office_measure/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
from exception.models import handle_record_found_more_than_one_exception
from wevote_settings.models import fetch_next_id_we_vote_last_candidate_campaign_integer, \
fetch_next_id_we_vote_last_contest_measure_integer, fetch_next_id_we_vote_last_contest_office_integer, \
fetch_next_id_we_vote_last_measure_campaign_integer, fetch_site_unique_id_prefix
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
class Election(models.Model):
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=20, null=True, unique=True)
# A displayable name for the election.
name = models.CharField(verbose_name="election name", max_length=254, null=False, blank=False)
# Day of the election in YYYY-MM-DD format.
election_date_text = models.CharField(verbose_name="election day", max_length=254, null=False, blank=False)
class ContestOffice(models.Model):
# The id_we_vote identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "off", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.id_we_vote_last_contest_office_integer
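    # For example (hypothetical values): with site_unique_id_prefix "3v" and a next integer of 123,
    # the generated id_we_vote would be "wv3voff123".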
id_we_vote = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# The name of the office for this contest.
office_name = models.CharField(verbose_name="google civic office", max_length=254, null=False, blank=False)
# The We Vote unique id for the election
election_id = models.CharField(verbose_name="we vote election id", max_length=254, null=False, blank=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=254, null=False, blank=False)
id_cicero = models.CharField(
verbose_name="azavea cicero unique identifier", max_length=254, null=True, blank=True, unique=True)
id_maplight = models.CharField(
verbose_name="maplight unique identifier", max_length=254, null=True, blank=True, unique=True)
id_ballotpedia = models.CharField(
verbose_name="ballotpedia unique identifier", max_length=254, null=True, blank=True)
id_wikipedia = models.CharField(verbose_name="wikipedia unique identifier", max_length=254, null=True, blank=True)
# vote_type (ranked choice, majority)
# ballot_placement: NOTE - even though GoogleCivicContestOffice has this field, we store this value
# in the BallotItem table instead because it is different for each voter
# The number of candidates that a voter may vote for in this contest.
number_voting_for = models.CharField(verbose_name="number of candidates to vote for",
max_length=254, null=True, blank=True)
# The number of candidates that will be elected to office in this contest.
number_elected = models.CharField(verbose_name="number of candidates who will be elected",
max_length=254, null=True, blank=True)
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="primary party", max_length=254, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=254, null=False, blank=False)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="district scope", max_length=254, null=False, blank=False)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_ocd_id = models.CharField(verbose_name="open civic data id", max_length=254, null=False, blank=False)
# We override the save function so we can auto-generate id_we_vote
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique id_we_vote
if self.id_we_vote:
self.id_we_vote = self.id_we_vote.strip()
if self.id_we_vote == "" or self.id_we_vote is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_id_we_vote_last_contest_office_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "off" = tells us this is a unique id for a ContestOffice
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.id_we_vote = "wv{site_unique_id_prefix}off{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(ContestOffice, self).save(*args, **kwargs)
class ContestOfficeManager(models.Model):
def __unicode__(self):
return "ContestOfficeManager"
def retrieve_contest_office_from_id(self, contest_office_id):
contest_office_manager = ContestOfficeManager()
return contest_office_manager.retrieve_contest_office(contest_office_id)
def retrieve_contest_office_from_id_maplight(self, id_maplight):
contest_office_id = 0
contest_office_manager = ContestOfficeManager()
return contest_office_manager.retrieve_contest_office(contest_office_id, id_maplight)
def fetch_contest_office_id_from_id_maplight(self, id_maplight):
contest_office_id = 0
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office(contest_office_id, id_maplight)
if results['success']:
return results['contest_office_id']
return 0
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_contest_office(self, contest_office_id, id_maplight=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
contest_office_on_stage = ContestOffice()
try:
if contest_office_id > 0:
contest_office_on_stage = ContestOffice.objects.get(id=contest_office_id)
contest_office_id = contest_office_on_stage.id
            elif id_maplight is not None and len(id_maplight) > 0:
contest_office_on_stage = ContestOffice.objects.get(id_maplight=id_maplight)
contest_office_id = contest_office_on_stage.id
except ContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
except ContestOffice.DoesNotExist as e:
exception_does_not_exist = True
results = {
'success': True if contest_office_id > 0 else False,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'contest_office_found': True if contest_office_id > 0 else False,
'contest_office_id': contest_office_id,
'contest_office': contest_office_on_stage,
}
return results
class CandidateCampaignList(models.Model):
"""
This is a class to make it easy to retrieve lists of Candidates
"""
def retrieve_candidate_campaigns_for_this_election_list(self):
candidates_list_temp = CandidateCampaign.objects.all()
# Order by candidate_name.
# To order by last name we will need to make some guesses in some case about what the last name is.
candidates_list_temp = candidates_list_temp.order_by('candidate_name')
candidates_list_temp = candidates_list_temp.filter(election_id=1) # TODO Temp election_id
return candidates_list_temp
class CandidateCampaign(models.Model):
# The id_we_vote identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "cand", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.id_we_vote_last_candidate_campaign_integer
id_we_vote = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
id_maplight = models.CharField(
verbose_name="maplight candidate id", max_length=255, default=None, null=True, blank=True, unique=True)
# election link to local We Vote Election entry. During setup we need to allow this to be null.
election_id = models.IntegerField(verbose_name="election unique identifier", null=True, blank=True)
# The internal We Vote id for the ContestOffice that this candidate is competing for.
# During setup we need to allow this to be null.
contest_office_id = models.CharField(
verbose_name="contest_office_id id", max_length=254, null=True, blank=True)
# politician link to local We Vote Politician entry. During setup we need to allow this to be null.
politician_id = models.IntegerField(verbose_name="politician unique identifier", null=True, blank=True)
# The candidate's name.
candidate_name = models.CharField(verbose_name="candidate name", max_length=254, null=False, blank=False)
# The full name of the party the candidate is a member of.
party = models.CharField(verbose_name="party", max_length=254, null=True, blank=True)
# A URL for a photo of the candidate.
photo_url = models.CharField(verbose_name="photoUrl", max_length=254, null=True, blank=True)
photo_url_from_maplight = models.URLField(verbose_name='candidate portrait url of candidate', blank=True, null=True)
# The order the candidate appears on the ballot for this contest.
order_on_ballot = models.CharField(verbose_name="order on ballot", max_length=254, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(
verbose_name="google civic election id", max_length=254, null=True, blank=True)
# The URL for the candidate's campaign web site.
candidate_url = models.URLField(verbose_name='website url of candidate campaign', blank=True, null=True)
facebook_url = models.URLField(verbose_name='facebook url of candidate campaign', blank=True, null=True)
twitter_url = models.URLField(verbose_name='twitter url of candidate campaign', blank=True, null=True)
google_plus_url = models.URLField(verbose_name='google plus url of candidate campaign', blank=True, null=True)
youtube_url = models.URLField(verbose_name='youtube url of candidate campaign', blank=True, null=True)
# The email address for the candidate's campaign.
email = models.CharField(verbose_name="candidate campaign email", max_length=254, null=True, blank=True)
# The voice phone number for the candidate's campaign office.
phone = models.CharField(verbose_name="candidate campaign email", max_length=254, null=True, blank=True)
def fetch_photo_url(self):
if self.photo_url_from_maplight:
return self.photo_url_from_maplight
elif self.photo_url:
return self.photo_url
else:
return ""
# "http://votersedge.org/sites/all/modules/map/modules/map_proposition/images/politicians/2662.jpg"
# else:
# politician_manager = PoliticianManager()
# return politician_manager.fetch_photo_url(self.politician_id)
# We override the save function so we can auto-generate id_we_vote
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique id_we_vote
if self.id_we_vote:
self.id_we_vote = self.id_we_vote.strip()
if self.id_we_vote == "" or self.id_we_vote is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_id_we_vote_last_candidate_campaign_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "cand" = tells us this is a unique id for a CandidateCampaign
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.id_we_vote = "wv{site_unique_id_prefix}cand{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
if self.id_maplight == "": # We want this to be unique IF there is a value, and otherwise "None"
self.id_maplight = None
super(CandidateCampaign, self).save(*args, **kwargs)
#
def mimic_google_civic_initials(name):
    modified_name = name
    for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
        modified_name = modified_name.replace(' %s ' % letter, ' %s. ' % letter)
    return modified_name
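# For example, mimic_google_civic_initials("Kamela D Harris") returns "Kamela D. Harris",
# matching the punctuated-initial style used by Google Civic (see the lookup fallbacks below).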
class CandidateCampaignManager(models.Model):
def __unicode__(self):
return "CandidateCampaignManager"
def retrieve_candidate_campaign_from_id(self, candidate_campaign_id):
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id)
def retrieve_candidate_campaign_from_id_we_vote(self, id_we_vote):
candidate_campaign_id = 0
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id, id_we_vote)
def fetch_candidate_campaign_id_from_id_we_vote(self, id_we_vote):
candidate_campaign_id = 0
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign(candidate_campaign_id, id_we_vote)
if results['success']:
return results['candidate_campaign_id']
return 0
def retrieve_candidate_campaign_from_id_maplight(self, candidate_id_maplight):
candidate_campaign_id = 0
id_we_vote = ''
candidate_campaign_manager = CandidateCampaignManager()
return candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, id_we_vote, candidate_id_maplight)
def retrieve_candidate_campaign_from_candidate_name(self, candidate_name):
candidate_campaign_id = 0
id_we_vote = ''
candidate_id_maplight = ''
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, id_we_vote, candidate_id_maplight, candidate_name)
if results['success']:
return results
# Try to modify the candidate name, and search again
        # MapLight for example will pass in "Ronald  Gold" (with a double space)
        candidate_name_try2 = candidate_name.replace('  ', ' ')
results = candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, id_we_vote, candidate_id_maplight, candidate_name_try2)
if results['success']:
return results
# MapLight also passes in "Kamela D Harris" for example, and Google Civic uses "Kamela D. Harris"
candidate_name_try3 = mimic_google_civic_initials(candidate_name)
if candidate_name_try3 != candidate_name:
results = candidate_campaign_manager.retrieve_candidate_campaign(
candidate_campaign_id, id_we_vote, candidate_id_maplight, candidate_name_try3)
if results['success']:
return results
# Otherwise return failed results
return results
# NOTE: searching by all other variables seems to return a list of objects
def retrieve_candidate_campaign(
self, candidate_campaign_id, id_we_vote=None, candidate_id_maplight=None, candidate_name=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
candidate_campaign_on_stage = CandidateCampaign()
try:
if candidate_campaign_id > 0:
candidate_campaign_on_stage = CandidateCampaign.objects.get(id=candidate_campaign_id)
candidate_campaign_id = candidate_campaign_on_stage.id
            elif id_we_vote is not None and len(id_we_vote) > 0:
                candidate_campaign_on_stage = CandidateCampaign.objects.get(id_we_vote=id_we_vote)
                candidate_campaign_id = candidate_campaign_on_stage.id
            elif candidate_id_maplight is not None and candidate_id_maplight != "":
                candidate_campaign_on_stage = CandidateCampaign.objects.get(id_maplight=candidate_id_maplight)
                candidate_campaign_id = candidate_campaign_on_stage.id
            elif candidate_name is not None and len(candidate_name) > 0:
candidate_campaign_on_stage = CandidateCampaign.objects.get(candidate_name=candidate_name)
candidate_campaign_id = candidate_campaign_on_stage.id
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
exception_multiple_object_returned = True
except CandidateCampaign.DoesNotExist as e:
exception_does_not_exist = True
results = {
'success': True if candidate_campaign_id > 0 else False,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'candidate_campaign_found': True if candidate_campaign_id > 0 else False,
'candidate_campaign_id': candidate_campaign_id,
'candidate_campaign': candidate_campaign_on_stage,
}
return results
class ContestMeasure(models.Model):
# The id_we_vote identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "meas", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.id_we_vote_last_contest_measure_integer
id_we_vote = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
id_maplight = models.CharField(verbose_name="maplight unique identifier",
max_length=254, null=True, blank=True, unique=True)
# The title of the measure (e.g. 'Proposition 42').
measure_title = models.CharField(verbose_name="measure title", max_length=254, null=False, blank=False)
# A brief description of the referendum. This field is only populated for contests of type 'Referendum'.
measure_subtitle = models.CharField(verbose_name="google civic referendum subtitle",
max_length=254, null=False, blank=False)
# A link to the referendum. This field is only populated for contests of type 'Referendum'.
measure_url = models.CharField(verbose_name="measure details url", max_length=254, null=True, blank=False)
# The We Vote unique id for the election
election_id = models.CharField(verbose_name="we vote election id", max_length=254, null=False, blank=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="election id", max_length=254, null=False, blank=False)
# ballot_placement: NOTE - even though GoogleCivicContestOffice has this field, we store this value
# in the BallotItem table instead because it is different for each voter
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="primary party", max_length=254, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=254, null=False, blank=False)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="district scope", max_length=254, null=False, blank=False)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_ocd_id = models.CharField(verbose_name="open civic data id", max_length=254, null=False, blank=False)
# We override the save function so we can auto-generate id_we_vote
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique id_we_vote
if self.id_we_vote:
self.id_we_vote = self.id_we_vote.strip()
if self.id_we_vote == "" or self.id_we_vote is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_id_we_vote_last_contest_measure_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "meas" = tells us this is a unique id for a ContestMeasure
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.id_we_vote = "wv{site_unique_id_prefix}meas{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(ContestMeasure, self).save(*args, **kwargs)
class MeasureCampaign(models.Model):
# The id_we_vote identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "meascam", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.id_we_vote_last_measure_campaign_integer
id_we_vote = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# contest_measure link
# The internal We Vote id for the ContestMeasure that this campaign taking a stance on
contest_measure_id = models.CharField(verbose_name="contest_measure unique id",
max_length=254, null=False, blank=False)
# Is the campaign attempting to pass the measure, or stop it from passing?
SUPPORT = 'S'
NEUTRAL = 'N'
OPPOSE = 'O'
STANCE_CHOICES = (
(SUPPORT, 'Support'),
(NEUTRAL, 'Neutral'),
(OPPOSE, 'Oppose'),
)
stance = models.CharField("stance", max_length=1, choices=STANCE_CHOICES, default=NEUTRAL)
# The candidate's name.
candidate_name = models.CharField(verbose_name="candidate name", max_length=254, null=False, blank=False)
# The full name of the party the candidate is a member of.
party = models.CharField(verbose_name="party", max_length=254, null=True, blank=True)
# A URL for a photo of the candidate.
photo_url = models.CharField(verbose_name="photoUrl", max_length=254, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google election id",
max_length=254, null=False, blank=False)
# The URL for the candidate's campaign web site.
url = models.URLField(verbose_name='website url of campaign', blank=True, null=True)
facebook_url = models.URLField(verbose_name='facebook url of campaign', blank=True, null=True)
twitter_url = models.URLField(verbose_name='twitter url of campaign', blank=True, null=True)
google_plus_url = models.URLField(verbose_name='google plus url of campaign', blank=True, null=True)
youtube_url = models.URLField(verbose_name='youtube url of campaign', blank=True, null=True)
# The email address for the candidate's campaign.
email = models.CharField(verbose_name="campaign email", max_length=254, null=True, blank=True)
# The voice phone number for the campaign office.
    phone = models.CharField(verbose_name="campaign phone", max_length=254, null=True, blank=True)
# We override the save function so we can auto-generate id_we_vote
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique id_we_vote
if self.id_we_vote:
self.id_we_vote = self.id_we_vote.strip()
if self.id_we_vote == "" or self.id_we_vote is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_id_we_vote_last_measure_campaign_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "meascam" = tells us this is a unique id for a MeasureCampaign
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.id_we_vote = "wv{site_unique_id_prefix}meascam{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(MeasureCampaign, self).save(*args, **kwargs)
class BallotItem(models.Model):
"""
This is a generated table with ballot item data from a variety of sources, including Google Civic
(and MapLight, Ballot API Code for America project, and Azavea Cicero in the future)
"""
# The unique id of the voter
voter_id = models.IntegerField(verbose_name="the voter unique id", default=0, null=False, blank=False)
# The We Vote unique ID of this election
election_id = models.CharField(verbose_name="election id", max_length=20, null=True)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id", max_length=20, null=True)
# The internal We Vote id for the ContestOffice that this candidate is competing for
contest_office_id = models.CharField(verbose_name="contest_office_id id", max_length=254, null=True, blank=True)
# The internal We Vote id for the ContestMeasure that this campaign taking a stance on
contest_measure_id = models.CharField(
verbose_name="contest_measure unique id", max_length=254, null=True, blank=True)
ballot_order = models.SmallIntegerField(
verbose_name="the order this item should appear on the ballot", null=True, blank=True)
# This is a sortable name
ballot_item_label = models.CharField(verbose_name="a label we can sort by", max_length=254, null=True, blank=True)
def is_contest_office(self):
if self.contest_office_id:
return True
return False
def is_contest_measure(self):
if self.contest_measure_id:
return True
return False
def display_ballot_item(self):
return self.ballot_item_label
def candidates_list(self):
candidates_list_temp = CandidateCampaign.objects.all()
candidates_list_temp = candidates_list_temp.filter(election_id=self.election_id)
candidates_list_temp = candidates_list_temp.filter(contest_office_id=self.contest_office_id)
return candidates_list_temp
class BallotItemManager(models.Model):
def retrieve_all_ballot_items_for_voter(self, voter_id, election_id=0):
ballot_item_list = BallotItem.objects.order_by('ballot_order')
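        # NOTE: this early implementation ignores voter_id and election_id and simply returns every
        # BallotItem ordered by ballot_order; the parameters are only echoed back in the results dict.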
results = {
'election_id': election_id,
'voter_id': voter_id,
'ballot_item_list': ballot_item_list,
}
return results
# NOTE: This method only needs to hit the database at most once per day.
# We should cache the results in a JSON file that gets cached on the server and locally in the
# voter's browser for speed.
def retrieve_my_ballot(voter_on_stage, election_on_stage):
# Retrieve all of the jurisdictions the voter is in
# Retrieve all of the office_contests in each of those jurisdictions
# Retrieve all of the measure_contests in each of those jurisdictions
return True
from mock import Mock
import numpy
import pytest
import theano
class TestLayer:
@pytest.fixture
def layer(self):
from nntools.layers.base import Layer
return Layer(Mock())
def test_get_output_shape(self, layer):
assert layer.get_output_shape() == layer.input_layer.get_output_shape()
def test_get_output_without_arguments(self, layer):
layer.get_output_for = Mock()
output = layer.get_output()
assert output is layer.get_output_for.return_value
layer.get_output_for.assert_called_with(
layer.input_layer.get_output.return_value)
layer.input_layer.get_output.assert_called_with(None)
def test_get_output_passes_on_arguments_to_input_layer(self, layer):
input, arg, kwarg = object(), object(), object()
layer.get_output_for = Mock()
output = layer.get_output(input, arg, kwarg=kwarg)
assert output is layer.get_output_for.return_value
layer.get_output_for.assert_called_with(
layer.input_layer.get_output.return_value, arg, kwarg=kwarg)
layer.input_layer.get_output.assert_called_with(
input, arg, kwarg=kwarg)
def test_get_output_input_is_a_mapping(self, layer):
input = {layer: object()}
assert layer.get_output(input) is input[layer]
def test_get_output_input_is_a_mapping_no_key(self, layer):
layer.get_output_for = Mock()
output = layer.get_output({})
assert output is layer.get_output_for.return_value
def test_create_param_numpy_bad_shape_raises_error(self, layer):
param = numpy.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(RuntimeError):
layer.create_param(param, (3, 2))
def test_create_param_numpy_returns_shared(self, layer):
param = numpy.array([[1, 2, 3], [4, 5, 6]])
result = layer.create_param(param, (2, 3))
assert (result.get_value() == param).all()
assert isinstance(result, type(theano.shared(param)))
assert (result.get_value() == param).all()
def test_create_param_shared_returns_same(self, layer):
param = theano.shared(numpy.array([[1, 2, 3], [4, 5, 6]]))
result = layer.create_param(param, (2, 3))
assert result is param
def test_create_param_callable_returns_return_value(self, layer):
array = numpy.array([[1, 2, 3], [4, 5, 6]])
factory = Mock()
factory.return_value = array
result = layer.create_param(factory, (2, 3))
assert (result.get_value() == array).all()
factory.assert_called_with((2, 3))
class TestMultipleInputsLayer:
@pytest.fixture
def layer(self):
from nntools.layers.base import MultipleInputsLayer
return MultipleInputsLayer([Mock(), Mock()])
def test_get_output_shape(self, layer):
layer.get_output_shape_for = Mock()
result = layer.get_output_shape()
assert result is layer.get_output_shape_for.return_value
layer.get_output_shape_for.assert_called_with([
layer.input_layers[0].get_output_shape.return_value,
layer.input_layers[1].get_output_shape.return_value,
])
def test_get_output_without_arguments(self, layer):
layer.get_output_for = Mock()
output = layer.get_output()
assert output is layer.get_output_for.return_value
layer.get_output_for.assert_called_with([
layer.input_layers[0].get_output.return_value,
layer.input_layers[1].get_output.return_value,
])
layer.input_layers[0].get_output.assert_called_with(None)
layer.input_layers[1].get_output.assert_called_with(None)
def test_get_output_passes_on_arguments_to_input_layer(self, layer):
input, arg, kwarg = object(), object(), object()
layer.get_output_for = Mock()
output = layer.get_output(input, arg, kwarg=kwarg)
assert output is layer.get_output_for.return_value
layer.get_output_for.assert_called_with([
layer.input_layers[0].get_output.return_value,
layer.input_layers[1].get_output.return_value,
], arg, kwarg=kwarg)
layer.input_layers[0].get_output.assert_called_with(
input, arg, kwarg=kwarg)
layer.input_layers[1].get_output.assert_called_with(
input, arg, kwarg=kwarg)
def test_get_output_input_is_a_mapping(self, layer):
input = {layer: object()}
assert layer.get_output(input) is input[layer]
def test_get_output_input_is_a_mapping_no_key(self, layer):
layer.get_output_for = Mock()
output = layer.get_output({})
assert output is layer.get_output_for.return_value
class TestInputLayer:
@pytest.fixture
def layer(self):
from nntools.layers.base import InputLayer
return InputLayer((3, 2))
def test_input_var(self, layer):
assert layer.input_var.ndim == 2
def test_get_output_shape(self, layer):
assert layer.get_output_shape() == (3, 2)
def test_get_output_without_arguments(self, layer):
assert layer.get_output() is layer.input_var
def test_get_output_input_is_variable(self, layer):
variable = theano.Variable("myvariable")
assert layer.get_output(variable) is variable
def test_get_output_input_is_a_mapping(self, layer):
input = {layer: object()}
assert layer.get_output(input) is input[layer]
class TestDenseLayer:
@pytest.fixture
def layer_vars(self):
from nntools.layers.base import DenseLayer
input_layer = Mock()
W = Mock()
b = Mock()
nonlinearity = Mock()
input_layer.get_output_shape.return_value = (2, 3, 4)
W.return_value = numpy.ones((12, 3))
b.return_value = numpy.ones((3,)) * 3
layer = DenseLayer(
input_layer=input_layer,
num_units=3,
W=W,
b=b,
nonlinearity=nonlinearity,
)
return {
'W': W,
'b': b,
'nonlinearity': nonlinearity,
'layer': layer,
}
@pytest.fixture
def layer(self, layer_vars):
return layer_vars['layer']
def test_init(self, layer_vars):
layer = layer_vars['layer']
assert (layer.W.get_value() == layer_vars['W'].return_value).all()
assert (layer.b.get_value() == layer_vars['b'].return_value).all()
layer_vars['W'].assert_called_with((12, 3))
layer_vars['b'].assert_called_with((3,))
def test_get_params(self, layer):
assert layer.get_params() == [layer.W, layer.b]
def test_get_bias_params(self, layer):
assert layer.get_bias_params() == [layer.b]
def test_get_output_shape_for(self, layer):
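        # num_units is 3 in the fixture, and all trailing input dimensions are flattened,
        # so the expected output shape is (batch_size, num_units) == (5, 3)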
assert layer.get_output_shape_for((5, 6, 7)) == (5, 3)
def test_get_output_for(self, layer_vars):
layer = layer_vars['layer']
nonlinearity = layer_vars['nonlinearity']
W = layer_vars['W']()
b = layer_vars['b']()
input = theano.shared(numpy.ones((2, 12)))
result = layer.get_output_for(input)
assert result is nonlinearity.return_value
# Check that the input to the nonlinearity was what we expect
# from dense layer, i.e. the dot product plus bias
nonlinearity_arg = nonlinearity.call_args[0][0]
assert (nonlinearity_arg.eval() ==
numpy.dot(input.get_value(), W) + b).all()
def test_get_output_for_flattens_input(self, layer_vars):
layer = layer_vars['layer']
nonlinearity = layer_vars['nonlinearity']
W = layer_vars['W']()
b = layer_vars['b']()
input = theano.shared(numpy.ones((2, 3, 4)))
result = layer.get_output_for(input)
assert result is nonlinearity.return_value
# Check that the input to the nonlinearity was what we expect
# from dense layer, i.e. the dot product plus bias
nonlinearity_arg = nonlinearity.call_args[0][0]
assert (nonlinearity_arg.eval() ==
numpy.dot(input.get_value().reshape(2, -1), W) + b).all()
class TestDropoutLayer:
@pytest.fixture
def layer(self):
from nntools.layers.base import DropoutLayer
return DropoutLayer(Mock())
@pytest.fixture
def layer_no_rescale(self):
from nntools.layers.base import DropoutLayer
return DropoutLayer(Mock(), rescale=False)
@pytest.fixture
def layer_p_02(self):
from nntools.layers.base import DropoutLayer
return DropoutLayer(Mock(), p=0.2)
def test_get_output_for_non_deterministic(self, layer):
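        # With rescaling enabled (the default), kept activations are scaled by 1 / (1 - p);
        # the expected values {0., 2.} below imply the default dropout probability p = 0.5.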
input = theano.shared(numpy.ones((100, 100)))
result = layer.get_output_for(input)
result_eval = result.eval()
assert 0.99 < result_eval.mean() < 1.01
assert (numpy.unique(result_eval) == [0., 2.]).all()
def test_get_output_for_deterministic(self, layer):
input = theano.shared(numpy.ones((100, 100)))
result = layer.get_output_for(input, deterministic=True)
result_eval = result.eval()
assert (result_eval == input.get_value()).all()
def test_get_output_for_no_rescale(self, layer_no_rescale):
input = theano.shared(numpy.ones((100, 100)))
result = layer_no_rescale.get_output_for(input)
result_eval = result.eval()
assert 0.49 < result_eval.mean() < 0.51
assert (numpy.unique(result_eval) == [0., 1.]).all()
def test_get_output_for_p_02(self, layer_p_02):
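        # With p=0.2 the kept activations are rescaled by 1 / (1 - 0.2) = 1.25,
        # hence the expected unique values {0., 1.25}.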
input = theano.shared(numpy.ones((100, 100)))
result = layer_p_02.get_output_for(input)
result_eval = result.eval()
assert 0.99 < result_eval.mean() < 1.01
assert (numpy.round(numpy.unique(result_eval), 2) == [0., 1.25]).all()
class TestGaussianNoiseLayer:
@pytest.fixture
def layer(self):
from nntools.layers.base import GaussianNoiseLayer
return GaussianNoiseLayer(Mock())
def test_get_output_for_non_deterministic(self, layer):
input = theano.shared(numpy.ones((100, 100)))
result = layer.get_output_for(input, deterministic=False)
result_eval = result.eval()
assert (result_eval != input.eval()).all()
assert result_eval.mean() != 1.0
assert numpy.round(result_eval.mean()) == 1.0
def test_get_output_for_deterministic(self, layer):
input = theano.shared(numpy.ones((3, 3)))
result = layer.get_output_for(input, deterministic=True)
result_eval = result.eval()
assert (result_eval == input.eval()).all()
class TestConcatLayer:
@pytest.fixture
def layer(self):
from nntools.layers.base import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=1)
def test_get_output_for(self, layer):
inputs = [theano.shared(numpy.ones((3, 3))),
theano.shared(numpy.ones((3, 2)))]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = numpy.hstack([input.get_value() for input in inputs])
assert (result_eval == desired_result).all()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import sys
import os
import fcntl
import json
import logging
import datetime
import io
from string import Template
from shutil import copyfile
import numpy as np
import skimage
import skimage.io
import skimage.exposure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.animation import FuncAnimation, PillowWriter
import matplotlib.colors as color
from spinalcordtoolbox.image import Image
import spinalcordtoolbox.reports.slice as qcslice
from spinalcordtoolbox.utils import sct_dir_local_path, list2cmdline, __version__, copy, extract_fname
logger = logging.getLogger(__name__)
class QcImage(object):
"""
Class used to create a .png file from a 2d image produced by the class "Slice"
"""
_labels_regions = {'PONS': 50, 'MO': 51,
'C1': 1, 'C2': 2, 'C3': 3, 'C4': 4, 'C5': 5, 'C6': 6, 'C7': 7,
'T1': 8, 'T2': 9, 'T3': 10, 'T4': 11, 'T5': 12, 'T6': 13, 'T7': 14, 'T8': 15, 'T9': 16,
'T10': 17, 'T11': 18, 'T12': 19,
'L1': 20, 'L2': 21, 'L3': 22, 'L4': 23, 'L5': 24,
'S1': 25, 'S2': 26, 'S3': 27, 'S4': 28, 'S5': 29,
'Co': 30}
_color_bin_green = ["#ffffff", "#00ff00"]
_color_bin_red = ["#ffffff", "#ff0000"]
_labels_color = ["#04663c", "#ff0000", "#50ff30",
"#ed1339", "#ffffff", "#e002e8",
"#ffee00", "#00c7ff", "#199f26",
"#563691", "#848545", "#ce2fe1",
"#2142a6", "#3edd76", "#c4c253",
"#e8618a", "#3128a3", "#1a41db",
"#939e41", "#3bec02", "#1c2c79",
"#18584e", "#b49992", "#e9e73a",
"#3b0e6e", "#6e856f", "#637394",
"#36e05b", "#530a1f", "#8179c4",
"#e1320c", "#52a4df", "#000ab5",
"#4a4242", "#0b53a5", "#b49c19",
"#50e7a9", "#bf5a42", "#fa8d8e",
"#83839a", "#320fef", "#82ffbf",
"#360ee7", "#551960", "#11371e",
"#e900c3", "#a21360", "#58a601",
"#811c90", "#235acf", "#49395d",
"#9f89b0", "#e08e08", "#3d2b54",
"#7d0434", "#fb1849", "#14aab4",
"#a22abd", "#d58240", "#ac2aff"]
_seg_colormap = ["#4d0000", "#ff0000"]
_ctl_colormap = ["#ff000099", '#ffff00']
def __init__(self, qc_report, interpolation, action_list, process, stretch_contrast=True,
stretch_contrast_method='contrast_stretching', angle_line=None, fps=None):
"""
:param qc_report: QcReport: The QC report object
:param interpolation: str: Type of interpolation used in matplotlib
:param action_list: list: List of functions that generates a specific type of images
:param process: str: Name of SCT function. e.g., sct_propseg
:param stretch_contrast: adjust image so as to improve contrast
:param stretch_contrast_method: str: {'contrast_stretching', 'equalized'}: Method for stretching contrast
:param angle_line: float: See generate_qc()
:param fps: float: Number of frames per second for output gif images. It is only used for sct_fmri_moco and\
sct_dmri_moco
"""
self.qc_report = qc_report
self.interpolation = interpolation
self.action_list = action_list
self.process = process
self._stretch_contrast = stretch_contrast
self._stretch_contrast_method = stretch_contrast_method
if stretch_contrast_method not in ['equalized', 'contrast_stretching']:
raise ValueError("Unrecognized stretch_contrast_method: {}.".format(stretch_contrast_method),
"Try 'equalized' or 'contrast_stretching'")
self._angle_line = angle_line
self._fps = fps
self._centermass = None # center of mass returned by slice.Axial.get_center()
"""
    action_list contains the list of images that have to be generated.
    They can be seen as the matplotlib "figures" to be shown
Ex: if 'colorbar' is in the list, the process will generate a color bar in the "img" folder
"""
def line_angle(self, mask, ax):
"""Create figure with line superposed over each mosaic square. The line has an angle encoded in the
argument self._angle_line"""
angles = np.full_like(np.zeros(len(self._centermass)), np.nan)
angles[0:len(self._angle_line)] = self._angle_line
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
for nslice, center_mosaic in enumerate(self._centermass):
if np.isnan(angles[nslice]):
pass
else:
x0, y0 = center_mosaic[0], center_mosaic[1]
angle = angles[nslice]
if not (-np.pi <= angle <= np.pi):
raise Exception("angle prompted for angle_line not in the range [-pi pi]")
x_min, y_min = x0 - 10, y0 - 10
x_max, y_max = x0 + 10, y0 + 10
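                # Parametrize the segment by y (solve for x) when the line is within 45 degrees of
                # vertical (angle near 0 or +/-pi); otherwise parametrize by x (solve for y).
                # This keeps the tan(angle) term numerically safe in both branches.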
if -np.pi / 4 < angle <= np.pi / 4 or -np.pi <= angle <= -3 * np.pi / 4 or 3 * np.pi / 4 < angle <= np.pi:
y1 = y_min
y2 = y_max
x1 = (y_min - y0) * np.tan(angle) + x0
x2 = (y_max - y0) * np.tan(angle) + x0
else:
x1 = x_min
x2 = x_max
y1 = y0 + (x_min - x0) / np.tan(angle)
y2 = y0 + (x_max - x0) / np.tan(angle)
ax.plot([x1, x2], [y1, y2], '-', color='red', linewidth=0.7)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def listed_seg(self, mask, ax):
"""Create figure with red segmentation. Common scenario."""
img = np.ma.masked_equal(mask, 0)
ax.imshow(img,
cmap=color.LinearSegmentedColormap.from_list("", self._seg_colormap),
norm=color.Normalize(vmin=0.5, vmax=1),
interpolation=self.interpolation,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def template(self, mask, ax):
"""Show template statistical atlas"""
values = mask
values[values < 0.5] = 0
color_white = color.colorConverter.to_rgba('white', alpha=0.0)
color_blue = color.colorConverter.to_rgba('blue', alpha=0.7)
color_cyan = color.colorConverter.to_rgba('cyan', alpha=0.8)
cmap = color.LinearSegmentedColormap.from_list('cmap_atlas',
[color_white, color_blue, color_cyan], N=256)
ax.imshow(values,
cmap=cmap,
interpolation=self.interpolation,
aspect=self.aspect_mask)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def no_seg_seg(self, mask, ax):
"""Create figure with image overlay. Notably used by sct_registration_to_template"""
ax.imshow(mask, cmap='gray', interpolation=self.interpolation, aspect=self.aspect_mask)
self._add_orientation_label(ax)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def sequential_seg(self, mask, ax):
values = np.ma.masked_equal(np.rint(mask), 0)
ax.imshow(values,
cmap=self._seg_colormap,
interpolation=self.interpolation,
aspect=self.aspect_mask)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def label_utils(self, mask, ax):
"""Create figure with red label. Common scenario."""
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
non_null_vox = np.where(mask > 0)
coord_labels = list(zip(non_null_vox[0], non_null_vox[1]))
logger.debug(coord_labels)
# compute horizontal offset based on the resolution of the mask
horiz_offset = mask.shape[1] / 50
for coord in coord_labels:
ax.plot(coord[1], coord[0], 'o', color='lime', markersize=5)
ax.text(coord[1] + horiz_offset, coord[0], str(round(mask[coord[0], coord[1]])), color='lime', fontsize=15,
verticalalignment='center', clip_on=True)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def label_vertebrae(self, mask, ax):
"""Draw vertebrae areas, then add text showing the vertebrae names"""
from matplotlib import colors
import scipy.ndimage
img = np.rint(np.ma.masked_where(mask < 1, mask))
ax.imshow(img,
cmap=colors.ListedColormap(self._labels_color),
norm=colors.Normalize(vmin=0, vmax=len(self._labels_color)),
interpolation=self.interpolation,
alpha=1,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
a = [0.0]
data = mask
for index, val in np.ndenumerate(data):
if val not in a:
a.append(val)
index = int(val)
if index in self._labels_regions.values():
color = self._labels_color[index]
y, x = scipy.ndimage.measurements.center_of_mass(np.where(data == val, data, 0))
# Draw text with a shadow
x += 10
label = list(self._labels_regions.keys())[list(self._labels_regions.values()).index(index)]
ax.text(x, y, label, color='black', clip_on=True)
x -= 0.5
y -= 0.5
ax.text(x, y, label, color=color, clip_on=True)
def highlight_pmj(self, mask, ax):
"""Hook to show a rectangle where PMJ is on the slice"""
y, x = np.where(mask == 50)
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
ax.plot(x, y, 'x', color='lime', markersize=6)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def label_centerline(self, mask, ax):
"""Create figure with red label. Common scenario."""
results_mask_pixels = np.where(mask > 0)
# TODO: maybe we only need one pixel per centerline (currently, it's a 1x2 matrix of pixels)
listOfCoordinates = list(zip(results_mask_pixels[0], results_mask_pixels[1]))
for cord in listOfCoordinates:
ax.plot(cord[1], cord[0], 'ro', markersize=1)
# ax.text(cord[1]+5,cord[0]+5, str(mask[cord]), color='lime', clip_on=True)
img = np.rint(np.ma.masked_where(mask < 1, mask))
ax.imshow(img,
cmap=color.ListedColormap(self._color_bin_red),
norm=color.Normalize(vmin=0, vmax=1),
interpolation=self.interpolation,
alpha=10,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def vertical_line(self, mask, ax):
"""Centered vertical line to assess quality of straightening"""
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
ax.axvline(x=img.shape[1] / 2.0, color='r', linewidth=2)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def grid(self, mask, ax):
"""Centered grid to assess quality of motion correction"""
grid = np.full_like(mask, 0)
ax.imshow(grid, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
for center_mosaic in self._centermass:
x0, y0 = center_mosaic[0], center_mosaic[1]
ax.axvline(x=x0, color='w', linestyle='-', linewidth=0.5)
ax.axhline(y=y0, color='w', linestyle='-', linewidth=0.5)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def smooth_centerline(self, mask, ax):
"""Display smoothed centerline"""
mask = mask/mask.max()
        mask[mask < 0.05] = 0  # Apply 0.05 threshold
img = np.ma.masked_equal(mask, 0)
ax.imshow(img,
cmap=color.LinearSegmentedColormap.from_list("", self._ctl_colormap),
norm=color.Normalize(vmin=0, vmax=1),
interpolation=self.interpolation,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# def colorbar(self):
# fig = plt.figure(figsize=(9, 1.5))
# ax = fig.add_axes([0.05, 0.80, 0.9, 0.15])
# colorbar.ColorbarBase(ax, cmap=self._seg_colormap, orientation='horizontal')
# return '{}_colorbar'.format(self.qc_report.img_base_name)
def __call__(self, func):
"""wrapped function (f).
In this case, it is the "mosaic" or "single" methods of the class "Slice"
"""
def wrapped_f(sct_slice, *args):
"""
:param sct_slice: spinalcordtoolbox.report.slice:Slice
:param args: list: list of args
"""
self.qc_report.slice_name = sct_slice.get_name()
# Get the aspect ratio (height/width) based on pixel size. Consider only the first 2 slices.
self.aspect_img, self.aspect_mask = sct_slice.aspect()[:2]
self.qc_report.make_content_path()
logger.info('QcImage: %s with %s slice', func.__name__, sct_slice.get_name())
if self.process in ['sct_fmri_moco', 'sct_dmri_moco']:
[images_after_moco, images_before_moco], centermass = func(sct_slice, *args)
self._centermass = centermass
self._make_QC_image_for_4d_volumes(images_after_moco, images_before_moco)
else:
if self._angle_line is None:
img, *mask = func(sct_slice, *args)
else:
[img, mask], centermass = func(sct_slice, *args)
self._centermass = centermass
self._make_QC_image_for_3d_volumes(img, mask, slice_orientation=sct_slice.get_name())
return wrapped_f
def _make_QC_image_for_3d_volumes(self, img, mask, slice_orientation):
"""
Create overlay and background images for all processes that deal with 3d volumes
(all except sct_fmri_moco and sct_dmri_moco)
:param img: The base image to display underneath the overlays (typically anatomical)
:param mask: A list of images to be processed and overlaid on top of `img`
:return:
"""
if self._stretch_contrast:
img = self._func_stretch_contrast(img)
# if axial mosaic restrict width
if slice_orientation == 'Axial':
size_fig = [5, 5 * img.shape[0] / img.shape[1]] # with dpi=300, will give 1500pix width
# if sagittal orientation restrict height
elif slice_orientation == 'Sagittal':
size_fig = [5 * img.shape[1] / img.shape[0], 5]
fig = Figure()
fig.set_size_inches(size_fig[0], size_fig[1], forward=True)
FigureCanvas(fig)
ax = fig.add_axes((0, 0, 1, 1))
ax.imshow(img, cmap='gray', interpolation=self.interpolation, aspect=float(self.aspect_img))
self._add_orientation_label(ax)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
logger.info(self.qc_report.qc_params.abs_bkg_img_path())
self._save(fig, self.qc_report.qc_params.abs_bkg_img_path(), dpi=self.qc_report.qc_params.dpi)
fig = Figure()
fig.set_size_inches(size_fig[0], size_fig[1], forward=True)
FigureCanvas(fig)
for i, action in enumerate(self.action_list):
logger.debug('Action List %s', action.__name__)
if self._stretch_contrast and action.__name__ in ("no_seg_seg",):
print("Mask type %s" % mask[i].dtype)
mask[i] = self._func_stretch_contrast(mask[i])
ax = fig.add_axes((0, 0, 1, 1), label=str(i))
action(self, mask[i], ax)
self._save(fig, self.qc_report.qc_params.abs_overlay_img_path(), dpi=self.qc_report.qc_params.dpi)
self.qc_report.update_description_file(img.shape)
def _make_QC_image_for_4d_volumes(self, images_after_moco, images_before_moco):
"""
Generate background and overlay gifs for sct_fmri_moco and sct_dmri_moco
:param images_after_moco: list of mosaic images after motion correction
:param images_before_moco: list of mosaic images before motion correction
:return:
"""
size_fig = [5, 10 * images_after_moco[0].shape[0] / images_after_moco[0].shape[1] + 0.5]
if self._stretch_contrast:
for i in range(len(images_after_moco)):
images_after_moco[i] = self._func_stretch_contrast(images_after_moco[i])
images_before_moco[i] = self._func_stretch_contrast(images_before_moco[i])
self._generate_and_save_gif(images_before_moco, images_after_moco, size_fig)
self._generate_and_save_gif(images_before_moco, images_after_moco, size_fig, is_mask=True)
w, h = (self.qc_report.qc_params.dpi * size_fig[0], self.qc_report.qc_params.dpi * size_fig[1])
self.qc_report.update_description_file((w, h))
def _func_stretch_contrast(self, img):
if self._stretch_contrast_method == "equalized":
return self._equalize_histogram(img)
else: # stretch_contrast_method == "contrast_stretching":
return self._stretch_intensity_levels(img)
def _stretch_intensity_levels(self, img):
p2, p98 = np.percentile(img, (2, 98))
return skimage.exposure.rescale_intensity(img, in_range=(p2, p98))
def _equalize_histogram(self, img):
"""
Perform histogram equalization using CLAHE
Notes:
- Image value range is preserved
- Workaround for adapthist artifact by padding (#1664)
"""
winsize = 16
min_, max_ = img.min(), img.max()
b = (np.float32(img) - min_) / (max_ - min_)
b[b >= 1] = 1 # 1+eps numerical error may happen (#1691)
h, w = b.shape
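        # Pad the image up to the next multiple of winsize so that equalize_adapthist tiles the
        # image evenly (padding workaround for the adapthist artifact mentioned in the docstring, #1664).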
h1 = (h + (winsize - 1)) // winsize * winsize
w1 = (w + (winsize - 1)) // winsize * winsize
if h != h1 or w != w1:
b1 = np.zeros((h1, w1), dtype=b.dtype)
b1[:h, :w] = b
b = b1
c = skimage.exposure.equalize_adapthist(b, kernel_size=(winsize, winsize))
if h != h1 or w != w1:
c = c[:h, :w]
return np.array(c * (max_ - min_) + min_, dtype=img.dtype)
def _add_orientation_label(self, ax):
"""
Add orientation labels on the figure
        :param ax: MPL axes handler
:return:
"""
if self.qc_report.qc_params.orientation == 'Axial':
# If mosaic of axial slices, display orientation labels
ax.text(12, 6, 'A', color='yellow', size=4)
ax.text(12, 28, 'P', color='yellow', size=4)
ax.text(0, 18, 'L', color='yellow', size=4)
ax.text(24, 18, 'R', color='yellow', size=4)
def _generate_and_save_gif(self, top_images, bottom_images, size_fig, is_mask=False):
"""
Create figure with two images for sct_fmri_moco and sct_dmri_moco and save gif
:param top_images: list of images of mosaic before motion correction
:param bottom_images: list of images of mosaic after motion correction
:param size_fig: size of figure in inches
:param is_mask: display grid on top of mosaic
:return:
"""
if is_mask:
aspect = self.aspect_mask
else:
aspect = self.aspect_img
fig = Figure()
FigureCanvas(fig)
fig.set_size_inches(size_fig[0], size_fig[1], forward=True)
fig.subplots_adjust(left=0, top=0.9, bottom=0.1)
ax1 = fig.add_subplot(211)
null_image = np.zeros(np.shape(top_images[0]))
img1 = ax1.imshow(null_image, cmap='gray', aspect=float(aspect))
ax1.set_title('Before motion correction', fontsize=8, loc='left', pad=2)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
self._add_orientation_label(ax1)
if is_mask:
QcImage.grid(self, top_images[0], ax1)
ax2 = fig.add_subplot(212)
img2 = ax2.imshow(null_image, cmap='gray', aspect=float(aspect))
ax2.set_title('After motion correction', fontsize=8, loc='left', pad=2)
ax2.get_xaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)
self._add_orientation_label(ax2)
if is_mask:
QcImage.grid(self, bottom_images[0], ax2)
ann = ax2.annotate('', xy=(0, .025), xycoords='figure fraction', horizontalalignment='left',
verticalalignment='bottom', fontsize=6)
def update_figure(i):
img1.set_data(top_images[i])
img1.set_clim(vmin=np.amin(top_images[i]), vmax=np.amax(top_images[i]))
img2.set_data(bottom_images[i])
img2.set_clim(vmin=np.amin(bottom_images[i]), vmax=np.amax(bottom_images[i]))
ann.set_text(f'Volume: {i + 1}/{len(top_images)}')
# FuncAnimation creates an animation by repeatedly calling the function update_figure for each frame
ani = FuncAnimation(fig, update_figure, frames=len(top_images))
if is_mask:
gif_out_path = self.qc_report.qc_params.abs_overlay_img_path()
else:
gif_out_path = self.qc_report.qc_params.abs_bkg_img_path()
if self._fps is None:
self._fps = 3
writer = PillowWriter(self._fps)
logger.info('Saving gif %s', gif_out_path)
ani.save(gif_out_path, writer=writer, dpi=self.qc_report.qc_params.dpi)
def _save(self, fig, img_path, format='png', bbox_inches='tight', pad_inches=0.00, dpi=300):
"""
Save the current figure into an image.
:param fig: Figure handler
        :param img_path: str: path of the image file to be saved
:param format: str: image format
:param bbox_inches: str
:param pad_inches: float
:param dpi: int: Output resolution of the image
:return:
"""
logger.debug('Save image %s', img_path)
fig.savefig(img_path,
format=format,
bbox_inches=None,
transparent=True,
dpi=dpi)
class Params(object):
"""Parses and stores the variables that will be included into the QC details
"""
def __init__(self, input_file, command, args, orientation, dest_folder, dpi=300, dataset=None, subject=None):
"""
:param input_file: str: the input nifti file name
:param command: str: command name
:param args: str: the command's arguments
:param orientation: str: The anatomical orientation
:param dest_folder: str: The absolute path of the QC root
:param dpi: int: Output resolution of the image
:param dataset: str: Dataset name
:param subject: str: Subject name
"""
path_in, file_in, ext_in = extract_fname(os.path.abspath(input_file))
# Assuming BIDS convention, we derive the value of the dataset, subject and contrast from the `input_file`
# by splitting it into `[dataset]/[subject]/[contrast]/input_file`
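        # e.g. for a hypothetical input /data/my_dataset/sub-01/anat/t2.nii.gz this yields
        # contrast='anat', subject='sub-01' and dataset='my_dataset'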
abs_input_path, contrast = os.path.split(path_in)
abs_input_path, subject_tmp = os.path.split(abs_input_path)
_, dataset_tmp = os.path.split(abs_input_path)
if dataset is None:
dataset = dataset_tmp
if subject is None:
subject = subject_tmp
if isinstance(args, list):
args = list2cmdline(args)
self.fname_in = file_in + ext_in
self.dataset = dataset
self.subject = subject
self.cwd = os.getcwd()
self.contrast = contrast
self.command = command
self.sct_version = __version__
self.args = args
self.orientation = orientation
self.dpi = dpi
self.root_folder = dest_folder
self.mod_date = datetime.datetime.strftime(datetime.datetime.now(), '%Y_%m_%d_%H%M%S.%f')
self.qc_results = os.path.join(dest_folder, '_json/qc_' + self.mod_date + '.json')
if command in ['sct_fmri_moco', 'sct_dmri_moco']:
ext = "gif"
else:
ext = "png"
self.bkg_img_path = os.path.join(dataset, subject, contrast, command, self.mod_date, f"bkg_img.{ext}")
self.overlay_img_path = os.path.join(dataset, subject, contrast, command, self.mod_date, f"overlay_img.{ext}")
def abs_bkg_img_path(self):
return os.path.join(self.root_folder, self.bkg_img_path)
def abs_overlay_img_path(self):
return os.path.join(self.root_folder, self.overlay_img_path)
class QcReport(object):
"""This class generates the quality control report.
    It will also set up the folder structure so the report generator only needs to fetch the appropriate files.
"""
def __init__(self, qc_params, usage):
"""
Parameters
:param qc_params: arguments of the "-param-qc" option in Terminal
:param usage: str: description of the process
"""
self.tool_name = qc_params.command
self.slice_name = qc_params.orientation
self.qc_params = qc_params
self.usage = usage
self.assets_folder = sct_dir_local_path('assets')
self.img_base_name = 'bkg_img'
self.description_base_name = "qc_results"
def make_content_path(self):
"""Creates the whole directory to contain the QC report
:return: return "root folder of the report" and the "furthest folder path" containing the images
"""
# make a new or update Qc directory
target_img_folder = os.path.dirname(self.qc_params.abs_bkg_img_path())
try:
os.makedirs(target_img_folder, exist_ok=True)
except OSError as err:
if not os.path.isdir(target_img_folder):
raise err
def update_description_file(self, dimension):
"""Create the description file with a JSON structure
        :param dimension: 2-tuple, the dimension of the image frame (w, h)
"""
output = {
'python': sys.executable,
'cwd': self.qc_params.cwd,
'cmdline': "{} {}".format(self.qc_params.command, self.qc_params.args),
'command': self.qc_params.command,
'sct_version': self.qc_params.sct_version,
'dataset': self.qc_params.dataset,
'subject': self.qc_params.subject,
'contrast': self.qc_params.contrast,
'fname_in': self.qc_params.fname_in,
'orientation': self.qc_params.orientation,
'background_img': self.qc_params.bkg_img_path,
'overlay_img': self.qc_params.overlay_img_path,
'dimension': '%dx%d' % dimension,
'moddate': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'qc': ""
}
logger.debug('Description file: %s', self.qc_params.qc_results)
# results = []
# Create path to store json files
path_json, _ = os.path.split(self.qc_params.qc_results)
if not os.path.exists(path_json):
os.makedirs(path_json, exist_ok=True)
# lock the output directory
# because this code may be run in parallel
path_json_fd = os.open(path_json, os.O_RDONLY)
fcntl.flock(path_json_fd, fcntl.LOCK_EX)
try:
# Create json file
with open(self.qc_params.qc_results, 'w+') as qc_file:
json.dump(output, qc_file, indent=1)
self._update_html_assets(get_json_data_from_path(path_json))
finally:
# fcntl.flock(path_json_fd, fcntl.LOCK_UN) # technically, redundant, since close() triggers this too.
os.close(path_json_fd)
def _update_html_assets(self, json_data):
"""Update the html file and assets"""
assets_path = os.path.join(os.path.dirname(__file__), 'assets')
dest_path = self.qc_params.root_folder
with io.open(os.path.join(assets_path, 'index.html'), encoding="utf-8") as template_index:
template = Template(template_index.read())
output = template.substitute(sct_json_data=json.dumps(json_data))
io.open(os.path.join(dest_path, 'index.html'), 'w', encoding="utf-8").write(output)
for path in ['css', 'js', 'imgs', 'fonts']:
src_path = os.path.join(assets_path, '_assets', path)
dest_full_path = os.path.join(dest_path, '_assets', path)
if not os.path.exists(dest_full_path):
os.makedirs(dest_full_path, exist_ok=True)
for file_ in os.listdir(src_path):
if not os.path.isfile(os.path.join(dest_full_path, file_)):
copy(os.path.join(src_path, file_),
dest_full_path)
def add_entry(src, process, args, path_qc, plane, path_img=None, path_img_overlay=None,
qcslice=None,
qcslice_operations=[],
qcslice_layout=None,
dpi=300,
stretch_contrast_method='contrast_stretching',
angle_line=None,
fps=None,
dataset=None,
subject=None):
"""
Create QC report.
:param src: Path to input file (only used to populate report metadata)
:param process:
:param args:
:param path_qc:
:param plane:
:param path_img: Path to image to display
:param path_img_overlay: Path to image to display on top of path_img (will flip between the two)
:param qcslice: spinalcordtoolbox.reports.slice:Axial or spinalcordtoolbox.reports.slice:Sagittal
:param qcslice_operations:
:param qcslice_layout:
:param dpi: int: Output resolution of the image
:param stretch_contrast_method: Method for stretching contrast. See QcImage
:param angle_line: [float]: See generate_qc()
:param fps: float: Number of frames per second for output gif images
:param dataset: str: Dataset name
:param subject: str: Subject name
:return:
"""
qc_param = Params(src, process, args, plane, path_qc, dpi, dataset, subject)
report = QcReport(qc_param, '')
if qcslice is not None:
@QcImage(report, 'none', qcslice_operations, stretch_contrast_method=stretch_contrast_method,
angle_line=angle_line, process=process, fps=fps)
def layout(qslice):
# This will call qc.__call__(self, func):
return qcslice_layout(qslice)
layout(qcslice)
elif path_img is not None:
report.make_content_path()
report.update_description_file(skimage.io.imread(path_img).shape[:2])
copyfile(path_img, qc_param.abs_bkg_img_path())
if path_img_overlay is not None:
# User specified a second image to overlay
copyfile(path_img_overlay, qc_param.abs_overlay_img_path())
else:
# Copy the image both as "overlay" and "path_img_overlay", so it appears static.
# TODO: Leave the possibility in the reports/assets/js files to have static images (instead of having to
# flip between two images).
copyfile(path_img, qc_param.abs_overlay_img_path())
logger.info('Successfully generated the QC results in %s', qc_param.qc_results)
logger.info('Use the following command to see the results in a browser:')
try:
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2":
            # If the user runs SCT within the official Docker distribution, the command xdg-open will not work,
            # so we prefer to instruct the user to manually open the generated html file.
try:
# if user runs SCT within the official Docker distribution, the variable below is defined. More info at:
# https://github.com/neuropoly/sct_docker/blob/master/sct_docker.py#L84
os.environ["DOCKER"]
logger.info('please go to "%s/" and double click on the "index.html" file', path_qc)
except KeyError:
logger.info('xdg-open "%s/index.html"', path_qc)
elif _platform == "darwin":
logger.info('open "%s/index.html"', path_qc)
else:
logger.info('open file "%s/index.html"', path_qc)
except ImportError:
print("WARNING! Platform undetectable.")
def generate_qc(fname_in1, fname_in2=None, fname_seg=None, angle_line=None, args=None, path_qc=None,
dataset=None, subject=None, path_img=None, process=None, fps=None):
"""
Generate a QC entry allowing to quickly review results. This function is the entry point and is called by SCT
scripts (e.g. sct_propseg).
:param fname_in1: str: File name of input image #1 (mandatory)
:param fname_in2: str: File name of input image #2
:param fname_seg: str: File name of input segmentation
    :param angle_line: list: Angle [in rad, wrt. vertical line, must be between -pi and pi] to apply to the line overlaid on the image, for\
        each slice; for slices that don't have an angle to display, a NaN is expected. To be used for assessing cord orientation.
:param args: args from parent function
:param path_qc: str: Path to save QC report
:param dataset: str: Dataset name
:param subject: str: Subject name
:param path_img: dict: Path to image to display (e.g., a graph), instead of computing the image from MRI.
:param process: str: Name of SCT function. e.g., sct_propseg
    :param fps: float: Number of frames per second for output gif images. Used only for sct_fmri_moco and sct_dmri_moco.
:return: None
"""
logger.info('\n*** Generate Quality Control (QC) html report ***')
dpi = 300
plane = None
qcslice_type = None
qcslice_operations = None
qcslice_layout = None
# Get QC specifics based on SCT process
# Axial orientation, switch between two input images
if process in ['sct_register_multimodal', 'sct_register_to_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.no_seg_seg]
def qcslice_layout(x): return x.mosaic()[:2]
# Rotation visualisation
elif process in ['rotation']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.line_angle]
def qcslice_layout(x): return x.mosaic(return_center=True)
# Axial orientation, switch between the image and the segmentation
elif process in ['sct_propseg', 'sct_deepseg_sc', 'sct_deepseg_gm']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.listed_seg]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between the image and the centerline
elif process in ['sct_get_centerline']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.label_centerline]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between the image and the white matter segmentation (linear interp, in blue)
elif process in ['sct_warp_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.template]
def qcslice_layout(x): return x.mosaic()
# Axial orientation, switch between gif image (before and after motion correction) and grid overlay
elif process in ['sct_dmri_moco', 'sct_fmri_moco']:
plane = 'Axial'
if fname_seg is None:
raise Exception("Segmentation is needed to ensure proper cropping around spinal cord.")
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.grid]
def qcslice_layout(x): return x.mosaics_through_time()
# Sagittal orientation, display vertebral labels
elif process in ['sct_label_vertebrae']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_vertebrae]
def qcslice_layout(x): return x.single()
# Sagittal orientation, display posterior labels
elif process in ['sct_label_utils']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
# projected_image = projected(Image(fname_seg))
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_utils]
def qcslice_layout(x): return x.single()
# Sagittal orientation, display PMJ box
elif process in ['sct_detect_pmj']:
plane = 'Sagittal'
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.highlight_pmj]
def qcslice_layout(x): return x.single()
# Sagittal orientation, static image
elif process in ['sct_straighten_spinalcord']:
plane = 'Sagittal'
dpi = 100
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_in1)], p_resample=None)
qcslice_operations = [QcImage.vertical_line]
def qcslice_layout(x): return x.single()
# Metric outputs (only graphs)
elif process in ['sct_process_segmentation']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
fname_list = [fname_in1]
# fname_seg should be a list of 4 images: 3 for each of the `qcslice_operations`, plus an extra
# centerline image, which is needed to make `Sagittal.get_center_spit` work correctly
fname_list.extend(fname_seg)
qcslice_type = qcslice.Sagittal([Image(fname) for fname in fname_list], p_resample=None)
qcslice_operations = [QcImage.smooth_centerline, QcImage.highlight_pmj, QcImage.listed_seg]
def qcslice_layout(x): return x.single()
else:
raise ValueError("Unrecognized process: {}".format(process))
add_entry(
src=fname_in1,
process=process,
args=args,
path_qc=path_qc,
dataset=dataset,
subject=subject,
plane=plane,
path_img=path_img,
dpi=dpi,
qcslice=qcslice_type,
qcslice_operations=qcslice_operations,
qcslice_layout=qcslice_layout,
stretch_contrast_method='equalized',
angle_line=angle_line,
fps=fps
)
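# Minimal usage sketch (hypothetical file names): an SCT script such as sct_propseg would
# typically call generate_qc() along the lines of:
#     generate_qc(fname_in1='t2.nii.gz', fname_seg='t2_seg.nii.gz', args='-i t2.nii.gz',
#                 path_qc='./qc', process='sct_propseg')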
def get_json_data_from_path(path_json):
"""Read all json files present in the given path, and output an aggregated json structure"""
results = []
for file_json in glob.iglob(os.path.join(path_json, '*.json')):
logger.debug('Opening: ' + file_json)
with open(file_json, 'r+') as fjson:
results.append(json.load(fjson))
return results
|
|
"""Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
from itertools import ifilter, ifilterfalse, imap, izip
__all__ = ["cmp","dircmp","cmpfiles"]
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1):
"""Compare two files.
Arguments:
f1 -- First file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
defaults to 1.
Return value:
True if the files are the same, False otherwise.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
"""
s1 = _sig(os.stat(f1))
s2 = _sig(os.stat(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return False
if shallow and s1 == s2:
return True
if s1[1] != s2[1]:
return False
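    # Look up the cached outcome for this file pair; it is only reused if both stat
    # signatures still match, otherwise the files are re-read below.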
result = _cache.get((f1, f2))
if result and (s1, s2) == result[:2]:
return result[2]
outcome = _do_cmp(f1, f2)
_cache[f1, f2] = s1, s2, outcome
return outcome
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
bufsize = BUFSIZE
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a,b,ignore=None,hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to ['RCS', 'CVS', 'tags'].
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self): # Compute common names
a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
self.common = map(a.__getitem__, ifilter(b.__contains__, a))
self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except os.error, why:
# print 'Can\'t stat', a_path, ':', why[1]
ok = 0
try:
b_stat = os.stat(b_path)
except os.error, why:
# print 'Can\'t stat', b_path, ':', why[1]
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for sd in self.subdirs.itervalues():
sd.phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print 'diff', self.left, self.right
if self.left_only:
self.left_only.sort()
print 'Only in', self.left, ':', self.left_only
if self.right_only:
self.right_only.sort()
print 'Only in', self.right, ':', self.right_only
if self.same_files:
self.same_files.sort()
print 'Identical files :', self.same_files
if self.diff_files:
self.diff_files.sort()
print 'Differing files :', self.diff_files
if self.funny_files:
self.funny_files.sort()
print 'Trouble with common files :', self.funny_files
if self.common_dirs:
self.common_dirs.sort()
print 'Common subdirectories :', self.common_dirs
if self.common_funny:
self.common_funny.sort()
print 'Common funny cases :', self.common_funny
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for sd in self.subdirs.itervalues():
print
sd.report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for sd in self.subdirs.itervalues():
print
sd.report_full_closure()
methodmap = dict(subdirs=phase4,
same_files=phase3, diff_files=phase3, funny_files=phase3,
common_dirs = phase2, common_files=phase2, common_funny=phase2,
common=phase1, left_only=phase1, right_only=phase1,
left_list=phase0, right_list=phase0)
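    # Attributes such as diff_files or common_dirs are computed lazily: __getattr__ below looks up
    # the phase method that produces the requested attribute in methodmap, runs it, then returns the value.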
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError, attr
self.methodmap[attr](self)
return getattr(self, attr)
def cmpfiles(a, b, common, shallow=1):
"""Compare common files in two directories.
a, b -- directory names
common -- list of file names found in both directories
shallow -- if true, do comparison based solely on stat() information
Returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files.
"""
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow)].append(x)
return res
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
except os.error:
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(ifilterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
|
|
# -*- encoding: utf-8
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import desc
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.databases import mssql
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import CursorSQL
from sqlalchemy.testing.assertsql import DialectSQL
from sqlalchemy.util import ue
metadata = None
cattable = None
matchtable = None
class LegacySchemaAliasingTest(fixtures.TestBase, AssertsCompiledSQL):
"""Legacy behavior tried to prevent schema-qualified tables
from being rendered as dotted names, and were instead aliased.
This behavior no longer seems to be required.
"""
def setup(self):
metadata = MetaData()
self.t1 = table(
"t1",
column("a", Integer),
column("b", String),
column("c", String),
)
self.t2 = Table(
"t2",
metadata,
Column("a", Integer),
Column("b", Integer),
Column("c", Integer),
schema="schema",
)
def _assert_sql(self, element, legacy_sql, modern_sql=None):
dialect = mssql.dialect(legacy_schema_aliasing=True)
self.assert_compile(element, legacy_sql, dialect=dialect)
dialect = mssql.dialect()
self.assert_compile(element, modern_sql or "foob", dialect=dialect)
def _legacy_dialect(self):
return mssql.dialect(legacy_schema_aliasing=True)
def test_result_map(self):
s = self.t2.select()
c = s.compile(dialect=self._legacy_dialect())
assert self.t2.c.a in set(c._create_result_map()["a"][1])
def test_result_map_use_labels(self):
s = self.t2.select(use_labels=True)
c = s.compile(dialect=self._legacy_dialect())
assert self.t2.c.a in set(c._create_result_map()["schema_t2_a"][1])
def test_straight_select(self):
self._assert_sql(
self.t2.select(),
"SELECT t2_1.a, t2_1.b, t2_1.c FROM [schema].t2 AS t2_1",
"SELECT [schema].t2.a, [schema].t2.b, "
"[schema].t2.c FROM [schema].t2",
)
def test_straight_select_use_labels(self):
self._assert_sql(
self.t2.select(use_labels=True),
"SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b, "
"t2_1.c AS schema_t2_c FROM [schema].t2 AS t2_1",
"SELECT [schema].t2.a AS schema_t2_a, "
"[schema].t2.b AS schema_t2_b, "
"[schema].t2.c AS schema_t2_c FROM [schema].t2",
)
def test_join_to_schema(self):
t1, t2 = self.t1, self.t2
self._assert_sql(
t1.join(t2, t1.c.a == t2.c.a).select(),
"SELECT t1.a, t1.b, t1.c, t2_1.a, t2_1.b, t2_1.c FROM t1 "
"JOIN [schema].t2 AS t2_1 ON t2_1.a = t1.a",
"SELECT t1.a, t1.b, t1.c, [schema].t2.a, [schema].t2.b, "
"[schema].t2.c FROM t1 JOIN [schema].t2 ON [schema].t2.a = t1.a",
)
def test_union_schema_to_non(self):
t1, t2 = self.t1, self.t2
s = (
select([t2.c.a, t2.c.b])
.apply_labels()
.union(select([t1.c.a, t1.c.b]).apply_labels())
.alias()
.select()
)
self._assert_sql(
s,
"SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM "
"(SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b "
"FROM [schema].t2 AS t2_1 UNION SELECT t1.a AS t1_a, "
"t1.b AS t1_b FROM t1) AS anon_1",
"SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM "
"(SELECT [schema].t2.a AS schema_t2_a, [schema].t2.b AS "
"schema_t2_b FROM [schema].t2 UNION SELECT t1.a AS t1_a, "
"t1.b AS t1_b FROM t1) AS anon_1",
)
def test_column_subquery_to_alias(self):
a1 = self.t2.alias("a1")
s = select([self.t2, select([a1.c.a]).scalar_subquery()])
self._assert_sql(
s,
"SELECT t2_1.a, t2_1.b, t2_1.c, "
"(SELECT a1.a FROM [schema].t2 AS a1) "
"AS anon_1 FROM [schema].t2 AS t2_1",
"SELECT [schema].t2.a, [schema].t2.b, [schema].t2.c, "
"(SELECT a1.a FROM [schema].t2 AS a1) AS anon_1 FROM [schema].t2",
)
class IdentityInsertTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mssql"
__dialect__ = mssql.MSDialect()
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, cattable
metadata = MetaData(testing.db)
cattable = Table(
"cattable",
metadata,
Column("id", Integer),
Column("description", String(50)),
PrimaryKeyConstraint("id", name="PK_cattable"),
)
def setup(self):
metadata.create_all()
def teardown(self):
metadata.drop_all()
def test_compiled(self):
self.assert_compile(
cattable.insert().values(id=9, description="Python"),
"INSERT INTO cattable (id, description) "
"VALUES (:id, :description)",
)
def test_execute(self):
with testing.db.connect() as conn:
conn.execute(cattable.insert().values(id=9, description="Python"))
cats = conn.execute(cattable.select().order_by(cattable.c.id))
eq_([(9, "Python")], list(cats))
result = conn.execute(cattable.insert().values(description="PHP"))
eq_([10], result.inserted_primary_key)
lastcat = conn.execute(
cattable.select().order_by(desc(cattable.c.id))
)
eq_((10, "PHP"), lastcat.first())
def test_executemany(self):
with testing.db.connect() as conn:
conn.execute(
cattable.insert(),
[
{"id": 89, "description": "Python"},
{"id": 8, "description": "Ruby"},
{"id": 3, "description": "Perl"},
{"id": 1, "description": "Java"},
],
)
cats = conn.execute(cattable.select().order_by(cattable.c.id))
eq_(
[(1, "Java"), (3, "Perl"), (8, "Ruby"), (89, "Python")],
list(cats),
)
conn.execute(
cattable.insert(),
[{"description": "PHP"}, {"description": "Smalltalk"}],
)
lastcats = conn.execute(
cattable.select().order_by(desc(cattable.c.id)).limit(2)
)
eq_([(91, "Smalltalk"), (90, "PHP")], list(lastcats))
def test_insert_plain_param(self):
with testing.db.connect() as conn:
conn.execute(cattable.insert(), id=5)
eq_(conn.scalar(select([cattable.c.id])), 5)
def test_insert_values_key_plain(self):
with testing.db.connect() as conn:
conn.execute(cattable.insert().values(id=5))
eq_(conn.scalar(select([cattable.c.id])), 5)
def test_insert_values_key_expression(self):
with testing.db.connect() as conn:
conn.execute(cattable.insert().values(id=literal(5)))
eq_(conn.scalar(select([cattable.c.id])), 5)
def test_insert_values_col_plain(self):
with testing.db.connect() as conn:
conn.execute(cattable.insert().values({cattable.c.id: 5}))
eq_(conn.scalar(select([cattable.c.id])), 5)
def test_insert_values_col_expression(self):
with testing.db.connect() as conn:
conn.execute(cattable.insert().values({cattable.c.id: literal(5)}))
eq_(conn.scalar(select([cattable.c.id])), 5)
class QueryUnicodeTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
@testing.requires.mssql_freetds
@testing.requires.python2
@testing.provide_metadata
def test_convert_unicode(self):
meta = self.metadata
t1 = Table(
"unitest_table",
meta,
Column("id", Integer, primary_key=True),
Column("descr", mssql.MSText()),
)
meta.create_all()
with testing.db.connect() as con:
con.execute(
ue(
"insert into unitest_table values ('abc \xc3\xa9 def')"
).encode("UTF-8")
)
r = con.execute(t1.select()).first()
assert isinstance(r[1], util.text_type), (
"%s is %s instead of unicode, working on %s"
% (r[1], type(r[1]), meta.bind)
)
eq_(r[1], util.ue("abc \xc3\xa9 def"))
class QueryTest(testing.AssertsExecutionResults, fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
def test_fetchid_trigger(self):
"""
Verify identity return value on inserting to a trigger table.
MSSQL's OUTPUT INSERTED clause does not work for the
case of a table having an identity (autoincrement)
primary key column, and which also has a trigger configured
to fire upon each insert and subsequently perform an
insert into a different table.
        SQLAlchemy's MSSQL dialect by default will attempt to
use an OUTPUT_INSERTED clause, which in this case will
raise the following error:
ProgrammingError: (ProgrammingError) ('42000', 334,
"[Microsoft][SQL Server Native Client 10.0][SQL Server]The
target table 't1' of the DML statement cannot have any enabled
triggers if the statement contains an OUTPUT clause without
INTO clause.", 7748) 'INSERT INTO t1 (descr) OUTPUT inserted.id
VALUES (?)' ('hello',)
This test verifies a workaround, which is to rely on the
older SCOPE_IDENTITY() call, which still works for this scenario.
To enable the workaround, the Table must be instantiated
with the init parameter 'implicit_returning = False'.
"""
# todo: this same test needs to be tried in a multithreaded context
# with multiple threads inserting to the same table.
# todo: check whether this error also occurs with clients other
# than the SQL Server Native Client. Maybe an assert_raises
# test should be written.
meta = MetaData(testing.db)
t1 = Table(
"t1",
meta,
Column("id", Integer, mssql_identity_start=100, primary_key=True),
Column("descr", String(200)),
# the following flag will prevent the
# MSSQLCompiler.returning_clause from getting called,
# though the ExecutionContext will still have a
# _select_lastrowid, so the SELECT SCOPE_IDENTITY() will
# hopefully be called instead.
implicit_returning=False,
)
t2 = Table(
"t2",
meta,
Column("id", Integer, mssql_identity_start=200, primary_key=True),
Column("descr", String(200)),
)
meta.create_all()
con = testing.db.connect()
con.execute(
"""create trigger paj on t1 for insert as
insert into t2 (descr) select descr from inserted"""
)
try:
tr = con.begin()
r = con.execute(t2.insert(), descr="hello")
self.assert_(r.inserted_primary_key == [200])
r = con.execute(t1.insert(), descr="hello")
self.assert_(r.inserted_primary_key == [100])
finally:
tr.commit()
con.execute("""drop trigger paj""")
meta.drop_all()
@testing.provide_metadata
def _test_disable_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity": False})
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
implicit_returning=False,
)
metadata.create_all(engine)
with self.sql_execution_asserter(engine) as asserter:
engine.execute(t1.insert(), {"data": "somedata"})
# TODO: need a dialect SQL that acts like Cursor SQL
asserter.assert_(
DialectSQL(
"INSERT INTO t1 (data) VALUES (:data)", {"data": "somedata"}
),
CursorSQL("SELECT @@identity AS lastrowid"),
)
@testing.provide_metadata
def test_enable_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity": True})
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
implicit_returning=False,
)
metadata.create_all(engine)
with self.sql_execution_asserter(engine) as asserter:
engine.execute(t1.insert())
# even with pyodbc, we don't embed the scope identity on a
# DEFAULT VALUES insert
asserter.assert_(
CursorSQL("INSERT INTO t1 DEFAULT VALUES"),
CursorSQL("SELECT scope_identity() AS lastrowid"),
)
@testing.only_on("mssql+pyodbc")
@testing.provide_metadata
def test_embedded_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity": True})
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
implicit_returning=False,
)
metadata.create_all(engine)
with self.sql_execution_asserter(engine) as asserter:
engine.execute(t1.insert(), {"data": "somedata"})
# pyodbc-specific system
asserter.assert_(
CursorSQL(
"INSERT INTO t1 (data) VALUES (?); select scope_identity()",
("somedata",),
)
)
@testing.provide_metadata
def test_insertid_schema(self):
meta = self.metadata
eng = engines.testing_engine(
options=dict(legacy_schema_aliasing=False)
)
meta.bind = eng
con = eng.connect()
con.execute("create schema paj")
@event.listens_for(meta, "after_drop")
def cleanup(target, connection, **kw):
connection.execute("drop schema paj")
tbl = Table(
"test", meta, Column("id", Integer, primary_key=True), schema="paj"
)
tbl.create()
tbl.insert().execute({"id": 1})
eq_(tbl.select().scalar(), 1)
@testing.provide_metadata
def test_insertid_schema_legacy(self):
meta = self.metadata
eng = engines.testing_engine(options=dict(legacy_schema_aliasing=True))
meta.bind = eng
con = eng.connect()
con.execute("create schema paj")
@event.listens_for(meta, "after_drop")
def cleanup(target, connection, **kw):
connection.execute("drop schema paj")
tbl = Table(
"test", meta, Column("id", Integer, primary_key=True), schema="paj"
)
tbl.create()
tbl.insert().execute({"id": 1})
eq_(tbl.select().scalar(), 1)
@testing.provide_metadata
def test_returning_no_autoinc(self):
meta = self.metadata
table = Table(
"t1",
meta,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
table.create()
result = (
table.insert()
.values(id=1, data=func.lower("SomeString"))
.returning(table.c.id, table.c.data)
.execute()
)
eq_(result.fetchall(), [(1, "somestring")])
@testing.provide_metadata
def test_delete_schema(self):
meta = self.metadata
eng = engines.testing_engine(
options=dict(legacy_schema_aliasing=False)
)
meta.bind = eng
con = eng.connect()
con.execute("create schema paj")
@event.listens_for(meta, "after_drop")
def cleanup(target, connection, **kw):
connection.execute("drop schema paj")
tbl = Table(
"test", meta, Column("id", Integer, primary_key=True), schema="paj"
)
tbl.create()
tbl.insert().execute({"id": 1})
eq_(tbl.select().scalar(), 1)
tbl.delete(tbl.c.id == 1).execute()
eq_(tbl.select().scalar(), None)
@testing.provide_metadata
def test_delete_schema_legacy(self):
meta = self.metadata
eng = engines.testing_engine(options=dict(legacy_schema_aliasing=True))
meta.bind = eng
con = eng.connect()
con.execute("create schema paj")
@event.listens_for(meta, "after_drop")
def cleanup(target, connection, **kw):
connection.execute("drop schema paj")
tbl = Table(
"test", meta, Column("id", Integer, primary_key=True), schema="paj"
)
tbl.create()
tbl.insert().execute({"id": 1})
eq_(tbl.select().scalar(), 1)
tbl.delete(tbl.c.id == 1).execute()
eq_(tbl.select().scalar(), None)
@testing.provide_metadata
def test_insertid_reserved(self):
meta = self.metadata
table = Table("select", meta, Column("col", Integer, primary_key=True))
table.create()
table.insert().execute(col=7)
eq_(table.select().scalar(), 7)
class Foo(object):
def __init__(self, **kw):
for k in kw:
setattr(self, k, kw[k])
def full_text_search_missing():
"""Test if full text search is not implemented and return False if
it is and True otherwise."""
try:
connection = testing.db.connect()
try:
connection.execute("CREATE FULLTEXT CATALOG Catalog AS " "DEFAULT")
return False
except Exception:
return True
finally:
connection.close()
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mssql"
__skip_if__ = (full_text_search_missing,)
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table(
"cattable",
metadata,
Column("id", Integer),
Column("description", String(50)),
PrimaryKeyConstraint("id", name="PK_cattable"),
)
matchtable = Table(
"matchtable",
metadata,
Column("id", Integer),
Column("title", String(200)),
Column("category_id", Integer, ForeignKey("cattable.id")),
PrimaryKeyConstraint("id", name="PK_matchtable"),
)
DDL(
"""CREATE FULLTEXT INDEX
ON cattable (description)
KEY INDEX PK_cattable"""
).execute_at("after-create", matchtable)
DDL(
"""CREATE FULLTEXT INDEX
ON matchtable (title)
KEY INDEX PK_matchtable"""
).execute_at("after-create", matchtable)
metadata.create_all()
cattable.insert().execute(
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
]
)
matchtable.insert().execute(
[
{
"id": 1,
"title": "Web Development with Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{"id": 4, "title": "Guide to Django", "category_id": 1},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
]
)
DDL("WAITFOR DELAY '00:00:05'").execute(bind=engines.testing_engine())
@classmethod
def teardown_class(cls):
metadata.drop_all()
connection = testing.db.connect()
connection.execute("DROP FULLTEXT CATALOG Catalog")
connection.close()
def test_expression(self):
self.assert_compile(
matchtable.c.title.match("somstr"),
"CONTAINS (matchtable.title, ?)",
)
def test_simple_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([2, 5], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("Matz's"))
.execute()
.fetchall()
)
eq_([3], [r.id for r in results])
def test_simple_prefix_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match('"nut*"'))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results])
def test_simple_inflectional_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match('FORMSOF(INFLECTIONAL, "dives")'))
.execute()
.fetchall()
)
eq_([2], [r.id for r in results])
def test_or_match(self):
results1 = (
matchtable.select()
.where(
or_(
matchtable.c.title.match("nutshell"),
matchtable.c.title.match("ruby"),
)
)
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([3, 5], [r.id for r in results1])
results2 = (
matchtable.select()
.where(matchtable.c.title.match("nutshell OR ruby"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
results1 = (
matchtable.select()
.where(
and_(
matchtable.c.title.match("python"),
matchtable.c.title.match("nutshell"),
)
)
.execute()
.fetchall()
)
eq_([5], [r.id for r in results1])
results2 = (
matchtable.select()
.where(matchtable.c.title.match("python AND nutshell"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = (
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
or_(
cattable.c.description.match("Ruby"),
matchtable.c.title.match("nutshell"),
),
)
)
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([1, 3, 5], [r.id for r in results])
|
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.validators import RegexValidator
from django.forms import ImageField as DjangoImageField
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.encoding import is_protected_type, smart_text
from django.utils.translation import ugettext_lazy as _
from rest_framework import ISO_8601
from rest_framework.compat import (
EmailValidator, MinValueValidator, MaxValueValidator,
MinLengthValidator, MaxLengthValidator, URLValidator, OrderedDict,
unicode_repr, unicode_to_repr
)
from rest_framework.exceptions import ValidationError
from rest_framework.settings import api_settings
from rest_framework.utils import html, representation, humanize_datetime
import collections
import copy
import datetime
import decimal
import inspect
import re
import uuid
class empty:
"""
This class is used to represent no data being provided for a given input
or output value.
It is required because `None` may be a valid input or output value.
"""
pass
def is_simple_callable(obj):
"""
True if the object is a callable that takes no required arguments.
"""
function = inspect.isfunction(obj)
method = inspect.ismethod(obj)
if not (function or method):
return False
args, _, _, defaults = inspect.getargspec(obj)
len_args = len(args) if function else len(args) - 1
len_defaults = len(defaults) if defaults else 0
return len_args <= len_defaults
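# Illustrative sketch of `is_simple_callable` (the `Book` class below is a
# hypothetical example, not part of this module):
#
#     class Book(object):
#         def title(self):            # no required args -> simple callable
#             return 'Example'
#         def price(self, currency):  # requires an argument -> not simple
#             return 0
#
#     is_simple_callable(Book().title)   # True
#     is_simple_callable(Book().price)   # False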
def get_attribute(instance, attrs):
"""
Similar to Python's built in `getattr(instance, attr)`,
but takes a list of nested attributes, instead of a single attribute.
Also accepts either attribute lookup on objects or dictionary lookups.
"""
for attr in attrs:
if instance is None:
# Break out early if we get `None` at any point in a nested lookup.
return None
try:
if isinstance(instance, collections.Mapping):
instance = instance[attr]
else:
instance = getattr(instance, attr)
except ObjectDoesNotExist:
return None
if is_simple_callable(instance):
try:
instance = instance()
except (AttributeError, KeyError) as exc:
# If we raised an Attribute or KeyError here it'd get treated
# as an omitted field in `Field.get_attribute()`. Instead we
# raise a ValueError to ensure the exception is not masked.
raise ValueError('Exception raised in callable attribute "{0}"; original exception was: {1}'.format(attr, exc))
return instance
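# Illustrative example (the nested dict is hypothetical): attribute lookups
# fall back to key lookups for mappings, and `None` short-circuits the chain.
#
#     get_attribute({'user': {'email': 'a@example.com'}}, ['user', 'email'])
#     # -> 'a@example.com'
#     get_attribute({'user': None}, ['user', 'email'])
#     # -> None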
def set_value(dictionary, keys, value):
"""
Similar to Python's built in `dictionary[key] = value`,
but takes a list of nested keys instead of a single key.
set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2}
set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2}
set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}
"""
if not keys:
dictionary.update(value)
return
for key in keys[:-1]:
if key not in dictionary:
dictionary[key] = {}
dictionary = dictionary[key]
dictionary[keys[-1]] = value
class CreateOnlyDefault(object):
"""
This class may be used to provide default values that are only used
for create operations, but that do not return any value for update
operations.
"""
def __init__(self, default):
self.default = default
def set_context(self, serializer_field):
self.is_update = serializer_field.parent.instance is not None
if callable(self.default) and hasattr(self.default, 'set_context') and not self.is_update:
self.default.set_context(serializer_field)
def __call__(self):
if self.is_update:
raise SkipField()
if callable(self.default):
return self.default()
return self.default
def __repr__(self):
return unicode_to_repr(
'%s(%s)' % (self.__class__.__name__, unicode_repr(self.default))
)
class CurrentUserDefault(object):
def set_context(self, serializer_field):
self.user = serializer_field.context['request'].user
def __call__(self):
return self.user
def __repr__(self):
return unicode_to_repr('%s()' % self.__class__.__name__)
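# A minimal usage sketch combining the two defaults above (the serializer and
# model are hypothetical):
#
#     class PostSerializer(serializers.ModelSerializer):
#         owner = serializers.HiddenField(
#             default=CreateOnlyDefault(CurrentUserDefault())
#         )
#
# On create, the requesting user is injected into `validated_data`; on update,
# `CreateOnlyDefault.__call__` raises `SkipField` so no value is set.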
class SkipField(Exception):
pass
NOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'
NOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'
NOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`'
USE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'
MISSING_ERROR_MESSAGE = (
'ValidationError raised by `{class_name}`, but error key `{key}` does '
'not exist in the `error_messages` dictionary.'
)
class Field(object):
_creation_counter = 0
default_error_messages = {
'required': _('This field is required.'),
'null': _('This field may not be null.')
}
default_validators = []
default_empty_html = empty
initial = None
def __init__(self, read_only=False, write_only=False,
required=None, default=empty, initial=empty, source=None,
label=None, help_text=None, style=None,
error_messages=None, validators=None, allow_null=False):
self._creation_counter = Field._creation_counter
Field._creation_counter += 1
# If `required` is unset, then use `True` unless a default is provided.
if required is None:
required = default is empty and not read_only
# Some combinations of keyword arguments do not make sense.
assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY
assert not (read_only and required), NOT_READ_ONLY_REQUIRED
assert not (required and default is not empty), NOT_REQUIRED_DEFAULT
assert not (read_only and self.__class__ == Field), USE_READONLYFIELD
self.read_only = read_only
self.write_only = write_only
self.required = required
self.default = default
self.source = source
self.initial = self.initial if (initial is empty) else initial
self.label = label
self.help_text = help_text
self.style = {} if style is None else style
self.allow_null = allow_null
if self.default_empty_html is not empty:
if not required:
self.default_empty_html = empty
elif default is not empty:
self.default_empty_html = default
if validators is not None:
self.validators = validators[:]
# These are set up by `.bind()` when the field is added to a serializer.
self.field_name = None
self.parent = None
# Collect default error message from self and parent classes
messages = {}
for cls in reversed(self.__class__.__mro__):
messages.update(getattr(cls, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def bind(self, field_name, parent):
"""
Initializes the field name and parent for the field instance.
Called when a field is added to the parent serializer instance.
"""
# In order to enforce a consistent style, we error if a redundant
# 'source' argument has been used. For example:
# my_field = serializer.CharField(source='my_field')
assert self.source != field_name, (
"It is redundant to specify `source='%s'` on field '%s' in "
"serializer '%s', because it is the same as the field name. "
"Remove the `source` keyword argument." %
(field_name, self.__class__.__name__, parent.__class__.__name__)
)
self.field_name = field_name
self.parent = parent
# `self.label` should default to being based on the field name.
if self.label is None:
self.label = field_name.replace('_', ' ').capitalize()
# self.source should default to being the same as the field name.
if self.source is None:
self.source = field_name
# self.source_attrs is a list of attributes that need to be looked up
# when serializing the instance, or populating the validated data.
if self.source == '*':
self.source_attrs = []
else:
self.source_attrs = self.source.split('.')
# .validators is a lazily loaded property, that gets its default
# value from `get_validators`.
@property
def validators(self):
if not hasattr(self, '_validators'):
self._validators = self.get_validators()
return self._validators
@validators.setter
def validators(self, validators):
self._validators = validators
def get_validators(self):
return self.default_validators[:]
def get_initial(self):
"""
Return a value to use when the field is being returned as a primitive
value, without any object instance.
"""
return self.initial
def get_value(self, dictionary):
"""
Given the *incoming* primitive data, return the value for this field
that should be validated and transformed to a native value.
"""
if html.is_html_input(dictionary):
# HTML forms will represent empty fields as '', and cannot
# represent None or False values directly.
if self.field_name not in dictionary:
if getattr(self.root, 'partial', False):
return empty
return self.default_empty_html
ret = dictionary[self.field_name]
if ret == '' and self.allow_null:
# If the field is blank, and null is a valid value then
# determine if we should use null instead.
return '' if getattr(self, 'allow_blank', False) else None
return ret
return dictionary.get(self.field_name, empty)
def get_attribute(self, instance):
"""
Given the *outgoing* object instance, return the primitive value
that should be used for this field.
"""
try:
return get_attribute(instance, self.source_attrs)
except (KeyError, AttributeError) as exc:
if not self.required and self.default is empty:
raise SkipField()
msg = (
'Got {exc_type} when attempting to get a value for field '
'`{field}` on serializer `{serializer}`.\nThe serializer '
'field might be named incorrectly and not match '
'any attribute or key on the `{instance}` instance.\n'
'Original exception text was: {exc}.'.format(
exc_type=type(exc).__name__,
field=self.field_name,
serializer=self.parent.__class__.__name__,
instance=instance.__class__.__name__,
exc=exc
)
)
raise type(exc)(msg)
def get_default(self):
"""
Return the default value to use when validating data if no input
is provided for this field.
If a default has not been set for this field then this will simply
return `empty`, indicating that no value should be set in the
validated data for this field.
"""
if self.default is empty:
raise SkipField()
if callable(self.default):
if hasattr(self.default, 'set_context'):
self.default.set_context(self)
return self.default()
return self.default
def validate_empty_values(self, data):
"""
Validate empty values, and either:
* Raise `ValidationError`, indicating invalid data.
* Raise `SkipField`, indicating that the field should be ignored.
* Return (True, data), indicating an empty value that should be
returned without any further validation being applied.
* Return (False, data), indicating a non-empty value, that should
have validation applied as normal.
"""
if self.read_only:
return (True, self.get_default())
if data is empty:
if getattr(self.root, 'partial', False):
raise SkipField()
if self.required:
self.fail('required')
return (True, self.get_default())
if data is None:
if not self.allow_null:
self.fail('null')
return (True, None)
return (False, data)
def run_validation(self, data=empty):
"""
Validate a simple representation and return the internal value.
The provided data may be `empty` if no representation was included
in the input.
May raise `SkipField` if the field should not be included in the
validated data.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
self.run_validators(value)
return value
def run_validators(self, value):
"""
Test the given value against all the validators on the field,
and either raise a `ValidationError` or simply return.
"""
errors = []
for validator in self.validators:
if hasattr(validator, 'set_context'):
validator.set_context(self)
try:
validator(value)
except ValidationError as exc:
# If the validation error contains a mapping of fields to
# errors then simply raise it immediately rather than
# attempting to accumulate a list of errors.
if isinstance(exc.detail, dict):
raise
errors.extend(exc.detail)
except DjangoValidationError as exc:
errors.extend(exc.messages)
if errors:
raise ValidationError(errors)
def to_internal_value(self, data):
"""
Transform the *incoming* primitive data into a native value.
"""
raise NotImplementedError(
'{cls}.to_internal_value() must be implemented.'.format(
cls=self.__class__.__name__
)
)
def to_representation(self, value):
"""
Transform the *outgoing* native value into primitive data.
"""
raise NotImplementedError(
'{cls}.to_representation() must be implemented.\n'
'If you are upgrading from REST framework version 2 '
'you might want `ReadOnlyField`.'.format(
cls=self.__class__.__name__
)
)
def fail(self, key, **kwargs):
"""
A helper method that simply raises a validation error.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
raise AssertionError(msg)
message_string = msg.format(**kwargs)
raise ValidationError(message_string)
@property
def root(self):
"""
Returns the top-level serializer for this field.
"""
root = self
while root.parent is not None:
root = root.parent
return root
@property
def context(self):
"""
Returns the context as passed to the root serializer on initialization.
"""
return getattr(self.root, '_context', {})
def __new__(cls, *args, **kwargs):
"""
When a field is instantiated, we store the arguments that were used,
so that we can present a helpful representation of the object.
"""
instance = super(Field, cls).__new__(cls)
instance._args = args
instance._kwargs = kwargs
return instance
def __deepcopy__(self, memo):
"""
When cloning fields we instantiate using the arguments it was
originally created with, rather than copying the complete state.
"""
args = copy.deepcopy(self._args)
kwargs = dict(self._kwargs)
# Bit ugly, but we need to special case 'validators' as Django's
# RegexValidator does not support deepcopy.
# We treat validator callables as immutable objects.
# See https://github.com/tomchristie/django-rest-framework/issues/1954
validators = kwargs.pop('validators', None)
kwargs = copy.deepcopy(kwargs)
if validators is not None:
kwargs['validators'] = validators
return self.__class__(*args, **kwargs)
def __repr__(self):
"""
Fields are represented using their initial calling arguments.
This allows us to create descriptive representations for serializer
instances that show all the declared fields on the serializer.
"""
return unicode_to_repr(representation.field_repr(self))
# Boolean types...
class BooleanField(Field):
default_error_messages = {
'invalid': _('"{input}" is not a valid boolean.')
}
default_empty_html = False
initial = False
TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
def __init__(self, **kwargs):
assert 'allow_null' not in kwargs, '`allow_null` is not a valid option. Use `NullBooleanField` instead.'
super(BooleanField, self).__init__(**kwargs)
def to_internal_value(self, data):
if data in self.TRUE_VALUES:
return True
elif data in self.FALSE_VALUES:
return False
self.fail('invalid', input=data)
def to_representation(self, value):
if value in self.TRUE_VALUES:
return True
elif value in self.FALSE_VALUES:
return False
return bool(value)
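# Quick illustration, using values from TRUE_VALUES/FALSE_VALUES above:
#
#     BooleanField().to_internal_value('true')   # -> True
#     BooleanField().to_internal_value(0)        # -> False
#     BooleanField().to_internal_value('maybe')  # raises ValidationError ('invalid')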
class NullBooleanField(Field):
default_error_messages = {
'invalid': _('"{input}" is not a valid boolean.')
}
initial = None
TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
NULL_VALUES = set(('n', 'N', 'null', 'Null', 'NULL', '', None))
def __init__(self, **kwargs):
assert 'allow_null' not in kwargs, '`allow_null` is not a valid option.'
kwargs['allow_null'] = True
super(NullBooleanField, self).__init__(**kwargs)
def to_internal_value(self, data):
if data in self.TRUE_VALUES:
return True
elif data in self.FALSE_VALUES:
return False
elif data in self.NULL_VALUES:
return None
self.fail('invalid', input=data)
def to_representation(self, value):
if value in self.NULL_VALUES:
return None
if value in self.TRUE_VALUES:
return True
elif value in self.FALSE_VALUES:
return False
return bool(value)
# String types...
class CharField(Field):
default_error_messages = {
'blank': _('This field may not be blank.'),
'max_length': _('Ensure this field has no more than {max_length} characters.'),
'min_length': _('Ensure this field has at least {min_length} characters.')
}
initial = ''
def __init__(self, **kwargs):
self.allow_blank = kwargs.pop('allow_blank', False)
self.trim_whitespace = kwargs.pop('trim_whitespace', True)
self.max_length = kwargs.pop('max_length', None)
self.min_length = kwargs.pop('min_length', None)
super(CharField, self).__init__(**kwargs)
if self.max_length is not None:
message = self.error_messages['max_length'].format(max_length=self.max_length)
self.validators.append(MaxLengthValidator(self.max_length, message=message))
if self.min_length is not None:
message = self.error_messages['min_length'].format(min_length=self.min_length)
self.validators.append(MinLengthValidator(self.min_length, message=message))
def run_validation(self, data=empty):
# Test for the empty string here so that it does not get validated,
# and so that subclasses do not need to handle it explicitly
# inside the `to_internal_value()` method.
if data == '':
if not self.allow_blank:
self.fail('blank')
return ''
return super(CharField, self).run_validation(data)
def to_internal_value(self, data):
value = six.text_type(data)
return value.strip() if self.trim_whitespace else value
def to_representation(self, value):
return six.text_type(value)
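# Behaviour sketch for the options handled above:
#
#     CharField().run_validation('  spam  ')          # -> 'spam' (trimmed by default)
#     CharField(allow_blank=True).run_validation('')  # -> ''
#     CharField().run_validation('')                  # raises ValidationError ('blank')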
class EmailField(CharField):
default_error_messages = {
'invalid': _('Enter a valid email address.')
}
def __init__(self, **kwargs):
super(EmailField, self).__init__(**kwargs)
validator = EmailValidator(message=self.error_messages['invalid'])
self.validators.append(validator)
class RegexField(CharField):
default_error_messages = {
'invalid': _('This value does not match the required pattern.')
}
def __init__(self, regex, **kwargs):
super(RegexField, self).__init__(**kwargs)
validator = RegexValidator(regex, message=self.error_messages['invalid'])
self.validators.append(validator)
class SlugField(CharField):
default_error_messages = {
'invalid': _('Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.')
}
def __init__(self, **kwargs):
super(SlugField, self).__init__(**kwargs)
slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')
validator = RegexValidator(slug_regex, message=self.error_messages['invalid'])
self.validators.append(validator)
class URLField(CharField):
default_error_messages = {
'invalid': _('Enter a valid URL.')
}
def __init__(self, **kwargs):
super(URLField, self).__init__(**kwargs)
validator = URLValidator(message=self.error_messages['invalid'])
self.validators.append(validator)
class UUIDField(Field):
default_error_messages = {
'invalid': _('"{value}" is not a valid UUID.'),
}
def to_internal_value(self, data):
if not isinstance(data, uuid.UUID):
try:
return uuid.UUID(data)
except (ValueError, TypeError):
self.fail('invalid', value=data)
return data
def to_representation(self, value):
return str(value)
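# Example round trip (the UUID literal is arbitrary):
#
#     UUIDField().to_internal_value('de305d54-75b4-431b-adb2-eb6b9e546013')
#     # -> UUID('de305d54-75b4-431b-adb2-eb6b9e546013')
#     UUIDField().to_internal_value('not-a-uuid')  # raises ValidationError ('invalid')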
# Number types...
class IntegerField(Field):
default_error_messages = {
'invalid': _('A valid integer is required.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_string_length': _('String value too large.')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
def __init__(self, **kwargs):
self.max_value = kwargs.pop('max_value', None)
self.min_value = kwargs.pop('min_value', None)
super(IntegerField, self).__init__(**kwargs)
if self.max_value is not None:
message = self.error_messages['max_value'].format(max_value=self.max_value)
self.validators.append(MaxValueValidator(self.max_value, message=message))
if self.min_value is not None:
message = self.error_messages['min_value'].format(min_value=self.min_value)
self.validators.append(MinValueValidator(self.min_value, message=message))
def to_internal_value(self, data):
if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
data = int(data)
except (ValueError, TypeError):
self.fail('invalid')
return data
def to_representation(self, value):
return int(value)
class FloatField(Field):
default_error_messages = {
'invalid': _('A valid number is required.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_string_length': _('String value too large.')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
def __init__(self, **kwargs):
self.max_value = kwargs.pop('max_value', None)
self.min_value = kwargs.pop('min_value', None)
super(FloatField, self).__init__(**kwargs)
if self.max_value is not None:
message = self.error_messages['max_value'].format(max_value=self.max_value)
self.validators.append(MaxValueValidator(self.max_value, message=message))
if self.min_value is not None:
message = self.error_messages['min_value'].format(min_value=self.min_value)
self.validators.append(MinValueValidator(self.min_value, message=message))
def to_internal_value(self, data):
if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
return float(data)
except (TypeError, ValueError):
self.fail('invalid')
def to_representation(self, value):
return float(value)
class DecimalField(Field):
default_error_messages = {
'invalid': _('A valid number is required.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_digits': _('Ensure that there are no more than {max_digits} digits in total.'),
'max_decimal_places': _('Ensure that there are no more than {max_decimal_places} decimal places.'),
'max_whole_digits': _('Ensure that there are no more than {max_whole_digits} digits before the decimal point.'),
'max_string_length': _('String value too large.')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
coerce_to_string = api_settings.COERCE_DECIMAL_TO_STRING
def __init__(self, max_digits, decimal_places, coerce_to_string=None, max_value=None, min_value=None, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.coerce_to_string = coerce_to_string if (coerce_to_string is not None) else self.coerce_to_string
self.max_value = max_value
self.min_value = min_value
super(DecimalField, self).__init__(**kwargs)
if self.max_value is not None:
message = self.error_messages['max_value'].format(max_value=self.max_value)
self.validators.append(MaxValueValidator(self.max_value, message=message))
if self.min_value is not None:
message = self.error_messages['min_value'].format(min_value=self.min_value)
self.validators.append(MinValueValidator(self.min_value, message=message))
def to_internal_value(self, data):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
data = smart_text(data).strip()
if len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
value = decimal.Decimal(data)
except decimal.DecimalException:
self.fail('invalid')
# Check for NaN. It is the only value that isn't equal to itself,
# so we can use this to identify NaN values.
if value != value:
self.fail('invalid')
# Check for infinity and negative infinity.
if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')):
self.fail('invalid')
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
self.fail('max_digits', max_digits=self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
self.fail('max_decimal_places', max_decimal_places=self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
self.fail('max_whole_digits', max_whole_digits=self.max_digits - self.decimal_places)
return value
def to_representation(self, value):
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(six.text_type(value).strip())
context = decimal.getcontext().copy()
context.prec = self.max_digits
quantized = value.quantize(
decimal.Decimal('.1') ** self.decimal_places,
context=context
)
if not self.coerce_to_string:
return quantized
return '{0:f}'.format(quantized)
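# Worked example of the digit accounting above (purely illustrative; assumes
# the default COERCE_DECIMAL_TO_STRING setting):
#
#     field = DecimalField(max_digits=5, decimal_places=2)
#     field.to_internal_value('123.45')   # -> Decimal('123.45')
#     field.to_internal_value('1234.5')   # raises ValidationError ('max_whole_digits')
#     field.to_representation(decimal.Decimal('3.1'))  # -> '3.10'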
# Date & time fields...
class DateTimeField(Field):
default_error_messages = {
'invalid': _('Datetime has wrong format. Use one of these formats instead: {format}.'),
'date': _('Expected a datetime but got a date.'),
}
format = api_settings.DATETIME_FORMAT
input_formats = api_settings.DATETIME_INPUT_FORMATS
default_timezone = timezone.get_default_timezone() if settings.USE_TZ else None
def __init__(self, format=empty, input_formats=None, default_timezone=None, *args, **kwargs):
self.format = format if format is not empty else self.format
self.input_formats = input_formats if input_formats is not None else self.input_formats
self.default_timezone = default_timezone if default_timezone is not None else self.default_timezone
super(DateTimeField, self).__init__(*args, **kwargs)
def enforce_timezone(self, value):
"""
When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes.
"""
if (self.default_timezone is not None) and not timezone.is_aware(value):
return timezone.make_aware(value, self.default_timezone)
elif (self.default_timezone is None) and timezone.is_aware(value):
return timezone.make_naive(value, timezone.UTC())
return value
def to_internal_value(self, value):
if isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
self.fail('date')
if isinstance(value, datetime.datetime):
return self.enforce_timezone(value)
for format in self.input_formats:
if format.lower() == ISO_8601:
try:
parsed = parse_datetime(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return self.enforce_timezone(parsed)
else:
try:
parsed = datetime.datetime.strptime(value, format)
except (ValueError, TypeError):
pass
else:
return self.enforce_timezone(parsed)
humanized_format = humanize_datetime.datetime_formats(self.input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if self.format is None:
return value
if self.format.lower() == ISO_8601:
value = value.isoformat()
if value.endswith('+00:00'):
value = value[:-6] + 'Z'
return value
return value.strftime(self.format)
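# Parsing sketch with the default ISO 8601 input format (the result is made
# timezone-aware only when settings.USE_TZ is enabled):
#
#     DateTimeField().to_internal_value('2001-01-01T12:00:00')
#     # -> datetime.datetime(2001, 1, 1, 12, 0)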
class DateField(Field):
default_error_messages = {
'invalid': _('Date has wrong format. Use one of these formats instead: {format}.'),
'datetime': _('Expected a date but got a datetime.'),
}
format = api_settings.DATE_FORMAT
input_formats = api_settings.DATE_INPUT_FORMATS
def __init__(self, format=empty, input_formats=None, *args, **kwargs):
self.format = format if format is not empty else self.format
self.input_formats = input_formats if input_formats is not None else self.input_formats
super(DateField, self).__init__(*args, **kwargs)
def to_internal_value(self, value):
if isinstance(value, datetime.datetime):
self.fail('datetime')
if isinstance(value, datetime.date):
return value
for format in self.input_formats:
if format.lower() == ISO_8601:
try:
parsed = parse_date(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return parsed
else:
try:
parsed = datetime.datetime.strptime(value, format)
except (ValueError, TypeError):
pass
else:
return parsed.date()
humanized_format = humanize_datetime.date_formats(self.input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if self.format is None:
return value
# Applying a `DateField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
'Expected a `date`, but got a `datetime`. Refusing to coerce, '
'as this may mean losing timezone information. Use a custom '
'read-only field and deal with timezone issues explicitly.'
)
if self.format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(self.format)
class TimeField(Field):
default_error_messages = {
'invalid': _('Time has wrong format. Use one of these formats instead: {format}.'),
}
format = api_settings.TIME_FORMAT
input_formats = api_settings.TIME_INPUT_FORMATS
def __init__(self, format=empty, input_formats=None, *args, **kwargs):
self.format = format if format is not empty else self.format
self.input_formats = input_formats if input_formats is not None else self.input_formats
super(TimeField, self).__init__(*args, **kwargs)
def to_internal_value(self, value):
if isinstance(value, datetime.time):
return value
for format in self.input_formats:
if format.lower() == ISO_8601:
try:
parsed = parse_time(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return parsed
else:
try:
parsed = datetime.datetime.strptime(value, format)
except (ValueError, TypeError):
pass
else:
return parsed.time()
humanized_format = humanize_datetime.time_formats(self.input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if self.format is None:
return value
# Applying a `TimeField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
'Expected a `time`, but got a `datetime`. Refusing to coerce, '
'as this may mean losing timezone information. Use a custom '
'read-only field and deal with timezone issues explicitly.'
)
if self.format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(self.format)
# Choice types...
class ChoiceField(Field):
default_error_messages = {
'invalid_choice': _('"{input}" is not a valid choice.')
}
def __init__(self, choices, **kwargs):
# Allow either single or paired choices style:
# choices = [1, 2, 3]
# choices = [(1, 'First'), (2, 'Second'), (3, 'Third')]
pairs = [
isinstance(item, (list, tuple)) and len(item) == 2
for item in choices
]
if all(pairs):
self.choices = OrderedDict([(key, display_value) for key, display_value in choices])
else:
self.choices = OrderedDict([(item, item) for item in choices])
# Map the string representation of choices to the underlying value.
# Allows us to deal with eg. integer choices while supporting either
# integer or string input, but still get the correct datatype out.
self.choice_strings_to_values = dict([
(six.text_type(key), key) for key in self.choices.keys()
])
self.allow_blank = kwargs.pop('allow_blank', False)
super(ChoiceField, self).__init__(**kwargs)
def to_internal_value(self, data):
if data == '' and self.allow_blank:
return ''
try:
return self.choice_strings_to_values[six.text_type(data)]
except KeyError:
self.fail('invalid_choice', input=data)
def to_representation(self, value):
if value in ('', None):
return value
return self.choice_strings_to_values[six.text_type(value)]
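# Illustration of the string-to-value mapping built in __init__:
#
#     field = ChoiceField(choices=[1, 2, 3])
#     field.to_internal_value('2')   # -> 2 (string input, original datatype out)
#     field.to_internal_value(4)     # raises ValidationError ('invalid_choice')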
class MultipleChoiceField(ChoiceField):
default_error_messages = {
'invalid_choice': _('"{input}" is not a valid choice.'),
'not_a_list': _('Expected a list of items but got type "{input_type}".')
}
default_empty_html = []
def get_value(self, dictionary):
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return dictionary.getlist(self.field_name)
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
if isinstance(data, type('')) or not hasattr(data, '__iter__'):
self.fail('not_a_list', input_type=type(data).__name__)
return set([
super(MultipleChoiceField, self).to_internal_value(item)
for item in data
])
def to_representation(self, value):
return set([
self.choice_strings_to_values[six.text_type(item)] for item in value
])
# File types...
class FileField(Field):
default_error_messages = {
'required': _('No file was submitted.'),
'invalid': _('The submitted data was not a file. Check the encoding type on the form.'),
'no_name': _('No filename could be determined.'),
'empty': _('The submitted file is empty.'),
'max_length': _('Ensure this filename has at most {max_length} characters (it has {length}).'),
}
use_url = api_settings.UPLOADED_FILES_USE_URL
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
self.use_url = kwargs.pop('use_url', self.use_url)
super(FileField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
try:
# `UploadedFile` objects should have name and size attributes.
file_name = data.name
file_size = data.size
except AttributeError:
self.fail('invalid')
if not file_name:
self.fail('no_name')
if not self.allow_empty_file and not file_size:
self.fail('empty')
if self.max_length and len(file_name) > self.max_length:
self.fail('max_length', max_length=self.max_length, length=len(file_name))
return data
def to_representation(self, value):
if self.use_url:
if not value:
return None
url = value.url
request = self.context.get('request', None)
if request is not None:
return request.build_absolute_uri(url)
return url
return value.name
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
'Upload a valid image. The file you uploaded was either not an image or a corrupted image.'
),
}
def __init__(self, *args, **kwargs):
self._DjangoImageField = kwargs.pop('_DjangoImageField', DjangoImageField)
super(ImageField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
# Image validation is a bit grungy, so we'll just outright
# defer to Django's implementation so we don't need to
# consider it, or treat PIL as a test dependency.
file_object = super(ImageField, self).to_internal_value(data)
django_field = self._DjangoImageField()
django_field.error_messages = self.error_messages
django_field.to_python(file_object)
return file_object
# Composite field types...
class _UnvalidatedField(Field):
def __init__(self, *args, **kwargs):
super(_UnvalidatedField, self).__init__(*args, **kwargs)
self.allow_blank = True
self.allow_null = True
def to_internal_value(self, data):
return data
def to_representation(self, value):
return value
class ListField(Field):
child = _UnvalidatedField()
initial = []
default_error_messages = {
'not_a_list': _('Expected a list of items but got type "{input_type}".')
}
def __init__(self, *args, **kwargs):
self.child = kwargs.pop('child', copy.deepcopy(self.child))
assert not inspect.isclass(self.child), '`child` has not been instantiated.'
super(ListField, self).__init__(*args, **kwargs)
self.child.bind(field_name='', parent=self)
def get_value(self, dictionary):
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_list(dictionary, prefix=self.field_name)
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
"""
List of native values <- List of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_list(data)
if isinstance(data, type('')) or not hasattr(data, '__iter__'):
self.fail('not_a_list', input_type=type(data).__name__)
return [self.child.run_validation(item) for item in data]
def to_representation(self, data):
"""
List of native values -> List of primitive datatypes.
"""
return [self.child.to_representation(item) for item in data]
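# Minimal usage sketch (the child field choice is arbitrary):
#
#     scores = ListField(child=IntegerField())
#     scores.run_validation(['1', 2, '3'])   # -> [1, 2, 3]
#     scores.run_validation('not-a-list')    # raises ValidationError ('not_a_list')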
class DictField(Field):
child = _UnvalidatedField()
initial = {}
default_error_messages = {
'not_a_dict': _('Expected a dictionary of items but got type "{input_type}".')
}
def __init__(self, *args, **kwargs):
self.child = kwargs.pop('child', copy.deepcopy(self.child))
assert not inspect.isclass(self.child), '`child` has not been instantiated.'
super(DictField, self).__init__(*args, **kwargs)
self.child.bind(field_name='', parent=self)
def get_value(self, dictionary):
# We override the default field access in order to support
# dictionaries in HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_dict(dictionary, prefix=self.field_name)
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
"""
Dicts of native values <- Dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_dict(data)
if not isinstance(data, dict):
self.fail('not_a_dict', input_type=type(data).__name__)
return dict([
(six.text_type(key), self.child.run_validation(value))
for key, value in data.items()
])
def to_representation(self, value):
"""
Dicts of native values -> Dicts of primitive datatypes.
"""
return dict([
(six.text_type(key), self.child.to_representation(val))
for key, val in value.items()
])
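# A usage sketch with an arbitrary child field:
#
#     prices = DictField(child=DecimalField(max_digits=5, decimal_places=2))
#     prices.run_validation({'apple': '1.50'})   # -> {'apple': Decimal('1.50')}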
# Miscellaneous field types...
class ReadOnlyField(Field):
"""
A read-only field that simply returns the field value.
If the field is a method with no parameters, the method will be called
and its return value used as the representation.
For example, the following would call `get_expiry_date()` on the object:
class ExampleSerializer(self):
expiry_date = ReadOnlyField(source='get_expiry_date')
"""
def __init__(self, **kwargs):
kwargs['read_only'] = True
super(ReadOnlyField, self).__init__(**kwargs)
def to_representation(self, value):
return value
class HiddenField(Field):
"""
A hidden field does not take input from the user, or present any output,
but it does populate a field in `validated_data`, based on its default
value. This is particularly useful when we have a `unique_for_date`
constraint on a pair of fields, as we need some way to include the date in
the validated data.
"""
def __init__(self, **kwargs):
assert 'default' in kwargs, 'default is a required argument.'
kwargs['write_only'] = True
super(HiddenField, self).__init__(**kwargs)
def get_value(self, dictionary):
# We always use the default value for `HiddenField`.
# User input is never provided or accepted.
return empty
def to_internal_value(self, data):
return data
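# Typical usage for the `unique_for_date` case described in the docstring
# (serializer and field names are illustrative):
#
#     class CommentSerializer(serializers.ModelSerializer):
#         published = serializers.HiddenField(default=timezone.now)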
class SerializerMethodField(Field):
"""
A read-only field that gets its representation from calling a method on the
parent serializer class. The method called will be of the form
"get_{field_name}", and should take a single argument, which is the
object being serialized.
For example:
class ExampleSerializer(self):
extra_info = SerializerMethodField()
def get_extra_info(self, obj):
return ... # Calculate some data to return.
"""
def __init__(self, method_name=None, **kwargs):
self.method_name = method_name
kwargs['source'] = '*'
kwargs['read_only'] = True
super(SerializerMethodField, self).__init__(**kwargs)
def bind(self, field_name, parent):
# In order to enforce a consistent style, we error if a redundant
# 'method_name' argument has been used. For example:
# extra_info = SerializerMethodField(method_name='get_extra_info')
default_method_name = 'get_{field_name}'.format(field_name=field_name)
assert self.method_name != default_method_name, (
"It is redundant to specify `%s` on SerializerMethodField '%s' in "
"serializer '%s', because it is the same as the default method name. "
"Remove the `method_name` argument." %
(self.method_name, field_name, parent.__class__.__name__)
)
# The method name should default to `get_{field_name}`.
if self.method_name is None:
self.method_name = default_method_name
super(SerializerMethodField, self).bind(field_name, parent)
def to_representation(self, value):
method = getattr(self.parent, self.method_name)
return method(value)
class ModelField(Field):
"""
A generic field that can be used against an arbitrary model field.
This is used by `ModelSerializer` when dealing with custom model fields,
that do not have a serializer field to be mapped to.
"""
default_error_messages = {
'max_length': _('Ensure this field has no more than {max_length} characters.'),
}
def __init__(self, model_field, **kwargs):
self.model_field = model_field
# The `max_length` option is supported by Django's base `Field` class,
# so we'd better support it here.
max_length = kwargs.pop('max_length', None)
super(ModelField, self).__init__(**kwargs)
if max_length is not None:
message = self.error_messages['max_length'].format(max_length=max_length)
self.validators.append(MaxLengthValidator(max_length, message=message))
def to_internal_value(self, data):
rel = getattr(self.model_field, 'rel', None)
if rel is not None:
return rel.to._meta.get_field(rel.field_name).to_python(data)
return self.model_field.to_python(data)
def get_attribute(self, obj):
# We pass the object instance onto `to_representation`,
# not just the field attribute.
return obj
def to_representation(self, obj):
value = self.model_field._get_val_from_obj(obj)
if is_protected_type(value):
return value
return self.model_field.value_to_string(obj)
|
|
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import jsonutils
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests import tools
try:
OrderedDict = collections.OrderedDict
except AttributeError:
import ordereddict
OrderedDict = ordereddict.OrderedDict
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class TestBaseOVS(base.BaseTestCase):
def setUp(self):
super(TestBaseOVS, self).setUp()
self.root_helper = 'sudo'
self.ovs = ovs_lib.BaseOVS(self.root_helper)
self.br_name = 'bridge1'
def test_add_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
bridge = self.ovs.add_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--may-exist",
"add-br", self.br_name])
self.assertEqual(bridge.br_name, self.br_name)
self.assertEqual(bridge.root_helper, self.ovs.root_helper)
def test_delete_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.delete_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--if-exists", "del-br",
self.br_name])
def test_bridge_exists_returns_true(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.assertTrue(self.ovs.bridge_exists(self.br_name))
mock_vsctl.assert_called_with(['br-exists', self.br_name],
check_error=True)
def test_bridge_exists_returns_false_for_exit_code_2(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 2\n')):
self.assertFalse(self.ovs.bridge_exists('bridge1'))
def test_bridge_exists_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.bridge_exists('bridge1')
def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self):
port_name = 'bar'
with mock.patch.object(self.ovs, 'run_vsctl',
return_value=self.br_name) as mock_vsctl:
bridge = self.ovs.get_bridge_name_for_port_name(port_name)
self.assertEqual(bridge, self.br_name)
mock_vsctl.assert_called_with(['port-to-br', port_name],
check_error=True)
def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 1\n')):
self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1'))
def test_get_bridge_name_for_port_name_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.get_bridge_name_for_port_name('bridge1')
def _test_port_exists(self, br_name, result):
with mock.patch.object(self.ovs,
'get_bridge_name_for_port_name',
return_value=br_name):
self.assertEqual(self.ovs.port_exists('bar'), result)
def test_port_exists_returns_true_for_bridge_name(self):
self._test_port_exists(self.br_name, True)
def test_port_exists_returns_false_for_none(self):
self._test_port_exists(None, False)
class OFCTLParamListMatcher(object):
def _parse(self, params):
actions_pos = params.find('actions')
return set(params[:actions_pos].split(',')), params[actions_pos:]
def __init__(self, params):
self.expected = self._parse(params)
def __eq__(self, other):
return self.expected == self._parse(other)
def __str__(self):
return 'ovs-ofctl parameters: %s, "%s"' % self.expected
__repr__ = __str__
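# Illustrative comparison: the match fields before 'actions' are compared as a
# set (ordering is ignored), while the actions part must match verbatim.
#
#     OFCTLParamListMatcher('priority=2,in_port=1,actions=drop') == \
#         'in_port=1,priority=2,actions=drop'   # -> True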
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=10"
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def test_set_controller(self):
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
self.br.set_controller(controller_names)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME,
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'],
root_helper=self.root_helper)
def test_del_controller(self):
self.br.del_controller()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_get_controller(self):
self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555'
names = self.br.get_controller()
self.assertEqual(names,
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_set_secure_mode(self):
self.br.set_secure_mode()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-fail-mode', self.BR_NAME,
'secure'], root_helper=self.root_helper)
def test_set_protocols(self):
protocols = 'OpenFlow13'
self.br.set_protocols(protocols)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME,
"protocols=%s" % protocols],
root_helper=self.root_helper)
def test_create(self):
self.br.add_bridge(self.BR_NAME)
self.br.create()
def test_destroy(self):
self.br.delete_bridge(self.BR_NAME)
self.br.destroy()
def test_reset_bridge(self):
self.br.destroy()
self.br.create()
self.br.reset_bridge()
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def _test_delete_port(self, exp_timeout=None):
exp_timeout_str = self._build_timeout_opt(exp_timeout)
pname = "tap5"
self.br.delete_port(pname)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
def test_delete_port(self):
self._test_delete_port()
    def test_call_command_non_default_timeout(self):
# This test is only for verifying a non-default timeout
# is correctly applied. Does not need to be repeated for
# every ovs_lib method
new_timeout = 5
self.br.vsctl_timeout = new_timeout
self._test_delete_port(new_timeout)
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = OrderedDict([('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = OrderedDict([('priority', 1),
('actions', 'normal')])
flow_dict_3 = OrderedDict([('priority', 2),
('actions', 'drop')])
flow_dict_4 = OrderedDict([('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef,"
"actions=strip_vlan,output:0"),
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"),
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"),
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=2,"
"in_port=%s,actions=drop" % ofport),
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (vid, ofport, lsw_id)),
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=3,"
"tun_id=%s,actions=mod_vlan_vid:%s,"
"output:%s" % (lsw_id, vid, ofport)),
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=4,"
"nw_src=%s,arp,actions=drop" % cidr),
root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_add_flow_timeout_set(self):
flow_dict = OrderedDict([('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input="hard_timeout=1000,idle_timeout=2000,priority=1,"
"actions=normal",
root_helper=self.root_helper)
def test_add_flow_default_priority(self):
flow_dict = OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flows", self.BR_NAME, '-'],
process_input="hard_timeout=0,idle_timeout=0,priority=1,"
"actions=normal",
root_helper=self.root_helper)
def _test_get_port_ofport(self, ofport, expected_result):
pname = "tap99"
self.execute.return_value = ofport
self.assertEqual(self.br.get_port_ofport(pname), expected_result)
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
root_helper=self.root_helper)
def test_get_port_ofport_succeeds_for_valid_ofport(self):
self._test_get_port_ofport("6", "6")
def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
self._test_get_port_ofport("[]", ovs_lib.INVALID_OFPORT)
def test_get_port_ofport_returns_invalid_ofport_for_none(self):
self._test_get_port_ofport(None, ovs_lib.INVALID_OFPORT)
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
self.execute.return_value = datapath_id
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.execute.assert_called_once_with(
["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper,
process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME, '-'],
process_input="in_port=" + ofport,
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME, '-'],
process_input="tun_id=%s" % lsw_id,
root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME, '-'],
process_input="dl_vlan=%s" % vid,
root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(None, retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_fragmented_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
vxlan_udp_port = "9999"
dont_fragment = False
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=false",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
# Each element is a tuple of (expected mock call, return_value)
command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=patch", "options:peer=" + peer])
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper),
None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport)
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
"%s\n" % pname),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper),
external_ids),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
if is_xen:
expected_calls_and_values.append(
(mock.call(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper),
vif_id)
)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
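    # For illustration (hypothetical row): _encode_ovs_json(
    #     ['name', 'external_ids'], [['tap0', {'iface-id': 'abc'}]])
    # produces JSON roughly equivalent to
    #     {"headings": ["name", "external_ids"],
    #      "data": [["tap0", ["map", [["iface-id", "abc"]]]]]}
    # mirroring the column/row encoding described in ovs-vsctl(8).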
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# A vif port on another bridge:
['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1],
# Non-vif port on this bridge:
['tun22', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\ntun22'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\n'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'\n'.join((iface for iface, tag in data))),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,tag",
"list", "Port"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"],
root_helper=self.root_helper)
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
self.execute.return_value = 'br-int'
exp_timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "iface-to-br", iface],
root_helper=root_helper)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
root_helper = 'sudo'
self.execute.side_effect = Exception
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_bridges(self, exp_timeout=None):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
self.execute.return_value = 'br-int\nbr-ex\n'
timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.execute.assert_called_once_with(
["ovs-vsctl", timeout_str, "list-br"],
root_helper=root_helper)
def test_get_bridges(self):
self._test_get_bridges()
def test_get_bridges_not_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_get_bridges(new_timeout)
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=external_ids,name,ofport",
"find", "Interface",
'external_ids:iface-id="%s"' % iface_id],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
expected_calls_and_values.append(
(mock.call(["ovs-vsctl", self.TO,
"iface-to-br", data[0][headings.index('name')]],
root_helper=self.root_helper),
br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
if mac:
external_ids.append(["attached-mac", mac])
data = [[["map", external_ids], "tap99",
ofport if ofport else '["set",[]]']]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port)
return
self.assertEqual(vif_port.vif_id, 'tap99id')
self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff')
self.assertEqual(vif_port.port_name, 'tap99')
self.assertEqual(vif_port.ofport, ofport)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_mac(self):
self._test_get_vif_port_by_id_with_data(ofport=1)
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
class TestDeferredOVSBridge(base.BaseTestCase):
def setUp(self):
super(TestDeferredOVSBridge, self).setUp()
self.br = mock.Mock()
self.mocked_do_action_flows = mock.patch.object(
self.br, 'do_action_flows').start()
self.add_flow_dict1 = dict(in_port=11, actions='drop')
self.add_flow_dict2 = dict(in_port=12, actions='drop')
self.mod_flow_dict1 = dict(in_port=21, actions='drop')
self.mod_flow_dict2 = dict(in_port=22, actions='drop')
self.del_flow_dict1 = dict(in_port=31)
self.del_flow_dict2 = dict(in_port=32)
def test_right_allowed_passthroughs(self):
expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port')
self.assertEqual(expected_passthroughs,
ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS)
def _verify_mock_call(self, expected_calls):
self.mocked_do_action_flows.assert_has_calls(expected_calls)
self.assertEqual(len(expected_calls),
len(self.mocked_do_action_flows.mock_calls))
def test_apply_on_exit(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
self._verify_mock_call(expected_calls)
def test_apply_on_exit_with_errors(self):
try:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
raise Exception
except Exception:
self._verify_mock_call([])
else:
            self.fail('Exception should have been reraised')
def test_apply(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
deferred_br.apply_flows()
self._verify_mock_call(expected_calls)
self._verify_mock_call(expected_calls)
def test_apply_order(self):
expected_calls = [
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]),
mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]),
]
order = 'del', 'mod', 'add'
with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_apply_full_ordered(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('add', [self.add_flow_dict2]),
mock.call('mod', [self.mod_flow_dict2]),
]
with ovs_lib.DeferredOVSBridge(self.br,
full_ordered=True) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_getattr_unallowed_attr(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertEqual(self.br.add_port, deferred_br.add_port)
def test_getattr_unallowed_attr_failure(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
from Crypto.Util import asn1
from cryptography import fernet
from oslo_config import cfg
import six
from barbican import i18n as u
from barbican.plugin.crypto import crypto as c
CONF = cfg.CONF
simple_crypto_plugin_group = cfg.OptGroup(name='simple_crypto_plugin',
title="Simple Crypto Plugin Options")
simple_crypto_plugin_opts = [
cfg.StrOpt('kek',
default=b'dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=',
help=u._('Key encryption key to be used by Simple Crypto '
'Plugin'))
]
CONF.register_group(simple_crypto_plugin_group)
CONF.register_opts(simple_crypto_plugin_opts, group=simple_crypto_plugin_group)
class SimpleCryptoPlugin(c.CryptoPluginBase):
"""Insecure implementation of the crypto plugin."""
def __init__(self, conf=CONF):
self.master_kek = conf.simple_crypto_plugin.kek
def _get_kek(self, kek_meta_dto):
if not kek_meta_dto.plugin_meta:
raise ValueError(u._('KEK not yet created.'))
# the kek is stored encrypted. Need to decrypt.
encryptor = fernet.Fernet(self.master_kek)
        # Note: if plugin_meta is unicode, encode it to bytes.
if isinstance(kek_meta_dto.plugin_meta, six.text_type):
kek_meta_dto.plugin_meta = kek_meta_dto.plugin_meta.encode('utf-8')
return encryptor.decrypt(kek_meta_dto.plugin_meta)
def encrypt(self, encrypt_dto, kek_meta_dto, project_id):
kek = self._get_kek(kek_meta_dto)
unencrypted = encrypt_dto.unencrypted
if not isinstance(unencrypted, str):
raise ValueError(
u._(
'Unencrypted data must be a byte type, but was '
'{unencrypted_type}'
).format(
unencrypted_type=type(unencrypted)
)
)
encryptor = fernet.Fernet(kek)
cyphertext = encryptor.encrypt(unencrypted)
return c.ResponseDTO(cyphertext, None)
def decrypt(self, encrypted_dto, kek_meta_dto, kek_meta_extended,
project_id):
kek = self._get_kek(kek_meta_dto)
encrypted = encrypted_dto.encrypted
decryptor = fernet.Fernet(kek)
return decryptor.decrypt(encrypted)
def bind_kek_metadata(self, kek_meta_dto):
kek_meta_dto.algorithm = 'aes'
kek_meta_dto.bit_length = 128
kek_meta_dto.mode = 'cbc'
if not kek_meta_dto.plugin_meta:
# the kek is stored encrypted in the plugin_meta field
encryptor = fernet.Fernet(self.master_kek)
key = fernet.Fernet.generate_key()
kek_meta_dto.plugin_meta = encryptor.encrypt(key)
return kek_meta_dto
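    # Rough sketch of the KEK lifecycle assumed by the methods above
    # (illustrative only, not part of the plugin API):
    #     master = fernet.Fernet(self.master_kek)
    #     project_kek = fernet.Fernet.generate_key()
    #     kek_meta_dto.plugin_meta = master.encrypt(project_kek)   # bind_kek_metadata()
    #     ...
    #     project_kek = master.decrypt(kek_meta_dto.plugin_meta)   # _get_kek()
    # i.e. each secret is encrypted with a per-KEK Fernet key that is itself
    # wrapped by the configured master KEK.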
def generate_symmetric(self, generate_dto, kek_meta_dto, project_id):
        # Integer division so os.urandom() always receives an int.
        byte_length = int(generate_dto.bit_length) // 8
unencrypted = os.urandom(byte_length)
return self.encrypt(c.EncryptDTO(unencrypted),
kek_meta_dto,
project_id)
def generate_asymmetric(self, generate_dto, kek_meta_dto, project_id):
"""Generate asymmetric keys based on below rules:
- RSA, with passphrase (supported)
- RSA, without passphrase (supported)
- DSA, without passphrase (supported)
- DSA, with passphrase (not supported)
        Note: PyCrypto is not capable of serializing DSA
        keys and DER formatted keys. Such keys will be
        serialized to Base64 PEM to store in DB.
        TODO (atiwari/reaperhulk): PyCrypto is not capable of serializing
        DSA keys and DER formatted keys; later we need to pick a better
        crypto lib.
"""
if(generate_dto.algorithm is None or generate_dto
.algorithm.lower() == 'rsa'):
private_key = RSA.generate(
generate_dto.bit_length, None, None, 65537)
elif generate_dto.algorithm.lower() == 'dsa':
private_key = DSA.generate(generate_dto.bit_length, None, None)
else:
raise c.CryptoPrivateKeyFailureException()
public_key = private_key.publickey()
        # Note (atiwari): only the PEM key wrapping format is supported
if generate_dto.algorithm.lower() == 'rsa':
public_key, private_key = self._wrap_key(public_key, private_key,
generate_dto.passphrase)
if generate_dto.algorithm.lower() == 'dsa':
if generate_dto.passphrase:
raise ValueError(u._('Passphrase not supported for DSA key'))
public_key, private_key = self._serialize_dsa_key(public_key,
private_key)
private_dto = self.encrypt(c.EncryptDTO(private_key),
kek_meta_dto,
project_id)
public_dto = self.encrypt(c.EncryptDTO(public_key),
kek_meta_dto,
project_id)
passphrase_dto = None
if generate_dto.passphrase:
if isinstance(generate_dto.passphrase, six.text_type):
generate_dto.passphrase = generate_dto.passphrase.encode(
'utf-8')
passphrase_dto = self.encrypt(c.EncryptDTO(generate_dto.
passphrase),
kek_meta_dto,
project_id)
return private_dto, public_dto, passphrase_dto
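    # Behaviour sketch of the rules in the docstring above (illustrative):
    #     algorithm='rsa', no passphrase    -> PEM key pair (private key as PKCS#8)
    #     algorithm='rsa', with passphrase  -> passphrase-protected PKCS#8 private key
    #     algorithm='dsa', no passphrase    -> base64 PEM-style DSA blobs
    #     algorithm='dsa', with passphrase  -> ValueError (not supported)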
def supports(self, type_enum, algorithm=None, bit_length=None,
mode=None):
if type_enum == c.PluginSupportTypes.ENCRYPT_DECRYPT:
return True
if type_enum == c.PluginSupportTypes.SYMMETRIC_KEY_GENERATION:
return self._is_algorithm_supported(algorithm,
bit_length)
elif type_enum == c.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION:
return self._is_algorithm_supported(algorithm,
bit_length)
else:
return False
def _wrap_key(self, public_key, private_key,
passphrase):
pkcs = 8
key_wrap_format = 'PEM'
private_key = private_key.exportKey(key_wrap_format, passphrase, pkcs)
public_key = public_key.exportKey()
return public_key, private_key
def _serialize_dsa_key(self, public_key, private_key):
pub_seq = asn1.DerSequence()
pub_seq[:] = [0, public_key.p, public_key.q,
public_key.g, public_key.y]
public_key = ("-----BEGIN DSA PUBLIC KEY-----\n{0}"
"-----END DSA PUBLIC KEY-----"
.format(pub_seq.encode().encode("base64")))
prv_seq = asn1.DerSequence()
prv_seq[:] = [0, private_key.p, private_key.q,
private_key.g, private_key.y, private_key.x]
private_key = ("-----BEGIN DSA PRIVATE KEY-----\n{0}"
"-----END DSA PRIVATE KEY-----"
.format(prv_seq.encode().encode("base64")))
return public_key, private_key
def _is_algorithm_supported(self, algorithm=None, bit_length=None):
"""check if algorithm and bit_length combination is supported."""
if algorithm is None or bit_length is None:
return False
if (algorithm.lower() in
c.PluginSupportTypes.SYMMETRIC_ALGORITHMS and bit_length in
c.PluginSupportTypes.SYMMETRIC_KEY_LENGTHS):
return True
elif (algorithm.lower() in c.PluginSupportTypes.ASYMMETRIC_ALGORITHMS
and bit_length in c.PluginSupportTypes.ASYMMETRIC_KEY_LENGTHS):
return True
else:
return False
|
|
"""Test breakpoint by file/line number; and list variables with array types."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ArrayTypesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.c', '// Set break point at this line.')
def test_and_run_command(self):
"""Test 'frame variable var_name' on some variables with array types."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=False)
self.runCmd("run", RUN_SUCCEEDED)
# The test suite sometimes shows that the process has exited without stopping.
#
# CC=clang ./dotest.py -v -t array_types
# ...
# Process 76604 exited with status = 0 (0x00000000)
self.runCmd("process status")
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=['resolved, hit count = 1'])
# Issue 'variable list' command on several array-type variables.
self.expect(
"frame variable --show-types strings",
VARIABLES_DISPLAYED_CORRECTLY,
startstr='(char *[4])',
substrs=[
'(char *) [0]',
'Hello',
'(char *) [1]',
'Hola',
'(char *) [2]',
'Bonjour',
'(char *) [3]',
'Guten Tag'])
self.expect(
"frame variable --show-types --raw -- char_16",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
'(char) [0]',
'(char) [15]'])
self.expect(
"frame variable --show-types ushort_matrix",
VARIABLES_DISPLAYED_CORRECTLY,
startstr='(unsigned short [2][3])')
self.expect(
"frame variable --show-types long_6",
VARIABLES_DISPLAYED_CORRECTLY,
startstr='(long [6])')
@expectedFailureNetBSD
@add_test_categories(['pyapi'])
def test_and_python_api(self):
"""Use Python APIs to inspect variables with array types."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.c", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Sanity check the print representation of breakpoint.
bp = str(breakpoint)
self.expect(bp, msg="Breakpoint looks good", exe=False,
substrs=["file = 'main.c'",
"line = %d" % self.line,
"locations = 1"])
self.expect(
bp,
msg="Breakpoint is not resolved as yet",
exe=False,
matching=False,
substrs=["resolved = 1"])
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Sanity check the print representation of process.
proc = str(process)
self.expect(proc, msg="Process looks good", exe=False,
substrs=["state = stopped",
"executable = a.out"])
# The stop reason of the thread should be breakpoint.
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
# Sanity check the print representation of thread.
thr = str(thread)
# TODO(zturner): Whether the TID is printed in hex or decimal should be controlled by a setting,
# and this test should read the value of the setting. This check is currently hardcoded to
# match the check in Core/FormatEntity.cpp in the function FormatEntity::Format() for
# the Entry::Type::ThreadID case of the switch statement.
if self.getPlatform() == "linux" or self.getPlatform() == "freebsd":
tidstr = "tid = %u" % thread.GetThreadID()
else:
tidstr = "tid = 0x%4.4x" % thread.GetThreadID()
self.expect(
thr,
"Thread looks good with stop reason = breakpoint",
exe=False,
substrs=[tidstr])
# The breakpoint should have a hit count of 1.
self.assertEqual(breakpoint.GetHitCount(), 1, BREAKPOINT_HIT_ONCE)
# The breakpoint should be resolved by now.
bp = str(breakpoint)
self.expect(bp, "Breakpoint looks good and is resolved", exe=False,
substrs=["file = 'main.c'",
"line = %d" % self.line,
"locations = 1"])
# Sanity check the print representation of frame.
frame = thread.GetFrameAtIndex(0)
frm = str(frame)
self.expect(
frm,
"Frame looks good with correct index %d" %
frame.GetFrameID(),
exe=False,
substrs=[
"#%d" %
frame.GetFrameID()])
# Lookup the "strings" string array variable and sanity check its print
# representation.
variable = frame.FindVariable("strings")
var = str(variable)
self.expect(
var,
"Variable for 'strings' looks good with correct name",
exe=False,
substrs=[
"%s" %
variable.GetName()])
self.DebugSBValue(variable)
self.assertEquals(variable.GetNumChildren(), 4,
"Variable 'strings' should have 4 children")
byte_size = variable.GetByteSize()
self.assertTrue(byte_size >= 4*4 and byte_size <= 1024)
child3 = variable.GetChildAtIndex(3)
self.DebugSBValue(child3)
self.assertEquals(child3.GetSummary(), '"Guten Tag"',
'strings[3] == "Guten Tag"')
# Lookup the "char_16" char array variable.
variable = frame.FindVariable("char_16")
self.DebugSBValue(variable)
self.assertEquals(variable.GetNumChildren(), 16,
"Variable 'char_16' should have 16 children")
# Lookup the "ushort_matrix" ushort[] array variable.
# Notice the pattern of int(child0_2.GetValue(), 0). We pass a
# base of 0 so that the proper radix is determined based on the contents
# of the string. Same applies to long().
variable = frame.FindVariable("ushort_matrix")
self.DebugSBValue(variable)
self.assertEquals(variable.GetNumChildren(), 2,
"Variable 'ushort_matrix' should have 2 children")
child0 = variable.GetChildAtIndex(0)
self.DebugSBValue(child0)
self.assertEquals(child0.GetNumChildren(), 3,
"Variable 'ushort_matrix[0]' should have 3 children")
child0_2 = child0.GetChildAtIndex(2)
self.DebugSBValue(child0_2)
self.assertEquals(int(child0_2.GetValue(), 0), 3,
"ushort_matrix[0][2] == 3")
# Lookup the "long_6" char array variable.
variable = frame.FindVariable("long_6")
self.DebugSBValue(variable)
self.assertEquals(variable.GetNumChildren(), 6,
"Variable 'long_6' should have 6 children")
child5 = variable.GetChildAtIndex(5)
self.DebugSBValue(child5)
self.assertEquals(int(child5.GetValue(), 0), 6,
"long_6[5] == 6")
# Last, check that "long_6" has a value type of eValueTypeVariableLocal
# and "argc" has eValueTypeVariableArgument.
from lldbsuite.test.lldbutil import value_type_to_str
self.assertTrue(
variable.GetValueType() == lldb.eValueTypeVariableLocal,
"Variable 'long_6' should have '%s' value type." %
value_type_to_str(
lldb.eValueTypeVariableLocal))
argc = frame.FindVariable("argc")
self.DebugSBValue(argc)
self.assertEquals(argc.GetValueType(), lldb.eValueTypeVariableArgument,
"Variable 'argc' should have '%s' value type." %
value_type_to_str(lldb.eValueTypeVariableArgument))
|
|
import json
from docutils import nodes
from docutils import statemachine
from docutils.parsers.rst import Directive
from sphinx.util.nodes import set_source_info
import jsonschema
def get_standard_cls(standard):
return {
3: jsonschema.validators.Draft3Validator,
4: jsonschema.validators.Draft4Validator,
6: jsonschema.validators.Draft6Validator}[standard]
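# Minimal usage sketch (illustrative; the directive below does exactly this):
#     cls = get_standard_cls(4)   # -> jsonschema.validators.Draft4Validator
#     jsonschema.validate({"answer": 42}, {"type": "object"}, cls=cls)
# jsonschema.validate() raises jsonschema.ValidationError when the instance
# does not conform to the schema.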
class jsonschema_node(nodes.Element):
pass
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def split_content(l):
parts = []
should_pass = True
part = []
comment = []
def add_part():
hl_lines = []
for i, line in enumerate(part):
if line.lstrip().startswith('*'):
line = line.replace('*', '', 1)
hl_lines.append(i + 1)
part[i] = line
content = '\n'.join(part)
try:
json_content = json.loads(content)
except ValueError:
if should_pass:
raise ValueError("Invalid json: {0}".format(content))
else:
# A complex number will never validate
json_content = 1+1j
parts.append(AttrDict({
'should_pass': should_pass,
'content': content,
'json': json_content,
'comment': comment,
'hl_lines': hl_lines}
))
for line in l:
if line.startswith('//'):
comment.append(line[2:].lstrip())
elif line == '--':
add_part()
should_pass = True
part = []
comment = []
elif line == '--X':
add_part()
should_pass = False
part = []
comment = []
else:
part.append(line)
add_part()
return parts[0], parts[1:]
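# Illustrative split (hypothetical directive body): given the lines
#     ['{"type": "string"}', '--', '"ok"', '--X', '42']
# split_content() returns the schema part ('{"type": "string"}') first, then
# two example parts: '"ok"' with should_pass=True and '42' with
# should_pass=False -- the '--X' separator marks the *next* fragment as one
# that must fail validation.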
class SchemaExampleDirective(Directive):
has_content = True
validate = True
optional_arguments = 1
def run(self):
env = self.state.document.settings.env
if len(self.arguments) == 1:
standard = int(self.arguments[0])
else:
standard = env.config.jsonschema_standard
standard = get_standard_cls(standard)
result = []
schema, parts = split_content(self.content)
container = jsonschema_node()
set_source_info(self, container)
literal = nodes.literal_block(
schema.content, schema.content)
literal['language'] = 'javascript'
literal['classes'] = container['classes'] = ['jsonschema']
if schema.hl_lines:
literal['highlight_args'] = {'hl_lines': schema.hl_lines}
set_source_info(self, literal)
container.append(literal)
result.append(container)
for part in parts:
if self.validate:
is_valid = True
try:
jsonschema.validate(
part.json, schema.json,
cls=standard)
except jsonschema.ValidationError as e:
is_valid = False
except jsonschema.SchemaError as e:
raise ValueError("Schema is invalid:\n{0}\n\n{1}".format(
str(e), schema.content))
if is_valid != part.should_pass:
if part.should_pass:
raise ValueError(
"Doc says fragment should pass, "
"but it does not validate:\n" +
part.content)
else:
raise ValueError(
"Doc says fragment should not pass, "
"but it validates:\n" +
part.content)
else:
is_valid = part.should_pass
if len(part.comment):
paragraph = nodes.paragraph('', '')
comment = statemachine.StringList(part.comment)
comment.parent = self.content.parent
self.state.nested_parse(comment, 0, paragraph)
paragraph['classes'] = ['jsonschema-comment']
set_source_info(self, paragraph)
result.append(paragraph)
container = jsonschema_node()
set_source_info(self, container)
literal = nodes.literal_block(
part.content, part.content)
literal['language'] = 'javascript'
if is_valid:
literal['classes'] = container['classes'] = ['jsonschema-pass']
else:
literal['classes'] = container['classes'] = ['jsonschema-fail']
if part.hl_lines:
literal['highlight_args'] = {'hl_lines': part.hl_lines}
set_source_info(self, literal)
container.append(literal)
result.append(container)
return result
class SchemaExampleNoValidationDirective(SchemaExampleDirective):
validate = False
def visit_jsonschema_node_html(self, node):
pass
def depart_jsonschema_node_html(self, node):
pass
def visit_jsonschema_node_latex(self, node):
adjust = False
color = "gray"
char = ""
if 'jsonschema-pass' in node['classes']:
char = r"\Checkmark"
color = "ForestGreen"
adjust = True
elif 'jsonschema-fail' in node['classes']:
char = r"\XSolidBrush"
color = "BrickRed"
adjust = True
elif 'jsonschema' in node['classes']:
char = r"\{ json schema \}"
if adjust:
self.body.append(r"\begin{adjustwidth}{2.5em}{0pt}")
self.body.append(r"\vspace{4pt}")
self.body.append(r"\begin{jsonframe}{%s}{%s}" % (char, color))
def depart_jsonschema_node_latex(self, node):
adjust = False
if 'jsonschema-pass' in node['classes']:
adjust = True
elif 'jsonschema-fail' in node['classes']:
adjust = True
self.body.append(r"\end{jsonframe}")
if adjust:
self.body.append(r"\end{adjustwidth}")
def setup(app):
app.add_config_value('jsonschema_standard', 4, 'env')
app.add_directive('schema_example',
SchemaExampleDirective)
app.add_directive('schema_example_novalid',
SchemaExampleNoValidationDirective)
app.add_node(
jsonschema_node,
html=(visit_jsonschema_node_html, depart_jsonschema_node_html),
latex=(visit_jsonschema_node_latex, depart_jsonschema_node_latex))
passoptionstopackages = r'\PassOptionsToPackage{dvipsnames}{xcolor}'
latex_preamble = r"""
\usepackage{changepage}
\usepackage{xcolor}
"""
|
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import csv
import re
from wlauto import ResultProcessor, settings, instrumentation
from wlauto.exceptions import ConfigError, ResultProcessorError
class DVFS(ResultProcessor):
name = 'dvfs'
description = """
    Reports DVFS state residency data from ftrace power events.
    This generates a ``dvfs.csv`` in the top-level results directory that,
    for each workload iteration, reports the percentage of time each CPU core
    spent in each of the DVFS frequency states (P-states), as well as the
    percentage of time spent idle, during the execution of the workload.
.. note:: ``trace-cmd`` instrument *MUST* be enabled in the instrumentation,
and at least ``'power*'`` events must be enabled.
"""
def __init__(self, **kwargs):
super(DVFS, self).__init__(**kwargs)
self.device = None
self.infile = None
self.outfile = None
self.current_cluster = None
self.currentstates_of_clusters = []
self.current_frequency_of_clusters = []
self.timestamp = []
self.state_time_map = {} # hold state at timestamp
self.cpuid_time_map = {} # hold cpuid at timestamp
self.cpu_freq_time_spent = {}
self.cpuids_of_clusters = []
self.power_state = [0, 1, 2, 3]
self.UNKNOWNSTATE = 4294967295
self.multiply_factor = None
self.corename_of_clusters = []
self.numberofcores_in_cluster = []
self.minimum_frequency_cluster = []
self.idlestate_description = {}
def validate(self):
if not instrumentation.instrument_is_installed('trace-cmd'):
raise ConfigError('"dvfs" works only if "trace_cmd" in enabled in instrumentation')
def initialize(self, context): # pylint: disable=R0912
self.device = context.device
if not self.device.has('cpuidle'):
raise ConfigError('Device does not appear to have cpuidle capability; is the right module installed?')
if not self.device.core_names:
message = 'Device does not specify its core types (core_names/core_clusters not set in device_config).'
raise ResultProcessorError(message)
number_of_clusters = max(self.device.core_clusters) + 1
        # In IKS devices, the actual number of cores is double
        # what device.number_of_cores reports
if self.device.scheduler == 'iks':
self.multiply_factor = 2
elif self.device.scheduler == 'unknown':
            # Device doesn't specify its scheduler type. It could be IKS, in
            # which case reported values would be wrong, so error out.
            message = ('The device does not specify its scheduler type. If you are '
'using a generic device interface, please make sure to set the '
'"scheduler" parameter in the device config.')
raise ResultProcessorError(message)
else:
self.multiply_factor = 1
        # Separate out the cores in each cluster as a
        # list of per-cluster core lists
listof_cores_clusters = []
for cluster in range(number_of_clusters):
listof_cores_clusters.append([core for core in self.device.core_clusters if core == cluster])
# Extract minimum frequency of each cluster and
# the idle power state with its descriptive name
#
total_cores = 0
current_cores = 0
for cluster, cores_list in enumerate(listof_cores_clusters):
self.corename_of_clusters.append(self.device.core_names[total_cores])
if self.device.scheduler != 'iks':
self.idlestate_description.update({s.id: s.desc for s in self.device.get_cpuidle_states(total_cores)})
else:
self.idlestate_description.update({s.id: s.desc for s in self.device.get_cpuidle_states()})
total_cores += len(cores_list)
self.numberofcores_in_cluster.append(len(cores_list))
for i in range(current_cores, total_cores):
if i in self.device.online_cpus:
self.minimum_frequency_cluster.append(int(self.device.get_cpu_min_frequency("cpu{}".format(i))))
break
current_cores = total_cores
length_frequency_cluster = len(self.minimum_frequency_cluster)
if length_frequency_cluster != number_of_clusters:
diff = number_of_clusters - length_frequency_cluster
offline_value = -1
for i in range(diff):
if self.device.scheduler != 'iks':
self.minimum_frequency_cluster.append(offline_value)
else:
self.minimum_frequency_cluster.append(self.device.iks_switch_frequency)
def process_iteration_result(self, result, context):
"""
        Parse the trace.txt for each iteration, calculate DVFS residency states/frequencies,
        dump the result to csv and flush the data for the next iteration.
"""
self.infile = os.path.join(context.output_directory, 'trace.txt')
if os.path.isfile(self.infile):
self.logger.debug('Running result_processor "dvfs"')
self.outfile = os.path.join(settings.output_directory, 'dvfs.csv')
self.flush_parse_initialize()
self.calculate()
self.percentage()
self.generate_csv(context)
self.logger.debug('Completed result_processor "dvfs"')
else:
self.logger.debug('trace.txt not found.')
def flush_parse_initialize(self):
"""
        Store the state and cpu_id for each timestamp from trace.txt and flush all the
        values for the next iteration.
"""
self.current_cluster = 0
self.current_frequency_of_clusters = []
self.timestamp = []
self.currentstates_of_clusters = []
self.state_time_map = {}
self.cpuid_time_map = {}
self.cpu_freq_time_spent = {}
self.cpuids_of_clusters = []
self.parse() # Parse trace.txt generated from trace-cmd instrumentation
# Initialize the states of each core of clusters and frequency of
# each clusters with its minimum freq
        # A cpu_id is assigned for each cluster.
        # For IKS devices the cpu_id stays the same across clusters,
        # for others it keeps incrementing by 1
count = 0
for cluster, cores_number in enumerate(self.numberofcores_in_cluster):
self.currentstates_of_clusters.append([-1 for dummy in range(cores_number)])
self.current_frequency_of_clusters.append(self.minimum_frequency_cluster[cluster])
if self.device.scheduler == 'iks':
self.cpuids_of_clusters.append([j for j in range(cores_number)])
else:
self.cpuids_of_clusters.append(range(count, count + cores_number))
count += cores_number
# Initialize the time spent in each state/frequency for each core.
for i in range(self.device.number_of_cores * self.multiply_factor):
self.cpu_freq_time_spent["cpu{}".format(i)] = {}
for j in self.unique_freq():
self.cpu_freq_time_spent["cpu{}".format(i)][j] = 0
# To determine offline -1 state is added
offline_value = -1
self.cpu_freq_time_spent["cpu{}".format(i)][offline_value] = 0
if 0 not in self.unique_freq():
self.cpu_freq_time_spent["cpu{}".format(i)][0] = 0
def update_cluster_freq(self, state, cpu_id):
""" Update the cluster frequency and current cluster"""
        # For IKS devices a cluster change is only possible when the
        # frequency changes; for others it is determined by cpu_id.
if self.device.scheduler != 'iks':
self.current_cluster = self.get_cluster(cpu_id, state)
if self.get_state_name(state) == "freqstate":
self.current_cluster = self.get_cluster(cpu_id, state)
self.current_frequency_of_clusters[self.current_cluster] = state
def get_cluster(self, cpu_id, state):
        # For IKS, if the current state is greater than or equal to the switch
        # frequency then we are in the second cluster, else the first.
        # For others, look up which cluster the current cpu_id belongs to.
if self.device.scheduler == 'iks':
return 1 if state >= self.device.iks_switch_frequency else 0
else:
for cluster, cpuids_list in enumerate(self.cpuids_of_clusters):
if cpu_id in cpuids_list:
return cluster
def get_cluster_freq(self):
return self.current_frequency_of_clusters[self.current_cluster]
def update_state(self, state, cpu_id): # pylint: disable=R0912
"""
        Update the state of each core in every cluster.
        This is done for each timestamp.
"""
POWERDOWN = 2
offline_value = -1
        # If the state is an unknown state, change the state of the current cpu_id
        # to the cluster frequency of the current cluster.
        # If the state is a power state, change it to that power state.
if self.get_state_name(state) in ["unknownstate", "powerstate"]:
for i in range(len(self.cpuids_of_clusters[self.current_cluster])):
if cpu_id == self.cpuids_of_clusters[self.current_cluster][i]:
if self.get_state_name(state) == "unknownstate":
self.currentstates_of_clusters[self.current_cluster][i] = self.current_frequency_of_clusters[self.current_cluster]
elif self.get_state_name(state) == "powerstate":
self.currentstates_of_clusters[self.current_cluster][i] = state
        # If the state is a frequency state then update the state with the current state.
        # For IKS, if all cores are powered down and the current state is a frequency
        # state, then update all the cores in the current cluster to the current state
        # and set the cores of the other clusters to power down.
if self.get_state_name(state) == "freqstate":
for i, j in enumerate(self.currentstates_of_clusters[self.current_cluster]):
if j != offline_value:
self.currentstates_of_clusters[self.current_cluster][i] = state
if cpu_id == self.cpuids_of_clusters[self.current_cluster][i]:
self.currentstates_of_clusters[self.current_cluster][i] = state
if self.device.scheduler == 'iks':
                check = False  # stays False if all cores in the cluster are powered down
for i in range(len(self.currentstates_of_clusters[self.current_cluster])):
if self.currentstates_of_clusters[self.current_cluster][i] != POWERDOWN:
check = True
break
if not check:
for i in range(len(self.currentstates_of_clusters[self.current_cluster])):
self.currentstates_of_clusters[self.current_cluster][i] = self.current_frequency_of_clusters[self.current_cluster]
for cluster, state_list in enumerate(self.currentstates_of_clusters):
if cluster != self.current_cluster:
for j in range(len(state_list)):
                            self.currentstates_of_clusters[cluster][j] = POWERDOWN
def unique_freq(self):
""" Determine the unique Frequency and state"""
unique_freq = []
for i in self.timestamp:
if self.state_time_map[i] not in unique_freq and self.state_time_map[i] != self.UNKNOWNSTATE:
unique_freq.append(self.state_time_map[i])
for i in self.minimum_frequency_cluster:
if i not in unique_freq:
unique_freq.append(i)
return unique_freq
def parse(self):
"""
Parse the trace.txt ::
store timestamp, state, cpu_id
---------------------------------------------------------------------------------
|timestamp| |state| |cpu_id|
<idle>-0 [001] 294.554380: cpu_idle: state=4294967295 cpu_id=1
<idle>-0 [001] 294.554454: power_start: type=1 state=0 cpu_id=1
<idle>-0 [001] 294.554458: cpu_idle: state=0 cpu_id=1
<idle>-0 [001] 294.554464: power_end: cpu_id=1
<idle>-0 [001] 294.554471: cpu_idle: state=4294967295 cpu_id=1
<idle>-0 [001] 294.554590: power_start: type=1 state=0 cpu_id=1
<idle>-0 [001] 294.554593: cpu_idle: state=0 cpu_id=1
<idle>-0 [001] 294.554636: power_end: cpu_id=1
<idle>-0 [001] 294.554639: cpu_idle: state=4294967295 cpu_id=1
<idle>-0 [001] 294.554669: power_start: type=1 state=0 cpu_id=1
"""
pattern = re.compile(r'\s+(?P<time>\S+)\S+\s*(?P<desc>(cpu_idle:|cpu_frequency:))\s*state=(?P<state>\d+)\s*cpu_id=(?P<cpu_id>\d+)')
start_trace = False
stop_trace = False
with open(self.infile, 'r') as f:
for line in f:
                # Start collecting data from label "TRACE_MARKER_START" and
                # stop with label "TRACE_MARKER_STOP"
if line.find("TRACE_MARKER_START") != -1:
start_trace = True
if line.find("TRACE_MARKER_STOP") != -1:
stop_trace = True
if start_trace and not stop_trace:
match = pattern.search(line)
if match:
self.timestamp.append(float(match.group('time')))
self.state_time_map[float(match.group('time'))] = int(match.group('state'))
self.cpuid_time_map[float(match.group('time'))] = int(match.group('cpu_id'))
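    # Illustrative parse result: the ftrace line
    #     "<idle>-0 [001] 294.554380: cpu_idle: state=4294967295 cpu_id=1"
    # appends timestamp 294.554380 and records state 4294967295 (unknown state)
    # and cpu_id 1 for that timestamp in the two maps above.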
def get_state_name(self, state):
if state in self.power_state:
return "powerstate"
elif state == self.UNKNOWNSTATE:
return "unknownstate"
else:
return "freqstate"
def populate(self, time1, time2):
diff = time2 - time1
for cluster, states_list in enumerate(self.currentstates_of_clusters):
for k, j in enumerate(states_list):
if self.device.scheduler == 'iks' and cluster == 1:
self.cpu_freq_time_spent["cpu{}".format(self.cpuids_of_clusters[cluster][k] + len(self.currentstates_of_clusters[0]))][j] += diff
else:
self.cpu_freq_time_spent["cpu{}".format(self.cpuids_of_clusters[cluster][k])][j] += diff
def calculate(self):
for i in range(len(self.timestamp) - 1):
self.update_cluster_freq(self.state_time_map[self.timestamp[i]], self.cpuid_time_map[self.timestamp[i]])
self.update_state(self.state_time_map[self.timestamp[i]], self.cpuid_time_map[self.timestamp[i]])
self.populate(self.timestamp[i], self.timestamp[i + 1])
def percentage(self):
"""Normalize the result with total execution time."""
temp = self.cpu_freq_time_spent.copy()
for i in self.cpu_freq_time_spent:
total = 0
for j in self.cpu_freq_time_spent[i]:
total += self.cpu_freq_time_spent[i][j]
for j in self.cpu_freq_time_spent[i]:
if total != 0:
temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total
else:
temp[i][j] = 0
return temp
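    # Worked example (illustrative numbers): if cpu0 spent 2.0s at frequency
    # state 1000000 and 2.0s in power-down state 2 during the marked trace
    # window, the normalised entries become temp['cpu0'][1000000] == 50.0 and
    # temp['cpu0'][2] == 50.0 (percent of total residency).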
def generate_csv(self, context): # pylint: disable=R0912,R0914
""" generate the '''dvfs.csv''' with the state, frequency and cores """
temp = self.percentage()
total_state = self.unique_freq()
offline_value = -1
ghz_conversion = 1000000
mhz_conversion = 1000
with open(self.outfile, 'a+') as f:
writer = csv.writer(f, delimiter=',')
reader = csv.reader(f)
# Create the header in the format below
# workload name, iteration, state, A7 CPU0,A7 CPU1,A7 CPU2,A7 CPU3,A15 CPU4,A15 CPU5
if sum(1 for row in reader) == 0:
header_row = ['workload', 'iteration', 'state']
count = 0
for cluster, states_list in enumerate(self.currentstates_of_clusters):
for dummy_index in range(len(states_list)):
header_row.append("{} CPU{}".format(self.corename_of_clusters[cluster], count))
count += 1
writer.writerow(header_row)
if offline_value in total_state:
total_state.remove(offline_value) # remove the offline state
for i in sorted(total_state):
temprow = []
temprow.extend([context.result.spec.label, context.result.iteration])
if "state{}".format(i) in self.idlestate_description:
temprow.append(self.idlestate_description["state{}".format(i)])
else:
state_value = float(i)
if state_value / ghz_conversion >= 1:
temprow.append("{} Ghz".format(state_value / ghz_conversion))
else:
temprow.append("{} Mhz".format(state_value / mhz_conversion))
for j in range(self.device.number_of_cores * self.multiply_factor):
temprow.append("{0:.3f}".format(temp["cpu{}".format(j)][i]))
writer.writerow(temprow)
check_off = True  # becomes False if any core spent more than 1% of its time offline
for i in range(self.device.number_of_cores * self.multiply_factor):
temp_val = "{0:.3f}".format(temp["cpu{}".format(i)][offline_value])
if float(temp_val) > 1:
check_off = False
break
if check_off is False:
temprow = []
temprow.extend([context.result.spec.label, context.result.iteration])
temprow.append("OFFLINE")
for i in range(self.device.number_of_cores * self.multiply_factor):
temprow.append("{0:.3f}".format(temp["cpu{}".format(i)][offline_value]))
writer.writerow(temprow)
|
|
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iris Classification Sample Cloud Runner.
"""
import argparse
import datetime
import os
import subprocess
import uuid
import apache_beam as beam
import tensorflow as tf
import trainer.model as iris
import google.cloud.ml as ml
import google.cloud.ml.dataflow.io.tfrecordio as tfrecordio
import google.cloud.ml.io as io
# Model variables
MODEL_NAME = 'iris'
TRAINER_NAME = 'trainer-1.0.tar.gz'
def _default_project():
get_project = ['gcloud', 'config', 'list', 'project',
'--format=value(core.project)']
with open(os.devnull, 'w') as dev_null:
return subprocess.check_output(get_project, stderr=dev_null).strip()
parser = argparse.ArgumentParser(
description='Runs Training on the Iris model data.')
parser.add_argument('--project_id',
help='The project to which the job will be submitted.')
parser.add_argument('--cloud', action='store_true',
help='Run preprocessing on the cloud.')
parser.add_argument('--metadata_path',
help='The path to the metadata file from preprocessing.')
parser.add_argument('--training_data',
default='gs://cloud-ml-data/iris/data_train.csv',
help='Data to analyze and encode as training features.')
parser.add_argument('--eval_data',
default='gs://cloud-ml-data/iris/data_eval.csv',
help='Data to encode as evaluation features.')
parser.add_argument('--predict_data',
default='gs://cloud-ml-data/iris/data_predict.csv',
help='Data to encode as prediction features.')
parser.add_argument('--output_dir', default=None,
help=('Google Cloud Storage or Local directory in which '
'to place outputs.'))
parser.add_argument('--deploy_model_name', default='iris',
help=('If --cloud is used, the model is deployed with this '
'name. The default is iris.'))
parser.add_argument('--deploy_model_version',
default='v' + uuid.uuid4().hex[:4],
help=('If --cloud is used, the model is deployed with this '
'version. The default is four random characters.'))
args, passthrough_args = parser.parse_known_args()
if not args.project_id:
args.project_id = _default_project()
if not args.output_dir:
if args.cloud:
args.output_dir = os.path.join('gs://' + args.project_id + '-ml',
MODEL_NAME)
else:
path = 'output'
if not os.path.isdir(path):
os.makedirs(path)
args.output_dir = path
TRAINER_URI = os.path.join(args.output_dir, TRAINER_NAME)
MODULE_NAME = 'trainer.task'
EXPORT_SUBDIRECTORY = 'model'
def preprocess(pipeline):
feature_set = iris.IrisFeatures()
training_data = beam.io.TextFileSource(
args.training_data, strip_trailing_newlines=True,
coder=io.CsvCoder.from_feature_set(feature_set, feature_set.csv_columns))
eval_data = beam.io.TextFileSource(
args.eval_data, strip_trailing_newlines=True,
coder=io.CsvCoder.from_feature_set(feature_set, feature_set.csv_columns))
predict_data = beam.io.TextFileSource(
args.predict_data, strip_trailing_newlines=True,
coder=io.CsvCoder.from_feature_set(feature_set, feature_set.csv_columns,
has_target_columns=False))
train = pipeline | beam.Read('ReadTrainingData', training_data)
evaluate = pipeline | beam.Read('ReadEvalData', eval_data)
predict = pipeline | beam.Read('ReadPredictData', predict_data)
(metadata, train_features, eval_features, predict_features) = (
(train, evaluate, predict)
| 'Preprocess'
>> ml.Preprocess(feature_set, input_format='csv',
format_metadata={'headers': feature_set.csv_columns}))
# Writes metadata.yaml (a text file) plus features_train, features_eval and
# features_predict (TFRecord files)
(metadata | 'SaveMetadata'
>> io.SaveMetadata(os.path.join(args.output_dir, 'metadata.yaml')))
# We turn off sharding of the feature files because the dataset is very small.
(train_features | 'SaveTrain'
>> io.SaveFeatures(
os.path.join(args.output_dir, 'features_train')))
(eval_features | 'SaveEval'
>> io.SaveFeatures(
os.path.join(args.output_dir, 'features_eval')))
(predict_features | 'SavePredict'
>> io.SaveFeatures(
os.path.join(args.output_dir, 'features_predict')))
return metadata, train_features, eval_features, predict_features
def get_train_parameters(metadata):
job_args = []
return {
'package_uris': [TRAINER_URI],
'python_module': MODULE_NAME,
'export_subdir': EXPORT_SUBDIRECTORY,
'metadata': metadata,
'label': 'Train',
'region': 'us-central1',
'scale_tier': 'STANDARD_1',
'job_args': job_args
}
def train(pipeline, train_features=None, eval_features=None, metadata=None):
if not train_features:
train_features = (
pipeline
| 'ReadTrain'
>> io.LoadFeatures(os.path.join(args.output_dir, 'features_train*')))
if not eval_features:
eval_features = (
pipeline
| 'ReadEval'
>> io.LoadFeatures(os.path.join(args.output_dir, 'features_eval*')))
trained_model, results = ((train_features, eval_features)
| ml.Train(**get_train_parameters(metadata)))
trained_model | 'SaveModel' >> io.SaveModel(os.path.join(args.output_dir,
'saved_model'))
results | io.SaveTrainingJobResult(os.path.join(args.output_dir,
'train_results'))
return trained_model, results
def evaluate(pipeline, trained_model=None, eval_features=None):
if not eval_features:
eval_features = (
pipeline
| 'ReadEval'
>> io.LoadFeatures(os.path.join(args.output_dir, 'features_eval*')))
if not trained_model:
trained_model = (pipeline
| 'LoadModel'
>> io.LoadModel(os.path.join(args.output_dir,
'saved_model')))
# Run our evaluation data through a Batch Evaluation, then pull out just
# the expected and predicted target values.
evaluations = (eval_features
| 'Evaluate' >> ml.Evaluate(trained_model)
| beam.Map('CreateEvaluations', make_evaluation_dict))
coder = io.CsvCoder(['key', 'target', 'predicted', 'score'],
['target', 'predicted', 'score'])
write_text_file(evaluations, 'WriteEvaluation', 'model_evaluations', coder)
return evaluations
def make_evaluation_dict((example, prediction)):
# When running inside of Dataflow, we don't have our global scope,
# so import tf here so that we can access it.
import numpy
import tensorflow as tf
tf_example = tf.train.Example()
tf_example.ParseFromString(example.values()[0])
feature_map = tf_example.features.feature
scores = prediction['score']
prediction = numpy.argmax(scores)
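# The returned dict has the following shape (values illustrative):
#   {'key': <example key>, 'target': 2, 'predicted': 1, 'score': 0.93}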
return {
'key': feature_map['key'].bytes_list.value[0],
'target': feature_map['species'].int64_list.value[0],
'predicted': prediction,
'score': scores[prediction]
}
def deploy_model(pipeline, model_name, version_name, trained_model=None):
if not trained_model:
trained_model = (pipeline
| 'LoadModel'
>> io.LoadModel(os.path.join(args.output_dir,
'saved_model')))
return trained_model | ml.DeployVersion(model_name, version_name)
def model_analysis(pipeline, evaluation_data=None, metadata=None):
if not metadata:
metadata = pipeline | io.LoadMetadata(
os.path.join(args.output_dir, "metadata.yaml"))
if not evaluation_data:
coder = io.CsvCoder(['key', 'target', 'predicted', 'score'],
['target', 'predicted', 'score'])
evaluation_data = read_text_file(pipeline, 'ReadEvaluation',
'model_evaluations', coder=coder)
confusion_matrix, precision_recall, logloss = (
evaluation_data | 'AnalyzeModel' >> ml.AnalyzeModel(metadata))
confusion_matrix | io.SaveConfusionMatrixCsv(
os.path.join(args.output_dir, 'analyzer_cm.csv'))
precision_recall | io.SavePrecisionRecallCsv(
os.path.join(args.output_dir, 'analyzer_pr.csv'))
write_text_file(logloss, 'Write Log Loss', 'analyzer_logloss.csv')
return confusion_matrix, precision_recall, logloss
def get_pipeline_name():
if args.cloud:
return 'BlockingDataflowPipelineRunner'
else:
return 'DirectPipelineRunner'
def dataflow():
"""Run Preprocessing, Training, Eval, and Prediction as a single Dataflow."""
print 'Building', TRAINER_NAME, 'package.'
subprocess.check_call(['python', 'setup.py', 'sdist', '--format=gztar'])
subprocess.check_call(['gsutil', '-q', 'cp',
os.path.join('dist', TRAINER_NAME),
TRAINER_URI])
opts = None
if args.cloud:
options = {
'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(args.output_dir, 'tmp'),
'job_name': ('cloud-ml-sample-iris' + '-'
+ datetime.datetime.now().strftime('%Y%m%d%H%M%S')),
'project': args.project_id,
# Dataflow needs a copy of the version of the cloud ml sdk that
# is being used.
'extra_packages': [ml.sdk_location, TRAINER_URI],
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
else:
# For local runs, the trainer must be installed as a module.
subprocess.check_call(['pip', 'install', '--upgrade', '--force-reinstall',
'--user', os.path.join('dist', TRAINER_NAME)])
p = beam.Pipeline(get_pipeline_name(), options=opts)
# Every function below writes its output to a file. The inputs to these
# functions are also optional; if they are missing, the input values are read
# from a file. Therefore, when running this script multiple times, some steps
# can be removed to avoid recomputing values.
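# For example (illustrative), if the feature files already exist from an
# earlier run, one could call train(p) with no feature arguments and it would
# read features_train*/features_eval* from args.output_dir instead of
# re-running preprocess(p).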
metadata, train_features, eval_features, predict_features = preprocess(p)
trained_model, results = train(p, train_features, eval_features, metadata)
evaluations = evaluate(p, trained_model, eval_features)
confusion_matrix, precision_recall, logloss = (
model_analysis(p, evaluations, metadata))
if args.cloud:
deployed = deploy_model(p, args.deploy_model_name,
args.deploy_model_version, trained_model)
# Use our deployed model to run a batch prediction.
output_uri = os.path.join(args.output_dir, 'batch_prediction_results')
deployed | "Batch Predict" >> ml.Predict([args.predict_data], output_uri,
region='us-central1',
data_format='TEXT')
print 'Deploying %s version: %s' % (args.deploy_model_name,
args.deploy_model_version)
p.run()
if args.cloud:
print 'Deployed %s version: %s' % (args.deploy_model_name,
args.deploy_model_version)
def write_text_file(pcollection, label, output_name,
coder=beam.coders.ToStringCoder()):
return pcollection | label >> beam.Write(beam.io.TextFileSink(
os.path.join(args.output_dir, output_name),
shard_name_template='',
coder=coder))
def read_text_file(pipeline, label, input_name,
coder=beam.coders.StrUtf8Coder()):
return pipeline | label >> beam.Read(beam.io.TextFileSource(
os.path.join(args.output_dir, input_name),
strip_trailing_newlines=True,
coder=coder))
def main():
dataflow()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import csv
from datetime import datetime as dt, timedelta as td
import logging
import sys
import re
from copy import deepcopy
from dateutil.parser import parse
from volttron.platform.messaging import topics
from volttron.platform.agent import utils
from volttron.platform.agent.utils import jsonapi, setup_logging
from volttron.platform.vip.agent import Agent, Core
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.agent.driven import ConversionMapper
from volttron.platform.messaging import (headers as headers_mod, topics)
__version__ = '3.0.0'
__author1__ = 'Craig Allwardt <craig.allwardt@pnnl.gov>'
__author2__ = 'Robert Lutes <robert.lutes@pnnl.gov>'
__author3__ = 'Poorva Sharma <poorva.sharma@pnnl.gov>'
__copyright__ = 'Copyright (c) 2015, Battelle Memorial Institute'
__license__ = 'FreeBSD'
DATE_FORMAT = '%m-%d-%y %H:%M'
utils.setup_logging()
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d-%y %H:%M')
def driven_agent(config_path, **kwargs):
"""Reads agent configuration and converts it to run driven agent.
:param kwargs: Any driver specific parameters"""
config = utils.load_config(config_path)
arguments = config.get('arguments')
mode = config.get('mode', 'PASSIVE') == 'ACTIVE'
multiple_devices = isinstance(config['device']['unit'], dict)
campus_building_config = config['device']
analysis_name = campus_building_config.get('analysis_name', 'analysis_name')
analysis_dict = {'analysis_name': analysis_name}
arguments.update(analysis_dict)
agent_id = config.get('agentid', None)
actuator_id = agent_id if agent_id is not None else analysis_name
campus_building = dict((key, campus_building_config[key]) for key in ['campus', 'building'])
analysis = deepcopy(campus_building)
analysis.update(analysis_dict)
device_config = config['device']['unit']
command_devices = device_config.keys()
device_topic_dict = {}
device_topic_list = []
subdevices_list = []
from_file = config.get('from_file')
for device_name in device_config:
device_topic = topics.DEVICES_VALUE(campus=campus_building.get('campus'),
building=campus_building.get('building'),
unit=device_name,
path='',
point='all')
device_topic_dict.update({device_topic: device_name})
device_topic_list.append(device_name)
if multiple_devices:
for subdevice in device_config[device_name]['subdevices']:
subdevices_list.append(subdevice)
subdevice_topic = topics.DEVICES_VALUE(campus=campus_building.get('campus'),
building=campus_building.get('building'),
unit=device_name,
path=subdevice,
point='all')
subdevice_name = device_name + "/" + subdevice
device_topic_dict.update({subdevice_topic: subdevice_name})
device_topic_list.append(subdevice_name)
base_actuator_path = topics.RPC_DEVICE_PATH(campus=campus_building.get('campus', ''),
building=campus_building.get('building', ''),
unit=None,
path='',
point=None)
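# base_actuator_path is a partially filled topic template; concrete device and
# point paths are produced later via base_actuator_path(unit=..., point=...)
# in actuator_request() and actuator_set().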
conversion_map = config.get('conversion_map')
map_names = {}
for key, value in conversion_map.items():
map_names[key.lower() if isinstance(key, str) else key] = value
application = config.get('application')
validation_error = ''
if not application:
validation_error = 'Invalid application specified in config\n'
if validation_error:
_log.error(validation_error)
raise ValueError(validation_error)
config.update(config.get('arguments'))
converter = ConversionMapper()
output_file_prefix = config.get('output_file')
#unittype_map = config.get('unittype_map', None)
#assert unittype_map
klass = _get_class(application)
# This instance is used to call the application's run method when
# data comes in on the message bus. It is constructed here so that
# the application can keep its state each time run is called.
app_instance = klass(**arguments)
class DrivenAgent(Agent):
"""Agent listens to message bus device and runs when data is published.
"""
def __init__(self, **kwargs):
"""
Initializes agent
:param kwargs: Any driver specific parameters"""
super(DrivenAgent, self).__init__(**kwargs)
# master is where we copy from to get a poppable list of
# subdevices that should be present before we run the analysis.
self._master_devices = device_topic_list
self._needed_devices = []
self._device_values = {}
self._initialize_devices()
self.received_input_datetime = None
self._kwargs = kwargs
self._header_written = False
self.file_creation_set = set()
def _initialize_devices(self):
self._needed_devices = deepcopy(self._master_devices)
self._device_values = {}
@Core.receiver('onstart')
def startup(self, sender, **kwargs):
"""
Starts up the agent and subscribes to device topics
based on agent configuration.
:param sender:
:param kwargs: Any driver specific parameters
:type sender: str"""
self._initialize_devices()
for device_topic in device_topic_dict:
_log.debug('Subscribing to ' + device_topic)
self.vip.pubsub.subscribe(peer='pubsub',
prefix=device_topic,
callback=self.on_analysis_message)
def _should_run_now(self):
"""
Checks if messages from all the devices are received
before running application
:returns: True or False based on received messages.
:rtype: boolean"""
# Assumes the unit/all values will have values.
if not len(self._device_values.keys()) > 0:
return False
return not len(self._needed_devices) > 0
def on_analysis_message(self, peer, sender, bus, topic, headers, message):
"""
Subscribe to device data and assemble data set to pass
to applications.
:param peer:
:param sender: device name
:param bus:
:param topic: device path topic
:param headers: message headers
:param message: message containing points and values dict
from device with point type
:type peer: str
:type sender: str
:type bus: str
:type topic: str
:type headers: dict
:type message: dict"""
device_data = message[0]
if isinstance(device_data, list):
device_data = device_data[0]
def aggregate_subdevice(device_data):
tagged_device_data = {}
device_tag = device_topic_dict[topic]
if device_tag not in self._needed_devices:
return False
for key, value in device_data.items():
device_data_tag = '&'.join([key, device_tag])
tagged_device_data[device_data_tag] = value
self._device_values.update(tagged_device_data)
self._needed_devices.remove(device_tag)
return True
device_needed = aggregate_subdevice(device_data)
if not device_needed:
_log.error("Warning device values already present, "
"reinitializing")
if self._should_run_now():
field_names = {}
for k, v in self._device_values.items():
field_names[k.lower() if isinstance(k, str) else k] = v
if not converter.initialized and conversion_map is not None:
converter.setup_conversion_map(map_names, field_names)
if from_file:
_timestamp = parse(headers.get('Date'))
self.received_input_datetime = _timestamp
else:
_timestamp = dt.now()
self.received_input_datetime = dt.utcnow()
device_data = converter.process_row(field_names)
results = app_instance.run(_timestamp, device_data)
# results = app_instance.run(
# dateutil.parser.parse(self._subdevice_values['Timestamp'],
# fuzzy=True), self._subdevice_values)
self._process_results(results)
self._initialize_devices()
else:
_log.info("Still need {} before running.".format(self._needed_devices))
def _process_results(self, results):
"""
Runs driven application with converted data. Calls appropriate
methods to process commands, log and table_data in results.
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Same as results param.
:rtype: Results object \\volttron.platform.agent.driven"""
def make_actuator_request(command_dict, results):
for device_tag, new_value in command_dict.items():
_log.debug("COMMAND TABLE: {}->{}".format(device_tag, new_value))
if mode:
_log.debug("ACTUATE ON DEVICE.")
results, actuator_error = self.actuator_request(results)
if not actuator_error:
self.actuator_set(results)
return results
_log.debug('Processing Results!')
for device, point_value_dict in results.devices.items():
make_actuator_request(point_value_dict, results)
make_actuator_request(results.commands, results)
for value in results.log_messages:
_log.debug("LOG: {}".format(value))
for key, value in results.table_output.items():
_log.debug("TABLE: {}->{}".format(key, value))
if output_file_prefix is not None:
results = self.create_file_output(results)
if len(results.table_output.keys()):
results = self.publish_analysis_results(results)
return results
def publish_analysis_results(self, results):
"""
Publish table_data in analysis results to the message bus for
capture by the data historian.
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Same as results param.
:rtype: Results object \\volttron.platform.agent.driven"""
headers = {
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
headers_mod.DATE: str(self.received_input_datetime),
}
for app, analysis_table in results.table_output.items():
try:
name_timestamp = app.split('&')
_name = name_timestamp[0]
timestamp = name_timestamp[1]
except:
_name = app
timestamp = str(self.received_input_datetime)
headers = {
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
headers_mod.DATE: timestamp,
}
for entry in analysis_table:
for key, value in entry.items():
for _device in command_devices:
analysis['unit'] = _device
analysis_topic = topics.ANALYSIS_VALUE(point=key, **analysis)
datatype = 'float'
if isinstance(value, int):
datatype = 'int'
kbase = key[key.rfind('/') + 1:]
message = [{kbase: value},
{kbase: {'tz': 'US/Pacific',
'type': datatype,
'units': 'float',
}
}]
self.vip.pubsub.publish(
'pubsub', analysis_topic, headers, message)
return results
def create_file_output(self, results):
"""
Create results/data files for testing and algorithm validation
if table data is present in the results.
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Same as results param.
:rtype: Results object \\volttron.platform.agent.driven"""
for key, value in results.table_output.items():
name_timestamp = key.split('&')
_name = name_timestamp[0]
timestamp = name_timestamp[1]
file_name = output_file_prefix + "-" + _name + ".csv"
if file_name not in self.file_creation_set:
self._header_written = False
self.file_creation_set.update([file_name])
for row in value:
with open(file_name, 'a+') as file_to_write:
row.update({'Timestamp': timestamp})
_keys = row.keys()
file_output = csv.DictWriter(file_to_write, _keys)
if not self._header_written:
file_output.writeheader()
self._header_written = True
file_output.writerow(row)
file_to_write.close()
return results
def actuator_request(self, results):
"""
Calls the actuator's request_new_schedule method to get
device schedule
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Return result from request_new_schedule method
and True or False for error in scheduling device.
:rtype: dict and boolean
:Return Values:
The return values has the following format:
result = {'info': u'', 'data': {}, 'result': 'SUCCESS'}
request_error = True/False
warning:: Calling without previously scheduling a device and not within
the time allotted will raise a LockError"""
_now = dt.now()
str_now = _now.strftime(DATE_FORMAT)
_end = _now + td(minutes=1)
str_end = _end.strftime(DATE_FORMAT)
for _device in command_devices:
actuation_device = base_actuator_path(unit=_device, point='')
schedule_request = [[actuation_device, str_now, str_end]]
try:
result = self.vip.rpc.call('platform.actuator',
'request_new_schedule',
actuator_id, _device, 'HIGH',
schedule_request).get(timeout=4)
except RemoteError as ex:
_log.warning("Failed to schedule device {} (RemoteError): {}".format(_device, str(ex)))
request_error = True
continue  # 'result' is not defined when the RPC call fails
if result['result'] == 'FAILURE':
if result['info'] == 'TASK_ID_ALREADY_EXISTS':
_log.info('Task to schedule device already exists ' + _device)
request_error = False
else:
_log.warn('Failed to schedule device (unavailable) ' + _device)
request_error = True
else:
request_error = False
return results, request_error
def actuator_set(self, results):
"""
Calls the actuator's set_point method to set point on device
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven"""
def make_actuator_set(device, point_value_dict):
for point, new_value in point_value_dict.items():
point_path = base_actuator_path(unit=device, point=point)
try:
result = self.vip.rpc.call('platform.actuator', 'set_point',
actuator_id, point_path,
new_value).get(timeout=4)
_log.debug("Set point {} to {}".format(point_path, new_value))
except RemoteError as ex:
_log.warning("Failed to set {} to {}: {}".format(point_path, new_value, str(ex)))
continue
for device, point_value_dict in results.devices.items():
make_actuator_set(device, point_value_dict)
for device in command_devices:
make_actuator_set(device, results.commands)
DrivenAgent.__name__ = 'DrivenLoggerAgent'
return DrivenAgent(**kwargs)
def _get_class(kls):
"""Get driven application information."""
parts = kls.split('.')
module = ".".join(parts[:-1])
main_mod = __import__(module)
for comp in parts[1:]:
main_mod = getattr(main_mod, comp)
return main_mod
def main(argv=sys.argv):
''' Main method.'''
utils.vip_main(driven_agent)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
|
import logging
import io
import json
import random
import copy
from klausuromat import exceptions, operations, ifilter, identifier
# Basic code generator that can build (verifying) code but is not able to compile it
class BasicGenerator:
# Possible operations
all_operations = operations.all_
# Initialize
def __init__(self, settings, language):
# Open settings file (JSON) and convert to dict
with io.open(settings[0], mode='r', encoding=settings[1]) as fd:
self._settings = json.load(fd)
# Open language file (JSON) and convert to dict
with io.open(language[0], mode='r', encoding=language[1]) as fd:
self._language = json.load(fd)
# Build list of types
self._type_list = [type_['name'] for type_ in self._language['type']]
# Start logging
log = '/'.join((self._settings['LOG_DIRECTORY'], self._settings['LOG_FILENAME']))
logging.basicConfig(filename=log, level=logging.DEBUG)
logging.info('Created instance of "{}"'.format(self.__class__.__name__))
# Instantiate class filter
self._filter = ifilter.IdentifierFilter(self._settings)
# Save initial values
self._options = {}
self._requirements = []
self._namespace = set()
self._active = self
self._ids = []
self._operations = []
# Make iterable
def __iter__(self):
return iter(self._operations)
# Set active generator or disable (False)
def set_active(self, value):
# Raise exception if the generator has been disabled already
if not self._active:
raise exceptions.GeneratorDisabledError()
self._active = value
# Active property
active = property(lambda self: self._active, set_active)
# Add an Identifier to this generator
def add_identifier(self, type_, name=None, value=None, reference=None, call_by=None):
# Build instance of identifier and append it
id_ = self._identifier(type_, name=name, value=value, reference=reference, call_by=call_by)
self._ids.append(id_)
# Return identifier instance
return id_
# Initialize identifiers
def initialize(self):
# Use initialize operation
self.add_operation(self.get_operation(operations.Initialize))
# Add function generator and return instance
def add_function(self, name=None, ids=None):
# Sample ids from specified range if they are not set
if ids is None:
ids = self._sample_identifiers(self._settings['FUNCTION_IDENTIFIER_RANGE'], ids)
# Create function generator and return
from .function import FunctionGenerator
return self._add_child(FunctionGenerator, ids=ids, name=name)
# Add conditional generator and return instance
def add_conditional(self):
# Create conditional generator and return
from .if_else import ConditionalGenerator
return self._add_child(ConditionalGenerator)
# Do a random operation
# Note: No configuration is possible on an operation, therefore there is no return value
# Use get_operation() and add_operation() instead
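# Example (illustrative): gen.operate() tries every known operation in random
# order, while gen.operate('Initialize') restricts the choice to that single
# operation name.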
def operate(self, op_list=None):
# Check if this generator is active
self._check_active_state()
# Check operator list
if op_list is None:
# Build list of operations
op_list = BasicGenerator.all_operations[:]
elif type(op_list) is str:
# Cast to list
op_list = [op_list]
elif type(op_list) is not list:
# Raise if not a list
raise TypeError('Expected str or list, received type {}'.format(type(op_list)))
else:
# Copy list
op_list = op_list[:]
# Log operation list
logging.info('Possible operations: {}'.format(op_list))
# Choose an operation randomly and pop it from the list
random.shuffle(op_list)
call = op_list.pop()
# Check if operator exists
try:
call = vars(operations)[call]
except KeyError as exc:
raise exceptions.GeneratorOperationUnknownError(call) from exc
# Apply operation on the current set of identifiers
try:
operation = self.get_operation(call)
operation.random()
except exceptions.GeneratorOperationNotPossibleError as exc:
# No operation left
if not op_list:
logging.error('No operation could use the current set of identifiers')
logging.info('Identifiers: {}'.format(self._ids))
raise exceptions.GeneratorGenerationNotPossibleError(
'No operation could use the current set of identifiers') from exc
# Try to use another operation
else:
logging.info('Operation failed: {}'.format(call))
logging.info('Identifiers: {}'.format(self._ids))
return self.operate(op_list)
# Let operation know we're done and append operation
self.add_operation(operation)
# Create instance of an operation
def get_operation(self, operator):
# Choose randomly from list if operator is not a string
if not isinstance(operator, str):
try:
operator = random.choice(operator)
except TypeError:
pass
# Get operator from string
if isinstance(operator, str):
try:
operator = vars(operations)[operator]
except KeyError as exc:
raise exceptions.GeneratorOperationUnknownError(operator) from exc
# Check if format string exists in JSON file
# Note: This is a bit bugged at the moment, as the requirements have to be added to the main generator
# And it's dirty...
try:
requirements = self._language['format'][operator.__name__]['requires']
except KeyError as exc:
raise exceptions.GeneratorJSONKeyError(operator.__name__) from exc
# Apply operation on the current set of identifiers
instance = operator(self._ids, self._settings, self._language, self._filter)
self._requirements.extend(requirements) # Dirty!
# Return created instance
return instance
# Append operation instance to generator
def add_operation(self, operation):
# Append operation
self._operations.append(operation)
# Compare identifiers with internal stored identifiers
def compare_identifiers(self, all_ids):
from .child import GeneratorChild
# Retrieve operations that have a result or are a generator
operations_ = [operation for operation in self._operations
if hasattr(operation, 'result') or isinstance(operation, GeneratorChild)]
# Compare amount of operations
if len(operations_) != len(all_ids):
logging.error(
'One or more operation is missing in real results. '
'Predicted: {}; Real: {}'.format(len(operations_), len(all_ids)))
return False
# Loop through operations
for index, ids in enumerate(zip(operations_, all_ids)):
# Unpack
operation, real = ids
# Operation is another generator
if isinstance(operation, GeneratorChild):
if not operation.compare_identifiers(real):
return False
# Normal operation
else:
# Loop through identifiers
for id_ in operation.snapshot:
# Check if identifier exists in real results
if id_.name not in real:
logging.error('Operation[{}] "{}", Identifier "{}" is missing in real results'.format(
index, operation.__class__.__name__, id_.name))
return False
# Check if values are equal
if id_.value != real[id_.name]:
logging.error('Operation[{}] "{}", Identifier "{}": {} != {}'.format(
index, operation.__class__.__name__, id_.name, id_.value, real[id_.name]))
logging.info('Identifiers of the operation above: {}'.format(operation.snapshot))
return False
# No error: Code seems to be fine
return True
# Return raw code
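# Recognised option keys (all optional): 'block' (indentation level), 'result',
# 'verify', 'comments' and 'identifiers'; see code_pieces() and the
# _code_pieces_* helpers below.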
def code(self, **options):
# Get code pieces
code = self.code_pieces(options)
# Join and wrap code pieces
code = {key: ('\n'.join(set(value)) if key == 'requirements' else '\n'.join(value))
for key, value in code.items()}
# Add line breaks (after)
keys = ['requirements', 'prototypes']
code.update({key: value + '\n\n' for key, value in code.items() if key in keys and value})
# Add line breaks (before)
keys = ['functions']
code.update({key: '\n\n' + value for key, value in code.items() if key in keys and value})
# Return wrapped content
return self._language['wrapper'].format(code)
# Create instance of an identifier
def _identifier(self, type_, **kwargs):
return identifier.Identifier(self._settings, self._language, self._type_list, self._namespace, type_, **kwargs)
# Add a child generator and return its instance
def _add_child(self, call, ids=None, **kwargs):
# Set ids if none
if ids is None:
ids = self._ids
# Create instance, append to operations and set active state
gen = call(ids, self._settings, self._language, self._filter, self, **kwargs)
self._operations.append(gen)
self.active = gen
return gen
# Raise depending on active state
def _check_active_state(self):
# Generator is disabled
if not self.active:
raise exceptions.GeneratorDisabledError()
# Generator is inactive
elif self.active != self:
raise exceptions.GeneratorInactiveError()
# Sample from identifiers by a specified range
def _sample_identifiers(self, range_, ids):
# Use all identifiers as source if ids is not set
if ids is None:
ids = self._ids
# Sample from identifiers
start, stop = range_
try:
n = random.randrange(start, stop if len(ids) > stop else len(ids))
except ValueError:
n = len(ids)
return random.sample(ids, len(ids) if n > len(ids) else n)
# Return code pieces (with verifying code if requested)
def code_pieces(self, options):
# Save code options so we don't have to pass it to every function
self._options = options
# Store amount of indentations
if 'block' not in self._options:
self._options['block'] = 1
# Default and start values
code = self._code_pieces_default()
# Add operations
self._code_pieces_operations(code, self._operations)
# Add resulting identifiers
if self._options.get('result'):
self._code_pieces_result(code)
# Return dictionary
return code
# Return code pieces defaults for code generation
def _code_pieces_default(self):
# Code piece dictionary
code = {key: [] for key in ['requirements', 'prototypes', 'main', 'functions']}
# Requirements (e.g. includes)
code['requirements'] = self._requirements
return code
# Return code pieces that returns the final values of all identifiers
def _code_pieces_result(self, code, key='main'):
# Retrieve result by using a result operation
indent = self._indent()
code[key].append(indent + self.get_operation(operations.Result).snapshot_code().replace('\n', '\n' + indent))
# Append code pieces of all operations inside to a specified key
def _code_pieces_operations(self, code, operations_, key='main', block=True):
from .child import GeneratorChild
from .function import FunctionGenerator
# Start verify block
if block and self._options.get('verify'):
code[key].append(self._code_pieces_verify_start())
# Loop through operations
for i, operation in enumerate(operations_):
# Add separator (if not a function)
# Note: This is quite nasty... but necessary to avoid JSON corruption
if i > 0 and self._options.get('verify') and not isinstance(operation, FunctionGenerator):
code[key].append(self._code_pieces_verify_separator())
# Generator (child)
if isinstance(operation, GeneratorChild):
# Append code of child generator
self._code_pieces_child(code, operation)
# Normal operation
else:
# Append code of operation
self._code_pieces_operation(code[key], operation)
# End verify block
if block and self._options.get('verify'):
code[key].append(self._code_pieces_verify_end())
# Append code pieces of a normal operation
def _code_pieces_operation(self, code, operation):
# Get indent
indent = self._indent()
# Add operation name (if requested and operation is a normal operation [not special like call, result, ...])
if self._options.get('comments') and operation.name in BasicGenerator.all_operations:
code.append(indent + self._language['comment'].format(operation.name))
# Show result (as hint)
if self._options.get('identifiers') and operation.assign:
# Append to code (with bit representation when necessary)
code.append(operation.hint(bits=(operation.name in operations.bitop)))
# Add generated code
code.append(indent + operation.code().replace('\n', '\n' + indent) +
('\n' if not self._options.get('verify') else ''))
# Add verifying code pieces
if self._options.get('verify'):
code.append(indent + operation.snapshot_code(verify=True).replace('\n', '\n' + indent) + '\n')
# Append code pieces that have been generated by a child
def _code_pieces_child(self, code, operation):
# Get code of child
child = operation.code_pieces(copy.copy(self._options))
# Extend code lists
for key, value in child.items():
code.setdefault(key, []).extend(value)
# Return code pieces that start a verifying block
def _code_pieces_verify_start(self):
return self._indent() + self._language['verify']['id']['start']
# Return code pieces that separate verifying blocks
def _code_pieces_verify_separator(self):
return self._indent() + self._language['verify']['id']['separator']
# Return code pieces that end a verifying block
def _code_pieces_verify_end(self):
return self._indent() + self._language['verify']['id']['end']
# Maximum identifier depth used in generator
def _max_identifier_depth(self):
max_ = {}
# Loop through identifiers and find the one that has the maximum amount of references
for id_ in self._ids:
depth = id_.reference_depth()
# Value is greater than the current value stored
if depth > max_.get(id_.type, -1):
max_[id_.type] = depth
return max_
# Return indent (block count) * indent string
def _indent(self):
return self._options['block'] * self._language['indent']
|
|
from __future__ import unicode_literals
import datetime
import sys
import unittest
from django.contrib.admin import (
AllValuesFieldListFilter, BooleanFieldListFilter, ModelAdmin,
RelatedOnlyFieldListFilter, SimpleListFilter, site,
)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory, TestCase, override_settings
from django.utils.encoding import force_text
from .models import Book, Bookmark, Department, Employee, TaggedItem
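# Test helper: returns the single dict in 'dictlist' whose 'key' equals 'value',
# e.g. select_by(filterspec.choices(changelist), 'display', 'Today').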
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class NotNinetiesListFilter(SimpleListFilter):
title = "Not nineties books"
parameter_name = "book_year"
def lookups(self, request, model_admin):
return (
('the 90s', "the 1990's"),
)
def queryset(self, request, queryset):
if self.value() == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
else:
return queryset.exclude(year__gte=1990, year__lte=1999)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1 / 0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.get_queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__in'  # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__isnull'  # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
title = 'department'
parameter_name = 'department'
def lookups(self, request, model_admin):
return sorted({
(employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code)
for employee in model_admin.get_queryset(request).all()
})
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
class DepartmentListFilterLookupWithUnderscoredParameter(DepartmentListFilterLookupWithNonStringValue):
parameter_name = 'department__whatever'
class DepartmentListFilterLookupWithDynamicValue(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
if self.value() == 'the 80s':
return (('the 90s', "the 1990's"),)
elif self.value() == 'the 90s':
return (('the 80s', "the 1980's"),)
else:
return (('the 80s', "the 1980's"), ('the 90s', "the 1990's"),)
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = (
'year',
'author',
'contributors',
('is_best_seller', BooleanFieldListFilter),
'date_registered',
'no',
)
class BookAdminWithUnderscoreLookupAndTuple(BookAdmin):
list_filter = (
'year',
('author__email', AllValuesFieldListFilter),
'contributors',
'is_best_seller',
'date_registered',
'no',
)
class BookAdminWithCustomQueryset(ModelAdmin):
def __init__(self, user, *args, **kwargs):
self.user = user
super(BookAdminWithCustomQueryset, self).__init__(*args, **kwargs)
list_filter = ('year',)
def get_queryset(self, request):
return super(BookAdminWithCustomQueryset, self).get_queryset(request).filter(author=self.user)
class BookAdminRelatedOnlyFilter(ModelAdmin):
list_filter = (
'year', 'is_best_seller', 'date_registered', 'no',
('author', RelatedOnlyFieldListFilter),
('contributors', RelatedOnlyFieldListFilter),
('employee__department', RelatedOnlyFieldListFilter),
)
ordering = ('-id',)
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class NotNinetiesListFilterAdmin(ModelAdmin):
list_filter = (NotNinetiesListFilter,)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class DepartmentFilterUnderscoredEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithUnderscoredParameter, ]
class DepartmentFilterDynamicValueBookAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithDynamicValue, ]
class BookmarkAdminGenericRelation(ModelAdmin):
list_filter = ['tags__tag']
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
if self.today.month == 12:
self.next_month = self.today.replace(year=self.today.year + 1, month=1, day=1)
else:
self.next_month = self.today.replace(month=self.today.month + 1, day=1)
self.next_year = self.today.replace(year=self.today.year + 1, month=1, day=1)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
self.bob = User.objects.create_user('bob', 'bob@example.com')
self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
# Books
self.djangonaut_book = Book.objects.create(
title='Djangonaut: an art of living', year=2009,
author=self.alfred, is_best_seller=True, date_registered=self.today,
)
self.bio_book = Book.objects.create(
title='Django: a biography', year=1999, author=self.alfred,
is_best_seller=False, no=207,
)
self.django_book = Book.objects.create(
title='The Django Book', year=None, author=self.bob,
is_best_seller=None, date_registered=self.today, no=103,
)
self.guitar_book = Book.objects.create(
title='Guitar for dummies', year=2002, is_best_seller=True,
date_registered=self.one_week_ago,
)
self.guitar_book.contributors.set([self.bob, self.lisa])
# Departments
self.dev = Department.objects.create(code='DEV', description='Development')
self.design = Department.objects.create(code='DSN', description='Design')
# Employees
self.john = Employee.objects.create(name='John Blue', department=self.dev)
self.jack = Employee.objects.create(name='Jack Red', department=self.design)
def get_changelist(self, request, model, modeladmin):
return ChangeList(
request, model, modeladmin.list_display,
modeladmin.list_display_links, modeladmin.list_filter,
modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page,
modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin,
)
def test_choicesfieldlistfilter_has_none_choice(self):
"""
The last choice is for the None value.
"""
class BookmarkChoicesAdmin(ModelAdmin):
list_display = ['none_or_null']
list_filter = ['none_or_null']
modeladmin = BookmarkChoicesAdmin(Bookmark, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Bookmark, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['display'], 'None')
self.assertEqual(choices[-1]['query_string'], '?none_or_null__isnull=True')
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
self.today,
self.tomorrow,
)
)
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.next_month})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
self.today.replace(day=1),
self.next_month,
)
)
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.next_year})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
self.today.replace(month=1, day=1),
self.next_year,
)
)
request = self.request_factory.get('/', {
'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow),
})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(
choice['query_string'],
'?date_registered__gte=%s&date_registered__lt=%s' % (
str(self.one_week_ago),
str(self.tomorrow),
)
)
# Null/not null queries
request = self.request_factory.get('/', {'date_registered__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0], self.bio_book)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), 'display', 'No date')
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__isnull=True')
request = self.request_factory.get('/', {'date_registered__isnull': 'False'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(queryset.count(), 3)
self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), 'display', 'Has date')
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__isnull=False')
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows doesn't support setting a timezone that differs from the "
"system timezone."
)
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_allvaluesfieldlistfilter_custom_qs(self):
# Make sure that correct filters are returned with custom querysets
modeladmin = BookAdminWithCustomQueryset(self.alfred, Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
# Should have 'All', 1999 and 2009 options i.e. the subset of years of
# books written by alfred (which is the filtering criteria set by
# BookAdminWithCustomQueryset.get_queryset())
self.assertEqual(3, len(choices))
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['query_string'], '?year=1999')
self.assertEqual(choices[2]['query_string'], '?year=2009')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that all users are present in the author's list filter
filterspec = changelist.get_filters(request)[0][1]
expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.guitar_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that all users are present in the contributors list filter
filterspec = changelist.get_filters(request)[0][2]
expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
# With one book, the list filter should appear because there is also a
# (None) option.
Book.objects.exclude(pk=self.djangonaut_book.pk).delete()
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 2)
# With no books remaining, no list filters should appear.
Book.objects.all().delete()
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_relatedonlyfieldlistfilter_foreignkey(self):
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that only actual authors are present in author's list filter
filterspec = changelist.get_filters(request)[0][4]
expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob')]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_underscorelookup_foreignkey(self):
Department.objects.create(code='TEST', description='Testing')
self.djangonaut_book.employee = self.john
self.djangonaut_book.save()
self.bio_book.employee = self.jack
self.bio_book.save()
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Only actual departments should be present in employee__department's
# list filter.
filterspec = changelist.get_filters(request)[0][6]
expected = [
(self.dev.code, str(self.dev)),
(self.design.code, str(self.design)),
]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_manytomany(self):
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that only actual contributors are present in the contributors list filter
filterspec = changelist.get_filters(request)[0][5]
expected = [(self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_listfilter_genericrelation(self):
django_bookmark = Bookmark.objects.create(url='https://www.djangoproject.com/')
python_bookmark = Bookmark.objects.create(url='https://www.python.org/')
kernel_bookmark = Bookmark.objects.create(url='https://www.kernel.org/')
TaggedItem.objects.create(content_object=django_bookmark, tag='python')
TaggedItem.objects.create(content_object=python_bookmark, tag='python')
TaggedItem.objects.create(content_object=kernel_bookmark, tag='linux')
modeladmin = BookmarkAdminGenericRelation(Bookmark, site)
request = self.request_factory.get('/', {'tags__tag': 'python'})
changelist = self.get_changelist(request, Bookmark, modeladmin)
queryset = changelist.get_queryset(request)
expected = [python_bookmark, django_bookmark]
self.assertEqual(list(queryset), expected)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_fieldlistfilter_underscorelookup_tuple(self):
"""
Ensure ('fieldpath', ClassName) lookups pass lookup_allowed checks
when the fieldpath contains a double underscore (#19182).
"""
modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'author__email': 'alfred@example.com'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(
choices[3]['query_string'],
'?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk
)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
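# A minimal sketch (not part of this test module) of the kind of
# SimpleListFilter exercised above; the class body below is an illustrative
# assumption, not the actual DecadeListFilter used by DecadeFilterBookAdmin:
#
#     from django.contrib import admin
#
#     class DecadeListFilter(admin.SimpleListFilter):
#         title = 'publication decade'           # heading shown in the sidebar
#         parameter_name = 'publication-decade'  # querystring key
#
#         def lookups(self, request, model_admin):
#             # (URL value, human-readable label) pairs
#             return (
#                 ('the 80s', "the 1980's"),
#                 ('the 90s', "the 1990's"),
#                 ('the 00s', "the 2000's"),
#             )
#
#         def queryset(self, request, queryset):
#             # self.value() is the selected URL value, or None for 'All'
#             if self.value() == 'the 90s':
#                 return queryset.filter(year__gte=1990, year__lte=1999)
#             return queryset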
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
msg = "The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.get_changelist(request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.get_changelist(request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
When a SimpleListFilter's lookups() method returns None, the filter
is disabled completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed (#17828).
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
with self.assertRaises(ZeroDivisionError):
self.get_changelist(request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
list_filter works with two-character field names (#16080).
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in' (#17091).
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
"""
Ensure the correct choice is marked as selected when using non-string
values for lookups in SimpleListFilters (#19318).
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department': self.john.department.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.department.pk)
def test_lookup_with_non_string_value_underscored(self):
"""
Ensure SimpleListFilter lookups pass lookup_allowed checks when the
parameter_name attribute contains a double underscore (#19182).
"""
modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department__whatever': self.john.department.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__whatever=%s' % self.john.department.pk)
def test_fk_with_to_field(self):
"""
A filter on a FK respects the FK's to_field attribute (#17972).
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
def test_lookup_with_dynamic_value(self):
"""
Ensure SimpleListFilter can access self.value() inside the lookup.
"""
modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)
def _test_choices(request, expected_displays):
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = tuple(c['display'] for c in filterspec.choices(changelist))
self.assertEqual(choices, expected_displays)
_test_choices(self.request_factory.get('/', {}),
("All", "the 1980's", "the 1990's"))
_test_choices(self.request_factory.get('/', {'publication-decade': 'the 80s'}),
("All", "the 1990's"))
_test_choices(self.request_factory.get('/', {'publication-decade': 'the 90s'}),
("All", "the 1980's"))
def test_list_filter_queryset_filtered_by_default(self):
"""
A list filter that filters the queryset by default gives the correct
full_result_count.
"""
modeladmin = NotNinetiesListFilterAdmin(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
changelist.get_results(request)
self.assertEqual(changelist.full_result_count, 4)
from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.urls import Resolver404, resolve
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
* Not to break the debug page if the callable forbidding to set attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
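# A rough illustration of the cleansing behaviour above (inferred from the
# code, not an official example): keys matching HIDDEN_SETTINGS are masked,
# dict values are cleansed recursively, and everything else passes through.
#
#     cleanse_setting('SECRET_KEY', 'abc123')      # -> CLEANSED_SUBSTITUTE
#     cleanse_setting('DEBUG', True)               # -> True
#     cleanse_setting('DATABASES', {'default': {'PASSWORD': 'pw', 'NAME': 'db'}})
#     # -> {'default': {'PASSWORD': CLEANSED_SUBSTITUTE, 'NAME': 'db'}}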
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
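# The per-request hook above means code running before the error (e.g. a
# middleware) can opt into a custom filter. A hypothetical sketch, assuming
# MyReporterFilter is a SafeExceptionReporterFilter subclass defined elsewhere:
#
#     class CustomReporterFilterMiddleware(object):
#         def process_request(self, request):
#             request.exception_reporter_filter = MyReporterFilter()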
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter adds safety in production environments (i.e. when DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the values of keys marked as sensitive in a MultiValueDict with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
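# The 'sensitive_post_parameters' attribute read above is normally set by
# django.views.decorators.debug.sensitive_post_parameters. A hedged usage
# sketch (the view itself is an assumption):
#
#     from django.views.decorators.debug import sensitive_post_parameters
#
#     @sensitive_post_parameters('password', 'credit_card')
#     def payment_view(request):
#         ...  # those POST values show up starred in error reports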
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. The isinstance check is still used so that
# lazily-evaluated MultiValueDicts are recognized and cleansed.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
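# The 'sensitive_variables' marker detected above is normally set by
# django.views.decorators.debug.sensitive_variables. A hedged usage sketch
# (the function body is an assumption):
#
#     from django.views.decorators.debug import sensitive_variables
#
#     @sensitive_variables('password', 'api_token')
#     def process_signup(username, password, api_token):
#         ...  # these locals are starred out in debug tracebacks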
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
# The force_escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = force_text(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = '[unable to retrieve the current user]'
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'user_str': user_str,
'filtered_POST_items': self.filter.get_post_parameters(self.request).items(),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
if self.request is not None:
c['request_GET_items'] = self.request.GET.items()
c['request_FILES_items'] = self.request.FILES.items()
c['request_COOKIES_items'] = self.request.COOKIES.items()
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = force_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
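# Reading of the slicing above (an interpretation of the code, not documented
# behaviour): with lineno=41 and context_lines=7, pre_context is source[34:41]
# (the 7 lines before), context_line is source[41], and post_context is
# source[42:48] (the following 6 lines, since upper_bound excludes index 48).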
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
# Avoid shadowing the built-in list()
lines = ['Traceback (most recent call last):\n']
lines += traceback.format_list(tb)
lines += traceback.format_exception_only(self.exc_type, self.exc_value)
return lines
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried or ( # empty URLconf
request.path == '/' and
len(tried) == 1 and # default URLconf
len(tried[0]) == 1 and
getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
)):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _(
"Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."
),
"explanation": _(
"You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"
),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
r"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.textContent = s.textContent == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.textContent = link.textContent.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.get_raw_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% if template_info.bottom != template_info.total %} cut-bottom{% endif %}">
{% for source_line in template_info.source_lines %}
{% if source_line.0 == template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endif %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:0 %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public website">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
{% if user_str %}
<h3 id="user-info">USER</h3>
<p>{{ user_str }}</p>
{% endif %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_GET_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST_items %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in filtered_POST_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_FILES_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for k, v in request_COOKIES_items %}
<tr>
<td>{{ k }}</td>
<td class="code"><pre>{{ v|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""") # NOQA
TECHNICAL_500_TEXT_TEMPLATE = (""""""
"""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
{% if user_str %}USER: {{ user_str }}{% endif %}
GET:{% for k, v in request_GET_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request_FILES_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request_COOKIES_items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""") # NOQA
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>
{% if request_path %}
The current path, <code>{{ request_path|escape }}</code>,{% else %}
The empty path{% endif %} didn't match any of these.
</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
|
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: disable=no-value-for-parameter, too-many-lines, protected-access
# pylint: disable=too-many-public-methods
import unittest
from argus.backends.heat import client
from heatclient import exc
from heatclient import client as heatclient_client
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient import exceptions as ks_exc
try:
import unittest.mock as mock
except ImportError:
import mock
class TestHeatClient(unittest.TestCase):
@mock.patch('six.moves.urllib_parse.urlparse')
@mock.patch('keystoneclient.discover.Discover')
def _test_discover_auth_versions_exception(
self, mock_discover, mock_url_parse,
client_exception=None, expected_auth="fake auth"):
auths = {
"/v2": None,
"/v3": None
}
if client_exception is None:
mock_url_for = mock.Mock()
mock_url_for.url_for.side_effect = tuple(auths)
mock_discover.return_value = mock_url_for
result = client._discover_auth_versions(
mock.sentinel, mock.sentinel)
self.assertEqual(result, tuple(auths.values()))
else:
auth_url = mock.sentinel
mock_discover.side_effect = ks_exc.ClientException()
mock_path = mock.Mock()
mock_path.path = expected_auth
mock_url_parse.return_value = mock_path
if expected_auth in auths.keys():
result = client._discover_auth_versions(
mock.sentinel, auth_url)
auths[expected_auth] = auth_url
self.assertEqual(
sorted(result), sorted(tuple(auths.values())))
else:
msg = ('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url. Identity service may not support API '
'version discovery. Please provide a versioned '
'auth_url instead.')
exp = exc.CommandError(msg)
with self.assertRaises(exc.CommandError) as ex:
client._discover_auth_versions(
mock.sentinel, mock.sentinel)
self.assertEqual(ex.exception.message, exp.message)
@mock.patch('keystoneclient.discover.Discover')
def test_discover_auth_version_success(self, mock_discover):
mock_url_for = mock.Mock()
mock_url_for.url_for = mock.Mock(side_effect=['2.0', '3.0'])
mock_discover.return_value = mock_url_for
result = client._discover_auth_versions(mock.sentinel, mock.sentinel)
self.assertEqual(result, ('2.0', '3.0'))
def test_discover_auth_version_v2(self):
self._test_discover_auth_versions_exception(
client_exception=True,
expected_auth='/v2'
)
def test_discover_auth_version_v3(self):
self._test_discover_auth_versions_exception(
client_exception=True,
expected_auth='/v3'
)
def test_discover_auth_version_command_error(self):
self._test_discover_auth_versions_exception(
client_exception=True
)
def _test_keystone_v3_auth(self, auth_token):
v3_auth_url = mock.sentinel
kwargs = {
"fake param 1": mock.sentinel,
"auth_token": auth_token,
"fake param 2": mock.sentinel
}
if auth_token is None:
class_ = v3_auth.password.Password
else:
class_ = v3_auth.token.Token
with mock.patch('keystoneclient.auth.identity.v3.' + class_.__name__,
spec=class_) as mock_class:
result = client._get_keystone_v3_auth(v3_auth_url, **kwargs)
self.assertTrue(isinstance(result, class_))
if auth_token is None:
kwargs.pop('auth_token', None)
mock_class.assert_called_once_with(v3_auth_url, **kwargs)
else:
mock_class.assert_called_once_with(v3_auth_url, auth_token)
def test_password(self):
self._test_keystone_v3_auth(auth_token=None)
def test_token(self):
self._test_keystone_v3_auth(auth_token=mock.sentinel)
def _test_get_keystone_v2_auth(self, auth_token):
v2_auth_url = mock.sentinel
kwargs = {
"fake param 1": mock.sentinel,
"auth_token": auth_token,
"fake param 2": mock.sentinel,
"project_id": mock.sentinel,
"project_name": mock.sentinel,
"tenant_id": mock.sentinel,
"tenant_name": mock.sentinel,
"username": mock.sentinel,
"password": mock.sentinel
}
if auth_token is not None:
class_ = v2_auth.Token
else:
class_ = v2_auth.Password
with mock.patch('keystoneclient.auth.identity.v2.' + class_.__name__,
spec=class_) as mock_class:
result = client._get_keystone_v2_auth(v2_auth_url, **kwargs)
self.assertTrue(isinstance(result, class_))
if auth_token is not None:
mock_class.assert_called_once_with(
v2_auth_url, auth_token,
tenant_id=mock.sentinel,
tenant_name=mock.sentinel)
else:
mock_class.assert_called_once_with(
v2_auth_url,
username=kwargs.pop('username', None),
password=kwargs.pop('password', None),
tenant_id=mock.sentinel,
tenant_name=mock.sentinel)
def test_get_keystone_v2_auth_token(self):
self._test_get_keystone_v2_auth(auth_token=mock.sentinel)
def test_get_keystone_v2_auth_password(self):
self._test_get_keystone_v2_auth(auth_token=None)
@mock.patch('argus.backends.heat.client._get_keystone_v3_auth')
@mock.patch('argus.backends.heat.client._get_keystone_v2_auth')
@mock.patch('argus.backends.heat.client._discover_auth_versions')
def _test_get_keystone_auth(self, mock_discover, mock_v2, mock_v3,
v2_auth_url=None, v3_auth_url=None, **kwargs):
mock_discover.return_value = (v2_auth_url, v3_auth_url)
mock_v2.return_value = mock.sentinel
mock_v3.return_value = mock.sentinel
result = client._get_keystone_auth(mock.sentinel, mock.sentinel,
**kwargs)
if v3_auth_url and v2_auth_url:
user_domain_name = kwargs.get('user_domain_name', None)
user_domain_id = kwargs.get('user_domain_id', None)
project_domain_name = kwargs.get('project_domain_name', None)
project_domain_id = kwargs.get('project_domain_id', None)
if (user_domain_name or user_domain_id or project_domain_name or
project_domain_id):
mock_v3.assert_called_once_with(v3_auth_url, **kwargs)
self.assertEqual(result, v3_auth_url)
else:
mock_v2.assert_called_once_with(v2_auth_url, **kwargs)
self.assertEqual(result, v2_auth_url)
elif v3_auth_url:
mock_v3.assert_called_once_with(v3_auth_url, **kwargs)
self.assertEqual(result, v3_auth_url)
elif v2_auth_url:
mock_v2.assert_called_once_with(v2_auth_url, **kwargs)
self.assertEqual(result, v2_auth_url)
def test_get_keystone_auth_v3_v2(self):
kwargs = {
"user_domain_name": mock.sentinel
}
self._test_get_keystone_auth(v2_auth_url=mock.sentinel,
v3_auth_url=mock.sentinel, **kwargs)
def test_get_keystone_auth_v3_v2_no_kwargs(self):
self._test_get_keystone_auth(v2_auth_url=mock.sentinel,
v3_auth_url=mock.sentinel)
def test_get_keystone_auth_v3(self):
kwargs = {
"user_domain_name": mock.sentinel
}
self._test_get_keystone_auth(v3_auth_url=mock.sentinel, **kwargs)
def test_get_keystone_auth_v2(self):
kwargs = {
"user_domain_name": mock.sentinel
}
self._test_get_keystone_auth(v2_auth_url=mock.sentinel, **kwargs)
@mock.patch('argus.backends.heat.client._discover_auth_versions')
def test_get_keystone_auth_fails(self, mock_discover):
v2_auth_url, v3_auth_url = None, None
mock_discover.return_value = (v2_auth_url, v3_auth_url)
        with self.assertRaises(exc.CommandError) as ex:
            client._get_keystone_auth("session", "url")
        self.assertEqual(str(ex.exception),
                         'Unable to determine the Keystone '
                         'version to authenticate with using the '
                         'given auth_url.')
@mock.patch('argus.backends.heat.client._get_keystone_auth')
@mock.patch('heatclient.common.utils.env')
@mock.patch('keystoneclient.session.Session')
def test_heat_client(self, mock_kssession, mock_env, mock_get_ks_auth):
mock_env.return_value = mock.sentinel
mock_get_endpoint = mock.Mock()
mock_get_endpoint.get_endpoint.return_value = mock.sentinel
mock_get_ks_auth.return_value = mock_get_endpoint
class Credentials(object):
def __init__(self):
self.username = "fake username"
self.user_id = "fake user id"
self.password = "fake password"
self.tenant_id = "fake tenant id"
self.tenant_name = "fake tenant name"
credentials = Credentials()
result = client.heat_client(credentials)
self.assertTrue(result, heatclient_client.Client)
mock_kssession.assert_called_once_with(verify=True)
mock_env.assert_called_once_with('OS_AUTH_URL')
|
|
# Copyright 2016 Rackspace
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
subunit-describe-calls is a parser for subunit streams to determine what REST
API calls are made inside of a test and in what order they are called.
Runtime Arguments
-----------------
* ``--subunit, -s``: (Optional) The path to the subunit file being parsed,
defaults to stdin
* ``--non-subunit-name, -n``: (Optional) The file_name that the logs are being
stored in
* ``--output-file, -o``: (Optional) The path where the JSON output will be
written to. This contains more information than is present in stdout.
* ``--ports, -p``: (Optional) The path to a JSON file describing the ports
being used by different services
* ``--verbose, -v``: (Optional) Print Request and Response Headers and Body
  data to stdout in the deprecated non-cliff CLI
* ``--all-stdout, -a``: (Optional) Print Request and Response Headers and Body
data to stdout
Usage
-----
subunit-describe-calls reads a subunit v1 or v2 stream, either from stdin or
from a file path passed via the ``--subunit`` parameter. The stream is then
parsed, checking for details contained in the file_bytes of the
``--non-subunit-name`` parameter (the default is pythonlogging, which is what
Tempest uses to store logs). By default `the OpenStack default ports
<https://docs.openstack.org/install-guide/firewalls-default-ports.html>`_
are used unless a file is provided via the ``--ports`` option. The resulting
output is dumped as JSON to the path provided in the ``--output-file``
option.
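A typical invocation, using illustrative file names, might look like::
    subunit-describe-calls -s testrepository.subunit -p ports.json -o calls.json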
Ports file JSON structure
^^^^^^^^^^^^^^^^^^^^^^^^^
::
{
"<port number>": "<name of service>",
...
}
Output file JSON structure
^^^^^^^^^^^^^^^^^^^^^^^^^^
::
{
"full_test_name[with_id_and_tags]": [
{
"name": "The ClassName.MethodName that made the call",
"verb": "HTTP Verb",
"service": "Name of the service",
"url": "A shortened version of the URL called",
"status_code": "The status code of the response",
"request_headers": "The headers of the request",
"request_body": "The body of the request",
"response_headers": "The headers of the response",
"response_body": "The body of the response"
}
]
}
"""
import argparse
import collections
import io
import os
import re
import sys
import traceback
from cliff.command import Command
from oslo_serialization import jsonutils as json
import subunit
import testtools
DESCRIPTION = "Outputs all HTTP calls a given test made that were logged."
class UrlParser(testtools.TestResult):
uuid_re = re.compile(r'(^|[^0-9a-f])[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
'[0-9a-f]{4}-[0-9a-f]{12}([^0-9a-f]|$)')
id_re = re.compile(r'(^|[^0-9a-z])[0-9a-z]{8}[0-9a-z]{4}[0-9a-z]{4}'
'[0-9a-z]{4}[0-9a-z]{12}([^0-9a-z]|$)')
ip_re = re.compile(r'(^|[^0-9])[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]'
'{1,3}([^0-9]|$)')
url_re = re.compile(r'.*INFO.*Request \((?P<name>.*)\): (?P<code>[\d]{3}) '
r'(?P<verb>\w*) (?P<url>.*) .*')
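    # Shape of a log line matched by url_re (illustrative only, not taken
    # from a real log):
    #   ... INFO ... Request (ServersTest:test_list): 200 GET http://10.0.0.5:8774/v2.1/servers 0.123s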
port_re = re.compile(r'.*:(?P<port>\d+).*')
path_re = re.compile(r'http[s]?://[^/]*/(?P<path>.*)')
request_re = re.compile(r'.* Request - Headers: (?P<headers>.*)')
response_re = re.compile(r'.* Response - Headers: (?P<headers>.*)')
body_re = re.compile(r'.*Body: (?P<body>.*)')
# Based on OpenStack default ports:
# https://docs.openstack.org/install-guide/firewalls-default-ports.html
services = {
"8776": "Block Storage",
"8774": "Nova",
"8773": "Nova-API", "8775": "Nova-API",
"8386": "Sahara",
"35357": "Keystone", "5000": "Keystone",
"9292": "Glance", "9191": "Glance",
"9696": "Neutron",
"6000": "Swift", "6001": "Swift", "6002": "Swift",
"8004": "Heat", "8000": "Heat", "8003": "Heat",
"8777": "Ceilometer",
"80": "Horizon",
"8080": "Swift",
"443": "SSL",
"873": "rsync",
"3260": "iSCSI",
"3306": "MySQL",
"5672": "AMQP",
"8082": "murano",
"8778": "Clustering",
"8999": "Vitrage",
"8989": "Mistral"}
def __init__(self, services=None):
super(UrlParser, self).__init__()
self.test_logs = {}
self.services = services or self.services
def addSuccess(self, test, details=None):
output = test.shortDescription() or test.id()
calls = self.parse_details(details)
self.test_logs.update({output: calls})
def addSkip(self, test, err, details=None):
output = test.shortDescription() or test.id()
calls = self.parse_details(details)
self.test_logs.update({output: calls})
def addError(self, test, err, details=None):
output = test.shortDescription() or test.id()
calls = self.parse_details(details)
self.test_logs.update({output: calls})
def addFailure(self, test, err, details=None):
output = test.shortDescription() or test.id()
calls = self.parse_details(details)
self.test_logs.update({output: calls})
def stopTestRun(self):
super(UrlParser, self).stopTestRun()
def startTestRun(self):
super(UrlParser, self).startTestRun()
def parse_details(self, details):
if details is None:
return
calls = []
for _, detail in details.items():
in_request = False
in_response = False
current_call = {}
for line in detail.as_text().split("\n"):
url_match = self.url_re.match(line)
request_match = self.request_re.match(line)
response_match = self.response_re.match(line)
body_match = self.body_re.match(line)
if url_match is not None:
if current_call != {}:
calls.append(current_call.copy())
current_call = {}
in_request, in_response = False, False
current_call.update({
"name": url_match.group("name"),
"verb": url_match.group("verb"),
"status_code": url_match.group("code"),
"service": self.get_service(url_match.group("url")),
"url": self.url_path(url_match.group("url"))})
elif request_match is not None:
in_request, in_response = True, False
current_call.update(
{"request_headers": request_match.group("headers")})
elif in_request and body_match is not None:
in_request = False
current_call.update(
{"request_body": body_match.group(
"body")})
elif response_match is not None:
in_request, in_response = False, True
current_call.update(
{"response_headers": response_match.group(
"headers")})
elif in_response and body_match is not None:
in_response = False
current_call.update(
{"response_body": body_match.group("body")})
if current_call != {}:
calls.append(current_call.copy())
return calls
def get_service(self, url):
match = self.port_re.match(url)
if match is not None:
return self.services.get(match.group("port"), "Unknown")
return "Unknown"
def url_path(self, url):
match = self.path_re.match(url)
if match is not None:
path = match.group("path")
path = self.uuid_re.sub(r'\1<uuid>\2', path)
path = self.ip_re.sub(r'\1<ip>\2', path)
path = self.id_re.sub(r'\1<id>\2', path)
return path
return url
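# FileAccumulator collects the raw bytes attached under the configured
# non-subunit name, one BytesIO per route code, so that embedded subunit v1
# payloads can later be replayed through subunit.ProtocolTestCase in parse().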
class FileAccumulator(testtools.StreamResult):
def __init__(self, non_subunit_name='pythonlogging'):
super(FileAccumulator, self).__init__()
self.route_codes = collections.defaultdict(io.BytesIO)
self.non_subunit_name = non_subunit_name
def status(self, **kwargs):
if kwargs.get('file_name') != self.non_subunit_name:
return
file_bytes = kwargs.get('file_bytes')
if not file_bytes:
return
route_code = kwargs.get('route_code')
stream = self.route_codes[route_code]
stream.write(file_bytes)
class ArgumentParser(argparse.ArgumentParser):
def __init__(self):
desc = DESCRIPTION
super(ArgumentParser, self).__init__(description=desc)
self.prog = "subunit-describe-calls"
_parser_add_args(self)
def parse(stream, non_subunit_name, ports):
if ports is not None and os.path.exists(ports):
ports = json.loads(open(ports).read())
url_parser = UrlParser(ports)
suite = subunit.ByteStreamToStreamResult(
stream, non_subunit_name=non_subunit_name)
result = testtools.StreamToExtendedDecorator(url_parser)
accumulator = FileAccumulator(non_subunit_name)
result = testtools.StreamResultRouter(result)
result.add_rule(accumulator, 'test_id', test_id=None)
result.startTestRun()
suite.run(result)
for bytes_io in accumulator.route_codes.values(): # v1 processing
bytes_io.seek(0)
suite = subunit.ProtocolTestCase(bytes_io)
suite.run(url_parser)
result.stopTestRun()
return url_parser
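# Illustrative programmatic use of parse()/output() (the file name below is
# hypothetical):
#     with open('stream.subunit', 'rb') as stream:
#         url_parser = parse(stream, 'pythonlogging', None)
#     output(url_parser, None, all_stdout=False)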
def output(url_parser, output_file, all_stdout):
if output_file is not None:
with open(output_file, "w") as outfile:
outfile.write(json.dumps(url_parser.test_logs))
return
for test_name in url_parser.test_logs:
items = url_parser.test_logs[test_name]
sys.stdout.write('{0}\n'.format(test_name))
if not items:
sys.stdout.write('\n')
continue
for item in items:
sys.stdout.write('\t- {0} {1} request for {2} to {3}\n'.format(
item.get('status_code'), item.get('verb'),
item.get('service'), item.get('url')))
if all_stdout:
sys.stdout.write('\t\t- request headers: {0}\n'.format(
item.get('request_headers')))
sys.stdout.write('\t\t- request body: {0}\n'.format(
item.get('request_body')))
sys.stdout.write('\t\t- response headers: {0}\n'.format(
item.get('response_headers')))
sys.stdout.write('\t\t- response body: {0}\n'.format(
item.get('response_body')))
sys.stdout.write('\n')
def entry_point(cl_args=None):
print('Running subunit_describe_calls ...')
if not cl_args:
print("Use of: 'subunit-describe-calls' is deprecated, "
"please use: 'tempest subunit-describe-calls'")
cl_args = ArgumentParser().parse_args()
parser = parse(cl_args.subunit, cl_args.non_subunit_name, cl_args.ports)
output(parser, cl_args.output_file, cl_args.all_stdout)
def _parser_add_args(parser):
parser.add_argument(
"-s", "--subunit", metavar="<subunit file>",
nargs="?", type=argparse.FileType('rb'), default=sys.stdin,
        help="The path to the subunit output file (default: stdin v1/v2 stream)"
)
parser.add_argument(
"-n", "--non-subunit-name", metavar="<non subunit name>",
default="pythonlogging",
help="The name used in subunit to describe the file contents."
)
parser.add_argument(
"-o", "--output-file", metavar="<output file>", default=None,
help="The output file name for the json."
)
parser.add_argument(
"-p", "--ports", metavar="<ports file>", default=None,
help="A JSON file describing the ports for each service."
)
group = parser.add_mutually_exclusive_group()
    # The -v and --verbose options are for the old subunit-describe-calls
    # main() CLI interface. They do not work with the new
    # tempest subunit-describe-calls CLI, so this argument can be removed
    # once the main CLI approach is deleted.
group.add_argument(
"-v", "--verbose", action='store_true', dest='all_stdout',
        help='Print Request and Response header and body data to stdout.'
             ' NOTE: This argument is deprecated and does not work with the'
             ' tempest subunit-describe-calls CLI.'
             ' Use the new option: "-a", "--all-stdout"'
)
group.add_argument(
"-a", "--all-stdout", action='store_true',
        help="Print Request and Response header and body data to stdout."
             " Note: this argument works with both the subunit-describe-calls"
             " and tempest subunit-describe-calls CLI commands."
)
class TempestSubunitDescribeCalls(Command):
def get_parser(self, prog_name):
parser = super(TempestSubunitDescribeCalls, self).get_parser(prog_name)
_parser_add_args(parser)
return parser
def take_action(self, parsed_args):
try:
entry_point(parsed_args)
except Exception:
traceback.print_exc()
raise
def get_description(self):
return DESCRIPTION
if __name__ == "__main__":
entry_point()
|
|
r"""
Elliptic functions historically comprise the elliptic integrals
and their inverses, and originate from the problem of computing the
arc length of an ellipse. From a more modern point of view,
an elliptic function is defined as a doubly periodic function, i.e.
a function which satisfies
.. math ::
f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
for some half-periods `\omega_1, \omega_2` with
`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
functions are the Jacobi elliptic functions. More broadly, this section
includes quasi-doubly periodic functions (such as the Jacobi theta
functions) and other functions useful in the study of elliptic functions.
Many different conventions for the arguments of
elliptic functions are in use. It is even standard to use
different parameterizations for different functions in the same
text or software (and mpmath is no exception).
The usual parameters are the elliptic nome `q`, which usually
must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
complex number); the elliptic modulus `k` (an arbitrary complex
number); and the half-period ratio `\tau`, which usually must
satisfy `\mathrm{Im}[\tau] > 0`.
These quantities can be expressed in terms of each other
using the following relations:
.. math ::
m = k^2
.. math ::
    \tau = i \frac{K(1-m)}{K(m)}
.. math ::
q = e^{i \pi \tau}
.. math ::
    k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
In addition, an alternative definition is used for the nome in
number theory, which we here denote by q-bar:
.. math ::
\bar{q} = q^2 = e^{2 i \pi \tau}
For convenience, mpmath provides functions to convert
between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
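As a minimal check of these relations, the conversion functions can be used
to confirm `m = k^2` directly (outputs shown for exact binary inputs)::
    >>> from sympy.mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> mfrom(k=0.5)    # m = k^2
    0.25
    >>> kfrom(m=0.25)   # k = sqrt(m)
    0.5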
**References**
1. [AbramowitzStegun]_
2. [WhittakerWatson]_
"""
from .functions import defun, defun_wrapped
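# nome(m) evaluates the elliptic nome q = exp(-pi*K(1-m)/K(m)), i.e.
# q = exp(i*pi*tau) with tau = i*K(1-m)/K(m), with special cases for
# m in {0, 1, nan, +-inf} and a real result for real m < 1.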
def nome(ctx, m):
m = ctx.convert(m)
if not m:
return m
if m == ctx.one:
return m
if ctx.isnan(m):
return m
if ctx.isinf(m):
if m == ctx.ninf:
return type(m)(-1)
else:
return ctx.mpc(-1)
a = ctx.ellipk(ctx.one-m)
b = ctx.ellipk(m)
v = ctx.exp(-ctx.pi*a/b)
if not ctx._im(m) and ctx._re(m) < 1:
if ctx._is_real_type(m):
return v.real
else:
return v.real + 0j
elif m == 2:
v = ctx.mpc(0, v.imag)
return v
@defun_wrapped
def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qfrom(q=0.25)
0.25
>>> qfrom(m=mfrom(q=0.25))
0.25
>>> qfrom(k=kfrom(q=0.25))
0.25
>>> qfrom(tau=taufrom(q=0.25))
(0.25 + 0.0j)
>>> qfrom(qbar=qbarfrom(q=0.25))
0.25
"""
if q is not None:
return ctx.convert(q)
if m is not None:
return nome(ctx, m)
if k is not None:
return nome(ctx, ctx.convert(k)**2)
if tau is not None:
return ctx.expjpi(tau)
if qbar is not None:
return ctx.sqrt(qbar)
@defun_wrapped
def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the number-theoretic nome `\bar q`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qbarfrom(qbar=0.25)
0.25
>>> qbarfrom(q=qfrom(qbar=0.25))
0.25
>>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
0.25
>>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
0.25
>>> qbarfrom(tau=taufrom(qbar=0.25))
(0.25 + 0.0j)
"""
if qbar is not None:
return ctx.convert(qbar)
if q is not None:
return ctx.convert(q) ** 2
if m is not None:
return nome(ctx, m) ** 2
if k is not None:
return nome(ctx, ctx.convert(k)**2) ** 2
if tau is not None:
return ctx.expjpi(2*tau)
@defun_wrapped
def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic half-period ratio `\tau`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> taufrom(tau=0.5j)
(0.0 + 0.5j)
>>> taufrom(q=qfrom(tau=0.5j))
(0.0 + 0.5j)
>>> taufrom(m=mfrom(tau=0.5j))
(0.0 + 0.5j)
>>> taufrom(k=kfrom(tau=0.5j))
(0.0 + 0.5j)
>>> taufrom(qbar=qbarfrom(tau=0.5j))
(0.0 + 0.5j)
"""
if tau is not None:
return ctx.convert(tau)
if m is not None:
m = ctx.convert(m)
return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m)
if k is not None:
k = ctx.convert(k)
return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2)
if q is not None:
return ctx.log(q) / (ctx.pi*ctx.j)
if qbar is not None:
qbar = ctx.convert(qbar)
return ctx.log(qbar) / (2*ctx.pi*ctx.j)
@defun_wrapped
def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic modulus `k`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> kfrom(k=0.25)
0.25
>>> kfrom(m=mfrom(k=0.25))
0.25
>>> kfrom(q=qfrom(k=0.25))
0.25
>>> kfrom(tau=taufrom(k=0.25))
(0.25 + 0.0j)
>>> kfrom(qbar=qbarfrom(k=0.25))
0.25
As `q \to 1` and `q \to -1`, `k` rapidly approaches
`1` and `i \infty` respectively::
>>> kfrom(q=0.75)
0.9999999999999899166471767
>>> kfrom(q=-0.75)
(0.0 + 7041781.096692038332790615j)
>>> kfrom(q=1)
1
>>> kfrom(q=-1)
(0.0 + +infj)
"""
if k is not None:
return ctx.convert(k)
if m is not None:
return ctx.sqrt(m)
if tau is not None:
q = ctx.expjpi(tau)
if qbar is not None:
q = ctx.sqrt(qbar)
if q == 1:
return q
if q == -1:
return ctx.mpc(0,'inf')
return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2
@defun_wrapped
def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic parameter `m`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> mfrom(m=0.25)
0.25
>>> mfrom(q=qfrom(m=0.25))
0.25
>>> mfrom(k=kfrom(m=0.25))
0.25
>>> mfrom(tau=taufrom(m=0.25))
(0.25 + 0.0j)
>>> mfrom(qbar=qbarfrom(m=0.25))
0.25
As `q \to 1` and `q \to -1`, `m` rapidly approaches
`1` and `-\infty` respectively::
>>> mfrom(q=0.75)
0.9999999999999798332943533
>>> mfrom(q=-0.75)
-49586681013729.32611558353
>>> mfrom(q=1)
1.0
>>> mfrom(q=-1)
-inf
The inverse nome as a function of `q` has an integer
Taylor series expansion::
>>> taylor(lambda q: mfrom(q), 0, 7)
[0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0]
"""
if m is not None:
return m
if k is not None:
return k**2
if tau is not None:
q = ctx.expjpi(tau)
if qbar is not None:
q = ctx.sqrt(qbar)
if q == 1:
return ctx.convert(q)
if q == -1:
return q*ctx.inf
v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
if ctx._is_real_type(q) and q < 0:
v = v.real
return v
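# Each jacobi_spec entry maps a two-letter Jacobi function name to
# (theta indices multiplied at 0, theta indices divided at 0,
#  theta indices multiplied at t, theta indices divided at t,
#  m = 0 trigonometric limit, m = 1 hyperbolic limit),
# which is how ellipfun() below assembles the corresponding theta quotient.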
jacobi_spec = {
'sn' : ([3],[2],[1],[4], 'sin', 'tanh'),
'cn' : ([4],[2],[2],[4], 'cos', 'sech'),
'dn' : ([4],[3],[3],[4], '1', 'sech'),
'ns' : ([2],[3],[4],[1], 'csc', 'coth'),
'nc' : ([2],[4],[4],[2], 'sec', 'cosh'),
'nd' : ([3],[4],[4],[3], '1', 'cosh'),
'sc' : ([3],[4],[1],[2], 'tan', 'sinh'),
'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'),
'cd' : ([3],[2],[2],[3], 'cos', '1'),
'cs' : ([4],[3],[2],[1], 'cot', 'csch'),
'dc' : ([2],[3],[3],[2], 'sec', '1'),
'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'),
'cc' : None,
'ss' : None,
'nn' : None,
'dd' : None
}
@defun
def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
try:
S = jacobi_spec[kind]
except KeyError:
raise ValueError("First argument must be a two-character string "
"containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
if u is None:
def f(*args, **kwargs):
return ctx.ellipfun(kind, *args, **kwargs)
f.__name__ = kind
return f
prec = ctx.prec
try:
ctx.prec += 10
u = ctx.convert(u)
q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
if S is None:
v = ctx.one + 0*q*u
elif q == ctx.zero:
if S[4] == '1': v = ctx.one
else: v = getattr(ctx, S[4])(u)
v += 0*q*u
elif q == ctx.one:
if S[5] == '1': v = ctx.one
else: v = getattr(ctx, S[5])(u)
v += 0*q*u
else:
t = u / ctx.jtheta(3, 0, q)**2
v = ctx.one
for a in S[0]: v *= ctx.jtheta(a, 0, q)
for b in S[1]: v /= ctx.jtheta(b, 0, q)
for c in S[2]: v *= ctx.jtheta(c, t, q)
for d in S[3]: v /= ctx.jtheta(d, t, q)
finally:
ctx.prec = prec
return +v
@defun_wrapped
def kleinj(ctx, tau=None, **kwargs):
r"""
Evaluates the Klein j-invariant, which is a modular function defined for
`\tau` in the upper half-plane as
.. math ::
J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}
where `g_2` and `g_3` are the modular invariants of the Weierstrass
elliptic function,
.. math ::
g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4}
g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}.
An alternative, common notation is that of the j-function
`j(\tau) = 1728 J(\tau)`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/kleinj.py
.. image :: /modules/mpmath/plots/kleinj.png
.. literalinclude :: /modules/mpmath/plots/kleinj2.py
.. image :: /modules/mpmath/plots/kleinj2.png
**Examples**
Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tau = 0.625+0.75*j
>>> kleinj(tau)
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
>>> kleinj(tau+1)
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
>>> kleinj(-1/tau)
(-0.1507492166511182267125242 + 0.07595948379084571927228946j)
The j-function has a famous Laurent series expansion in terms of the nome
`\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::
>>> mp.dps = 15
>>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
[1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]
The j-function admits exact evaluation at special algebraic points
related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163::
>>> @extraprec(10)
... def h(n):
... v = (1+sqrt(n)*j)
... if n > 2:
... v *= 0.5
... return v
...
>>> mp.dps = 25
>>> for n in [1,2,3,7,11,19,43,67,163]:
... n, chop(1728*kleinj(h(n)))
...
(1, 1728.0)
(2, 8000.0)
(3, 0.0)
(7, -3375.0)
(11, -32768.0)
(19, -884736.0)
(43, -884736000.0)
(67, -147197952000.0)
(163, -262537412640768000.0)
Also at other special points, the j-function assumes explicit
algebraic values, e.g.::
>>> chop(1728*kleinj(j*sqrt(5)))
1264538.909475140509320227
>>> identify(cbrt(_)) # note: not simplified
'((100+sqrt(13520))/2)'
>>> (50+26*sqrt(5))**3
1264538.909475140509320227
"""
q = ctx.qfrom(tau=tau, **kwargs)
t2 = ctx.jtheta(2,0,q)
t3 = ctx.jtheta(3,0,q)
t4 = ctx.jtheta(4,0,q)
P = (t2**8 + t3**8 + t4**8)**3
Q = 54*(t2*t3*t4)**8
return P/Q
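# Carlson R_F via the duplication theorem: x, y, z are repeatedly transformed
# towards their common mean Am, after which a truncated Taylor expansion in
# the elementary symmetric functions E2, E3 is applied (cf. Carlson's
# algorithms for numerical computation of real or complex elliptic integrals).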
def RF_calc(ctx, x, y, z, r):
if y == z: return RC_calc(ctx, x, y, r)
if x == z: return RC_calc(ctx, y, x, r)
if x == y: return RC_calc(ctx, z, x, r)
if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
return x*y*z
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
return ctx.zero
xm,ym,zm = x,y,z
A0 = Am = (x+y+z)/3
Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
g = ctx.mpf(0.25)
pow4 = ctx.one
m = 0
while 1:
xs = ctx.sqrt(xm)
ys = ctx.sqrt(ym)
zs = ctx.sqrt(zm)
lm = xs*ys + xs*zs + ys*zs
Am1 = (Am+lm)*g
xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
if pow4 * Q < abs(Am):
break
Am = Am1
m += 1
pow4 *= g
t = pow4/Am
X = (A0-x)*t
Y = (A0-y)*t
Z = -X-Y
E2 = X*Y-Z**2
E3 = X*Y*Z
return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
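# Degenerate case R_C(x, y) = R_F(x, y, y), evaluated from the elementary
# acos/acosh closed forms; for real y < 0 the Cauchy principal value is
# obtained through the reflection R_C(x, y) = sqrt(x/(x-y)) * R_C(x-y, -y).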
def RC_calc(ctx, x, y, r, pv=True):
if not (ctx.isnormal(x) and ctx.isnormal(y)):
if ctx.isinf(x) or ctx.isinf(y):
return 1/(x*y)
if y == 0:
return ctx.inf
if x == 0:
return ctx.pi / ctx.sqrt(y) / 2
raise ValueError
# Cauchy principal value
if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
if x == y:
return 1/ctx.sqrt(x)
extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
ctx.prec += extraprec
if ctx._is_real_type(x) and ctx._is_real_type(y):
x = ctx._re(x)
y = ctx._re(y)
a = ctx.sqrt(x/y)
if x < y:
b = ctx.sqrt(y-x)
v = ctx.acos(a)/b
else:
b = ctx.sqrt(x-y)
v = ctx.acosh(a)/b
else:
sx = ctx.sqrt(x)
sy = ctx.sqrt(y)
v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
ctx.prec -= extraprec
return v
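# Carlson R_J via the duplication theorem: each iteration contributes an R_C
# term to the running sum S, and the remainder is evaluated from a truncated
# Taylor series in E2..E5 around the common mean Am.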
def RJ_calc(ctx, x, y, z, p, r):
if not (ctx.isnormal(x) and ctx.isnormal(y) and \
ctx.isnormal(z) and ctx.isnormal(p)):
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
return x*y*z
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
return ctx.zero
if not p:
return ctx.inf
xm,ym,zm,pm = x,y,z,p
A0 = Am = (x + y + z + 2*p)/5
delta = (p-x)*(p-y)*(p-z)
Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
m = 0
g = ctx.mpf(0.25)
pow4 = ctx.one
S = 0
while 1:
sx = ctx.sqrt(xm)
sy = ctx.sqrt(ym)
sz = ctx.sqrt(zm)
sp = ctx.sqrt(pm)
lm = sx*sy + sx*sz + sy*sz
Am1 = (Am+lm)*g
xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
dm = (sp+sx) * (sp+sy) * (sp+sz)
em = delta * ctx.power(4, -3*m) / dm**2
if pow4 * Q < abs(Am):
break
T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
S += T
pow4 *= g
m += 1
Am = Am1
t = ctx.ldexp(1,-2*m) / Am
X = (A0-x)*t
Y = (A0-y)*t
Z = (A0-z)*t
P = (-X-Y-Z)/2
E2 = X*Y + X*Z + Y*Z - 3*P**2
E3 = X*Y*Z + 2*E2*P + 4*P**3
E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
E5 = X*Y*Z*P**2
P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
Q = 24024
v1 = g**m * ctx.power(Am, -1.5) * P/Q
v2 = 6*S
return v1 + v2
@defun
def elliprf(ctx, x, y, z):
r"""
Evaluates the Carlson symmetric elliptic integral of the first kind
.. math ::
R_F(x,y,z) = \frac{1}{2}
\int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}
which is defined for `x,y,z \notin (-\infty,0)`, and with
at most one of `x,y,z` being zero.
For real `x,y,z \ge 0`, the principal square root is taken in the integrand.
For complex `x,y,z`, the principal square root is taken as `t \to \infty`
and as `t \to 0` non-principal branches are chosen as necessary so as to
make the integrand continuous.
**Examples**
Some basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprf(0,1,1); pi/2
1.570796326794896619231322
1.570796326794896619231322
>>> elliprf(0,1,inf)
0.0
>>> elliprf(1,1,1)
1.0
>>> elliprf(2,2,2)**2
0.5
>>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0)
+inf
+inf
+inf
+inf
Representing complete elliptic integrals in terms of `R_F`::
>>> m = mpf(0.75)
>>> ellipk(m); elliprf(0,1-m,1)
2.156515647499643235438675
2.156515647499643235438675
>>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3
1.211056027568459524803563
1.211056027568459524803563
Some symmetries and argument transformations::
>>> x,y,z = 2,3,4
>>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x)
0.5840828416771517066928492
0.5840828416771517066928492
0.5840828416771517066928492
>>> k = mpf(100000)
>>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z)
0.001847032121923321253219284
0.001847032121923321253219284
>>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x)
>>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l)
0.5840828416771517066928492
0.5840828416771517066928492
>>> elliprf((x+l)/4,(y+l)/4,(z+l)/4)
0.5840828416771517066928492
Comparing with numerical integration::
>>> x,y,z = 2,3,4
>>> elliprf(x,y,z)
0.5840828416771517066928492
>>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5)
>>> q = extradps(25)(quad)
>>> q(f, [0,inf])
0.5840828416771517066928492
With the following arguments, the square root in the integrand becomes
discontinuous at `t = 1/2` if the principal branch is used. To obtain
the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}`
on `t \in (0, 1/2)`::
>>> x,y,z = j-1,j,0
>>> elliprf(x,y,z)
(0.7961258658423391329305694 - 1.213856669836495986430094j)
>>> -q(f, [0,0.5]) + q(f, [0.5,inf])
(0.7961258658423391329305694 - 1.213856669836495986430094j)
The so-called *first lemniscate constant*, a transcendental number::
>>> elliprf(0,1,2)
1.31102877714605990523242
>>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1])
1.31102877714605990523242
>>> gamma('1/4')**2/(4*sqrt(2*pi))
1.31102877714605990523242
**References**
1. [Carlson]_
2. [DLMF]_ Chapter 19. Elliptic Integrals
"""
x = ctx.convert(x)
y = ctx.convert(y)
z = ctx.convert(z)
prec = ctx.prec
try:
ctx.prec += 20
tol = ctx.eps * 2**10
v = RF_calc(ctx, x, y, z, tol)
finally:
ctx.prec = prec
return +v
@defun
def elliprc(ctx, x, y, pv=True):
r"""
Evaluates the degenerate Carlson symmetric elliptic integral
of the first kind
.. math ::
R_C(x,y) = R_F(x,y,y) =
\frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.
If `y \in (-\infty,0)`, either a value defined by continuity,
or with *pv=True* the Cauchy principal value, can be computed.
If `x \ge 0, y > 0`, the value can be expressed in terms of
elementary functions as
.. math ::
R_C(x,y) =
\begin{cases}
\dfrac{1}{\sqrt{y-x}}
\cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\
\dfrac{1}{\sqrt{y}}, & x = y \\
\dfrac{1}{\sqrt{x-y}}
\cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\
\end{cases}.
**Examples**
Some special values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprc(1,2)*4; elliprc(0,1)*2; +pi
3.141592653589793238462643
3.141592653589793238462643
3.141592653589793238462643
>>> elliprc(1,0)
+inf
>>> elliprc(5,5)**2
0.2
>>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf)
0.0
0.0
0.0
Comparing with the elementary closed-form solution::
>>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3'))
2.041630778983498390751238
2.041630778983498390751238
>>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5'))
1.875180765206547065111085
1.875180765206547065111085
Comparing with numerical integration::
>>> q = extradps(25)(quad)
>>> elliprc(2, -3, pv=True)
0.3333969101113672670749334
>>> elliprc(2, -3, pv=False)
(0.3333969101113672670749334 + 0.7024814731040726393156375j)
>>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf])
(0.3333969101113672670749334 + 0.7024814731040726393156375j)
"""
x = ctx.convert(x)
y = ctx.convert(y)
prec = ctx.prec
try:
ctx.prec += 20
tol = ctx.eps * 2**10
v = RC_calc(ctx, x, y, tol, pv)
finally:
ctx.prec = prec
return +v
@defun
def elliprj(ctx, x, y, z, p):
r"""
Evaluates the Carlson symmetric elliptic integral of the third kind
.. math ::
R_J(x,y,z,p) = \frac{3}{2}
\int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.
Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand
is defined so as to be continuous along the path of integration for
complex values of the arguments.
**Examples**
Some values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprj(1,1,1,1)
1.0
>>> elliprj(2,2,2,2); 1/(2*sqrt(2))
0.3535533905932737622004222
0.3535533905932737622004222
>>> elliprj(0,1,2,2)
1.067937989667395702268688
>>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi))
1.067937989667395702268688
>>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4
1.380226776765915172432054
1.380226776765915172432054
>>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0)
+inf
+inf
+inf
>>> elliprj(1,inf,1,0); elliprj(1,1,1,inf)
0.0
0.0
>>> chop(elliprj(1+j, 1-j, 1, 1))
0.8505007163686739432927844
Scale transformation::
>>> x,y,z,p = 2,3,4,5
>>> k = mpf(100000)
>>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p)
4.521291677592745527851168e-9
4.521291677592745527851168e-9
Comparing with numerical integration::
>>> elliprj(1,2,3,4)
0.2398480997495677621758617
>>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3)))
>>> 1.5*quad(f, [0,inf])
0.2398480997495677621758617
>>> elliprj(1,2+1j,3,4-2j)
(0.216888906014633498739952 + 0.04081912627366673332369512j)
>>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3)))
>>> 1.5*quad(f, [0,inf])
(0.216888906014633498739952 + 0.04081912627366673332369511j)
"""
x = ctx.convert(x)
y = ctx.convert(y)
z = ctx.convert(z)
p = ctx.convert(p)
prec = ctx.prec
try:
ctx.prec += 20
tol = ctx.eps * 2**10
v = RJ_calc(ctx, x, y, z, p, tol)
finally:
ctx.prec = prec
return +v
@defun
def elliprd(ctx, x, y, z):
r"""
Evaluates the degenerate Carlson symmetric elliptic integral
of the third kind or Carlson elliptic integral of the
second kind `R_D(x,y,z) = R_J(x,y,z,z)`.
See :func:`~mpmath.elliprj` for additional information.
**Examples**
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprd(1,2,3)
0.2904602810289906442326534
>>> elliprj(1,2,3,3)
0.2904602810289906442326534
The so-called *second lemniscate constant*, a transcendental number::
>>> elliprd(0,2,1)/3
0.5990701173677961037199612
>>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1])
0.5990701173677961037199612
>>> gamma('3/4')**2/sqrt(2*pi)
0.5990701173677961037199612
"""
return ctx.elliprj(x,y,z,z)
@defun
def elliprg(ctx, x, y, z):
r"""
Evaluates the Carlson completely symmetric elliptic integral
of the second kind
.. math ::
R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
\frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
\left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.
**Examples**
Evaluation for real and complex arguments::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprg(0,1,1)*4; +pi
3.141592653589793238462643
3.141592653589793238462643
>>> elliprg(0,0.5,1)
0.6753219405238377512600874
>>> chop(elliprg(1+j, 1-j, 2))
1.172431327676416604532822
A double integral that can be evaluated in terms of `R_G`::
>>> x,y,z = 2,3,4
>>> def f(t,u):
... st = fp.sin(t); ct = fp.cos(t)
... su = fp.sin(u); cu = fp.cos(u)
... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st
...
>>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13)
1.725503028069
>>> nprint(elliprg(x,y,z), 13)
1.725503028069
"""
x = ctx.convert(x)
y = ctx.convert(y)
z = ctx.convert(z)
if not z: x, z = z, x
if not z: y, z = x, y
if not z: return ctx.inf
def terms():
T1 = 0.5*z*ctx.elliprf(x,y,z)
T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
T3 = 0.5*ctx.sqrt(x*y/z)
return T1,T2,T3
return ctx.sum_accurately(terms)
@defun_wrapped
def ellipf(ctx, phi, m):
r"""
Evaluates the Legendre incomplete elliptic integral of the first kind
.. math ::
F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}
or equivalently
.. math ::
F(\phi,m) = \int_0^{\sin \phi}
\frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}.
The function reduces to a complete elliptic integral of the first kind
(see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is,
.. math ::
F\left(\frac{\pi}{2}, m\right) = K(m).
In the defining integral, it is assumed that the principal branch
of the square root is taken and that the path of integration avoids
crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
the function extends quasi-periodically as
.. math ::
F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ellipf.py
.. image :: /modules/mpmath/plots/ellipf.png
**Examples**
Basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipf(0,1)
0.0
>>> ellipf(0,0)
0.0
>>> ellipf(1,0); ellipf(2+3j,0)
1.0
(2.0 + 3.0j)
>>> ellipf(1,1); log(sec(1)+tan(1))
1.226191170883517070813061
1.226191170883517070813061
>>> ellipf(pi/2, -0.5); ellipk(-0.5)
1.415737208425956198892166
1.415737208425956198892166
>>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1)
+inf
+inf
>>> ellipf(1.5, 1)
3.340677542798311003320813
Comparing with numerical integration::
>>> z,m = 0.5, 1.25
>>> ellipf(z,m)
0.5287219202206327872978255
>>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z])
0.5287219202206327872978255
The arguments may be complex numbers::
>>> ellipf(3j, 0.5)
(0.0 + 1.713602407841590234804143j)
>>> ellipf(3+4j, 5-6j)
(1.269131241950351323305741 - 0.3561052815014558335412538j)
>>> z,m = 2+3j, 1.25
>>> k = 1011
>>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m)
(4086.184383622179764082821 - 3003.003538923749396546871j)
(4086.184383622179764082821 - 3003.003538923749396546871j)
For `|\Re(z)| < \pi/2`, the function can be expressed as a
hypergeometric series of two variables
(see :func:`~mpmath.appellf1`)::
>>> z,m = 0.5, 0.25
>>> ellipf(z,m)
0.5050887275786480788831083
>>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2)
0.5050887275786480788831083
"""
z = phi
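    # Reduction to Carlson form:
    #   F(phi, m) = sin(phi) * R_F(cos(phi)**2, 1 - m*sin(phi)**2, 1),
    # after shifting Re(phi) into [-pi/2, pi/2] using
    # F(phi + n*pi, m) = 2*n*K(m) + F(phi, m).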
if not (ctx.isnormal(z) and ctx.isnormal(m)):
if m == 0:
return z + m
if z == 0:
return z * m
if m == ctx.inf or m == ctx.ninf: return z/m
raise ValueError
x = z.real
ctx.prec += max(0, ctx.mag(x))
pi = +ctx.pi
away = abs(x) > pi/2
if m == 1:
if away:
return ctx.inf
if away:
d = ctx.nint(x/pi)
z = z-pi*d
P = 2*d*ctx.ellipk(m)
else:
P = 0
c, s = ctx.cos_sin(z)
return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P
@defun_wrapped
def ellipe(ctx, *args):
r"""
Called with a single argument `m`, evaluates the Legendre complete
elliptic integral of the second kind, `E(m)`, defined by
.. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\,
\frac{\pi}{2}
\,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right).
Called with two arguments `\phi, m`, evaluates the incomplete elliptic
integral of the second kind
.. math ::
E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt =
\int_0^{\sin z}
\frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt.
The incomplete integral reduces to a complete integral when
`\phi = \frac{\pi}{2}`; that is,
.. math ::
E\left(\frac{\pi}{2}, m\right) = E(m).
In the defining integral, it is assumed that the principal branch
of the square root is taken and that the path of integration avoids
crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`,
the function extends quasi-periodically as
.. math ::
E(\phi + n \pi, m) = 2 n E(m) + F(\phi,m), n \in \mathbb{Z}.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ellipe.py
.. image :: /modules/mpmath/plots/ellipe.png
**Examples for the complete integral**
Basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipe(0)
1.570796326794896619231322
>>> ellipe(1)
1.0
>>> ellipe(-1)
1.910098894513856008952381
>>> ellipe(2)
(0.5990701173677961037199612 + 0.5990701173677961037199612j)
>>> ellipe(inf)
(0.0 + +infj)
>>> ellipe(-inf)
+inf
Verifying the defining integral and hypergeometric
representation::
>>> ellipe(0.5)
1.350643881047675502520175
>>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
1.350643881047675502520175
>>> pi/2*hyp2f1(0.5,-0.5,1,0.5)
1.350643881047675502520175
Evaluation is supported for arbitrary complex `m`::
>>> ellipe(0.5+0.25j)
(1.360868682163129682716687 - 0.1238733442561786843557315j)
>>> ellipe(3+4j)
(1.499553520933346954333612 - 1.577879007912758274533309j)
A definite integral::
>>> quad(ellipe, [0,1])
1.333333333333333333333333
**Examples for the incomplete integral**
Basic values and limits::
>>> ellipe(0,1)
0.0
>>> ellipe(0,0)
0.0
>>> ellipe(1,0)
1.0
>>> ellipe(2+3j,0)
(2.0 + 3.0j)
>>> ellipe(1,1); sin(1)
0.8414709848078965066525023
0.8414709848078965066525023
>>> ellipe(pi/2, -0.5); ellipe(-0.5)
1.751771275694817862026502
1.751771275694817862026502
>>> ellipe(pi/2, 1); ellipe(-pi/2, 1)
1.0
-1.0
>>> ellipe(1.5, 1)
0.9974949866040544309417234
Comparing with numerical integration::
>>> z,m = 0.5, 1.25
>>> ellipe(z,m)
0.4740152182652628394264449
>>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z])
0.4740152182652628394264449
The arguments may be complex numbers::
>>> ellipe(3j, 0.5)
(0.0 + 7.551991234890371873502105j)
>>> ellipe(3+4j, 5-6j)
(24.15299022574220502424466 + 75.2503670480325997418156j)
>>> k = 35
>>> z,m = 2+3j, 1.25
>>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m)
(48.30138799412005235090766 + 17.47255216721987688224357j)
(48.30138799412005235090766 + 17.47255216721987688224357j)
For `|\Re(z)| < \pi/2`, the function can be expressed as a
hypergeometric series of two variables
(see :func:`~mpmath.appellf1`)::
>>> z,m = 0.5, 0.25
>>> ellipe(z,m)
0.4950017030164151928870375
>>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
0.4950017030164151928870376
"""
if len(args) == 1:
return ctx._ellipe(args[0])
else:
phi, m = args
z = phi
if not (ctx.isnormal(z) and ctx.isnormal(m)):
if m == 0:
return z + m
if z == 0:
return z * m
if m == ctx.inf or m == ctx.ninf:
return ctx.inf
raise ValueError
x = z.real
ctx.prec += max(0, ctx.mag(x))
pi = +ctx.pi
away = abs(x) > pi/2
if away:
d = ctx.nint(x/pi)
z = z-pi*d
P = 2*d*ctx.ellipe(m)
else:
P = 0
def terms():
c, s = ctx.cos_sin(z)
x = c**2
y = 1-m*s**2
RF = ctx.elliprf(x, y, 1)
RD = ctx.elliprd(x, y, 1)
return s*RF, -m*s**3*RD/3
return ctx.sum_accurately(terms) + P
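# Reference note (added for clarity, not in the original source): terms() above
# implements the Carlson symmetric-form reduction
#     E(phi, m) = sin(phi)*R_F(x, y, 1) - (m/3)*sin(phi)**3*R_D(x, y, 1),
# with x = cos(phi)**2 and y = 1 - m*sin(phi)**2. Summing the two terms through
# ctx.sum_accurately guards against cancellation, and P carries the 2*d*E(m)
# quasi-periodic correction.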
@defun_wrapped
def ellippi(ctx, *args):
r"""
Called with three arguments `n, \phi, m`, evaluates the Legendre
incomplete elliptic integral of the third kind
.. math ::
\Pi(n; \phi, m) = \int_0^{\phi}
\frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
\int_0^{\sin \phi}
\frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.
Called with two arguments `n, m`, evaluates the complete
elliptic integral of the third kind
`\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.
In the defining integral, it is assumed that the principal branch
of the square root is taken and that the path of integration avoids
crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
the function extends quasi-periodically as
.. math ::
\Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ellippi.py
.. image :: /modules/mpmath/plots/ellippi.png
**Examples for the complete integral**
Some basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellippi(0,-5); ellipk(-5)
0.9555039270640439337379334
0.9555039270640439337379334
>>> ellippi(inf,2)
0.0
>>> ellippi(2,inf)
0.0
>>> abs(ellippi(1,5))
+inf
>>> abs(ellippi(0.25,1))
+inf
Evaluation in terms of simpler functions::
>>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
1.956616279119236207279727
1.956616279119236207279727
>>> ellippi(3,0); pi/(2*sqrt(-2))
(0.0 - 1.11072073453959156175397j)
(0.0 - 1.11072073453959156175397j)
>>> ellippi(-3,0); pi/(2*sqrt(4))
0.7853981633974483096156609
0.7853981633974483096156609
**Examples for the incomplete integral**
Basic values and limits::
>>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
1.622944760954741603710555
1.622944760954741603710555
>>> ellippi(1,0,1)
0.0
>>> ellippi(inf,0,1)
0.0
>>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
0.2513040086544925794134591
0.2513040086544925794134591
>>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
2.054332933256248668692452
2.054332933256248668692452
>>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
135.240868757890840755058
135.240868757890840755058
>>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
0.9190227391656969903987269
0.9190227391656969903987269
Complex arguments are supported::
>>> ellippi(0.5, 5+6j-2*pi, -7-8j)
(-0.3612856620076747660410167 + 0.5217735339984807829755815j)
Some degenerate cases::
>>> ellippi(1,1)
+inf
>>> ellippi(1,0)
+inf
>>> ellippi(1,2,0)
+inf
>>> ellippi(1,2,1)
+inf
>>> ellippi(1,0,1)
0.0
"""
if len(args) == 2:
n, m = args
complete = True
z = phi = ctx.pi/2
else:
n, phi, m = args
complete = False
z = phi
if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
raise ValueError
if complete:
if m == 0:
if n == 1:
return ctx.inf
return ctx.pi/(2*ctx.sqrt(1-n))
if n == 0: return ctx.ellipk(m)
if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
else:
if z == 0: return z
if ctx.isinf(n): return ctx.zero
if ctx.isinf(m): return ctx.zero
if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
raise ValueError
if complete:
if m == 1:
if n == 1:
return ctx.inf
return -ctx.inf/ctx.sign(n-1)
away = False
else:
x = z.real
ctx.prec += max(0, ctx.mag(x))
pi = +ctx.pi
away = abs(x) > pi/2
if away:
d = ctx.nint(x/pi)
z = z-pi*d
P = 2*d*ctx.ellippi(n,m)
if ctx.isinf(P):
return ctx.inf
else:
P = 0
def terms():
if complete:
c, s = ctx.zero, ctx.one
else:
c, s = ctx.cos_sin(z)
x = c**2
y = 1-m*s**2
RF = ctx.elliprf(x, y, 1)
RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
return s*RF, n*s**3*RJ/3
return ctx.sum_accurately(terms) + P
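# Reference note (added for clarity, not in the original source): terms() above
# implements the Carlson symmetric-form reduction
#     Pi(n; phi, m) = sin(phi)*R_F(x, y, 1) + (n/3)*sin(phi)**3*R_J(x, y, 1, p),
# with x = cos(phi)**2, y = 1 - m*sin(phi)**2 and p = 1 - n*sin(phi)**2. For the
# complete integral the code substitutes c, s = 0, 1 (i.e. phi = pi/2) directly.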
|
|
"""PhoneMetadata object definitions"""
# Based on original Java code and protocol buffer:
# resources/phonemetadata.proto
# java/src/com/google/i18n/phonenumbers/Phonemetadata.java
# Copyright (C) 2010-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from .util import UnicodeMixin, ImmutableMixin, mutating_method
from .util import u, unicod, rpr, force_unicode
REGION_CODE_FOR_NON_GEO_ENTITY = u("001")
class NumberFormat(UnicodeMixin, ImmutableMixin):
"""Representation of way that a phone number can be formatted for output"""
@mutating_method
def __init__(self,
pattern=None,
format=None,
leading_digits_pattern=None,
national_prefix_formatting_rule=None,
national_prefix_optional_when_formatting=None,
domestic_carrier_code_formatting_rule=None):
# pattern is a regex that is used to match the national (significant)
# number. For example, the pattern "(20)(\d{4})(\d{4})" will match
# number "2070313000", which is the national (significant) number for
# Google London. Note the presence of the parentheses, which are
        # capturing groups that specify the grouping of numbers.
self.pattern = force_unicode(pattern) # Unicode string holding regexp
# format specifies how the national (significant) number matched by
# pattern should be formatted. Using the same example as above, format
# could contain "$1 $2 $3", meaning that the number should be
# formatted as "20 7031 3000". Each $x is replaced by the numbers
# captured by group x in the regex specified by pattern.
self.format = force_unicode(format) # None or Unicode string
# This field is a regex that is used to match a certain number of
# digits at the beginning of the national (significant) number. When
# the match is successful, the accompanying pattern and format should
# be used to format this number. For example, if
# leading_digits="[1-3]|44", then all the national numbers starting
# with 1, 2, 3 or 44 should be formatted using the accompanying
# pattern and format.
#
# The first leading_digits_pattern matches up to the first three digits
# of the national (significant) number; the next one matches the first
# four digits, then the first five and so on, until the
# leading_digits_pattern can uniquely identify one pattern and format
# to be used to format the number.
#
# In the case when only one formatting pattern exists, no
# leading_digits_pattern is needed.
self.leading_digits_pattern = [] # list of Unicode strings holding regexps
if leading_digits_pattern is not None:
self.leading_digits_pattern = [force_unicode(p) for p in leading_digits_pattern]
# This field specifies how the national prefix ($NP) together with the
# first group ($FG) in the national significant number should be
# formatted in the NATIONAL format when a national prefix exists for a
# certain country. For example, when this field contains "($NP$FG)", a
# number from Beijing, China (whose $NP = 0), which would by default
# be formatted without national prefix as 10 1234 5678 in NATIONAL
# format, will instead be formatted as (010) 1234 5678; to format it
# as (0)10 1234 5678, the field would contain "($NP)$FG". Note $FG
# should always be present in this field, but $NP can be omitted. For
# example, having "$FG" could indicate the number should be formatted
# in NATIONAL format without the national prefix. This is commonly
# used to override the rule specified for the territory in the XML
# file.
#
# When this field is missing, a number will be formatted without
# national prefix in NATIONAL format. This field does not affect how a
# number is formatted in other formats, such as INTERNATIONAL.
self.national_prefix_formatting_rule = force_unicode(national_prefix_formatting_rule) # None or Unicode string
# This field specifies whether the $NP can be omitted when formatting
# a number in national format, even though it usually wouldn't be. For
# example, a UK number would be formatted by our library as 020 XXXX
# XXXX. If we have commonly seen this number written by people without
# the leading 0, for example as (20) XXXX XXXX, this field would be
# set to true. This will be inherited from the value set for the
# territory in the XML file, unless a national_prefix_formatting_rule
# is defined specifically for this NumberFormat.
if national_prefix_optional_when_formatting is not None:
self.national_prefix_optional_when_formatting = bool(national_prefix_optional_when_formatting)
else:
self.national_prefix_optional_when_formatting = None
# This field specifies how any carrier code ($CC) together with the
# first group ($FG) in the national significant number should be
# formatted when format_with_carrier_code is called, if carrier codes
# are used for a certain country.
self.domestic_carrier_code_formatting_rule = force_unicode(domestic_carrier_code_formatting_rule) # None or Unicode string
def merge_from(self, other):
"""Merge information from another NumberFormat object into this one."""
if other.pattern is not None:
self.pattern = other.pattern
if other.format is not None:
self.format = other.format
self.leading_digits_pattern.extend(other.leading_digits_pattern)
if other.national_prefix_formatting_rule is not None:
self.national_prefix_formatting_rule = other.national_prefix_formatting_rule
if other.national_prefix_optional_when_formatting is not None:
self.national_prefix_optional_when_formatting = other.national_prefix_optional_when_formatting
if other.domestic_carrier_code_formatting_rule is not None:
self.domestic_carrier_code_formatting_rule = other.domestic_carrier_code_formatting_rule
def __eq__(self, other):
if not isinstance(other, NumberFormat):
return False
return (repr(self) == repr(other))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __unicode__(self):
# Generate a string that is valid Python input for the constructor.
# Note that we use rpr (variant of repr), which generates its own quotes.
result = unicod("NumberFormat(pattern=%s, format=%s") % (rpr(self.pattern), rpr(self.format))
if self.leading_digits_pattern:
result += (unicod(", leading_digits_pattern=[%s]") %
unicod(", ").join([rpr(ld) for ld in self.leading_digits_pattern]))
if self.national_prefix_formatting_rule is not None:
result += unicod(", national_prefix_formatting_rule=%s") % rpr(self.national_prefix_formatting_rule)
if self.national_prefix_optional_when_formatting is not None:
result += unicod(", national_prefix_optional_when_formatting=%s") % str(self.national_prefix_optional_when_formatting)
if self.domestic_carrier_code_formatting_rule is not None:
result += unicod(", domestic_carrier_code_formatting_rule=%s") % rpr(self.domestic_carrier_code_formatting_rule)
result += unicod(")")
return result
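# Illustrative sketch (added for clarity, not part of the original module): a
# NumberFormat roughly matching the Google London example described in the
# field comments above. The concrete strings are assumptions for demonstration
# only, not data shipped with the library:
#
#     fmt = NumberFormat(pattern=u("(\\d{2})(\\d{4})(\\d{4})"),
#                        format=u("$1 $2 $3"),
#                        leading_digits_pattern=[u("20")],
#                        national_prefix_formatting_rule=u("($NP$FG)"))
#     print(fmt)   # __unicode__ produces a constructor-style representation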
class PhoneNumberDesc(UnicodeMixin, ImmutableMixin):
"""Class representing the description of a set of phone numbers."""
@mutating_method
def __init__(self,
national_number_pattern=None,
example_number=None,
possible_length=None,
possible_length_local_only=None):
# The national_number_pattern is the pattern that a valid national
# significant number would match. This specifies information such as
# its total length and leading digits.
self.national_number_pattern = force_unicode(national_number_pattern) # None or Unicode string holding regexp
# An example national significant number for the specific type. It
# should not contain any formatting information.
self.example_number = force_unicode(example_number) # None or Unicode string
# These represent the lengths a phone number from this region can be. They
# will be sorted from smallest to biggest. Note that these lengths are for
# the full number, without country calling code or national prefix. For
# example, for the Swiss number +41789270000, in local format 0789270000,
# this would be 9.
# This could be used to highlight tokens in a text that may be a phone
# number, or to quickly prune numbers that could not possibly be a phone
# number for this locale.
if possible_length is None:
possible_length = ()
self.possible_length = possible_length # sequence of int
# These represent the lengths that only local phone numbers (without an area
# code) from this region can be. They will be sorted from smallest to
# biggest. For example, since the American number 456-1234 may be locally
# diallable, although not diallable from outside the area, 7 could be a
# possible value.
# This could be used to highlight tokens in a text that may be a phone
# number.
# To our knowledge, area codes are usually only relevant for some fixed-line
# and mobile numbers, so this field should only be set for those types of
# numbers (and the general description) - however there are exceptions for
# NANPA countries.
if possible_length_local_only is None:
possible_length_local_only = ()
self.possible_length_local_only = possible_length_local_only # sequence of int
def merge_from(self, other):
"""Merge information from another PhoneNumberDesc object into this one."""
if other.national_number_pattern is not None:
self.national_number_pattern = other.national_number_pattern
if other.example_number is not None:
self.example_number = other.example_number
def __eq__(self, other):
if not isinstance(other, PhoneNumberDesc):
return False
return (repr(self) == repr(other))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __unicode__(self):
# Generate a string that is valid Python input for constructor
result = unicod("PhoneNumberDesc(")
sep = unicod("")
if self.national_number_pattern is not None:
result += unicod("%snational_number_pattern=%s") % (sep, rpr(self.national_number_pattern))
sep = unicod(", ")
if self.example_number is not None:
result += unicod("%sexample_number=%s") % (sep, rpr(self.example_number))
sep = unicod(", ")
if self.possible_length:
result += unicod("%spossible_length=%s") % (sep, tuple(self.possible_length))
sep = unicod(", ")
if self.possible_length_local_only:
result += unicod("%spossible_length_local_only=%s") % (sep, tuple(self.possible_length_local_only))
sep = unicod(", ")
result += unicod(")")
return result
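# Illustrative sketch (added for clarity, not part of the original module): a
# PhoneNumberDesc along the lines of the Swiss mobile example mentioned in the
# comments above. The pattern, example number and lengths are assumptions for
# demonstration only:
#
#     desc = PhoneNumberDesc(national_number_pattern=u("7[5-9]\\d{7}"),
#                            example_number=u("789270000"),
#                            possible_length=(9,))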
def _same_pattern(left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
return (left.national_number_pattern == right.national_number_pattern)
class PhoneMetadata(UnicodeMixin, ImmutableMixin):
"""Class representing metadata for international telephone numbers for a region.
This class is hand created based on phonemetadata.proto. Please refer to that file
for detailed descriptions of the meaning of each field.
WARNING: This API isn't stable. It is considered libphonenumber-internal
and can change at any time. We only declare it as public for easy
    inclusion in our build tools, which are not in this package. Clients
    should not refer to this file; we do not commit to supporting backwards
    compatibility or to warning about breaking changes.
"""
# Lock that protects the *_available fields while they are being modified.
    # The modification involves loading data from a file, so we cannot just
# rely on the GIL.
_metadata_lock = threading.Lock()
# If a region code is a key in this dict, metadata for that region is available.
# The corresponding value of the map is either:
# - a function which loads the region's metadata
# - None, to indicate that the metadata is already loaded
_region_available = {} # ISO 3166-1 alpha 2 => function or None
# Likewise for short number metadata.
_short_region_available = {} # ISO 3166-1 alpha 2 => function or None
# Likewise for non-geo country calling codes.
_country_code_available = {} # country calling code (as int) => function or None
_region_metadata = {} # ISO 3166-1 alpha 2 => PhoneMetadata
_short_region_metadata = {} # ISO 3166-1 alpha 2 => PhoneMetadata
# A mapping from a country calling code for a non-geographical entity to
# the PhoneMetadata for that country calling code. Examples of the country
# calling codes include 800 (International Toll Free Service) and 808
# (International Shared Cost Service).
_country_code_metadata = {} # country calling code (as int) => PhoneMetadata
@classmethod
def metadata_for_region(kls, region_code, default=None):
loader = kls._region_available.get(region_code, None)
if loader is not None:
# Region metadata is available but has not yet been loaded. Do so now.
kls._metadata_lock.acquire()
loader(region_code)
kls._region_available[region_code] = None
kls._metadata_lock.release()
return kls._region_metadata.get(region_code, default)
@classmethod
def short_metadata_for_region(kls, region_code, default=None):
loader = kls._short_region_available.get(region_code, None)
if loader is not None:
# Region short number metadata is available but has not yet been loaded. Do so now.
kls._metadata_lock.acquire()
loader(region_code)
kls._short_region_available[region_code] = None
kls._metadata_lock.release()
return kls._short_region_metadata.get(region_code, default)
@classmethod
def metadata_for_nongeo_region(kls, country_code, default=None):
loader = kls._country_code_available.get(country_code, None)
if loader is not None:
# Region metadata is available but has not yet been loaded. Do so now.
kls._metadata_lock.acquire()
loader(country_code)
kls._country_code_available[country_code] = None
kls._metadata_lock.release()
return kls._country_code_metadata.get(country_code, default)
@classmethod
def metadata_for_region_or_calling_code(kls, country_calling_code, region_code):
if region_code == REGION_CODE_FOR_NON_GEO_ENTITY:
return kls.metadata_for_nongeo_region(country_calling_code, None)
else:
return kls.metadata_for_region(region_code, None)
@classmethod
def register_region_loader(kls, region_code, loader):
kls._region_available[region_code] = loader
@classmethod
def register_short_region_loader(kls, region_code, loader):
kls._short_region_available[region_code] = loader
@classmethod
def register_nongeo_region_loader(kls, country_code, loader):
kls._country_code_available[country_code] = loader
@classmethod
def load_all(kls):
"""Force immediate load of all metadata"""
# Force expansion of contents to lists because we invalidate the iterator
for region_code, region_loader in list(kls._region_available.items()):
if region_loader is not None: # pragma no cover
region_loader(region_code)
kls._region_available[region_code] = None
for country_code, cc_loader in list(kls._country_code_available.items()):
if cc_loader is not None:
cc_loader(country_code)
kls._country_code_available[country_code] = None
@mutating_method
def __init__(self,
id,
general_desc=None,
fixed_line=None,
mobile=None,
toll_free=None,
premium_rate=None,
shared_cost=None,
personal_number=None,
voip=None,
pager=None,
uan=None,
emergency=None,
voicemail=None,
short_code=None,
standard_rate=None,
carrier_specific=None,
sms_services=None,
no_international_dialling=None,
country_code=None,
international_prefix=None,
preferred_international_prefix=None,
national_prefix=None,
preferred_extn_prefix=None,
national_prefix_for_parsing=None,
national_prefix_transform_rule=None,
number_format=None,
intl_number_format=None,
main_country_for_code=False,
leading_digits=None,
leading_zero_possible=False,
mobile_number_portable_region=False,
short_data=False,
register=True):
# The general_desc contains information which is a superset of
# descriptions for all types of phone numbers. If any element is
# missing in the description of a specific type of number, the element
# will inherit from its counterpart in the general_desc. For all types
# that are generally relevant to normal phone numbers, if the whole
# type is missing in the PhoneNumberMetadata XML file, it will not have
# national number data, and the possible lengths will be [-1].
self.general_desc = general_desc # None or PhoneNumberDesc
self.fixed_line = fixed_line # None or PhoneNumberDesc
self.mobile = mobile # None or PhoneNumberDesc
self.toll_free = toll_free # None or PhoneNumberDesc
self.premium_rate = premium_rate # None or PhoneNumberDesc
self.shared_cost = shared_cost # None or PhoneNumberDesc
self.personal_number = personal_number # None or PhoneNumberDesc
self.voip = voip # None or PhoneNumberDesc
self.pager = pager # None or PhoneNumberDesc
self.uan = uan # None or PhoneNumberDesc
self.emergency = emergency # None or PhoneNumberDesc
self.voicemail = voicemail # None or PhoneNumberDesc
self.short_code = short_code # None or PhoneNumberDesc
self.standard_rate = standard_rate # None or PhoneNumberDesc
self.carrier_specific = carrier_specific # None or PhoneNumberDesc
self.sms_services = sms_services # None or PhoneNumberDesc
# The rules here distinguish the numbers that are only able to be
# dialled nationally.
self.no_international_dialling = no_international_dialling # None or PhoneNumberDesc
# The ISO 3166-1 alpha-2 representation of a country/region, with the
# exception of "country calling codes" used for non-geographical
# entities, such as Universal International Toll Free Number
# (+800). These are all given the ID "001", since this is the numeric
# region code for the world according to UN M.49:
# http://en.wikipedia.org/wiki/UN_M.49
self.id = force_unicode(id) # None or Unicode string
# The country calling code that one would dial from overseas when
# trying to dial a phone number in this country. For example, this
# would be "64" for New Zealand.
self.country_code = country_code # None or int
# The international_prefix of country A is the number that needs to be
# dialled from country A to another country (country B). This is
# followed by the country code for country B. Note that some countries
# may have more than one international prefix, and for those cases, a
# regular expression matching the international prefixes will be
# stored in this field.
self.international_prefix = force_unicode(international_prefix) # None or Unicode string
# If more than one international prefix is present, a preferred prefix
# can be specified here for out-of-country formatting purposes. If
# this field is not present, and multiple international prefixes are
# present, then "+" will be used instead.
self.preferred_international_prefix = force_unicode(preferred_international_prefix) # None or Unicode string
# The national prefix of country A is the number that needs to be
# dialled before the national significant number when dialling
# internally. This would not be dialled when dialling
# internationally. For example, in New Zealand, the number that would
# be locally dialled as 09 345 3456 would be dialled from overseas as
# +64 9 345 3456. In this case, 0 is the national prefix.
self.national_prefix = force_unicode(national_prefix) # None or Unicode string
# The preferred prefix when specifying an extension in this
# country. This is used for formatting only, and if this is not
# specified, a suitable default should be used instead. For example,
# if you wanted extensions to be formatted in the following way: 1
# (365) 345 445 ext. 2345 " ext. " should be the preferred extension
# prefix.
self.preferred_extn_prefix = force_unicode(preferred_extn_prefix) # None or Unicode string
# This field is used for cases where the national prefix of a country
# contains a carrier selection code, and is written in the form of a
# regular expression. For example, to dial the number 2222-2222 in
# Fortaleza, Brazil (area code 85) using the long distance carrier Oi
# (selection code 31), one would dial 0 31 85 2222 2222. Assuming the
# only other possible carrier selection code is 32, the field will
# contain "03[12]".
#
# When it is missing, this field inherits the value of national_prefix,
# if that is present.
self.national_prefix_for_parsing = force_unicode(national_prefix_for_parsing) # None or Unicode string holding regexp
# This field is only populated and used under very rare situations.
# For example, mobile numbers in Argentina are written in two
# completely different ways when dialed in-country and out-of-country
# (e.g. 0343 15 555 1212 is exactly the same number as +54 9 343 555
# 1212). This field is used together with national_prefix_for_parsing
# to transform the number into a particular representation for storing
# in the PhoneNumber class in those rare cases.
self.national_prefix_transform_rule = force_unicode(national_prefix_transform_rule) # None or Unicode string
# Specifies whether the mobile and fixed-line patterns are the same or
# not. This is used to speed up determining phone number type in
# countries where these two types of phone numbers can never be
# distinguished.
self.same_mobile_and_fixed_line_pattern = _same_pattern(self.mobile, self.fixed_line)
# Note that the number format here is used for formatting only, not
# parsing. Hence all the varied ways a user *may* write a number need
# not be recorded - just the ideal way we would like to format it for
# them. When this element is absent, the national significant number
# will be formatted as a whole without any formatting applied.
self.number_format = [] # List of NumberFormat objects
if number_format is not None:
self.number_format = number_format
# This field is populated only when the national significant number is
# formatted differently when it forms part of the INTERNATIONAL format
# and NATIONAL format. A case in point is mobile numbers in Argentina:
# The number, which would be written in INTERNATIONAL format as
# +54 9 343 555 1212, will be written as 0343 15 555 1212 for NATIONAL
# format. In this case, the prefix 9 is inserted when dialling from
# overseas, but otherwise the prefix 0 and the carrier selection code
# 15 (inserted after the area code of 343) is used.
# Note: this field is populated by setting a value for <intlFormat>
# inside the <numberFormat> tag in the XML file. If <intlFormat> is
# not set then it defaults to the same value as the <format> tag.
#
# Examples:
# To set the <intlFormat> to a different value than the <format>:
# <numberFormat pattern=....>
# <format>$1 $2 $3</format>
# <intlFormat>$1-$2-$3</intlFormat>
# </numberFormat>
#
# To have a format only used for national formatting, set <intlFormat> to
# "NA":
# <numberFormat pattern=....>
# <format>$1 $2 $3</format>
# <intlFormat>NA</intlFormat>
# </numberFormat>
self.intl_number_format = [] # List of NumberFormat objects
if intl_number_format is not None:
self.intl_number_format = intl_number_format
# This field is set when this country is considered to be the main
# country for a calling code. It may not be set by more than one
# country with the same calling code, and it should not be set by
# countries with a unique calling code. This can be used to indicate
# that "GB" is the main country for the calling code "44" for example,
# rather than Jersey or the Isle of Man.
self.main_country_for_code = bool(main_country_for_code)
# This field is populated only for countries or regions that share a
# country calling code. If a number matches this pattern, it could
# belong to this region. This is not intended as a replacement for
# is_valid_for_region, and does not mean the number must come from this
# region (for example, 800 numbers are valid for all NANPA countries.)
# This field should be a regular expression of the expected prefix
# match.
self.leading_digits = force_unicode(leading_digits) # None or Unicode string holding regexp
# Deprecated: do not use. Will be deleted when there are no references
# to this later.
self.leading_zero_possible = bool(leading_zero_possible)
# This field is set when this country has implemented mobile number
# portability. This means that transferring mobile numbers between
# carriers is allowed. A consequence of this is that phone prefix to
# carrier mapping is less reliable.
self.mobile_number_portable_region = mobile_number_portable_region # bool
# Record whether this metadata is for short numbers or normal numbers.
self.short_data = short_data # bool
if register:
# Register this instance with the relevant class-wide map
if self.id == REGION_CODE_FOR_NON_GEO_ENTITY:
if self.country_code in PhoneMetadata._country_code_metadata:
other = PhoneMetadata._country_code_metadata[self.country_code]
if self != other:
raise Exception("Duplicate non-geo PhoneMetadata for %s (from %s:%s)" % (self.country_code, self.id, self.country_code))
else:
PhoneMetadata._country_code_metadata[self.country_code] = self
elif self.short_data:
if self.id in PhoneMetadata._short_region_metadata:
other = PhoneMetadata._short_region_metadata[self.id]
if self != other:
raise Exception("Duplicate short PhoneMetadata for %s (from %s:%s)" % (self.id, self.id, self.country_code))
else:
PhoneMetadata._short_region_metadata[self.id] = self
else:
if self.id in PhoneMetadata._region_metadata:
other = PhoneMetadata._region_metadata[self.id]
if self != other:
raise Exception("Duplicate PhoneMetadata for %s (from %s:%s)" % (self.id, self.id, self.country_code))
else:
PhoneMetadata._region_metadata[self.id] = self
def __eq__(self, other):
if not isinstance(other, PhoneMetadata):
return False
return (repr(self) == repr(other))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __unicode__(self):
# Generate a string that is valid Python input for the constructor
result = (unicod("PhoneMetadata(id='%s', country_code=%r, international_prefix=%s") %
(self.id, self.country_code, rpr(self.international_prefix)))
result += unicod(",\n general_desc=%s") % self.general_desc
if self.fixed_line is not None:
result += unicod(",\n fixed_line=%s") % self.fixed_line
if self.mobile is not None:
result += unicod(",\n mobile=%s") % self.mobile
if self.toll_free is not None:
result += unicod(",\n toll_free=%s") % self.toll_free
if self.premium_rate is not None:
result += unicod(",\n premium_rate=%s") % self.premium_rate
if self.shared_cost is not None:
result += unicod(",\n shared_cost=%s") % self.shared_cost
if self.personal_number is not None:
result += unicod(",\n personal_number=%s") % self.personal_number
if self.voip is not None:
result += unicod(",\n voip=%s") % self.voip
if self.pager is not None:
result += unicod(",\n pager=%s") % self.pager
if self.uan is not None:
result += unicod(",\n uan=%s") % self.uan
if self.emergency is not None:
result += unicod(",\n emergency=%s") % self.emergency
if self.voicemail is not None:
result += unicod(",\n voicemail=%s") % self.voicemail
if self.short_code is not None:
result += unicod(",\n short_code=%s") % self.short_code
if self.standard_rate is not None:
result += unicod(",\n standard_rate=%s") % self.standard_rate
if self.carrier_specific is not None:
result += unicod(",\n carrier_specific=%s") % self.carrier_specific
if self.sms_services is not None:
result += unicod(",\n sms_services=%s") % self.sms_services
if self.no_international_dialling is not None:
result += unicod(",\n no_international_dialling=%s") % self.no_international_dialling
if self.preferred_international_prefix is not None:
result += unicod(",\n preferred_international_prefix=%s") % rpr(self.preferred_international_prefix)
if self.national_prefix is not None:
result += unicod(",\n national_prefix=%s") % rpr(self.national_prefix)
if self.preferred_extn_prefix is not None:
result += unicod(",\n preferred_extn_prefix=%s") % rpr(self.preferred_extn_prefix)
if self.national_prefix_for_parsing is not None:
result += unicod(",\n national_prefix_for_parsing=%s") % rpr(self.national_prefix_for_parsing)
if self.national_prefix_transform_rule is not None:
# Note that we use rpr() on self.national_prefix_transform_rule, which generates its own quotes
result += unicod(",\n national_prefix_transform_rule=%s") % rpr(self.national_prefix_transform_rule)
if self.number_format:
result += unicod(",\n number_format=[%s]") % unicod(',\n ').join(map(u, self.number_format))
if self.intl_number_format:
result += unicod(",\n intl_number_format=[%s]") % unicod(',\n ').join(map(u, self.intl_number_format))
if self.main_country_for_code:
result += unicod(",\n main_country_for_code=True")
if self.leading_digits is not None:
result += unicod(",\n leading_digits='%s'") % self.leading_digits
if self.leading_zero_possible:
result += unicod(",\n leading_zero_possible=True")
if self.mobile_number_portable_region:
result += unicod(",\n mobile_number_portable_region=True")
if self.short_data:
result += unicod(",\n short_data=True")
result += unicod(")")
return result
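# Illustrative sketch (added for clarity, not part of the original module): how
# the lazy-loading hooks above are typically wired together. The module name
# and loader body are assumptions for demonstration only; in practice the
# per-region modules are generated by the library's build tools:
#
#     def _load_example_region(code):
#         from .region_GB import PHONE_METADATA_GB  # hypothetical generated module
#         # Importing it constructs PhoneMetadata(..., register=True), which
#         # records the instance in PhoneMetadata._region_metadata[code].
#
#     PhoneMetadata.register_region_loader("GB", _load_example_region)
#     metadata = PhoneMetadata.metadata_for_region("GB")  # runs the loader once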
|
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Make sure that the examples given in the user guide all work.
"""
import TestSCons
test = TestSCons.TestSCons()
if not test.where_is('msginit'):
test.skip_test("Could not find 'msginit'; skipping test(s)\n")
pot_contents = """\
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\\n"
"Report-Msgid-Bugs-To: \\n"
"POT-Creation-Date: 2012-05-27 00:35+0200\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: LANGUAGE <LL@li.org>\\n"
"Language: \\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=CHARSET\\n"
"Content-Transfer-Encoding: 8bit\\n"
#
#: a.cpp:1
msgid "Hello from a.cpp"
msgstr ""
"""
###############################################################################
# POInit: Example 1
###############################################################################
test.subdir(['ex1'])
test.write( ['ex1', 'SConstruct'],
"""
env = Environment( tools = ["default", "msginit"] )
env['POAUTOINIT'] = 1
env.POInit(['en','pl']) # messages.pot --> [en.po, pl.po]
""")
#
test.write(['ex1', 'messages.pot'], pot_contents)
# NOTE: msginit(1) prints all its messages to stderr, so we must ignore them;
# stderr=None is crucial here. There is no point in matching stderr against a
# specific value, because the messages are internationalized.
test.run(arguments = 'po-create', chdir = 'ex1', stderr = None)
test.must_exist( ['ex1', 'en.po'] )
test.must_exist( ['ex1', 'pl.po'] )
test.must_contain( ['ex1', 'en.po'], "Hello from a.cpp")
test.must_contain( ['ex1', 'pl.po'], "Hello from a.cpp")
###############################################################################
# POInit: Example 2
###############################################################################
test.subdir(['ex2'])
test.write( ['ex2', 'SConstruct'],
"""
env = Environment( tools = ["default", "msginit"] )
env['POAUTOINIT'] = 1
env.POInit(['en','pl'], ['foo']) # foo.pot --> [en.po, pl.po]
""")
#
test.write(['ex2', 'foo.pot'], pot_contents)
# NOTE: msginit(1) prints all its messages to stderr, so we must ignore them;
# stderr=None is crucial here. There is no point in matching stderr against a
# specific value, because the messages are internationalized.
test.run(arguments = 'po-create', chdir = 'ex2', stderr = None)
test.must_exist( ['ex2', 'en.po'] )
test.must_exist( ['ex2', 'pl.po'] )
test.must_contain( ['ex2', 'en.po'], "Hello from a.cpp" )
test.must_contain( ['ex2', 'pl.po'], "Hello from a.cpp" )
###############################################################################
# POInit: Example 3
###############################################################################
test.subdir(['ex3'])
test.write( ['ex3', 'SConstruct'],
"""
env = Environment( tools = ["default", "msginit"] )
env['POAUTOINIT'] = 1
env.POInit(['en','pl'], POTDOMAIN='foo') # foo.pot --> [en.po, pl.po]
""")
#
test.write(['ex3', 'foo.pot'], pot_contents)
# NOTE: msginit(1) prints all its messages to stderr, so we must ignore them;
# stderr=None is crucial here. There is no point in matching stderr against a
# specific value, because the messages are internationalized.
test.run(arguments = 'po-create', chdir = 'ex3', stderr = None)
test.must_exist( ['ex3', 'en.po'] )
test.must_exist( ['ex3', 'pl.po'] )
test.must_contain( ['ex3', 'en.po'], "Hello from a.cpp")
test.must_contain( ['ex3', 'pl.po'], "Hello from a.cpp")
###############################################################################
# POInit: Example 4
###############################################################################
test.subdir(['ex4'])
test.write( ['ex4', 'SConstruct'],
"""
env = Environment( tools = ["default", "msginit"] )
env['POAUTOINIT'] = 1
env.POInit(LINGUAS_FILE = 1) # needs 'LINGUAS' file
""")
test.write(['ex4', 'LINGUAS'],"""
en
pl
""")
#
test.write(['ex4', 'messages.pot'], pot_contents)
# NOTE: msginit(1) prints all its messages to stderr, so we must ignore them;
# stderr=None is crucial here. There is no point in matching stderr against a
# specific value, because the messages are internationalized.
test.run(arguments = 'po-create', chdir = 'ex4', stderr = None)
test.must_exist( ['ex4', 'en.po'] )
test.must_exist( ['ex4', 'pl.po'] )
test.must_contain( ['ex4', 'en.po'], "Hello from a.cpp")
test.must_contain( ['ex4', 'pl.po'], "Hello from a.cpp")
###############################################################################
# POInit: Example 5
###############################################################################
test.subdir(['ex5'])
test.write( ['ex5', 'SConstruct'],
"""
env = Environment( tools = ["default", "msginit"] )
env['POAUTOINIT'] = 1
env.POInit(['en', 'pl'], LINGUAS_FILE = 1) # needs 'LINGUAS' file
""")
test.write(['ex5', 'LINGUAS'],"""
de
fr
""")
#
test.write(['ex5', 'messages.pot'], pot_contents)
# NOTE: msginit(1) prints all its messages to stderr, so we must ignore them;
# stderr=None is crucial here. There is no point in matching stderr against a
# specific value, because the messages are internationalized.
test.run(arguments = 'po-create', chdir = 'ex5', stderr = None)
test.must_exist( ['ex5', 'en.po'] )
test.must_exist( ['ex5', 'pl.po'] )
test.must_exist( ['ex5', 'de.po'] )
test.must_exist( ['ex5', 'fr.po'] )
test.must_contain( ['ex5', 'en.po'], "Hello from a.cpp")
test.must_contain( ['ex5', 'pl.po'], "Hello from a.cpp")
test.must_contain( ['ex5', 'de.po'], "Hello from a.cpp")
test.must_contain( ['ex5', 'fr.po'], "Hello from a.cpp")
###############################################################################
# POInit: Example 6
###############################################################################
test.subdir(['ex6'])
test.write( ['ex6', 'SConstruct'],
"""
env = Environment( tools = ["default", "msginit"] )
env['POAUTOINIT'] = 1
env['LINGUAS_FILE'] = 1
env['POTDOMAIN'] = 'foo'
env.POInit()
""")
test.write(['ex6', 'LINGUAS'],"""
en
pl
""")
#
test.write(['ex6', 'foo.pot'], pot_contents)
# NOTE: msginit(1) prints all its messages to stderr, so we must ignore them;
# stderr=None is crucial here. There is no point in matching stderr against a
# specific value, because the messages are internationalized.
test.run(arguments = 'po-create', chdir = 'ex6', stderr = None)
test.must_exist( ['ex6', 'en.po'] )
test.must_exist( ['ex6', 'pl.po'] )
test.must_contain( ['ex6', 'en.po'], "Hello from a.cpp")
test.must_contain( ['ex6', 'pl.po'], "Hello from a.cpp")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
import pytest
from spacy.util import get_lang_class
def pytest_addoption(parser):
parser.addoption("--slow", action="store_true", help="include slow tests")
def pytest_runtest_setup(item):
def getopt(opt):
# When using 'pytest --pyargs spacy' to test an installed copy of
# spacy, pytest skips running our pytest_addoption() hook. Later, when
# we call getoption(), pytest raises an error, because it doesn't
# recognize the option we're asking about. To avoid this, we need to
# pass a default value. We default to False, i.e., we act like all the
# options weren't given.
return item.config.getoption(f"--{opt}", False)
for opt in ["slow"]:
if opt in item.keywords and not getopt(opt):
pytest.skip(f"need --{opt} option to run")
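# For example (illustrative only, not part of the original conftest), a test
# guarded by the gate above would carry the corresponding marker and would only
# run when pytest is invoked with --slow:
#
#     @pytest.mark.slow
#     def test_train_large_model():
#         ...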
# Fixtures for language tokenizers (languages sorted alphabetically)
@pytest.fixture(scope="module")
def tokenizer():
return get_lang_class("xx")().tokenizer
@pytest.fixture(scope="session")
def am_tokenizer():
return get_lang_class("am")().tokenizer
@pytest.fixture(scope="session")
def ar_tokenizer():
return get_lang_class("ar")().tokenizer
@pytest.fixture(scope="session")
def bn_tokenizer():
return get_lang_class("bn")().tokenizer
@pytest.fixture(scope="session")
def ca_tokenizer():
return get_lang_class("ca")().tokenizer
@pytest.fixture(scope="session")
def cs_tokenizer():
return get_lang_class("cs")().tokenizer
@pytest.fixture(scope="session")
def da_tokenizer():
return get_lang_class("da")().tokenizer
@pytest.fixture(scope="session")
def de_tokenizer():
return get_lang_class("de")().tokenizer
@pytest.fixture(scope="session")
def de_vocab():
return get_lang_class("de")().vocab
@pytest.fixture(scope="session")
def el_tokenizer():
return get_lang_class("el")().tokenizer
@pytest.fixture(scope="session")
def en_tokenizer():
return get_lang_class("en")().tokenizer
@pytest.fixture(scope="session")
def en_vocab():
return get_lang_class("en")().vocab
@pytest.fixture(scope="session")
def en_parser(en_vocab):
nlp = get_lang_class("en")(en_vocab)
return nlp.create_pipe("parser")
@pytest.fixture(scope="session")
def es_tokenizer():
return get_lang_class("es")().tokenizer
@pytest.fixture(scope="session")
def eu_tokenizer():
return get_lang_class("eu")().tokenizer
@pytest.fixture(scope="session")
def fa_tokenizer():
return get_lang_class("fa")().tokenizer
@pytest.fixture(scope="session")
def fi_tokenizer():
return get_lang_class("fi")().tokenizer
@pytest.fixture(scope="session")
def fr_tokenizer():
return get_lang_class("fr")().tokenizer
@pytest.fixture(scope="session")
def ga_tokenizer():
return get_lang_class("ga")().tokenizer
@pytest.fixture(scope="session")
def gu_tokenizer():
return get_lang_class("gu")().tokenizer
@pytest.fixture(scope="session")
def he_tokenizer():
return get_lang_class("he")().tokenizer
@pytest.fixture(scope="session")
def hi_tokenizer():
return get_lang_class("hi")().tokenizer
@pytest.fixture(scope="session")
def hr_tokenizer():
return get_lang_class("hr")().tokenizer
@pytest.fixture
def hu_tokenizer():
return get_lang_class("hu")().tokenizer
@pytest.fixture(scope="session")
def id_tokenizer():
return get_lang_class("id")().tokenizer
@pytest.fixture(scope="session")
def it_tokenizer():
return get_lang_class("it")().tokenizer
@pytest.fixture(scope="session")
def ja_tokenizer():
pytest.importorskip("sudachipy")
return get_lang_class("ja")().tokenizer
@pytest.fixture(scope="session")
def ko_tokenizer():
pytest.importorskip("natto")
return get_lang_class("ko")().tokenizer
@pytest.fixture(scope="session")
def lb_tokenizer():
return get_lang_class("lb")().tokenizer
@pytest.fixture(scope="session")
def lt_tokenizer():
return get_lang_class("lt")().tokenizer
@pytest.fixture(scope="session")
def mk_tokenizer():
return get_lang_class("mk")().tokenizer
@pytest.fixture(scope="session")
def ml_tokenizer():
return get_lang_class("ml")().tokenizer
@pytest.fixture(scope="session")
def nb_tokenizer():
return get_lang_class("nb")().tokenizer
@pytest.fixture(scope="session")
def ne_tokenizer():
return get_lang_class("ne")().tokenizer
@pytest.fixture(scope="session")
def nl_tokenizer():
return get_lang_class("nl")().tokenizer
@pytest.fixture(scope="session")
def pl_tokenizer():
return get_lang_class("pl")().tokenizer
@pytest.fixture(scope="session")
def pt_tokenizer():
return get_lang_class("pt")().tokenizer
@pytest.fixture(scope="session")
def ro_tokenizer():
return get_lang_class("ro")().tokenizer
@pytest.fixture(scope="session")
def ru_tokenizer():
pytest.importorskip("pymorphy2")
return get_lang_class("ru")().tokenizer
@pytest.fixture
def ru_lemmatizer():
pytest.importorskip("pymorphy2")
return get_lang_class("ru")().add_pipe("lemmatizer")
@pytest.fixture(scope="session")
def sa_tokenizer():
return get_lang_class("sa")().tokenizer
@pytest.fixture(scope="session")
def sr_tokenizer():
return get_lang_class("sr")().tokenizer
@pytest.fixture(scope="session")
def sv_tokenizer():
return get_lang_class("sv")().tokenizer
@pytest.fixture(scope="session")
def th_tokenizer():
pytest.importorskip("pythainlp")
return get_lang_class("th")().tokenizer
@pytest.fixture(scope="session")
def ti_tokenizer():
return get_lang_class("ti")().tokenizer
@pytest.fixture(scope="session")
def tr_tokenizer():
return get_lang_class("tr")().tokenizer
@pytest.fixture(scope="session")
def tt_tokenizer():
return get_lang_class("tt")().tokenizer
@pytest.fixture(scope="session")
def ky_tokenizer():
return get_lang_class("ky")().tokenizer
@pytest.fixture(scope="session")
def uk_tokenizer():
pytest.importorskip("pymorphy2")
return get_lang_class("uk")().tokenizer
@pytest.fixture(scope="session")
def ur_tokenizer():
return get_lang_class("ur")().tokenizer
@pytest.fixture(scope="session")
def yo_tokenizer():
return get_lang_class("yo")().tokenizer
@pytest.fixture(scope="session")
def zh_tokenizer_char():
nlp = get_lang_class("zh")()
return nlp.tokenizer
@pytest.fixture(scope="session")
def zh_tokenizer_jieba():
pytest.importorskip("jieba")
config = {
"nlp": {
"tokenizer": {
"@tokenizers": "spacy.zh.ChineseTokenizer",
"segmenter": "jieba",
}
}
}
nlp = get_lang_class("zh").from_config(config)
return nlp.tokenizer
@pytest.fixture(scope="session")
def zh_tokenizer_pkuseg():
pytest.importorskip("spacy_pkuseg")
config = {
"nlp": {
"tokenizer": {
"@tokenizers": "spacy.zh.ChineseTokenizer",
"segmenter": "pkuseg",
}
},
"initialize": {"tokenizer": {"pkuseg_model": "web"}},
}
nlp = get_lang_class("zh").from_config(config)
nlp.initialize()
return nlp.tokenizer
@pytest.fixture(scope="session")
def hy_tokenizer():
return get_lang_class("hy")().tokenizer
|
|
"""
The MIT License (MIT)
Copyright (c) 2018 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from collections import defaultdict
import aiohttp
import argparse
import discord
import os
import re
import yaml
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
from io import StringIO
import asyncio
PATH = os.path.join("data", "post")
JSON = os.path.join(PATH, "settings.json")
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
class Post:
"""Post things from somewhere."""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
self._session = None
async def get_session(self):
if self._session is None:
self._session = aiohttp.ClientSession()
return self._session
def __unload(self):
if self._session is not None:
loop = asyncio.get_event_loop()
loop.create_task(self._session.close())
def parser(self):
p = argparse.ArgumentParser('[p]post')
p.add_argument(
"--path",
action="store",
dest="path"
)
p.add_argument(
"--url",
action="store",
dest="url"
)
p.add_argument(
"--data",
action="store",
dest="data"
)
return p
def parse_mentions(self, value, server=None):
"""Parse channel mentions"""
if value is None:
return None
def channel_repl(matchobj):
name = matchobj.group(1)
channel = discord.utils.get(server.channels, name=name)
if channel:
return channel.mention
else:
return "#{}".format(name)
        return re.sub(r'#([A-Za-z0-9\-]+)', channel_repl, value)
def parse_emoji(self, value):
"""Parse emojis."""
if value is None:
return None
def emoji_repl(matchobj):
name = matchobj.group(1)
s = ':{}:'.format(name)
for emoji in self.bot.get_all_emojis():
if emoji.name == name:
s = '<:{}:{}>'.format(emoji.name, emoji.id)
break
return s
        return re.sub(r':([A-Za-z0-9\-_]+):', emoji_repl, value)
@checks.mod_or_permissions()
@commands.command(name="post", pass_context=True, no_pm=True)
async def post(self, ctx, channel: discord.Channel, *args):
"""Post things to channel"""
parser = self.parser()
try:
pa = parser.parse_args(args)
except SystemExit:
await self.bot.send_cmd_help(ctx)
return
data = None
if pa.data:
with open(os.path.join(PATH, pa.data)) as f:
                data = yaml.safe_load(f)
elif pa.url:
session = await self.get_session()
async with session.get(pa.url) as resp:
s = await resp.text()
with StringIO(s) as f:
                    data = yaml.safe_load(f)
elif pa.path:
with open(pa.path) as f:
                data = yaml.safe_load(f)
if not data:
await self.bot.say("No data found.")
return
for d in data.get('embeds', []):
title = self.parse_emoji(d.get('title'))
description = self.parse_emoji(d.get('description'))
color = discord.Color.dark_blue()
d_color = d.get('color')
if d_color is not None:
                c = getattr(discord.Color, d_color, None)
if c:
color = c()
em = discord.Embed(
title=title,
description=description,
color=color
)
image_url = d.get('image', {}).get('url')
if image_url:
em.set_image(url=image_url)
fields = d.get('fields', [])
if fields:
for f in fields:
name = f.get('name')
value = f.get('value')
name = self.parse_emoji(name)
value = self.parse_mentions(value, server=ctx.message.server)
em.add_field(name=name, value=value)
try:
await self.bot.send_message(channel, embed=em)
except Exception as e:
print(e)
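# Illustrative sketch (added for clarity, not part of the original cog): the
# YAML layout that post() above expects. All keys and values are assumptions
# for demonstration only:
#
#     embeds:
#       - title: "Server news :tada:"
#         description: "See #announcements for details."
#         color: dark_blue
#         image:
#           url: "https://example.com/banner.png"
#         fields:
#           - name: "When"
#             value: "Friday in #events"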
def check_folder():
"""Check folder."""
os.makedirs(PATH, exist_ok=True)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, {})
def setup(bot):
"""Setup."""
check_folder()
check_file()
n = Post(bot)
bot.add_cog(n)
|
|
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.db import models
from south.db import db
from south.v2 import DataMigration
class Migration(DataMigration):
no_dry_run = True
def forwards(self, orm):
"""Sync up the locales enabled in settings.py with the locale teams."""
locale_teams = orm.Locale.objects.all()
enabled_locales = [l.lower() for l in settings.SUMO_LANGUAGES]
deleted_teams = []
for team in locale_teams:
if team.locale.lower() not in enabled_locales:
deleted_teams.append(team.locale)
team.delete()
print 'Deleted locale teams: %s' % deleted_teams
def backwards(self, orm):
raise RuntimeError('Cannot reverse this migration.')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'gallery.image': {
'Meta': {'ordering': "['-created']", 'unique_together': "(('locale', 'title'), ('is_draft', 'creator'))", 'object_name': 'Image'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gallery_images'", 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'locale': ('kitsune.sumo.models.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'products.platform': {
'Meta': {'object_name': 'Platform'},
'display_order': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'visible': ('django.db.models.fields.BooleanField', [], {})
},
u'products.product': {
'Meta': {'ordering': "['display_order']", 'object_name': 'Product'},
'description': ('django.db.models.fields.TextField', [], {}),
'display_order': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'image_cachebuster': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'image_offset': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'platforms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['products.Platform']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'sprite_height': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'products.topic': {
'Meta': {'ordering': "['product', 'display_order']", 'unique_together': "(('slug', 'product'),)", 'object_name': 'Topic'},
'description': ('django.db.models.fields.TextField', [], {}),
'display_order': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subtopics'", 'null': 'True', 'to': u"orm['products.Topic']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': u"orm['products.Product']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'wiki.document': {
'Meta': {'unique_together': "(('parent', 'locale'), ('title', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
'allow_discussion': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': u"orm['wiki.Revision']"}),
'html': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'latest_localizable_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'localizable_for+'", 'null': 'True', 'to': u"orm['wiki.Revision']"}),
'locale': ('kitsune.sumo.models.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
'needs_change': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'needs_change_comment': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': u"orm['wiki.Document']"}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['products.Product']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['products.Topic']", 'symmetrical': 'False'})
},
u'wiki.documentimage': {
'Meta': {'unique_together': "(('document', 'image'),)", 'object_name': 'DocumentImage'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gallery.Image']"})
},
u'wiki.documentlink': {
'Meta': {'unique_together': "(('linked_from', 'linked_to', 'kind'),)", 'object_name': 'DocumentLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'linked_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentlink_to_set'", 'to': u"orm['wiki.Document']"}),
'linked_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documentlink_from_set'", 'to': u"orm['wiki.Document']"})
},
u'wiki.helpfulvote': {
'Meta': {'object_name': 'HelpfulVote'},
'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': u"orm['auth.User']"}),
'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': u"orm['wiki.Revision']"}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'wiki.helpfulvotemetadata': {
'Meta': {'object_name': 'HelpfulVoteMetadata'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': u"orm['wiki.HelpfulVote']"})
},
u'wiki.importantdate': {
'Meta': {'object_name': 'ImportantDate'},
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'wiki.locale': {
'Meta': {'ordering': "['locale']", 'object_name': 'Locale'},
'editors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locales_editor'", 'blank': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leaders': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locales_leader'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '7', 'db_index': 'True'}),
'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locales_reviewer'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
u'wiki.revision': {
'Meta': {'object_name': 'Revision'},
'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': u"orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': u"orm['wiki.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_ready_for_localization': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'readied_for_localization': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'readied_for_localization_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'readied_for_l10n_revisions'", 'null': 'True', 'to': u"orm['auth.User']"}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': u"orm['auth.User']"}),
'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['wiki']
symmetrical = True
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""A parser for Relay's text format."""
from __future__ import absolute_import
import sys
from ast import literal_eval
from typing import Any, Deque, Dict, List, Optional, TypeVar, Tuple, Union
from collections import deque
import tvm
from . import module
from .base import Span, SourceName
from . import adt
from . import expr
from . import ty
from . import op
PYTHON_VERSION = sys.version_info.major
try:
from .grammar.py3.RelayVisitor import RelayVisitor
from .grammar.py3.RelayParser import RelayParser
from .grammar.py3.RelayLexer import RelayLexer
except ImportError:
raise Exception("Couldn't find ANTLR parser. Try building with USE_ANTLR=ON.")
try:
from antlr4 import InputStream, CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener
except ImportError:
raise Exception("Couldn't find ANTLR runtime." +
"Try running `pip{version} install antlr4-python{version}-runtime`."
.format(version=PYTHON_VERSION))
sys.setrecursionlimit(10000)
class ParseError(Exception):
"""Exception type for parse errors."""
def __init__(self, message: str) -> None:
super(ParseError, self).__init__()
self.message = message
def __repr__(self):
return "ParseError({})".format(self.message)
def __str__(self):
return repr(self)
class OpWrapper:
"""Overload the __call__ for op."""
pass
class ExprOp(OpWrapper):
"""Call an expr. The default, but does not handle attrs well."""
def __init__(self, operator):
self.operator = operator
def __call__(self, args, attrs, type_args):
try:
return expr.Call(self.operator, args, attrs, type_args)
except Exception:
raise Exception("Operator {} is not registered. It's attributes are {}"
.format(self.operator, attrs))
class FuncOp(OpWrapper):
"""Convert the attrs, call the python function with the attrs passed in as keyword arguments.
Tvm should provide this in the future, as this is pretty similar to what op.get is providing.
"""
def __init__(self, operator):
self.operator = operator
def convert(self, v):
if isinstance(v, tuple):
return tuple([self.convert(x) for x in v])
if isinstance(v, expr.Constant):
return v.data.asnumpy().item()
if isinstance(v, str):
return v
raise Exception(v)
def __call__(self, args, attrs, type_args):
if attrs is None:
attrs = {}
x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()})
if isinstance(x, expr.TupleWrapper):
x = x.astuple()
return x
BINARY_OPS = {
RelayParser.MUL: op.multiply,
RelayParser.DIV: op.divide,
RelayParser.ADD: op.add,
RelayParser.SUB: op.subtract,
RelayParser.LT: op.less,
RelayParser.GT: op.greater,
RelayParser.LE: op.less_equal,
RelayParser.GE: op.greater_equal,
RelayParser.EQ: op.equal,
RelayParser.NE: op.not_equal,
}
FUNC_OPS = {
"nn.conv2d": op.nn.conv2d,
"nn.batch_norm": op.nn.batch_norm,
"nn.dense": op.nn.dense,
"nn.bias_add": op.nn.bias_add,
"nn.max_pool2d": op.nn.max_pool2d,
"nn.global_max_pool2d": op.nn.global_max_pool2d,
"nn.avg_pool2d": op.nn.avg_pool2d,
"nn.global_avg_pool2d": op.nn.global_avg_pool2d,
"nn.softmax": op.nn.softmax,
"reshape": op.reshape,
"nn.conv2d_transpose": op.nn.conv2d_transpose,
"concatenate": op.concatenate,
"nn.dropout": op.nn.dropout_raw,
"zeros": op.zeros,
"split": op.split,
"cast": op.cast
}
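# Operator names listed in FUNC_OPS are wrapped in FuncOp, so their parsed attrs are
# passed to the Python helper as keyword arguments; any other operator name falls back
# to ExprOp, which builds a plain Call (see visitOpIdent and visitGeneralIdent below).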
TYPE_PREFIXES = [
"int",
"uint",
"float",
"bool",
]
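# Identifiers beginning with one of these prefixes (for example "int32", "uint8",
# "float16", or "bool") are treated as scalar tensor types in visitGeneralIdent.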
T = TypeVar("T")
Scope = Deque[Tuple[str, T]]
Scopes = Deque[Scope[T]]
def lookup(scopes: Scopes[T], name: str) -> Optional[T]:
"""Look up `name` in `scopes`."""
for scope in scopes:
for key, val in scope:
if key == name:
return val
return None
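# Scopes are searched front-to-back, and new scopes are pushed with appendleft,
# so the innermost (most recently entered) scope shadows outer ones.
# Illustrative only:
#   scopes = deque([deque([("x", inner)]), deque([("x", outer)])])
#   lookup(scopes, "x")  # -> inner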
def spanify(f):
"""A decorator which attaches span information
to the value returned by calling `f`.
Intended for use with the below AST visiting
methods. The idea is that after we do the work
of constructing the AST we attach Span information.
"""
def _wrapper(*args, **kwargs):
# Assumes 0th arg is self and gets source_name from object.
sn = args[0].source_name
# Assumes 1st arg is an ANTLR parser context.
ctx = args[1]
ast = f(*args, **kwargs)
line, col = ctx.getSourceInterval()
sp = Span(sn, line, col)
if isinstance(ast, tvm.relay.expr.TupleWrapper):
ast = ast.astuple()
ast.set_span(sp)
return ast
return _wrapper
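# spanify is applied below to visit methods such as visitVar, visitFunc, visitCall,
# visitIfElse, and visitGraph, so the expressions they construct carry source spans.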
# TODO(@jmp): Use https://stackoverflow.com/q/13889941
# to figure out how to get ANTLR4 to be more unhappy about syntax errors
class ParseTreeToRelayIR(RelayVisitor):
"""Parse Relay text format into Relay IR."""
def __init__(self, source_name: str) -> None:
self.source_name = source_name
self.module = module.Module({}) # type: module.Module
# Adding an empty scope allows naked lets without pain.
self.var_scopes = deque([deque()]) # type: Scopes[expr.Var]
self.global_vars = {} # type: Scope[expr.GlobalVar]
self.type_var_scopes = deque([deque()]) # type: Scopes[ty.TypeVar]
self.global_type_vars = {} # type: Scope[expr.GlobalVar]
self.graph_expr = [] # type: List[expr.Expr]
super(ParseTreeToRelayIR, self).__init__()
def enter_var_scope(self) -> None:
"""Enter a new Var scope so it can be popped off later."""
self.var_scopes.appendleft(deque())
def exit_var_scope(self) -> Scope[expr.Var]:
"""Pop off the current Var scope and return it."""
return self.var_scopes.popleft()
def mk_var(self, name: str, typ: ty.Type = None):
"""Create a new Var and add it to the Var scope."""
var = expr.Var(name, typ)
self.var_scopes[0].appendleft((name, var))
return var
def mk_global_var(self, name: str) -> expr.GlobalVar:
"""Create a new GlobalVar and add it to the GlobalVar scope."""
if name in self.global_vars:
raise ParseError(f"duplicate global var \"{name}\"")
var = expr.GlobalVar(name)
self.global_vars[name] = var
return var
def enter_type_param_scope(self) -> None:
"""Enter a new TypeVar scope so it can be popped off later."""
self.type_var_scopes.appendleft(deque())
def exit_type_param_scope(self) -> Scope[ty.TypeVar]:
"""Pop off the current TypeVar scope and return it."""
return self.type_var_scopes.popleft()
def mk_typ(self, name: str, kind: ty.Kind) -> ty.TypeVar:
"""Create a new TypeVar and add it to the TypeVar scope."""
typ = ty.TypeVar(name, kind)
self.type_var_scopes[0].appendleft((name, typ))
return typ
def mk_global_typ_var(self, name, kind):
        # type: (str, ty.Kind) -> ty.GlobalTypeVar
        """Create a new GlobalTypeVar and add it to the global type-var scope."""
typ = ty.GlobalTypeVar(name, kind)
self._check_existing_typ_expr(name, typ)
self.global_type_vars[name] = typ
return typ
# TODO: rethink whether we should have type constructors mixed with type vars.
def mk_global_typ_cons(self, name, cons):
self._check_existing_typ_expr(name, cons)
self.global_type_vars[name] = cons
def _check_existing_typ_expr(self, name, new_expr):
if name in self.global_type_vars:
new_typ_name = self._type_expr_name(new_expr)
existing_typ_name = self._type_expr_name(self.global_type_vars[name])
raise ParseError(
f"{new_typ_name} `{name}` conflicts with existing {existing_typ_name}")
def _type_expr_name(self, e):
if isinstance(e, adt.Constructor):
return f"`{e.belong_to.var.name}` ADT constructor"
elif isinstance(e, ty.GlobalTypeVar):
if e.kind == ty.Kind.AdtHandle:
return f"ADT definition"
return "function definition"
def visitProjection(self, ctx):
return expr.TupleGetItem(self.visit(ctx.expr()), self.visit(ctx.NAT()))
def visitTerminal(self, node) -> Union[expr.Expr, int, float]:
"""Visit lexer tokens that aren't ignored or visited by other functions."""
node_type = node.getSymbol().type
node_text = node.getText()
if node_type == RelayLexer.NAT:
return int(node_text)
if node_type == RelayLexer.FLOAT:
return float(node_text[:-1])
if node_type == RelayLexer.BOOL_LIT:
if node_text == "True":
return True
if node_text == "False":
return False
raise ParseError("unrecognized BOOL_LIT: `{}`".format(node_text))
if node_type == RelayLexer.QUOTED_STRING:
return literal_eval(node_text)
raise ParseError(f"unhandled terminal \"{node_text}\" of type `{node_type}`")
def visitGeneralIdent(self, ctx):
name = ctx.getText()
# Look through all type prefixes for a match.
for type_prefix in TYPE_PREFIXES:
if name.startswith(type_prefix):
return ty.scalar_type(name)
# Next, look it up in the local then global type params.
type_param = lookup(self.type_var_scopes, name)
if type_param is None:
type_param = self.global_type_vars.get(name, None)
if type_param is not None:
return type_param
# Check if it's an operator.
op_name = ".".join([name.getText() for name in ctx.CNAME()])
if op_name in FUNC_OPS:
return FuncOp(FUNC_OPS[op_name])
return ExprOp(op.get(op_name))
def visitGlobalVar(self, ctx):
var_name = ctx.CNAME().getText()
global_var = self.global_vars.get(var_name, None)
if global_var is None:
raise ParseError(f"unbound global var `{var_name}`")
return global_var
def visitLocalVar(self, ctx):
var_name = ctx.CNAME().getText()
local_var = lookup(self.var_scopes, var_name)
if local_var is None:
raise ParseError(f"unbound local var `{var_name}`")
return local_var
def visitGraphVar(self, ctx):
return self.graph_expr[int(ctx.NAT().getText())]
def visit_list(self, ctx_list) -> List[Any]:
""""Visit a list of contexts."""
# type: RelayParser.ContextParserRuleContext
assert isinstance(ctx_list, list)
return [self.visit(ctx) for ctx in ctx_list]
def getTypeExpr(self, ctx) -> Optional[ty.Type]:
"""Return a (possibly None) Relay type."""
        # type: Optional[RelayParser.Type_Context]
if ctx is None:
return None
return self.visit(ctx)
def visitProg(self, ctx: RelayParser.ProgContext) -> Union[expr.Expr, module.Module]:
self.meta = None
if ctx.METADATA():
header, data = str(ctx.METADATA()).split("\n", 1)
assert header == "METADATA:"
self.meta = tvm.load_json(data)
if ctx.defn():
self.visit_list(ctx.defn())
return self.module
if ctx.expr():
return self.visit(ctx.expr())
return self.module
# Exprs
def visitOpIdent(self, ctx) -> op.Op:
op_name = ".".join([name.getText() for name in ctx.CNAME()])
if op_name in FUNC_OPS:
return FuncOp(FUNC_OPS[op_name])
return ExprOp(op.get(op_name))
# pass through
def visitParen(self, ctx: RelayParser.ParenContext) -> expr.Expr:
return self.visit(ctx.expr())
# pass through
def visitBody(self, ctx: RelayParser.BodyContext) -> expr.Expr:
return self.visit(ctx.expr())
def visitScalarFloat(self, ctx: RelayParser.ScalarFloatContext) -> expr.Constant:
return expr.const(self.visit(ctx.FLOAT()))
def visitScalarInt(self, ctx: RelayParser.ScalarIntContext) -> expr.Constant:
return expr.const(self.visit(ctx.NAT()))
def visitScalarBool(self, ctx: RelayParser.ScalarBoolContext) -> expr.Constant:
return expr.const(self.visit(ctx.BOOL_LIT()))
def visitNeg(self, ctx: RelayParser.NegContext) -> Union[expr.Constant, expr.Call]:
val = self.visit(ctx.expr())
if isinstance(val, expr.Constant) and val.data.asnumpy().ndim == 0:
# fold Neg in for scalars
return expr.const(-val.data.asnumpy().item())
return op.negative(val)
def visitTuple(self, ctx: RelayParser.TupleContext) -> expr.Tuple:
tup = self.visit_list(ctx.expr())
return expr.Tuple(tup)
def visitLet(self, ctx: RelayParser.LetContext) -> expr.Let:
"""Desugar various sequence constructs to Relay Let nodes."""
if ctx.var() is None:
# anonymous identity
ident = "_"
typ = None
var = self.mk_var(ident, typ)
else:
var = self.visitVar(ctx.var())
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
body = self.visit(ctx.expr(1))
return expr.Let(var, value, body)
def visitBinOp(self, ctx: RelayParser.BinOpContext) -> expr.Call:
"""Desugar binary operators."""
arg0, arg1 = self.visit_list(ctx.expr())
relay_op = BINARY_OPS.get(ctx.op.type)
if relay_op is None:
raise ParseError("unimplemented binary op.")
return relay_op(arg0, arg1)
@spanify
def visitVar(self, ctx: RelayParser.VarContext) -> expr.Var:
"""Visit a single variable."""
ident = ctx.localVar()
if ident is None:
raise ParseError("only local ids may be used in vars.")
typeExpr = self.getTypeExpr(ctx.typeExpr())
return self.mk_var(ident.getText()[1:], typeExpr)
def visitVarList(self, ctx: RelayParser.VarListContext) -> List[expr.Var]:
return self.visit_list(ctx.var())
# TODO: support a larger class of values than just Relay exprs
def visitAttr(self, ctx: RelayParser.AttrContext) -> Tuple[str, expr.Expr]:
return (ctx.CNAME().getText(), self.visit(ctx.expr()))
def visitArgNoAttr(self, ctx: RelayParser.ArgNoAttrContext):
return (self.visit_list(ctx.varList().var()), None)
def visitAttrSeq(self, ctx: RelayParser.AttrSeqContext) -> Dict[str, expr.Expr]:
return dict(self.visit_list(ctx.attr()))
def visitArgWithAttr(self, ctx: RelayParser.AttrSeqContext) \
-> Tuple[List[expr.Var], Dict[str, expr.Expr]]:
return (self.visit_list(ctx.var()), self.visitAttrSeq(ctx.attrSeq()))
def visitArgList(self, ctx: RelayParser.ArgListContext) \
-> Tuple[Optional[List[expr.Var]], Optional[Dict[str, expr.Expr]]]:
var_list = self.visit(ctx.varList()) if ctx.varList() else None
attr_list = self.visit(ctx.attrList()) if ctx.attrList() else None
return (var_list, attr_list)
def visitMeta(self, ctx: RelayParser.MetaContext):
type_key = str(ctx.CNAME())
index = int(self.visit(ctx.NAT()))
return self.meta[type_key][index]
def mk_func(
self,
ctx: Union[RelayParser.FuncContext, RelayParser.DefnContext]) \
-> expr.Function:
"""Construct a function from either a Func or Defn."""
# Enter var scope early to put params in scope.
self.enter_var_scope()
# Capture type params in params.
self.enter_type_param_scope()
type_params = ctx.typeParamList()
if type_params is not None:
type_params = type_params.generalIdent()
assert type_params
for ty_param in type_params:
name = ty_param.getText()
self.mk_typ(name, ty.Kind.Type)
var_list, attr_list = self.visit(ctx.argList())
if var_list is None:
var_list = []
ret_type = self.getTypeExpr(ctx.typeExpr())
body = self.visit(ctx.body())
        # NB(@jroesch): you must stay in the type parameter scope until
        # after you exit the body, so that you can still reference the
        # type parameters of your parent scopes.
type_params = list(self.exit_type_param_scope())
if type_params:
_, type_params = zip(*type_params)
self.exit_var_scope()
attrs = tvm.make.node("DictAttrs", **attr_list) if attr_list is not None else None
return expr.Function(var_list, body, ret_type, type_params, attrs)
@spanify
def visitFunc(self, ctx: RelayParser.FuncContext) -> expr.Function:
return self.mk_func(ctx)
# TODO: how to set spans for definitions?
# @spanify
def visitFuncDefn(self, ctx: RelayParser.DefnContext) -> None:
ident_name = ctx.globalVar().getText()[1:]
ident = self.mk_global_var(ident_name)
self.module[ident] = self.mk_func(ctx)
def handle_adt_header(
self,
ctx: Union[RelayParser.ExternAdtDefnContext, RelayParser.AdtDefnContext]):
"""Handles parsing of the name and type params of an ADT definition."""
adt_name = ctx.generalIdent().getText()
adt_var = self.mk_global_typ_var(adt_name, ty.Kind.AdtHandle)
# parse type params
type_params = ctx.typeParamList()
if type_params is None:
type_params = []
else:
type_params = [self.mk_typ(type_ident.getText(), ty.Kind.Type)
for type_ident in type_params.generalIdent()]
return adt_var, type_params
def visitExternAdtDefn(self, ctx: RelayParser.ExternAdtDefnContext):
# TODO(weberlo): update this handler once extern is implemented
self.enter_type_param_scope()
adt_var, type_params = self.handle_adt_header(ctx)
# update module being built
self.module[adt_var] = adt.TypeData(adt_var, type_params, [])
self.exit_type_param_scope()
def visitAdtDefn(self, ctx: RelayParser.AdtDefnContext):
self.enter_type_param_scope()
adt_var, type_params = self.handle_adt_header(ctx)
# parse constructors
adt_cons_defns = ctx.adtConsDefnList()
if adt_cons_defns is None:
adt_cons_defns = []
else:
adt_cons_defns = adt_cons_defns.adtConsDefn()
parsed_constructors = []
for cons_defn in adt_cons_defns:
inputs = [self.visit(inp) for inp in cons_defn.typeExpr()]
cons_defn_name = cons_defn.constructorName().getText()
cons_defn = adt.Constructor(cons_defn_name, inputs, adt_var)
self.mk_global_typ_cons(cons_defn_name, cons_defn)
parsed_constructors.append(cons_defn)
# update module being built
self.module[adt_var] = adt.TypeData(adt_var, type_params, parsed_constructors)
self.exit_type_param_scope()
def visitMatch(self, ctx: RelayParser.MatchContext):
match_type = ctx.matchType().getText()
if match_type == "match":
complete_match = True
elif match_type == "match?":
complete_match = False
else:
raise RuntimeError(f"unknown match type {match_type}")
# TODO: Will need some kind of type checking to know which ADT is being
# matched on.
match_data = self.visit(ctx.expr())
match_clauses = ctx.matchClauseList()
if match_clauses is None:
match_clauses = []
else:
match_clauses = match_clauses.matchClause()
parsed_clauses = []
for clause in match_clauses:
constructor_name = clause.constructorName().getText()
constructor = self.global_type_vars[constructor_name]
self.enter_var_scope()
patternList = clause.patternList()
if patternList is None:
patterns = []
else:
patterns = [self.visit(pattern) for pattern in patternList.pattern()]
clause_body = self.visit(clause.expr())
self.exit_var_scope()
# TODO: Do we need to pass `None` if it's a 0-arity cons, or is an empty list fine?
parsed_clauses.append(adt.Clause(
adt.PatternConstructor(
constructor,
patterns
),
clause_body
))
return adt.Match(match_data, parsed_clauses, complete=complete_match)
def visitPattern(self, ctx: RelayParser.PatternContext):
text = ctx.getText()
if text == "_":
return adt.PatternWildcard()
elif text.startswith("%"):
text = ctx.localVar().getText()
typ = ctx.typeExpr()
if typ is not None:
typ = self.visit(typ)
var = self.mk_var(text[1:], typ=typ)
return adt.PatternVar(var)
else:
raise ParseError(f"invalid pattern syntax \"{text}\"")
def visitCallNoAttr(self, ctx: RelayParser.CallNoAttrContext):
return (self.visit_list(ctx.exprList().expr()), None)
def visitCallWithAttr(self, ctx: RelayParser.CallWithAttrContext):
return (self.visit_list(ctx.expr()), self.visit(ctx.attrSeq()))
def call(self, func, args, attrs, type_args):
if isinstance(func, OpWrapper):
return func(args, attrs, type_args)
elif isinstance(func, adt.Constructor):
return func(*args)
return expr.Call(func, args, attrs, type_args)
@spanify
def visitCall(self, ctx: RelayParser.CallContext):
# type: (RelayParser.CallContext) -> expr.Call
func = self.visit(ctx.expr())
args, attrs = self.visit(ctx.callList())
res = self.call(func, args, attrs, [])
return res
@spanify
def visitIfElse(self, ctx: RelayParser.IfElseContext):
# type: (RelayParser.IfElseContext) -> expr.If
"""Construct a Relay If node. Creates a new scope for each branch."""
cond = self.visit(ctx.expr())
self.enter_var_scope()
true_branch = self.visit(ctx.body(0))
self.exit_var_scope()
self.enter_var_scope()
false_branch = self.visit(ctx.body(1))
self.exit_var_scope()
return expr.If(cond, true_branch, false_branch)
@spanify
def visitGraph(self, ctx: RelayParser.GraphContext):
# type: (RelayParser.GraphContext) -> expr.Expr
"""Visit a graph variable assignment."""
graph_nid = int(ctx.graphVar().getText()[1:])
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
if graph_nid != len(self.graph_expr):
raise ParseError(
"expected new graph variable to be `%{}`,".format(len(self.graph_expr)) + \
"but got `%{}`".format(graph_nid))
self.graph_expr.append(value)
kont = self.visit(ctx.expr(1))
return kont
# Types
# pylint: disable=unused-argument
def visitIncompleteType(self, ctx: RelayParser.IncompleteTypeContext):
        # type: (RelayParser.IncompleteTypeContext) -> None
return None
def visitTypeCallType(self, ctx: RelayParser.TypeCallTypeContext):
func = self.visit(ctx.generalIdent())
args = [self.visit(arg) for arg in ctx.typeParamList().generalIdent()]
return ty.TypeCall(func, args)
def visitParensShape(self, ctx: RelayParser.ParensShapeContext):
# type: (RelayParser.ParensShapeContext) -> int
return self.visit(ctx.shape())
def visitShapeList(self, ctx: RelayParser.ShapeListContext):
# type: (RelayParser.ShapeListContext) -> List[int]
return self.visit_list(ctx.shape())
def visitTensor(self, ctx: RelayParser.TensorContext):
return tuple(self.visit_list(ctx.expr()))
def visitTensorType(self, ctx: RelayParser.TensorTypeContext):
# type: (RelayParser.TensorTypeContext) -> ty.TensorType
"""Create a simple tensor type. No generics."""
shape = self.visit(ctx.shapeList())
dtype = self.visit(ctx.typeExpr())
if not isinstance(dtype, ty.TensorType):
raise ParseError("expected dtype to be a Relay base type.")
dtype = dtype.dtype
return ty.TensorType(shape, dtype)
def visitTupleType(self, ctx: RelayParser.TupleTypeContext):
# type: (RelayParser.TupleTypeContext) -> ty.TupleType
return ty.TupleType(self.visit_list(ctx.typeExpr()))
def visitFuncType(self, ctx: RelayParser.FuncTypeContext):
# type: (RelayParser.FuncTypeContext) -> ty.FuncType
types = self.visit_list(ctx.typeExpr())
arg_types = types[:-1]
ret_type = types[-1]
return ty.FuncType(arg_types, ret_type, [], None)
def make_parser(data):
# type: (str) -> RelayParser
"""Construct a RelayParser a given data stream."""
input_stream = InputStream(data)
lexer = RelayLexer(input_stream)
lexer.addErrorListener(StrictErrorListener(data))
token_stream = CommonTokenStream(lexer)
p = RelayParser(token_stream)
p.addErrorListener(StrictErrorListener(data))
return p
__source_name_counter__ = 0
class StrictErrorListener(ErrorListener):
"""This ErrorListener fail eagerly on all error, and report the program."""
def __init__(self, text):
self.text = text
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise Exception("Syntax Error in:\n" + self.text)
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
raise Exception("Ambiguity Error in:\n" + self.text)
def reportAttemptingFullContext(self,
recognizer,
dfa,
startIndex,
stopIndex,
conflictingAlts,
configs):
raise Exception("Attempting Full Context in:\n" + self.text)
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
raise Exception("Context Sensitivity in:\n" + self.text)
def fromtext(data, source_name=None):
# type: (str, str) -> Union[expr.Expr, module.Module]
"""Parse a Relay program."""
if data == "":
raise ParseError("cannot parse the empty string.")
global __source_name_counter__
if source_name is None:
source_name = "source_file{0}".format(__source_name_counter__)
if isinstance(source_name, str):
source_name = SourceName(source_name)
tree = make_parser(data).prog()
return ParseTreeToRelayIR(source_name).visit(tree)
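# Example usage (a minimal sketch; the program text is hypothetical and assumes the
# ANTLR grammar has been generated):
#
#   func = fromtext("fn (%x: Tensor[(10,), float32]) { %x }")
#
# A bare expression parses to that expression, while a program consisting of
# definitions returns the populated module instead (see visitProg).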
|
|
from datetime import (
datetime,
timedelta,
)
import inspect
import numpy as np
import pytest
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
date_range,
isna,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
import pandas.core.common as com
class TestReindexSetIndex:
# Tests that check both reindex and set_index
def test_dti_set_index_reindex_datetimeindex(self):
# GH#6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
def test_dti_set_index_reindex_freq_with_tz(self):
# GH#11314 with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
def test_set_reset_index_intervalindex(self):
df = DataFrame({"A": range(10)})
ser = pd.cut(df.A, 5)
df["B"] = ser
df = df.set_index("B")
df = df.reset_index()
def test_setitem_reset_index_dtypes(self):
# GH 22060
df = DataFrame(columns=["a", "b", "c"]).astype(
{"a": "datetime64[ns]", "b": np.int64, "c": np.float64}
)
df1 = df.set_index(["a"])
df1["d"] = []
result = df1.reset_index()
expected = DataFrame(columns=["a", "b", "c", "d"], index=range(0)).astype(
{"a": "datetime64[ns]", "b": np.int64, "c": np.float64, "d": np.float64}
)
tm.assert_frame_equal(result, expected)
df2 = df.set_index(["a", "b"])
df2["d"] = []
result = df2.reset_index()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"timezone, year, month, day, hour",
[["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]],
)
def test_reindex_timestamp_with_fold(self, timezone, year, month, day, hour):
# see gh-40817
test_timezone = gettz(timezone)
transition_1 = pd.Timestamp(
year=year,
month=month,
day=day,
hour=hour,
minute=0,
fold=0,
tzinfo=test_timezone,
)
transition_2 = pd.Timestamp(
year=year,
month=month,
day=day,
hour=hour,
minute=0,
fold=1,
tzinfo=test_timezone,
)
df = (
DataFrame({"index": [transition_1, transition_2], "vals": ["a", "b"]})
.set_index("index")
.reindex(["1", "2"])
)
tm.assert_frame_equal(
df,
DataFrame({"index": ["1", "2"], "vals": [None, None]}).set_index("index"),
)
class TestDataFrameSelectReindex:
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
def test_reindex_copies(self):
# based on asv time_reindex_axis1
N = 10
df = DataFrame(np.random.randn(N * 10, N))
cols = np.arange(N)
np.random.shuffle(cols)
result = df.reindex(columns=cols, copy=True)
assert not np.shares_memory(result[0]._values, df[0]._values)
# pass both columns and index
result2 = df.reindex(columns=cols, index=df.index, copy=True)
assert not np.shares_memory(result2[0]._values, df[0]._values)
@td.skip_array_manager_not_yet_implemented
def test_reindex_date_fill_value(self):
# passing date to dt64 is deprecated
arr = date_range("2016-01-01", periods=6).values.reshape(3, 2)
df = DataFrame(arr, columns=["A", "B"], index=range(3))
ts = df.iloc[0, 0]
fv = ts.date()
with tm.assert_produces_warning(FutureWarning):
res = df.reindex(index=range(4), columns=["A", "B", "C"], fill_value=fv)
expected = DataFrame(
{"A": df["A"].tolist() + [ts], "B": df["B"].tolist() + [ts], "C": [ts] * 4}
)
tm.assert_frame_equal(res, expected)
# only reindexing rows
with tm.assert_produces_warning(FutureWarning):
res = df.reindex(index=range(4), fill_value=fv)
tm.assert_frame_equal(res, expected[["A", "B"]])
# same with a datetime-castable str
res = df.reindex(
index=range(4), columns=["A", "B", "C"], fill_value="2016-01-01"
)
tm.assert_frame_equal(res, expected)
def test_reindex_with_multi_index(self):
# https://github.com/pandas-dev/pandas/issues/29896
# tests for reindexing a multi-indexed DataFrame with a new MultiIndex
#
# confirms that we can reindex a multi-indexed DataFrame with a new
# MultiIndex object correctly when using no filling, backfilling, and
# padding
#
# The DataFrame, `df`, used in this test is:
# c
# a b
# -1 0 A
# 1 B
# 2 C
# 3 D
# 4 E
# 5 F
# 6 G
# 0 0 A
# 1 B
# 2 C
# 3 D
# 4 E
# 5 F
# 6 G
# 1 0 A
# 1 B
# 2 C
# 3 D
# 4 E
# 5 F
# 6 G
#
# and the other MultiIndex, `new_multi_index`, is:
# 0: 0 0.5
# 1: 2.0
# 2: 5.0
# 3: 5.8
df = DataFrame(
{
"a": [-1] * 7 + [0] * 7 + [1] * 7,
"b": list(range(7)) * 3,
"c": ["A", "B", "C", "D", "E", "F", "G"] * 3,
}
).set_index(["a", "b"])
new_index = [0.5, 2.0, 5.0, 5.8]
new_multi_index = MultiIndex.from_product([[0], new_index], names=["a", "b"])
# reindexing w/o a `method` value
reindexed = df.reindex(new_multi_index)
expected = DataFrame(
{"a": [0] * 4, "b": new_index, "c": [np.nan, "C", "F", np.nan]}
).set_index(["a", "b"])
tm.assert_frame_equal(expected, reindexed)
# reindexing with backfilling
expected = DataFrame(
{"a": [0] * 4, "b": new_index, "c": ["B", "C", "F", "G"]}
).set_index(["a", "b"])
reindexed_with_backfilling = df.reindex(new_multi_index, method="bfill")
tm.assert_frame_equal(expected, reindexed_with_backfilling)
reindexed_with_backfilling = df.reindex(new_multi_index, method="backfill")
tm.assert_frame_equal(expected, reindexed_with_backfilling)
# reindexing with padding
expected = DataFrame(
{"a": [0] * 4, "b": new_index, "c": ["A", "C", "F", "F"]}
).set_index(["a", "b"])
reindexed_with_padding = df.reindex(new_multi_index, method="pad")
tm.assert_frame_equal(expected, reindexed_with_padding)
reindexed_with_padding = df.reindex(new_multi_index, method="ffill")
tm.assert_frame_equal(expected, reindexed_with_padding)
@pytest.mark.parametrize(
"method,expected_values",
[
("nearest", [0, 1, 1, 2]),
("pad", [np.nan, 0, 1, 1]),
("backfill", [0, 1, 2, 2]),
],
)
def test_reindex_methods(self, method, expected_values):
df = DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = DataFrame({"x": expected_values}, index=target)
actual = df.reindex(target, method=method)
tm.assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=1)
tm.assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
tm.assert_frame_equal(expected, actual)
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
tm.assert_frame_equal(e2, actual)
new_order = [3, 0, 2, 1]
e2 = expected.iloc[new_order]
actual = df.reindex(target[new_order], method=method)
tm.assert_frame_equal(e2, actual)
switched_method = (
"pad" if method == "backfill" else "backfill" if method == "pad" else method
)
actual = df[::-1].reindex(target, method=switched_method)
tm.assert_frame_equal(expected, actual)
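    # Reading the parametrization above: target label -0.1 precedes every label in
    # df.index, so method="pad" leaves it NaN, "backfill" takes the next label (0),
    # and "nearest" snaps each target to the closest existing label.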
def test_reindex_methods_nearest_special(self):
df = DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = DataFrame({"x": [0, 1, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest", tolerance=0.2)
tm.assert_frame_equal(expected, actual)
expected = DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1])
tm.assert_frame_equal(expected, actual)
def test_reindex_nearest_tz(self, tz_aware_fixture):
# GH26683
tz = tz_aware_fixture
idx = date_range("2019-01-01", periods=5, tz=tz)
df = DataFrame({"x": list(range(5))}, index=idx)
expected = df.head(3)
actual = df.reindex(idx[:3], method="nearest")
tm.assert_frame_equal(expected, actual)
def test_reindex_nearest_tz_empty_frame(self):
# https://github.com/pandas-dev/pandas/issues/31964
dti = pd.DatetimeIndex(["2016-06-26 14:27:26+00:00"])
df = DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"]))
expected = DataFrame(index=dti)
result = df.reindex(dti, method="nearest")
tm.assert_frame_equal(result, expected)
def test_reindex_frame_add_nat(self):
rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s")
df = DataFrame({"A": np.random.randn(len(rng)), "B": rng})
result = df.reindex(range(15))
assert np.issubdtype(result["B"].dtype, np.dtype("M8[ns]"))
mask = com.isna(result)["B"]
assert mask[-5:].all()
assert not mask[:-5].any()
@pytest.mark.parametrize(
"method, exp_values",
[("ffill", [0, 1, 2, 3]), ("bfill", [1.0, 2.0, 3.0, np.nan])],
)
def test_reindex_frame_tz_ffill_bfill(self, frame_or_series, method, exp_values):
# GH#38566
obj = frame_or_series(
[0, 1, 2, 3],
index=date_range("2020-01-01 00:00:00", periods=4, freq="H", tz="UTC"),
)
new_index = date_range("2020-01-01 00:01:00", periods=4, freq="H", tz="UTC")
result = obj.reindex(new_index, method=method, tolerance=pd.Timedelta("1 hour"))
expected = frame_or_series(exp_values, index=new_index)
tm.assert_equal(result, expected)
def test_reindex_limit(self):
# GH 28631
data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]]
exp_data = [
["A", "A", "A"],
["B", "B", "B"],
["C", "C", "C"],
["D", "D", "D"],
["D", "D", "D"],
[np.nan, np.nan, np.nan],
]
df = DataFrame(data)
result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1)
expected = DataFrame(exp_data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"idx, check_index_type",
[
[["C", "B", "A"], True],
[["F", "C", "A", "D"], True],
[["A"], True],
[["A", "B", "C"], True],
[["C", "A", "B"], True],
[["C", "B"], True],
[["C", "A"], True],
[["A", "B"], True],
[["B", "A", "C"], True],
# reindex by these causes different MultiIndex levels
[["D", "F"], False],
[["A", "C", "B"], False],
],
)
def test_reindex_level_verify_first_level(self, idx, check_index_type):
df = DataFrame(
{
"jim": list("B" * 4 + "A" * 2 + "C" * 3),
"joe": list("abcdeabcd")[::-1],
"jolie": [10, 20, 30] * 3,
"joline": np.random.randint(0, 1000, 9),
}
)
icol = ["jim", "joe", "jolie"]
def f(val):
return np.nonzero((df["jim"] == val).to_numpy())[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level="jim")
right = df.iloc[i].set_index(icol)
tm.assert_frame_equal(left, right, check_index_type=check_index_type)
@pytest.mark.parametrize(
"idx",
[
("mid",),
("mid", "btm"),
("mid", "btm", "top"),
("mid",),
("mid", "top"),
("mid", "top", "btm"),
("btm",),
("btm", "mid"),
("btm", "mid", "top"),
("btm",),
("btm", "top"),
("btm", "top", "mid"),
("top",),
("top", "mid"),
("top", "mid", "btm"),
("top",),
("top", "btm"),
("top", "btm", "mid"),
],
)
def test_reindex_level_verify_first_level_repeats(self, idx):
df = DataFrame(
{
"jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7,
"joe": ["3rd"] * 2
+ ["1st"] * 3
+ ["2nd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["2nd"] * 2,
                # this needs to be jointly unique with jim and joe, or
                # reindexing will fail ~1.5% of the time; this works
                # out to needing unique groups of the same size as joe
"jolie": np.concatenate(
[
np.random.choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]
]
),
"joline": np.random.randn(20).round(3) * 10,
}
)
icol = ["jim", "joe", "jolie"]
def f(val):
return np.nonzero((df["jim"] == val).to_numpy())[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level="jim")
right = df.iloc[i].set_index(icol)
tm.assert_frame_equal(left, right)
@pytest.mark.parametrize(
"idx, indexer",
[
[
["1st", "2nd", "3rd"],
[2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10, 11, 12, 13, 14, 18, 19, 15, 16, 17],
],
[
["3rd", "2nd", "1st"],
[0, 1, 2, 3, 4, 10, 11, 12, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 13, 14],
],
[["2nd", "3rd"], [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]],
[["3rd", "1st"], [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]],
],
)
def test_reindex_level_verify_repeats(self, idx, indexer):
df = DataFrame(
{
"jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7,
"joe": ["3rd"] * 2
+ ["1st"] * 3
+ ["2nd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["2nd"] * 2,
                # this needs to be jointly unique with jim and joe, or
                # reindexing will fail ~1.5% of the time; this works
                # out to needing unique groups of the same size as joe
"jolie": np.concatenate(
[
np.random.choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]
]
),
"joline": np.random.randn(20).round(3) * 10,
}
)
icol = ["jim", "joe", "jolie"]
left = df.set_index(icol).reindex(idx, level="joe")
right = df.iloc[indexer].set_index(icol)
tm.assert_frame_equal(left, right)
@pytest.mark.parametrize(
"idx, indexer, check_index_type",
[
[list("abcde"), [3, 2, 1, 0, 5, 4, 8, 7, 6], True],
[list("abcd"), [3, 2, 1, 0, 5, 8, 7, 6], True],
[list("abc"), [3, 2, 1, 8, 7, 6], True],
[list("eca"), [1, 3, 4, 6, 8], True],
[list("edc"), [0, 1, 4, 5, 6], True],
[list("eadbc"), [3, 0, 2, 1, 4, 5, 8, 7, 6], True],
[list("edwq"), [0, 4, 5], True],
[list("wq"), [], False],
],
)
def test_reindex_level_verify(self, idx, indexer, check_index_type):
df = DataFrame(
{
"jim": list("B" * 4 + "A" * 2 + "C" * 3),
"joe": list("abcdeabcd")[::-1],
"jolie": [10, 20, 30] * 3,
"joline": np.random.randint(0, 1000, 9),
}
)
icol = ["jim", "joe", "jolie"]
left = df.set_index(icol).reindex(idx, level="joe")
right = df.iloc[indexer].set_index(icol)
tm.assert_frame_equal(left, right, check_index_type=check_index_type)
def test_non_monotonic_reindex_methods(self):
dr = date_range("2013-08-01", periods=6, freq="B")
data = np.random.randn(6, 1)
df = DataFrame(data, index=dr, columns=list("A"))
df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A"))
# index is not monotonic increasing or decreasing
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="pad")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="ffill")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="bfill")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="nearest")
def test_reindex_sparse(self):
# https://github.com/pandas-dev/pandas/issues/35286
df = DataFrame(
{"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))}
)
result = df.reindex([0, 2])
expected = DataFrame(
{
"A": [0.0, np.nan],
"B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)),
},
index=[0, 2],
)
tm.assert_frame_equal(result, expected)
def test_reindex(self, float_frame):
datetime_series = tm.makeTimeSeries(nper=30)
newFrame = float_frame.reindex(datetime_series.index)
for col in newFrame.columns:
for idx, val in newFrame[col].items():
if idx in float_frame.index:
if np.isnan(val):
assert np.isnan(float_frame[col][idx])
else:
assert val == float_frame[col][idx]
else:
assert np.isnan(val)
for col, series in newFrame.items():
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = float_frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = float_frame.reindex(datetime_series.index[::2])
for col in nonContigFrame.columns:
for idx, val in nonContigFrame[col].items():
if idx in float_frame.index:
if np.isnan(val):
assert np.isnan(float_frame[col][idx])
else:
assert val == float_frame[col][idx]
else:
assert np.isnan(val)
for col, series in nonContigFrame.items():
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
# Same index, copies values but not index if copy=False
newFrame = float_frame.reindex(float_frame.index, copy=False)
assert newFrame.index is float_frame.index
# length zero
newFrame = float_frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(float_frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = float_frame.reindex([])
newFrame = newFrame.reindex(float_frame.index)
assert len(newFrame.index) == len(float_frame.index)
assert len(newFrame.columns) == len(float_frame.columns)
# pass non-Index
newFrame = float_frame.reindex(list(datetime_series.index))
expected = datetime_series.index._with_freq(None)
tm.assert_index_equal(newFrame.index, expected)
# copy with no axes
result = float_frame.reindex()
tm.assert_frame_equal(result, float_frame)
assert result is not float_frame
def test_reindex_nan(self):
df = DataFrame(
[[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=["joe", "jim"],
)
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
tm.assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype("object")
tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = DataFrame(
{
"other": ["a", "b", np.nan, "c"],
"date": ["2015-03-22", np.nan, "2012-01-08", np.nan],
"amount": [2, 3, 4, 5],
}
)
df["date"] = pd.to_datetime(df.date)
df["delta"] = (pd.to_datetime("2015-06-18") - df["date"]).shift(1)
left = df.set_index(["delta", "other", "date"]).reset_index()
right = df.reindex(columns=["delta", "other", "date", "amount"])
tm.assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(np.random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name="iname")
df = df.reindex(i)
assert df.index.name == "iname"
df = df.reindex(Index(np.arange(10), name="tmpname"))
assert df.index.name == "tmpname"
s = Series(np.random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name="iname")
df = df.reindex(columns=i)
assert df.columns.name == "iname"
def test_reindex_int(self, int_frame):
smaller = int_frame.reindex(int_frame.index[::2])
assert smaller["A"].dtype == np.int64
bigger = smaller.reindex(int_frame.index)
assert bigger["A"].dtype == np.float64
smaller = int_frame.reindex(columns=["A", "B"])
assert smaller["A"].dtype == np.int64
def test_reindex_columns(self, float_frame):
new_frame = float_frame.reindex(columns=["A", "B", "E"])
tm.assert_series_equal(new_frame["B"], float_frame["B"])
assert np.isnan(new_frame["E"]).all()
assert "C" not in new_frame
# Length zero
new_frame = float_frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(
data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float,
)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(
data=[
[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method="ffill")
expected = DataFrame(
data=[
[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method="bfill")
expected = DataFrame(
data=[
[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(
np.ones((3, 3)),
index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)],
columns=["a", "b", "c"],
)
time_freq = date_range("2012-01-01", "2012-01-03", freq="d")
some_cols = ["a", "b"]
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(list(range(15)))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
tm.assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=range(5), fill_value=0.0)
expected = df.copy()
expected[4] = 0.0
tm.assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value=0)
expected = df.copy()
expected[4] = 0
tm.assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value="foo")
expected = df.copy()
expected[4] = "foo"
tm.assert_frame_equal(result, expected)
# other dtypes
df["foo"] = "foo"
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
tm.assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
tm.assert_frame_equal(result, expected)
# reindex fails
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
df.reindex(index=list(range(len(df))))
def test_reindex_with_duplicate_columns(self):
# reindex is invalid!
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
df.reindex(columns=["bar"])
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
df.reindex(columns=["bar", "foo"])
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = DataFrame(
{"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3]
)
result = df.reindex([0, 1, 3])
tm.assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
tm.assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis="index")
tm.assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = DataFrame({"A": [1.0, 2], "B": [4.0, 5], "C": [np.nan, np.nan]})
with tm.assert_produces_warning(FutureWarning):
result = df.reindex([0, 1], ["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ["A"], axis=1)
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ["A"], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify all"):
df.reindex([0, 1], [0], ["A"])
# Mixing styles
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="columns")
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=["A"])
expected = DataFrame({"A": [1, 2]})
tm.assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
with tm.assert_produces_warning(FutureWarning) as m:
res1 = df.reindex(["b", "a"], ["e", "d"])
assert "reindex" in str(m[0].message)
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_reindex_boolean(self):
frame = DataFrame(
np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2]
)
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=range(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self, float_string_frame):
reindexed = float_string_frame.reindex(columns=["foo", "A", "B"])
assert "foo" in reindexed
reindexed = float_string_frame.reindex(columns=["A", "B"])
assert "foo" not in reindexed
def test_reindex_corner(self, int_frame):
index = Index(["a", "b", "c"])
dm = DataFrame({}).reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
        # a newly introduced column is all-NaN, so it comes back as float64 even on an int frame
smaller = int_frame.reindex(columns=["A", "B", "E"])
assert smaller["E"].dtype == np.float64
def test_reindex_with_nans(self):
df = DataFrame(
[[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=["a", "b"],
index=[100.0, 101.0, np.nan, 102.0, 103.0],
)
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
tm.assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
tm.assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
tm.assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=range(2), columns=range(2))
expected = df.reindex(range(2)).reindex(columns=range(2))
tm.assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=["a", "b", "c"])
result = df.reindex(index=[0, 1], columns=["a", "b"])
expected = df.reindex([0, 1]).reindex(columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_reindex_multi_categorical_time(self):
# https://github.com/pandas-dev/pandas/issues/21390
midx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(date_range("2012-01-01", periods=3, freq="H")),
]
)
df = DataFrame({"a": range(len(midx))}, index=midx)
df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
result = df2.reindex(midx)
expected = DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
tm.assert_frame_equal(result, expected)
def test_reindex_with_categoricalindex(self):
df = DataFrame(
{
"A": np.arange(3, dtype="int64"),
},
index=CategoricalIndex(list("abc"), dtype=CDT(list("cabe")), name="B"),
)
# reindexing
# convert to a regular index
result = df.reindex(["a", "b", "e"])
expected = DataFrame({"A": [0, 1, np.nan], "B": Series(list("abe"))}).set_index(
"B"
)
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["a", "b"])
expected = DataFrame({"A": [0, 1], "B": Series(list("ab"))}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["e"])
expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["d"])
expected = DataFrame({"A": [np.nan], "B": Series(["d"])}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
# since we are actually reindexing with a Categorical
# then return a Categorical
cats = list("cabe")
result = df.reindex(Categorical(["a", "e"], categories=cats))
expected = DataFrame(
{"A": [0, np.nan], "B": Series(list("ae")).astype(CDT(cats))}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(Categorical(["a"], categories=cats))
expected = DataFrame(
{"A": [0], "B": Series(list("a")).astype(CDT(cats))}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["a", "b", "e"])
expected = DataFrame({"A": [0, 1, np.nan], "B": Series(list("abe"))}).set_index(
"B"
)
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["a", "b"])
expected = DataFrame({"A": [0, 1], "B": Series(list("ab"))}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["e"])
expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
# give back the type of categorical that we received
result = df.reindex(Categorical(["a", "e"], categories=cats, ordered=True))
expected = DataFrame(
{"A": [0, np.nan], "B": Series(list("ae")).astype(CDT(cats, ordered=True))}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(Categorical(["a", "d"], categories=["a", "d"]))
expected = DataFrame(
{"A": [0, np.nan], "B": Series(list("ad")).astype(CDT(["a", "d"]))}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
df2 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cabe")), name="B"),
)
# passed duplicate indexers are not allowed
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
df2.reindex(["a", "b"])
# args NotImplemented ATM
msg = r"argument {} is not implemented for CategoricalIndex\.reindex"
with pytest.raises(NotImplementedError, match=msg.format("method")):
df.reindex(["a"], method="ffill")
with pytest.raises(NotImplementedError, match=msg.format("level")):
df.reindex(["a"], level=1)
with pytest.raises(NotImplementedError, match=msg.format("limit")):
df.reindex(["a"], limit=2)
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
def test_reindex_multiindex_ffill_added_rows(self):
# GH#23693
# reindex added rows with nan values even when fill method was specified
mi = MultiIndex.from_tuples([("a", "b"), ("d", "e")])
df = DataFrame([[0, 7], [3, 4]], index=mi, columns=["x", "y"])
mi2 = MultiIndex.from_tuples([("a", "b"), ("d", "e"), ("h", "i")])
result = df.reindex(mi2, axis=0, method="ffill")
expected = DataFrame([[0, 7], [3, 4], [3, 4]], index=mi2, columns=["x", "y"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs",
[
{"method": "pad", "tolerance": timedelta(seconds=9)},
{"method": "backfill", "tolerance": timedelta(seconds=9)},
{"method": "nearest"},
{"method": None},
],
)
def test_reindex_empty_frame(self, kwargs):
# GH#27315
idx = date_range(start="2020", freq="30s", periods=3)
df = DataFrame([], index=Index([], name="time"), columns=["a"])
result = df.reindex(idx, **kwargs)
expected = DataFrame({"a": [pd.NA] * 3}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"src_idx",
[
Index([]),
CategoricalIndex([]),
],
)
@pytest.mark.parametrize(
"cat_idx",
[
# No duplicates
Index([]),
CategoricalIndex([]),
Index(["A", "B"]),
CategoricalIndex(["A", "B"]),
# Duplicates: GH#38906
Index(["A", "A"]),
CategoricalIndex(["A", "A"]),
],
)
def test_reindex_empty(self, src_idx, cat_idx):
df = DataFrame(columns=src_idx, index=["K"], dtype="f8")
result = df.reindex(columns=cat_idx)
expected = DataFrame(index=["K"], columns=cat_idx, dtype="f8")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"])
def test_reindex_datetimelike_to_object(self, dtype):
        # GH#39755 don't cast dt64/td64 to ints
mi = MultiIndex.from_product([list("ABCDE"), range(2)])
dti = date_range("2016-01-01", periods=10)
fv = np.timedelta64("NaT", "ns")
if dtype == "m8[ns]":
dti = dti - dti[0]
fv = np.datetime64("NaT", "ns")
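        # fv deliberately uses the *other* NaT type, so filling cannot stay in the original
        # dt64/td64 dtype and the reindexed columns must become object (GH#39755)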
ser = Series(dti, index=mi)
ser[::3] = pd.NaT
df = ser.unstack()
index = df.index.append(Index([1]))
columns = df.columns.append(Index(["foo"]))
res = df.reindex(index=index, columns=columns, fill_value=fv)
expected = DataFrame(
{
0: df[0].tolist() + [fv],
1: df[1].tolist() + [fv],
"foo": np.array(["NaT"] * 6, dtype=fv.dtype),
},
index=index,
)
assert (res.dtypes[[0, 1]] == object).all()
assert res.iloc[0, 0] is pd.NaT
assert res.iloc[-1, 0] is fv
assert res.iloc[-1, 1] is fv
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
"index_df,index_res,index_exp",
[
(
CategoricalIndex([], categories=["A"]),
Index(["A"]),
Index(["A"]),
),
(
CategoricalIndex([], categories=["A"]),
Index(["B"]),
Index(["B"]),
),
(
CategoricalIndex([], categories=["A"]),
CategoricalIndex(["A"]),
CategoricalIndex(["A"]),
),
(
CategoricalIndex([], categories=["A"]),
CategoricalIndex(["B"]),
CategoricalIndex(["B"]),
),
],
)
def test_reindex_not_category(self, index_df, index_res, index_exp):
# GH#28690
df = DataFrame(index=index_df)
result = df.reindex(index=index_res)
expected = DataFrame(index=index_exp)
tm.assert_frame_equal(result, expected)
from sys import exit
from random import randint
class Scene(object):
def enter(self):
print "This scene is not yet configured. Subclass it and implement enter()."
exit(1)
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
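        # Run scenes until we reach the 'finished' scene, then enter it once so its text prints.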
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene('finished')
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
# be sure to print out the last scene
current_scene.enter()
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your mom would be proud...if she were smarter.",
"Such a luser.",
"I have a small puppy that's better at this."
]
def enter(self):
print Death.quips[randint(0, len(self.quips)-1)]
exit(1)
class CentralCorridor(Scene):
def enter(self):
print "The Gothons of Planet Percal #25 have invaded your ship and destroyed"
print "your entire crew. You are the last surviving member and your last"
print "mission is to get the neutron destruct bomb from the Weapons Armory,"
print "put it in the bridge, and blow the ship up after getting into an "
print "escape pod."
print "\n"
print "You're running down the central corridor to the Weapons Armory when"
print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume"
print "flowing around his hate filled body. He's blocking the door to the"
print "Armory and about to pull a weapon to blast you."
action = raw_input("> ")
if action == "shoot!":
print "Quick on the draw you yank out your blaster and fire it at the Gothon."
print "His clown costume is flowing and moving around his body, which throws"
print "off your aim. Your laser hits his costume but misses him entirely. This"
print "completely ruins his brand new costume his mother bought him, which"
print "makes him fly into an insane rage and blast you repeatedly in the face until"
print "you are dead. Then he eats you."
return 'death'
elif action == "dodge!":
print "Like a world class boxer you dodge, weave, slip and slide right"
print "as the Gothon's blaster cranks a laser past your head."
print "In the middle of your artful dodge your foot slips and you"
print "bang your head on the metal wall and pass out."
print "You wake up shortly after only to die as the Gothon stomps on"
print "your head and eats you."
return 'death'
elif action == "tell a joke":
print "Lucky for you they made you learn Gothon insults in the academy."
print "You tell the one Gothon joke you know:"
print "Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr."
print "The Gothon stops, tries not to laugh, then busts out laughing and can't move."
print "While he's laughing you run up and shoot him square in the head"
print "putting him down, then jump through the Weapon Armory door."
return 'laser_weapon_armory'
else:
print "DOES NOT COMPUTE!"
return 'central_corridor'
class LaserWeaponArmory(Scene):
def enter(self):
print "You do a dive roll into the Weapon Armory, crouch and scan the room"
print "for more Gothons that might be hiding. It's dead quiet, too quiet."
print "You stand up and run to the far side of the room and find the"
print "neutron bomb in its container. There's a keypad lock on the box"
print "and you need the code to get the bomb out. If you get the code"
print "wrong 10 times then the lock closes forever and you can't"
print "get the bomb. The code is 3 digits."
code = "%d%d%d" % (randint(1,1), randint(1,1), randint(1,1))
print code
guess = raw_input("[keypad]> ")
guesses = 0
while guess != code and guesses < 10:
print "BZZZZEDDD!"
guesses += 1
guess = raw_input("[keypad]> ")
if guess == code:
print "The container clicks open and the seal breaks, letting gas out."
print "You grab the neutron bomb and run as fast as you can to the"
print "bridge where you must place it in the right spot."
return 'the_bridge'
else:
print "The lock buzzes one last time and then you hear a sickening"
print "melting sound as the mechanism is fused together."
print "You decide to sit there, and finally the Gothons blow up the"
print "ship from their ship and you die."
return 'death'
class TheBridge(Scene):
def enter(self):
print "You burst onto the Bridge with the netron destruct bomb"
print "under your arm and surprise 5 Gothons who are trying to"
print "take control of the ship. Each of them has an even uglier"
print "clown costume than the last. They haven't pulled their"
print "weapons out yet, as they see the active bomb under your"
print "arm and don't want to set it off."
action = raw_input("> ")
if action == "throw the bomb":
print "In a panic you throw the bomb at the group of Gothons"
print "and make a leap for the door. Right as you drop it a"
print "Gothon shoots you right in the back killing you."
print "As you die you see another Gothon frantically try to disarm"
print "the bomb. You die knowing they will probably blow up when"
print "it goes off."
return 'death'
elif action == "slowly place the bomb":
print "You point your blaster at the bomb under your arm"
print "and the Gothons put their hands up and start to sweat."
print "You inch backward to the door, open it, and then carefully"
print "place the bomb on the floor, pointing your blaster at it."
print "You then jump back through the door, punch the close button"
print "and blast the lock so the Gothons can't get out."
print "Now that the bomb is placed you run to the escape pod to"
print "get off this tin can."
return 'escape_pod'
else:
print "DOES NOT COMPUTE!"
return "the_bridge"
class EscapePod(Scene):
def enter(self):
print "You rush through the ship desperately trying to make it to"
print "the escape pod before the whole ship explodes. It seems like"
print "hardly any Gothons are on the ship, so your run is clear of"
print "interference. You get to the chamber with the escape pods, and"
print "now need to pick one to take. Some of them could be damaged"
print "but you don't have time to look. There's 5 pods, which one"
print "do you take?"
good_pod = randint(1,5)
guess = raw_input("[pod #]> ")
if int(guess) != good_pod:
print "You jump into pod %s and hit the eject button." % guess
print "The pod escapes out into the void of space, then"
print "implodes as the hull ruptures, crushing your body"
print "into jam jelly."
return 'death'
else:
print "You jump into pod %s and hit the eject button." % guess
print "The pod easily slides out into space heading to"
print "the planet below. As it flies to the planet, you look"
print "back and see your ship implode then explode like a"
print "bright star, taking out the Gothon ship at the same"
print "time. You won!"
return 'finished'
class Finished(Scene):
def enter(self):
print "You won! Good job."
return 'finished'
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished(),
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
val = Map.scenes.get(scene_name)
return val
def opening_scene(self):
return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dataflow client utility functions."""
import codecs
import getpass
import json
import logging
import os
import re
import time
from StringIO import StringIO
from datetime import datetime
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apache_beam import utils
from apache_beam.internal.auth import get_service_credentials
from apache_beam.internal.clients import storage
from apache_beam.internal.json_value import to_json_value
from apache_beam.runners.google_cloud_dataflow.internal.clients import dataflow
from apache_beam.transforms import cy_combiners
from apache_beam.transforms.display import DisplayData
from apache_beam.utils import dependency
from apache_beam.utils import retry
from apache_beam.utils.dependency import get_required_container_version
from apache_beam.utils.dependency import get_sdk_name_and_version
from apache_beam.utils.names import PropertyNames
from apache_beam.utils.pipeline_options import DebugOptions
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import StandardOptions
from apache_beam.utils.pipeline_options import WorkerOptions
class Step(object):
"""Wrapper for a dataflow Step protobuf."""
def __init__(self, step_kind, step_name, additional_properties=None):
self.step_kind = step_kind
self.step_name = step_name
self.proto = dataflow.Step(kind=step_kind, name=step_name)
self.proto.properties = {}
self._additional_properties = []
if additional_properties is not None:
for (n, v, t) in additional_properties:
self.add_property(n, v, t)
def add_property(self, name, value, with_type=False):
self._additional_properties.append((name, value, with_type))
self.proto.properties.additionalProperties.append(
dataflow.Step.PropertiesValue.AdditionalProperty(
key=name, value=to_json_value(value, with_type=with_type)))
def _get_outputs(self):
"""Returns a list of all output labels for a step."""
outputs = []
for p in self.proto.properties.additionalProperties:
if p.key == PropertyNames.OUTPUT_INFO:
for entry in p.value.array_value.entries:
for entry_prop in entry.object_value.properties:
if entry_prop.key == PropertyNames.OUTPUT_NAME:
outputs.append(entry_prop.value.string_value)
return outputs
def __reduce__(self):
"""Reduce hook for pickling the Step class more easily."""
return (Step, (self.step_kind, self.step_name, self._additional_properties))
def get_output(self, tag=None):
"""Returns name if it is one of the outputs or first output if name is None.
Args:
tag: tag of the output as a string or None if we want to get the
name of the first output.
Returns:
The name of the output associated with the tag or the first output
if tag was None.
Raises:
ValueError: if the tag does not exist within outputs.
"""
outputs = self._get_outputs()
if tag is None:
return outputs[0]
else:
name = '%s_%s' % (PropertyNames.OUT, tag)
if name not in outputs:
raise ValueError(
'Cannot find named output: %s in %s.' % (name, outputs))
return name
class Environment(object):
"""Wrapper for a dataflow Environment protobuf."""
def __init__(self, packages, options, environment_version):
self.standard_options = options.view_as(StandardOptions)
self.google_cloud_options = options.view_as(GoogleCloudOptions)
self.worker_options = options.view_as(WorkerOptions)
self.debug_options = options.view_as(DebugOptions)
self.proto = dataflow.Environment()
self.proto.clusterManagerApiService = GoogleCloudOptions.COMPUTE_API_SERVICE
self.proto.dataset = '{}/cloud_dataflow'.format(
GoogleCloudOptions.BIGQUERY_API_SERVICE)
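    # Rewrite the gs:/ scheme of temp_location to the storage API service path expected by
    # the Dataflow service.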
self.proto.tempStoragePrefix = (
self.google_cloud_options.temp_location.replace(
'gs:/',
GoogleCloudOptions.STORAGE_API_SERVICE))
# User agent information.
self.proto.userAgent = dataflow.Environment.UserAgentValue()
self.local = 'localhost' in self.google_cloud_options.dataflow_endpoint
if self.google_cloud_options.service_account_email:
self.proto.serviceAccountEmail = (
self.google_cloud_options.service_account_email)
sdk_name, version_string = get_sdk_name_and_version()
self.proto.userAgent.additionalProperties.extend([
dataflow.Environment.UserAgentValue.AdditionalProperty(
key='name',
value=to_json_value(sdk_name)),
dataflow.Environment.UserAgentValue.AdditionalProperty(
key='version', value=to_json_value(version_string))])
# Version information.
self.proto.version = dataflow.Environment.VersionValue()
if self.standard_options.streaming:
job_type = 'PYTHON_STREAMING'
else:
job_type = 'PYTHON_BATCH'
self.proto.version.additionalProperties.extend([
dataflow.Environment.VersionValue.AdditionalProperty(
key='job_type',
value=to_json_value(job_type)),
dataflow.Environment.VersionValue.AdditionalProperty(
key='major', value=to_json_value(environment_version))])
# Experiments
if self.debug_options.experiments:
for experiment in self.debug_options.experiments:
self.proto.experiments.append(experiment)
# Worker pool(s) information.
package_descriptors = []
for package in packages:
package_descriptors.append(
dataflow.Package(
location='%s/%s' % (
self.google_cloud_options.staging_location.replace(
'gs:/', GoogleCloudOptions.STORAGE_API_SERVICE),
package),
name=package))
pool = dataflow.WorkerPool(
kind='local' if self.local else 'harness',
packages=package_descriptors,
taskrunnerSettings=dataflow.TaskRunnerSettings(
parallelWorkerSettings=dataflow.WorkerSettings(
baseUrl=GoogleCloudOptions.DATAFLOW_ENDPOINT,
servicePath=self.google_cloud_options.dataflow_endpoint)))
pool.autoscalingSettings = dataflow.AutoscalingSettings()
# Set worker pool options received through command line.
if self.worker_options.num_workers:
pool.numWorkers = self.worker_options.num_workers
if self.worker_options.max_num_workers:
pool.autoscalingSettings.maxNumWorkers = (
self.worker_options.max_num_workers)
if self.worker_options.autoscaling_algorithm:
values_enum = dataflow.AutoscalingSettings.AlgorithmValueValuesEnum
pool.autoscalingSettings.algorithm = {
'NONE': values_enum.AUTOSCALING_ALGORITHM_NONE,
'THROUGHPUT_BASED': values_enum.AUTOSCALING_ALGORITHM_BASIC,
}.get(self.worker_options.autoscaling_algorithm)
if self.worker_options.machine_type:
pool.machineType = self.worker_options.machine_type
if self.worker_options.disk_size_gb:
pool.diskSizeGb = self.worker_options.disk_size_gb
if self.worker_options.disk_type:
pool.diskType = self.worker_options.disk_type
if self.worker_options.zone:
pool.zone = self.worker_options.zone
if self.worker_options.network:
pool.network = self.worker_options.network
if self.worker_options.worker_harness_container_image:
pool.workerHarnessContainerImage = (
self.worker_options.worker_harness_container_image)
else:
# Default to using the worker harness container image for the current SDK
# version.
pool.workerHarnessContainerImage = (
'dataflow.gcr.io/v1beta3/python:%s' %
get_required_container_version())
if self.worker_options.use_public_ips is not None:
if self.worker_options.use_public_ips:
pool.ipConfiguration = (
dataflow.WorkerPool
.IpConfigurationValueValuesEnum.WORKER_IP_PUBLIC)
else:
pool.ipConfiguration = (
dataflow.WorkerPool
.IpConfigurationValueValuesEnum.WORKER_IP_PRIVATE)
if self.standard_options.streaming:
# Use separate data disk for streaming.
disk = dataflow.Disk()
if self.local:
disk.diskType = 'local'
# TODO(ccy): allow customization of disk.
pool.dataDisks.append(disk)
self.proto.workerPools.append(pool)
sdk_pipeline_options = options.get_all_options()
if sdk_pipeline_options:
self.proto.sdkPipelineOptions = (
dataflow.Environment.SdkPipelineOptionsValue())
options_dict = {k: v
for k, v in sdk_pipeline_options.iteritems()
if v is not None}
self.proto.sdkPipelineOptions.additionalProperties.append(
dataflow.Environment.SdkPipelineOptionsValue.AdditionalProperty(
key='options', value=to_json_value(options_dict)))
dd = DisplayData.create_from_options(options)
items = [item.get_dict() for item in dd.items]
self.proto.sdkPipelineOptions.additionalProperties.append(
dataflow.Environment.SdkPipelineOptionsValue.AdditionalProperty(
key='display_data', value=to_json_value(items)))
class Job(object):
"""Wrapper for a dataflow Job protobuf."""
def __str__(self):
def encode_shortstrings(input_buffer, errors='strict'):
"""Encoder (from Unicode) that suppresses long base64 strings."""
original_len = len(input_buffer)
if original_len > 150:
if self.base64_str_re.match(input_buffer):
input_buffer = '<string of %d bytes>' % original_len
input_buffer = input_buffer.encode('ascii', errors=errors)
else:
matched = self.coder_str_re.match(input_buffer)
if matched:
input_buffer = '%s<string of %d bytes>' % (
matched.group(1), matched.end(2) - matched.start(2))
input_buffer = input_buffer.encode('ascii', errors=errors)
return input_buffer, original_len
def decode_shortstrings(input_buffer, errors='strict'):
"""Decoder (to Unicode) that suppresses long base64 strings."""
shortened, length = encode_shortstrings(input_buffer, errors)
return unicode(shortened), length
def shortstrings_registerer(encoding_name):
if encoding_name == 'shortstrings':
return codecs.CodecInfo(name='shortstrings',
encode=encode_shortstrings,
decode=decode_shortstrings)
return None
codecs.register(shortstrings_registerer)
# Use json "dump string" method to get readable formatting;
# further modify it to not output too-long strings, aimed at the
# 10,000+ character hex-encoded "serialized_fn" values.
return json.dumps(
json.loads(encoding.MessageToJson(self.proto), encoding='shortstrings'),
indent=2, sort_keys=True)
@staticmethod
def default_job_name(job_name):
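    """Returns job_name, or a generated 'beamapp-<user>-<MMDDhhmmss-microseconds>' name if None."""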
if job_name is None:
user_name = getpass.getuser().lower()
date_component = datetime.utcnow().strftime('%m%d%H%M%S-%f')
app_name = 'beamapp'
job_name = '{}-{}-{}'.format(app_name, user_name, date_component)
return job_name
def __init__(self, options):
self.options = options
self.google_cloud_options = options.view_as(GoogleCloudOptions)
if not self.google_cloud_options.job_name:
self.google_cloud_options.job_name = self.default_job_name(
self.google_cloud_options.job_name)
required_google_cloud_options = ['project', 'job_name', 'temp_location']
missing = [
option for option in required_google_cloud_options
if not getattr(self.google_cloud_options, option)]
if missing:
raise ValueError(
'Missing required configuration parameters: %s' % missing)
if not self.google_cloud_options.staging_location:
logging.info('Defaulting to the temp_location as staging_location: %s',
self.google_cloud_options.temp_location)
(self.google_cloud_options
.staging_location) = self.google_cloud_options.temp_location
# Make the staging and temp locations job name and time specific. This is
# needed to avoid clashes between job submissions using the same staging
# area or team members using same job names. This method is not entirely
# foolproof since two job submissions with same name can happen at exactly
# the same time. However the window is extremely small given that
# time.time() has at least microseconds granularity. We add the suffix only
# for GCS staging locations where the potential for such clashes is high.
if self.google_cloud_options.staging_location.startswith('gs://'):
path_suffix = '%s.%f' % (self.google_cloud_options.job_name, time.time())
self.google_cloud_options.staging_location = utils.path.join(
self.google_cloud_options.staging_location, path_suffix)
self.google_cloud_options.temp_location = utils.path.join(
self.google_cloud_options.temp_location, path_suffix)
self.proto = dataflow.Job(name=self.google_cloud_options.job_name)
if self.options.view_as(StandardOptions).streaming:
self.proto.type = dataflow.Job.TypeValueValuesEnum.JOB_TYPE_STREAMING
else:
self.proto.type = dataflow.Job.TypeValueValuesEnum.JOB_TYPE_BATCH
self.base64_str_re = re.compile(r'^[A-Za-z0-9+/]*=*$')
self.coder_str_re = re.compile(r'^([A-Za-z]+\$)([A-Za-z0-9+/]*=*)$')
def json(self):
return encoding.MessageToJson(self.proto)
def __reduce__(self):
"""Reduce hook for pickling the Job class more easily."""
return (Job, (self.options,))
class DataflowApplicationClient(object):
"""A Dataflow API client used by application code to create and query jobs."""
def __init__(self, options, environment_version):
"""Initializes a Dataflow API client object."""
self.standard_options = options.view_as(StandardOptions)
self.google_cloud_options = options.view_as(GoogleCloudOptions)
self.environment_version = environment_version
if self.google_cloud_options.no_auth:
credentials = None
else:
credentials = get_service_credentials()
self._client = dataflow.DataflowV1b3(
url=self.google_cloud_options.dataflow_endpoint,
credentials=credentials,
get_credentials=(not self.google_cloud_options.no_auth))
self._storage_client = storage.StorageV1(
url='https://www.googleapis.com/storage/v1',
credentials=credentials,
get_credentials=(not self.google_cloud_options.no_auth))
# TODO(silviuc): Refactor so that retry logic can be applied.
@retry.no_retries # Using no_retries marks this as an integration point.
def _gcs_file_copy(self, from_path, to_path):
to_folder, to_name = os.path.split(to_path)
with open(from_path, 'rb') as f:
self.stage_file(to_folder, to_name, f)
def stage_file(self, gcs_or_local_path, file_name, stream,
mime_type='application/octet-stream'):
"""Stages a file at a GCS or local path with stream-supplied contents."""
if not gcs_or_local_path.startswith('gs://'):
local_path = os.path.join(gcs_or_local_path, file_name)
logging.info('Staging file locally to %s', local_path)
with open(local_path, 'wb') as f:
f.write(stream.read())
return
gcs_location = gcs_or_local_path + '/' + file_name
bucket, name = gcs_location[5:].split('/', 1)
request = storage.StorageObjectsInsertRequest(
bucket=bucket, name=name)
logging.info('Starting GCS upload to %s...', gcs_location)
upload = storage.Upload(stream, mime_type)
try:
response = self._storage_client.objects.Insert(request, upload=upload)
except exceptions.HttpError as e:
reportable_errors = {
403: 'access denied',
404: 'bucket not found',
}
if e.status_code in reportable_errors:
raise IOError(('Could not upload to GCS path %s: %s. Please verify '
'that credentials are valid and that you have write '
'access to the specified path. Stale credentials can be '
'refreshed by executing "gcloud auth login".') %
(gcs_or_local_path, reportable_errors[e.status_code]))
raise
logging.info('Completed GCS upload to %s', gcs_location)
return response
# TODO(silviuc): Refactor so that retry logic can be applied.
@retry.no_retries # Using no_retries marks this as an integration point.
def create_job(self, job):
"""Creates job description. May stage and/or submit for remote execution."""
self.create_job_description(job)
# Stage and submit the job when necessary
dataflow_job_file = job.options.view_as(DebugOptions).dataflow_job_file
template_location = (
job.options.view_as(GoogleCloudOptions).template_location)
job_location = template_location or dataflow_job_file
if job_location:
gcs_or_local_path = os.path.dirname(job_location)
file_name = os.path.basename(job_location)
self.stage_file(gcs_or_local_path, file_name, StringIO(job.json()))
if not template_location:
return self.submit_job_description(job)
else:
return None
def create_job_description(self, job):
"""Creates a job described by the workflow proto."""
resources = dependency.stage_job_resources(
job.options, file_copy=self._gcs_file_copy)
job.proto.environment = Environment(
packages=resources, options=job.options,
environment_version=self.environment_version).proto
# TODO(silviuc): Remove the debug logging eventually.
logging.info('JOB: %s', job)
def submit_job_description(self, job):
"""Creates and excutes a job request."""
request = dataflow.DataflowProjectsJobsCreateRequest()
request.projectId = self.google_cloud_options.project
request.job = job.proto
try:
response = self._client.projects_jobs.Create(request)
except exceptions.BadStatusCodeError as e:
logging.error('HTTP status %d trying to create job'
' at dataflow service endpoint %s',
e.response.status,
self.google_cloud_options.dataflow_endpoint)
logging.fatal('details of server error: %s', e)
raise
logging.info('Create job: %s', response)
# The response is a Job proto with the id for the new job.
logging.info('Created job with id: [%s]', response.id)
logging.info(
'To access the Dataflow monitoring console, please navigate to '
'https://console.developers.google.com/project/%s/dataflow/job/%s',
self.google_cloud_options.project, response.id)
return response
@retry.with_exponential_backoff() # Using retry defaults from utils/retry.py
def modify_job_state(self, job_id, new_state):
"""Modify the run state of the job.
Args:
job_id: The id of the job.
new_state: A string representing the new desired state. It could be set to
either 'JOB_STATE_DONE', 'JOB_STATE_CANCELLED' or 'JOB_STATE_DRAINING'.
Returns:
True if the job was modified successfully.
"""
if new_state == 'JOB_STATE_DONE':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DONE
elif new_state == 'JOB_STATE_CANCELLED':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_CANCELLED
elif new_state == 'JOB_STATE_DRAINING':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DRAINING
else:
# Other states could only be set by the service.
return False
request = dataflow.DataflowProjectsJobsUpdateRequest()
request.jobId = job_id
request.projectId = self.google_cloud_options.project
request.job = dataflow.Job(requestedState=new_state)
self._client.projects_jobs.Update(request)
return True
@retry.with_exponential_backoff() # Using retry defaults from utils/retry.py
def get_job(self, job_id):
"""Gets the job status for a submitted job.
Args:
job_id: A string representing the job_id for the workflow as returned
        by a create_job() request.
Returns:
A Job proto. See below for interesting fields.
The Job proto returned from a get_job() request contains some interesting
fields:
currentState: An object representing the current state of the job. The
string representation of the object (str() result) has the following
        possible values: JOB_STATE_UNKNOWN, JOB_STATE_STOPPED,
JOB_STATE_RUNNING, JOB_STATE_DONE, JOB_STATE_FAILED,
JOB_STATE_CANCELLED.
createTime: UTC time when the job was created
(e.g. '2015-03-10T00:01:53.074Z')
currentStateTime: UTC time for the current state of the job.
"""
request = dataflow.DataflowProjectsJobsGetRequest()
request.jobId = job_id
request.projectId = self.google_cloud_options.project
response = self._client.projects_jobs.Get(request)
return response
@retry.with_exponential_backoff() # Using retry defaults from utils/retry.py
def list_messages(
self, job_id, start_time=None, end_time=None, page_token=None,
minimum_importance=None):
"""List messages associated with the execution of a job.
Args:
job_id: A string representing the job_id for the workflow as returned
        by a create_job() request.
start_time: If specified, only messages generated after the start time
will be returned, otherwise all messages since job started will be
returned. The value is a string representing UTC time
(e.g., '2015-08-18T21:03:50.644Z')
end_time: If specified, only messages generated before the end time
will be returned, otherwise all messages up to current time will be
returned. The value is a string representing UTC time
(e.g., '2015-08-18T21:03:50.644Z')
page_token: A string to be used as next page token if the list call
returned paginated results.
minimum_importance: Filter for messages based on importance. The possible
string values in increasing order of importance are: JOB_MESSAGE_DEBUG,
JOB_MESSAGE_DETAILED, JOB_MESSAGE_BASIC, JOB_MESSAGE_WARNING,
JOB_MESSAGE_ERROR. For example, a filter set on warning will allow only
warnings and errors and exclude all others.
Returns:
A tuple consisting of a list of JobMessage instances and a
next page token string.
Raises:
RuntimeError: if an unexpected value for the message_importance argument
is used.
The JobMessage objects returned by the call contain the following fields:
id: A unique string identifier for the message.
time: A string representing the UTC time of the message
(e.g., '2015-08-18T21:03:50.644Z')
messageImportance: An enumeration value for the message importance. The
        value, if converted to string, will have the following possible values:
JOB_MESSAGE_DEBUG, JOB_MESSAGE_DETAILED, JOB_MESSAGE_BASIC,
JOB_MESSAGE_WARNING, JOB_MESSAGE_ERROR.
messageText: A message string.
"""
request = dataflow.DataflowProjectsJobsMessagesListRequest(
jobId=job_id, projectId=self.google_cloud_options.project)
if page_token is not None:
request.pageToken = page_token
if start_time is not None:
request.startTime = start_time
if end_time is not None:
request.endTime = end_time
if minimum_importance is not None:
if minimum_importance == 'JOB_MESSAGE_DEBUG':
request.minimumImportance = (
dataflow.DataflowProjectsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_DEBUG)
elif minimum_importance == 'JOB_MESSAGE_DETAILED':
request.minimumImportance = (
dataflow.DataflowProjectsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_DETAILED)
elif minimum_importance == 'JOB_MESSAGE_BASIC':
request.minimumImportance = (
dataflow.DataflowProjectsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_BASIC)
elif minimum_importance == 'JOB_MESSAGE_WARNING':
request.minimumImportance = (
dataflow.DataflowProjectsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_WARNING)
elif minimum_importance == 'JOB_MESSAGE_ERROR':
request.minimumImportance = (
dataflow.DataflowProjectsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_ERROR)
else:
raise RuntimeError(
'Unexpected value for minimum_importance argument: %r',
minimum_importance)
response = self._client.projects_jobs_messages.List(request)
return response.jobMessages, response.nextPageToken
class MetricUpdateTranslators(object):
"""Translators between accumulators and dataflow metric updates."""
@staticmethod
def translate_boolean(accumulator, metric_update_proto):
metric_update_proto.boolean = accumulator.value
@staticmethod
def translate_scalar_mean_int(accumulator, metric_update_proto):
if accumulator.count:
metric_update_proto.integerMean = dataflow.IntegerMean()
metric_update_proto.integerMean.sum = to_split_int(accumulator.sum)
metric_update_proto.integerMean.count = to_split_int(accumulator.count)
else:
metric_update_proto.nameAndKind.kind = None
@staticmethod
def translate_scalar_mean_float(accumulator, metric_update_proto):
if accumulator.count:
metric_update_proto.floatingPointMean = dataflow.FloatingPointMean()
metric_update_proto.floatingPointMean.sum = accumulator.sum
metric_update_proto.floatingPointMean.count = to_split_int(
accumulator.count)
else:
metric_update_proto.nameAndKind.kind = None
@staticmethod
def translate_scalar_counter_int(accumulator, metric_update_proto):
metric_update_proto.integer = to_split_int(accumulator.value)
@staticmethod
def translate_scalar_counter_float(accumulator, metric_update_proto):
metric_update_proto.floatingPoint = accumulator.value
def to_split_int(n):
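  """Splits an integer into low/high 32-bit halves for the dataflow SplitInt64 proto."""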
res = dataflow.SplitInt64()
res.lowBits = n & 0xffffffff
res.highBits = n >> 32
return res
def translate_distribution(distribution_update, metric_update_proto):
"""Translate metrics DistributionUpdate to dataflow distribution update."""
dist_update_proto = dataflow.DistributionUpdate()
dist_update_proto.min = to_split_int(distribution_update.min)
dist_update_proto.max = to_split_int(distribution_update.max)
dist_update_proto.count = to_split_int(distribution_update.count)
dist_update_proto.sum = to_split_int(distribution_update.sum)
metric_update_proto.distribution = dist_update_proto
def translate_value(value, metric_update_proto):
metric_update_proto.integer = to_split_int(value)
def translate_scalar(accumulator, metric_update):
metric_update.scalar = to_json_value(accumulator.value, with_type=True)
def translate_mean(accumulator, metric_update):
if accumulator.count:
metric_update.meanSum = to_json_value(accumulator.sum, with_type=True)
metric_update.meanCount = to_json_value(accumulator.count, with_type=True)
else:
# A denominator of 0 will raise an error in the service.
# What it means is we have nothing to report yet, so don't.
metric_update.kind = None
# To enable a counter on the service, add it to this dictionary.
metric_translations = {
cy_combiners.CountCombineFn: ('sum', translate_scalar),
cy_combiners.SumInt64Fn: ('sum', translate_scalar),
cy_combiners.MinInt64Fn: ('min', translate_scalar),
cy_combiners.MaxInt64Fn: ('max', translate_scalar),
cy_combiners.MeanInt64Fn: ('mean', translate_mean),
cy_combiners.SumFloatFn: ('sum', translate_scalar),
cy_combiners.MinFloatFn: ('min', translate_scalar),
cy_combiners.MaxFloatFn: ('max', translate_scalar),
cy_combiners.MeanFloatFn: ('mean', translate_mean),
cy_combiners.AllCombineFn: ('and', translate_scalar),
cy_combiners.AnyCombineFn: ('or', translate_scalar),
}
counter_translations = {
cy_combiners.CountCombineFn: (
dataflow.NameAndKind.KindValueValuesEnum.SUM,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.SumInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.SUM,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.MinInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.MIN,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.MaxInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.MAX,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.MeanInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.MEAN,
MetricUpdateTranslators.translate_scalar_mean_int),
cy_combiners.SumFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.SUM,
MetricUpdateTranslators.translate_scalar_counter_float),
cy_combiners.MinFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.MIN,
MetricUpdateTranslators.translate_scalar_counter_float),
cy_combiners.MaxFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.MAX,
MetricUpdateTranslators.translate_scalar_counter_float),
cy_combiners.MeanFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.MEAN,
MetricUpdateTranslators.translate_scalar_mean_float),
cy_combiners.AllCombineFn: (
dataflow.NameAndKind.KindValueValuesEnum.AND,
MetricUpdateTranslators.translate_boolean),
cy_combiners.AnyCombineFn: (
dataflow.NameAndKind.KindValueValuesEnum.OR,
MetricUpdateTranslators.translate_boolean),
}
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to represent any type of spectrum, essentially any
x y value pairs.
"""
import sys
from typing import Callable, List, Union
import numpy as np
from monty.json import MSONable
from scipy import stats
from scipy.ndimage.filters import convolve1d
from pymatgen.util.coord import get_linear_interpolated_value
from pymatgen.util.typing import ArrayLike
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
def lorentzian(x, x_0: float = 0, sigma: float = 1.0):
"""
:param x: x values
:param x_0: Center
:param sigma: FWHM
:return: Value of lorentzian at x.
"""
return 1 / np.pi * 0.5 * sigma / ((x - x_0) ** 2 + (0.5 * sigma) ** 2)
class Spectrum(MSONable):
"""
    Base class for any type of spectrum, essentially just x, y values. Examples
include XRD patterns, XANES, EXAFS, NMR, DOS, etc.
Implements basic tools like application of smearing, normalization, addition
multiplication, etc.
Subclasses should extend this object and ensure that super is called with
ALL args and kwargs. That ensures subsequent things like add and mult work
properly.
"""
XLABEL = "x"
YLABEL = "y"
def __init__(self, x: ArrayLike, y: ArrayLike, *args, **kwargs):
r"""
Args:
x (ndarray): A ndarray of N values.
y (ndarray): A ndarray of N x k values. The first dimension must be
the same as that of x. Each of the k values are interpreted as separate.
*args: All subclasses should provide args other than x and y
when calling super, e.g., super().__init__(
x, y, arg1, arg2, kwarg1=val1, ..). This guarantees the +, -, *,
etc. operators work properly.
**kwargs: Same as that for *args.
"""
self.x = np.array(x)
self.y = np.array(y)
self.ydim = self.y.shape
if self.x.shape[0] != self.ydim[0]:
raise ValueError("x and y values have different first dimension!")
self._args = args
self._kwargs = kwargs
def __getattr__(self, item):
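        # Let subclasses expose x and y under their own lowercase XLABEL / YLABEL names.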
if item == self.XLABEL.lower():
return self.x
if item == self.YLABEL.lower():
return self.y
raise AttributeError("Invalid attribute name %s" % str(item))
def __len__(self):
return self.ydim[0]
def normalize(self, mode: Literal["max", "sum"] = "max", value: float = 1.0):
"""
        Normalize the spectrum with respect to the maximum or sum of intensity
Args:
mode ("max" | "sum"): Normalization mode. "max" sets the max y value to value,
e.g., in XRD patterns. "sum" sets the sum of y to a value, i.e., like a
probability density.
value (float): Value to normalize to. Defaults to 1.
"""
if mode.lower() == "sum":
factor = np.sum(self.y, axis=0)
elif mode.lower() == "max":
factor = np.max(self.y, axis=0)
else:
raise ValueError("Unsupported normalization mode %s!" % mode)
self.y /= factor / value
def smear(self, sigma: float = 0.0, func: Union[str, Callable] = "gaussian"):
"""
Apply Gaussian/Lorentzian smearing to spectrum y value.
Args:
sigma: Std dev for Gaussian smear function
func: "gaussian" or "lorentzian" or a callable. If this is a callable, the sigma value is ignored. The
callable should only take a single argument (a numpy array) and return a set of weights.
"""
points = np.linspace(np.min(self.x) - np.mean(self.x), np.max(self.x) - np.mean(self.x), len(self.x))
if callable(func):
weights = func(points)
elif func.lower() == "gaussian":
weights = stats.norm.pdf(points, scale=sigma)
elif func.lower() == "lorentzian":
weights = lorentzian(points, sigma=sigma)
else:
raise ValueError(f"Invalid func {func}")
weights /= np.sum(weights)
if len(self.ydim) == 1:
total = np.sum(self.y)
self.y = convolve1d(self.y, weights)
self.y *= total / np.sum(self.y) # renormalize to maintain the same integrated sum as before.
else:
total = np.sum(self.y, axis=0)
self.y = np.array([convolve1d(self.y[:, k], weights) for k in range(self.ydim[1])]).T
self.y *= total / np.sum(self.y, axis=0) # renormalize to maintain the same integrated sum as before.
def get_interpolated_value(self, x: float) -> List[float]:
"""
Returns an interpolated y value for a particular x value.
Args:
x: x value to return the y value for
Returns:
Value of y at x
"""
if len(self.ydim) == 1:
return get_linear_interpolated_value(self.x, self.y, x)
return [get_linear_interpolated_value(self.x, self.y[:, k], x) for k in range(self.ydim[1])]
def copy(self):
"""
Returns:
Copy of Spectrum object.
"""
return self.__class__(self.x, self.y, *self._args, **self._kwargs)
def __add__(self, other):
"""
        Add two Spectrum objects together. Checks that x scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another Spectrum object
Returns:
Sum of the two Spectrum objects
"""
if not all(np.equal(self.x, other.x)):
raise ValueError("X axis values are not compatible!")
return self.__class__(self.x, self.y + other.y, *self._args, **self._kwargs)
def __sub__(self, other):
"""
        Subtract one Spectrum object from another. Checks that x scales are
the same.
Otherwise, a ValueError is thrown
Args:
other: Another Spectrum object
Returns:
            Subtraction of the two Spectrum objects
"""
if not all(np.equal(self.x, other.x)):
raise ValueError("X axis values are not compatible!")
return self.__class__(self.x, self.y - other.y, *self._args, **self._kwargs)
def __mul__(self, other):
"""
Scale the Spectrum's y values
Args:
other: scalar, The scale amount
Returns:
Spectrum object with y values scaled
"""
return self.__class__(self.x, other * self.y, *self._args, **self._kwargs)
__rmul__ = __mul__
def __truediv__(self, other):
"""
True division of y
Args:
other: The divisor
Returns:
Spectrum object with y values divided
"""
return self.__class__(self.x, self.y.__truediv__(other), *self._args, **self._kwargs)
def __floordiv__(self, other):
"""
        Floor division of y
Args:
other: The divisor
Returns:
Spectrum object with y values divided
"""
return self.__class__(self.x, self.y.__floordiv__(other), *self._args, **self._kwargs)
__div__ = __truediv__
def __str__(self):
"""
Returns a string containing values and labels of spectrum object for
plotting.
"""
return "\n".join(
[
self.__class__.__name__,
f"{self.XLABEL}: {self.x}",
f"{self.YLABEL}: {self.y}",
]
)
def __repr__(self):
"""
Returns a printable representation of the class
"""
return self.__str__()
import sys
import PySide
import ui_mainwindow
from PySide import QtGui
from PySide import QtCore
import btceapi
epsilon = 0.0000001
def getTopOfTheBook(symbol):
asks, bids = btceapi.getDepth(symbol)
#return (float(asks[1][0]), float(asks[0][1] + asks[1][1]), float(bids[1][0]), float(bids[0][1] + bids[1][1]))
return (float(asks[0][0]), float(asks[0][1]), float(bids[0][0]), float(bids[0][1]))
def getTopOfTheBook2(symbol):
asks, bids = btceapi.getDepth(symbol)
return (float(asks[1][0]), float(asks[0][1] + asks[1][1]), float(bids[1][0]), float(bids[0][1] + bids[1][1]))
def getTopOfTheBook3(symbol):
asks, bids = btceapi.getDepth(symbol)
return (float(asks[2][0]), float(asks[0][1] + asks[1][1] + asks[2][1]), float(bids[2][0]), float(bids[0][1] + bids[1][1] + bids[2][1]))
def getTopOfTheBookMinSum(symbol, s):
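    # Walk down both sides of the book until the cumulative notional (price * size) reaches s;
    # return the deepest price touched and the total size accumulated on each side.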
asks, bids = btceapi.getDepth(symbol)
askDepth = 0
bidDepth = 0
ask, askAmount, bid, bidAmount = float(asks[0][0]), float(asks[0][1]), float(bids[0][0]), float(bids[0][1])
askSum = ask * askAmount
bidSum = bid * bidAmount
while(askSum < s):
askDepth = askDepth + 1
askSum = askSum + float(asks[askDepth][0]) * float(asks[askDepth][1])
ask = float(asks[askDepth][0])
askAmount = askAmount + float(asks[askDepth][1])
while(bidSum < s):
bidDepth = bidDepth + 1
bidSum = bidSum + float(bids[bidDepth][0]) * float(bids[bidDepth][1])
bid = float(bids[bidDepth][0])
bidAmount = bidAmount + float(bids[bidDepth][1])
return ask, askAmount, bid, bidAmount
def getTopOfTheBookMinAmount(symbol, minAskAmount, minBidAmount):
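    # Like getTopOfTheBookMinSum, but accumulate plain size (not notional) until the requested
    # minimum ask/bid amounts are covered.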
asks, bids = btceapi.getDepth(symbol)
askDepth = 0
bidDepth = 0
ask, askAmount, bid, bidAmount = float(asks[0][0]), float(asks[0][1]), float(bids[0][0]), float(bids[0][1])
while(askAmount < minAskAmount):
askDepth = askDepth + 1
ask = float(asks[askDepth][0])
askAmount = askAmount + float(asks[askDepth][1])
while(bidAmount < minBidAmount):
bidDepth = bidDepth + 1
bid = float(bids[bidDepth][0])
bidAmount = bidAmount + float(bids[bidDepth][1])
return ask, askAmount, bid, bidAmount
def refreshPairData(top, spinBox1, spinBox2, spinBox3, spinBox4):
spinBox1.setValue(top[0])
spinBox2.setValue(top[2])
spinBox3.setValue(top[1])
spinBox4.setValue(top[3])
return
p1 = "btc_usd"
p2 = "ltc_btc"
p3 = "ltc_usd"
def formatBTC(amount):
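    # Truncate (not round) a BTC amount to two decimal places.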
return float(int(amount*100))/100
tradeTimeout = 10000
k = float(0.998)
k2 = k * k
k3 = k * k * k
maxUSD = 30.0
maxBTC = 0.1
maxLTC = 3.0
minProfit = 0.01
tradeTimer = QtCore.QTimer()
tradeTimer.setInterval(tradeTimeout)
tradeTimer.setSingleShot(1)
class Listener(QtCore.QObject):
maxProfit = float(-1.0)
top1 = (1, 1, 1, 1)
top2 = (1, 1, 1, 1)
top3 = (1, 1, 1, 1)
    def __init__(self, itemModel, timer, tradeAPI):
        QtCore.QObject.__init__(self)  # the QObject base must be initialised for Qt slots/signals to work
        self.itemModel = itemModel
self.timer = timer
self.tradeAPI = tradeAPI
self.balance_usd = 0
self.balance_btc = 0
self.balance_ltc = 0
#def confirmTop(self):
#t1 = getTopOfTheBook(p1);
#t2 = getTopOfTheBook3(p2);
#t3 = getTopOfTheBook2(p3);
#info = tradeAPI.getInfo()
#balance_usd = info.balance_usd
#balance_ltc = info.balance_ltc
#balance_btc = info.balance_btc
#print ("top1: " + str(self.top1) + ", t1: " + str(t1) + "\n"
#+"top1: " + str(self.top2) + ", t1: " + str(t2) + "\n"
#"top1: " + str(self.top3) + ", t1: " + str(t3))
#return (abs(t1[0] - self.top1[0]) < epsilon and
#abs(t1[1] - self.top1[1]) < epsilon and
#abs(t1[2] - self.top1[2]) < epsilon and
#abs(t1[3] - self.top1[3]) < epsilon and
#abs(t2[0] - self.top2[0]) < epsilon and
#abs(t2[1] - self.top2[1]) < epsilon and
#abs(t2[2] - self.top2[2]) < epsilon and
#abs(t2[3] - self.top2[3]) < epsilon and
#abs(t3[0] - self.top3[0]) < epsilon and
#abs(t3[1] - self.top3[1]) < epsilon and
#abs(t3[2] - self.top3[2]) < epsilon and
#abs(t3[3] - self.top3[3]) < epsilon)
def tradeForward(self):
#if(not self.confirmTop()):
#print "Failed to confirm top"
#return
a1, X, b1, X2 = self.top1
a2, Y, b2, Y2 = self.top2
a3, Z2, b3, Z = self.top3
print str(QtCore.QDateTime.currentDateTime().toString()) + ": exploring usd->btc->ltc->usd arbitrage opportunity"
usdToSpend = min(self.balance_usd, maxUSD, a1 / k * min(X, maxBTC, self.balance_btc), a1 * a2 / k2 * min(Y, maxLTC, self.balance_ltc), a1 * a2 / k3 * Z)
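        # Each leg keeps k = 0.998 after fees: usd->btc at ask a1, btc->ltc at ask a2,
        # ltc->usd at bid b3, so the round trip multiplies the stake by k^3 * b3 / (a1 * a2).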
profit1 = k3 * float(b3) / (float(a1) * float(a2)) - 1.0
usdProfit = usdToSpend * profit1
if(usdProfit < minProfit):
print "Investment " + str(usdToSpend) + ", profit " + str(usdProfit) + " less than " + str(minProfit)
return
btcToBuy = formatBTC(k * usdToSpend / a1)
ltcToBuy = k * btcToBuy / a2
if btcToBuy < btceapi.min_orders[p1]:
print "BTC to buy " + str(btcToBuy) + " less than minimum " + str(btceapi.min_orders[p1])
return
if ltcToBuy < btceapi.min_orders[p2]:
print "LTC to buy " + str(ltcToBuy) + " less than minimum " + str(btceapi.min_orders[p2])
return
if not tradeTimer.isActive():
print "Buying " + str(btcToBuy) + " btc at price " + str(a1) + " usd"
result1 = tradeAPI.trade(p1, "buy", a1, btcToBuy)
print "Trade 1, received: " + str(result1.received)
print "Buying " + str(ltcToBuy) + " ltc at price " + str(a2) + " btc"
result2 = tradeAPI.trade(p2, "buy", a2, ltcToBuy)
print "Trade 2, received: " + str(result2.received)
print "Selling " + str(ltcToBuy) + " ltc at price " + str(b3) + " usd"
result3 = tradeAPI.trade(p3, "sell", b3, ltcToBuy)
print "Trade 3, received: " + str(result3.received)
else:
print "Trade timer is active, not trading"
lastItem = QtGui.QStandardItem()
lastItem.setText(str(usdProfit))
lastItem.setToolTip(str(usdToSpend))
model.appendRow([QtGui.QStandardItem(str(QtCore.QDateTime.currentDateTime().toString())),
QtGui.QStandardItem("Forward"),
QtGui.QStandardItem(str(a1)),
QtGui.QStandardItem(str(X)),
QtGui.QStandardItem(str(b1)),
QtGui.QStandardItem(str(X2)),
QtGui.QStandardItem(str(a2)),
QtGui.QStandardItem(str(Y)),
QtGui.QStandardItem(str(b2)),
QtGui.QStandardItem(str(Y2)),
QtGui.QStandardItem(str(a3)),
QtGui.QStandardItem(str(Z2)),
QtGui.QStandardItem(str(b3)),
QtGui.QStandardItem(str(Z)),
QtGui.QStandardItem(str(profit1 * 100) + "%"),
lastItem])
model.reset()
def tradeBackward(self):
#if(not self.confirmTop()):
#print "Failed to confirm top"
#return
a1, X2, b1, X = self.top1
a2, Y2, b2, Y = self.top2
a3, Z, b3, Z2 = self.top3
print str(QtCore.QDateTime.currentDateTime().toString()) + ": exploring usd->ltc->btc->usd arbitrage opportunity"
usdToSpend = min(maxUSD, self.balance_usd, a3 / k * min(Z, maxLTC, self.balance_ltc), a3 / (k2 * b2) * min(float(int(Y * b2 * 100))/100, maxBTC, self.balance_btc), X * a3 / (k3 * b2))
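        # Reverse loop: usd->ltc at ask a3, ltc->btc at bid b2, btc->usd at bid b1,
        # for a round-trip factor of k^3 * b1 * b2 / a3.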
profit2 = k3 * float(b1) * float(b2) / float(a3) - 1.0
usdProfit = usdToSpend * profit2
if(usdProfit < minProfit):
print "Investment " + str(usdToSpend) + ", profit " + str(usdProfit) + " less than " + str(minProfit)
return
ltcToBuy = k * usdToSpend / a3
btcToBuy = formatBTC(k * ltcToBuy * b2)
ltcToBuy = btcToBuy / (k * b2)
usdToGain = k * btcToBuy * b1
print "usdToSpend: " + str(usdToSpend) + ", usdToGain: " + str(usdToGain)
print "ltcToBuy: " + str(ltcToBuy)
print "btcToBuy: " + str(btcToBuy)
if btcToBuy < btceapi.min_orders[p1]:
print "BTC to buy " + str(btcToBuy) + " less than minimum " + str(btceapi.min_orders[p1])
return
if ltcToBuy < btceapi.min_orders[p2]:
print "LTC to buy " + str(ltcToBuy) + " less than minimum " + str(btceapi.min_orders[p2])
return
if not tradeTimer.isActive():
print "Buying " + str(ltcToBuy) + " ltc at price " + str(a3) + " usd"
result1 = tradeAPI.trade(p3, "buy", a3, ltcToBuy)
print "Trade 1, received: " + str(result1.received)
print "Selling " + str(ltcToBuy) + " ltc at price " + str(b2) + " btc"
result2 = tradeAPI.trade(p2, "sell", b2, ltcToBuy)
print "Trade 2, received: " + str(result2.received)
print "Selling " + str(btcToBuy) + " btc at price " + str(b1) + " usd"
result3 = tradeAPI.trade(p1, "sell", b1, btcToBuy)
print "Trade 3, received: " + str(result3.received)
else:
print "Trade timer is active, not trading"
lastItem = QtGui.QStandardItem()
lastItem.setText(str(usdProfit))
lastItem.setToolTip(str(usdToSpend))
model.appendRow([QtGui.QStandardItem(str(QtCore.QDateTime.currentDateTime().toString())),
QtGui.QStandardItem("Backward"),
QtGui.QStandardItem(str(a1)),
QtGui.QStandardItem(str(X2)),
QtGui.QStandardItem(str(b1)),
QtGui.QStandardItem(str(X)),
QtGui.QStandardItem(str(a2)),
QtGui.QStandardItem(str(Y2)),
QtGui.QStandardItem(str(b2)),
QtGui.QStandardItem(str(Y)),
QtGui.QStandardItem(str(a3)),
QtGui.QStandardItem(str(Z)),
QtGui.QStandardItem(str(b3)),
QtGui.QStandardItem(str(Z2)),
QtGui.QStandardItem(str(profit2 * 100) + "%"),
lastItem])
model.reset()
@QtCore.Slot()
def onTimer(self):
info = tradeAPI.getInfo()
self.balance_usd = float(info.balance_usd)
self.balance_btc = float(info.balance_btc)
self.balance_ltc = float(info.balance_ltc)
self.top1 = getTopOfTheBook(p1)
self.top2 = getTopOfTheBook(p2)
self.top3 = getTopOfTheBookMinAmount(p3, float(btceapi.min_orders[p1])/self.top2[0], float(btceapi.min_orders[p1])/self.top2[2])
a1, b1 = self.top1[0], self.top1[2]
a2, b2 = self.top2[0], self.top2[2]
a3, b3 = self.top3[0], self.top3[2]
refreshPairData(self.top1, content.doubleSpinBox_sec1_ask, content.doubleSpinBox_sec1_bid,
content.doubleSpinBox_sec1_askAmount, content.doubleSpinBox_sec1_bidAmount)
refreshPairData(self.top2, content.doubleSpinBox_sec2_ask, content.doubleSpinBox_sec2_bid,
content.doubleSpinBox_sec2_askAmount, content.doubleSpinBox_sec2_bidAmount)
refreshPairData(self.top3, content.doubleSpinBox_sec3_ask, content.doubleSpinBox_sec3_bid,
content.doubleSpinBox_sec3_askAmount, content.doubleSpinBox_sec3_bidAmount)
profit1 = k3 * float(b3) / (float(a1) * float(a2)) - 1.0
profit2 = k3 * float(b1) * float(b2) / float(a3) - 1.0
if profit1 > 0:
self.tradeForward()
elif profit2 > 0:
self.tradeBackward()
self.maxProfit = max(self.maxProfit, profit1, profit2)
content.label_result.setText('profit1 = ' + str(profit1 * 100) + '%\nprofit2 = ' + str(profit2 * 100) + '%\n' + "maxProfit = " + str(self.maxProfit * 100) + "%")
@QtCore.Slot()
def toggleTimer(self):
if timer.isActive():
timer.stop()
else:
timer.start()
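# The two profit formulas used by tradeForward and tradeBackward above, restated
# as a small standalone sketch. Assumption: k is the per-trade fee-retention
# factor (e.g. 0.998 for a 0.2% fee) and k2 = k**2, k3 = k**3; the helper below
# is hypothetical and recomputes them from an explicit fee argument.
def _arbitrage_profit_sketch(a1, b1, a2, b2, a3, b3, fee=0.002):
    k = 1.0 - fee
    k3 = k ** 3
    # Forward leg: USD -> BTC at ask a1, BTC -> LTC at ask a2, LTC -> USD at bid b3.
    profit_forward = k3 * float(b3) / (float(a1) * float(a2)) - 1.0
    # Backward leg: USD -> LTC at ask a3, LTC -> BTC at bid b2, BTC -> USD at bid b1.
    profit_backward = k3 * float(b1) * float(b2) / float(a3) - 1.0
    return profit_forward, profit_backward
# Example call with purely illustrative prices:
# _arbitrage_profit_sketch(a1=100.0, b1=99.5, a2=0.031, b2=0.030, a3=3.05, b3=3.02)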
app = QtGui.QApplication(sys.argv)
win = QtGui.QMainWindow()
content = ui_mainwindow.Ui_MainWindow()
content.setupUi(win)
content.label_sec1.setText(p1)
content.label_sec2.setText(p2)
content.label_sec3.setText(p3)
timer = QtCore.QTimer()
timer.setInterval(100)
timer.setSingleShot(0)
model = QtGui.QStandardItemModel()
model.setHorizontalHeaderLabels(["Time",
"TradeDirection",
"Ask1",
"Ask1_quantity",
"Bid1",
"Bid1_quantity",
"Ask2",
"Ask2_quantity",
"Bid2",
"Bid2_quantity",
"Ask3",
"Ask3_quantity",
"Bid3",
"Bid3_quantity",
"ProfitPercent",
"USD profit"])
key_file = "keyfile.txt"
with btceapi.KeyHandler(key_file, resaveOnDeletion=True) as handler:
key = handler.getKeys()[0]
tradeAPI = btceapi.TradeAPI(key, handler=handler)
listener = Listener(model, timer, tradeAPI)
content.tableView_history.setModel(model)
content.tableView_history.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
content.pushButton_pause.clicked.connect(listener.toggleTimer)
timer.timeout.connect(listener.onTimer)
timer.start()
win.show()
sys.exit(app.exec_())
|
|
import logging
from functools import wraps
import numpy as np
from matplotlib.figure import Figure
from ..external.modest_image import extract_matched_slices
from ..core.exceptions import IncompatibleAttribute
from ..core.data import Data
from ..core.util import lookup_class
from ..core.subset import Subset, RoiSubsetState
from ..core.roi import PolygonalROI
from ..core.callback_property import (
callback_property, CallbackProperty)
from ..core.edit_subset_mode import EditSubsetMode
from .viz_client import VizClient, init_mpl
from .layer_artist import (ScatterLayerArtist, LayerArtistContainer,
ImageLayerArtist, SubsetImageLayerArtist,
RGBImageLayerArtist)
def requires_data(func):
"""Decorator that checks an ImageClient for a non-null display_data
    attribute. Only executes the decorated function if display_data is set."""
@wraps(func)
def result(*args, **kwargs):
if args[0].display_data is None:
return
return func(*args, **kwargs)
return result
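# A tiny self-contained illustration of the guard above. The _GuardedDemo class
# is hypothetical and exists only for this sketch; it is not part of glue.
class _GuardedDemo(object):
    display_data = None
    @requires_data
    def describe(self):
        return "display_data is set"
# _GuardedDemo().describe() returns None while display_data is None; once a
# dataset is assigned to display_data, the decorated method runs normally.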
class ImageClient(VizClient):
display_data = CallbackProperty(None)
display_attribute = CallbackProperty(None)
def __init__(self, data, figure=None, axes=None, artist_container=None):
if axes is not None:
raise ValueError("ImageClient does not accept an axes")
figure, axes = init_mpl(figure, axes, wcs=True)
VizClient.__init__(self, data)
self.artists = artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
self._slice = None
self._view_window = None
self._view = None
self._image = None
self._override_image = None
self._ax = axes
self._ax.get_xaxis().set_ticks([])
self._ax.get_yaxis().set_ticks([])
self._figure = figure
self._norm_cache = {}
# custom axes formatter
def format_coord(x, y):
data = self.display_data
if data is None:
# MPL default method
return type(self._ax).format_coord(self._ax, x, y)
info = self.point_details(x, y)
return ' '.join(info['labels'])
self._ax.format_coord = format_coord
self._cid = self._ax.figure.canvas.mpl_connect('button_release_event',
self.check_update)
if hasattr(self._ax.figure.canvas, 'homeButton'):
# test code doesn't always use Glue's custom FigureCanvas
self._ax.figure.canvas.homeButton.connect(self.check_update)
def point_details(self, x, y):
data = self.display_data
pix = self._pixel_coords(x, y)
world = data.coords.pixel2world(*pix[::-1])
world = world[::-1] # reverse for numpy convention
labels = ['%s=%s' % (data.get_world_component_id(i).label, w)
for i, w in enumerate(world)]
view = []
for p, s in zip(pix, data.shape):
p = int(p)
if not (0 <= p < s):
value = None
break
view.append(slice(p, p + 1))
else:
if self._override_image is None:
value = self.display_data[self.display_attribute, view]
else:
value = self._override_image[int(y), int(x)]
value = value.ravel()[0]
return dict(pix=pix, world=world, labels=labels, value=value)
@callback_property
def slice(self):
"""
Returns a tuple describing the current slice through the data
The tuple has length equal to the dimensionality of the display
data. Each entry is either:
'x' if the dimension is mapped to the X image axis
'y' if the dimension is mapped to the Y image axis
a number, indicating which fixed slice the dimension is restricted to
"""
if self._slice is not None:
return self._slice
if self.display_data is None:
return tuple()
ndim = self.display_data.ndim
if ndim == 1:
self._slice = ('x',)
elif ndim == 2:
self._slice = ('y', 'x')
else:
self._slice = (0,) * (ndim - 2) + ('y', 'x')
return self._slice
@slice.setter
def slice(self, value):
if self.slice == tuple(value):
return
relim = value.index('x') != self._slice.index('x') or \
value.index('y') != self._slice.index('y')
self._slice = tuple(value)
self._clear_override()
self._update_axis_labels()
self._update_data_plot(relim=relim)
self._update_subset_plots()
self._redraw()
@property
def axes(self):
return self._ax
@property
def is_3D(self):
"""
        Returns True if the display data has 3 dimensions.
        """
if not self.display_data:
return False
return len(self.display_data.shape) == 3
@property
def slice_ind(self):
"""
For 3D data, returns the pixel index of the current slice.
Otherwise, returns None
"""
if self.is_3D:
for s in self.slice:
if s not in ['x', 'y']:
return s
return None
@property
def image(self):
return self._image
@requires_data
def override_image(self, image):
"""Temporarily override the current slice view with another
        image (e.g., an aggregate)
"""
self._override_image = image
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerArtist):
a.override_image(image)
self._update_data_plot()
self._redraw()
def _clear_override(self):
self._override_image = None
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerArtist):
a.clear_override()
@slice_ind.setter
def slice_ind(self, value):
if self.is_3D:
slc = [s if s in ['x', 'y'] else value for s in self.slice]
self.slice = slc
self._update_data_plot()
self._update_subset_plots()
self._redraw()
else:
raise IndexError("Can only set slice_ind for 3D images")
def can_image_data(self, data):
return data.ndim > 1
def _ensure_data_present(self, data):
if data not in self.artists:
self.add_layer(data)
def check_update(self, *args):
logging.getLogger(__name__).debug("check update")
vw = _view_window(self._ax)
if vw != self._view_window:
logging.getLogger(__name__).debug("updating")
self._update_data_plot()
self._update_subset_plots()
self._redraw()
self._view_window = vw
def set_data(self, data, attribute=None):
if not self.can_image_data(data):
return
self._ensure_data_present(data)
self._slice = None
attribute = attribute or _default_component(data)
self.display_data = data
self.display_attribute = attribute
self._update_axis_labels()
self._update_data_plot(relim=True)
self._update_subset_plots()
self._redraw()
def _update_wcs_axes(self, data, slc):
wcs = getattr(data.coords, 'wcs', None)
if wcs is not None and hasattr(self.axes, 'reset_wcs'):
self.axes.reset_wcs(wcs, slices=slc[::-1])
@requires_data
def _update_axis_labels(self):
labels = _axis_labels(self.display_data, self.slice)
self._update_wcs_axes(self.display_data, self.slice)
self._ax.set_xlabel(labels[1])
self._ax.set_ylabel(labels[0])
def set_attribute(self, attribute):
if not self.display_data or \
attribute not in self.display_data.component_ids():
raise IncompatibleAttribute(
"Attribute not in data's attributes: %s" % attribute)
if self.display_attribute is not None:
self._norm_cache[self.display_attribute] = self.get_norm()
self.display_attribute = attribute
if attribute in self._norm_cache:
self.set_norm(norm=self._norm_cache[attribute])
else:
self.clear_norm()
self._update_data_plot()
self._redraw()
def _redraw(self):
"""
Re-render the screen
"""
self._ax.figure.canvas.draw()
@requires_data
def set_norm(self, **kwargs):
for a in self.artists[self.display_data]:
a.set_norm(**kwargs)
self._update_data_plot()
self._redraw()
@requires_data
def clear_norm(self):
for a in self.artists[self.display_data]:
a.clear_norm()
@requires_data
def get_norm(self):
a = self.artists[self.display_data][0]
return a.norm
@requires_data
def set_cmap(self, cmap):
for a in self.artists[self.display_data]:
a.cmap = cmap
a.redraw()
def _build_view(self, matched=False):
att = self.display_attribute
shp = self.display_data.shape
shp_2d = _2d_shape(shp, self.slice)
x, y = np.s_[:], np.s_[:]
if matched:
v = extract_matched_slices(self._ax, shp_2d)
x = slice(v[0], v[1], v[2])
y = slice(v[3], v[4], v[5])
slc = list(self.slice)
slc[slc.index('x')] = x
slc[slc.index('y')] = y
return (att,) + tuple(slc)
@requires_data
def _update_data_plot(self, relim=False):
"""
Re-sync the main image and its subsets
"""
if relim:
self.relim()
view = self._build_view(matched=True)
self._image = self.display_data[view]
transpose = self.slice.index('x') < self.slice.index('y')
self._view = view
for a in list(self.artists):
if (not isinstance(a, ScatterLayerArtist)) and \
a.layer.data is not self.display_data:
self.artists.remove(a)
else:
a.update(view, transpose)
for a in self.artists[self.display_data]:
a.update(view, transpose=transpose)
def relim(self):
shp = _2d_shape(self.display_data.shape, self.slice)
self._ax.set_xlim(0, shp[1])
self._ax.set_ylim(0, shp[0])
def _update_subset_single(self, s, redraw=False):
"""
Update the location and visual properties
        of each point in a single subset.
        Parameters
        ----------
        s : Subset instance
            The subset to refresh.
"""
logging.getLogger(__name__).debug("update subset single: %s", s)
self._update_scatter_layer(s)
if s not in self.artists:
return
if s.data is not self.display_data:
return
view = self._build_view(matched=True)
transpose = self.slice.index('x') < self.slice.index('y')
for a in self.artists[s]:
a.update(view, transpose)
if redraw:
self._redraw()
@property
def _slice_ori(self):
if not self.is_3D:
return None
for i, s in enumerate(self.slice):
if s not in ['x', 'y']:
return i
@requires_data
def apply_roi(self, roi):
subset_state = RoiSubsetState()
xroi, yroi = roi.to_polygon()
x, y = self._get_plot_attributes()
subset_state.xatt = x
subset_state.yatt = y
subset_state.roi = PolygonalROI(xroi, yroi)
mode = EditSubsetMode()
mode.update(self.data, subset_state, focus_data=self.display_data)
def _remove_subset(self, message):
self.delete_layer(message.sender)
def delete_layer(self, layer):
if layer not in self.artists:
return
for a in self.artists.pop(layer):
a.clear()
if layer is self.display_data:
self.display_data = None
if isinstance(layer, Data):
for subset in layer.subsets:
self.delete_layer(subset)
self._redraw()
def _remove_data(self, message):
self.delete_layer(message.data)
for s in message.data.subsets:
self.delete_layer(s)
def init_layer(self, layer):
# only auto-add subsets if they are of the main image
if isinstance(layer, Subset) and layer.data is not self.display_data:
return
self.add_layer(layer)
def rgb_mode(self, enable=None):
""" Query whether RGB mode is enabled, or toggle RGB mode
:param enable: bool, or None
If True or False, explicitly enable/disable RGB mode.
If None, check if RGB mode is enabled
:rtype: LayerArtist or None
If RGB mode is enabled, returns an RGBImageLayerArtist
If enable=False, return the new ImageLayerArtist
"""
# XXX need to better handle case where two RGBImageLayerArtists
# are created
if enable is None:
for a in self.artists:
if isinstance(a, RGBImageLayerArtist):
return a
return None
result = None
layer = self.display_data
if enable:
layer = self.display_data
v = self._view or self._build_view(matched=True)
a = RGBImageLayerArtist(layer, self._ax, last_view=v)
for artist in self.artists.pop(layer):
artist.clear()
self.artists.append(a)
result = a
else:
for artist in list(self.artists):
if isinstance(artist, RGBImageLayerArtist):
artist.clear()
self.artists.remove(artist)
result = self.add_layer(layer)
self._update_data_plot()
self._redraw()
return result
def add_layer(self, layer):
if layer in self.artists:
return self.artists[layer][0]
if layer.data not in self.data:
raise TypeError("Data not managed by client's data collection")
if not self.can_image_data(layer.data):
# if data is 1D, try to scatter plot
if len(layer.data.shape) == 1:
return self.add_scatter_layer(layer)
logging.getLogger(__name__).warning(
"Cannot visualize %s. Aborting", layer.label)
return
if isinstance(layer, Data):
result = ImageLayerArtist(layer, self._ax)
self.artists.append(result)
for s in layer.subsets:
self.add_layer(s)
elif isinstance(layer, Subset):
result = SubsetImageLayerArtist(layer, self._ax)
self.artists.append(result)
self._update_subset_single(layer)
else:
raise TypeError("Unrecognized layer type: %s" % type(layer))
return result
def add_scatter_layer(self, layer):
logging.getLogger(
__name__).debug('Adding scatter layer for %s' % layer)
if layer in self.artists:
logging.getLogger(__name__).debug('Layer already present')
return
result = ScatterLayerArtist(layer, self._ax)
self.artists.append(result)
self._update_scatter_layer(layer)
return result
@requires_data
def _update_scatter_layer(self, layer):
xatt, yatt = self._get_plot_attributes()
for a in self.artists[layer]:
if not isinstance(a, ScatterLayerArtist):
continue
a.xatt = xatt
a.yatt = yatt
if self.is_3D:
zatt = self.display_data.get_pixel_component_id(
self._slice_ori)
subset = (
zatt > self.slice_ind) & (zatt <= self.slice_ind + 1)
a.emphasis = subset
else:
a.emphasis = None
a.update()
a.redraw()
self._redraw()
@requires_data
def _get_plot_attributes(self):
x, y = _slice_axis(self.display_data.shape, self.slice)
ids = self.display_data.pixel_component_ids
return ids[x], ids[y]
def _pixel_coords(self, x, y):
"""From a slice coordinate (x,y), return the full (possibly
>2D) numpy index into the full data
*Note*
The inputs to this function are the reverse of numpy convention
(horizontal axis first, then vertical)
*Returns*
Either (x,y) or (x,y,z)
"""
result = list(self.slice)
result[result.index('x')] = x
result[result.index('y')] = y
return result
def is_visible(self, layer):
return all(a.visible for a in self.artists[layer])
def set_visible(self, layer, state):
for a in self.artists[layer]:
a.visible = state
def set_slice_ori(self, ori):
if not self.is_3D:
raise IndexError("Can only set slice_ori for 3D images")
if ori == 0:
self.slice = (0, 'y', 'x')
elif ori == 1:
self.slice = ('y', 0, 'x')
elif ori == 2:
self.slice = ('y', 'x', 0)
else:
raise ValueError("Orientation must be 0, 1, or 2")
def restore_layers(self, layers, context):
""" Restore a list of glue-serialized layer dicts """
for layer in layers:
c = lookup_class(layer.pop('_type'))
props = dict((k, v if k == 'stretch' else context.object(v))
for k, v in layer.items())
l = props['layer']
if c == ScatterLayerArtist:
l = self.add_scatter_layer(l)
elif c == ImageLayerArtist or c == SubsetImageLayerArtist:
if isinstance(l, Data):
self.set_data(l)
l = self.add_layer(l)
elif c == RGBImageLayerArtist:
r = props.pop('r')
g = props.pop('g')
b = props.pop('b')
self.display_data = l
self.display_attribute = r
l = self.rgb_mode(True)
l.r = r
l.g = g
l.b = b
else:
raise ValueError("Cannot restore layer of type %s" % l)
l.properties = props
def _2d_shape(shape, slc):
"""Return the shape of the 2D slice through a 2 or 3D image
"""
# - numpy ordering here
return shape[slc.index('y')], shape[slc.index('x')]
def _slice_axis(shape, slc):
"""
Return a 2-tuple of which axes in a dataset lie along the
x and y axes of the image
:param shape: Shape of original data. tuple of ints
:param slc: Slice through the data, tuple of ints, 'x', and 'y'
"""
return slc.index('x'), slc.index('y')
def _axis_labels(data, slc):
shape = data.shape
names = [data.get_world_component_id(i).label
for i in range(len(shape))]
return names[slc.index('y')], names[slc.index('x')]
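# A small self-contained illustration of the slice convention handled by the
# helpers above (the shapes are hypothetical and not part of glue's API).
def _slice_convention_example():
    shape = (4, 5, 6)      # a 3D cube
    slc = (0, 'y', 'x')    # plane 0 of dim 0; dims 1 and 2 map to the image axes
    assert _2d_shape(shape, slc) == (5, 6)
    assert _slice_axis(shape, slc) == (2, 1)
    return _2d_shape(shape, slc), _slice_axis(shape, slc)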
def _view_window(ax):
""" Return a tuple describing the view window of an axes object.
    The contents should not be used directly. Rather, successive
    return values should be compared with == to determine whether the
    window has been panned/zoomed.
"""
ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
xlim, ylim = ax.get_xlim(), ax.get_ylim()
result = xlim[0], ylim[0], xlim[1], ylim[1], ext[0], ext[1]
logging.getLogger(__name__).debug("view window: %s", result)
return result
def _default_component(data):
"""Choose a default ComponentID to display for data
Returns PRIMARY if present
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0]
|
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils function for routing game experiment."""
# pylint:disable=too-many-lines,import-error,missing-function-docstring,protected-access,too-many-locals,invalid-name,too-many-arguments,too-many-branches,missing-class-docstring,too-few-public-methods
# pylint:disable=line-too-long
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import policy as policy_module
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr as external_mccfr
from open_spiel.python.algorithms import fictitious_play
from open_spiel.python.algorithms import nfsp
from open_spiel.python.algorithms import noisy_policy
from open_spiel.python.games import dynamic_routing
from open_spiel.python.games import dynamic_routing_utils
from open_spiel.python.mfg.algorithms import distribution as distribution_module
from open_spiel.python.mfg.algorithms import fictitious_play as mean_field_fictitious_play_module
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv as nash_conv_module
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import dynamic_routing as mean_field_routing_game
import pyspiel
# pylint:enable=line-too-long
def create_games(origin,
destination,
num_vehicles,
graph,
max_time_step,
time_step_length=1.0,
departure_time=None):
if departure_time is not None:
raise NotImplementedError("To do.")
list_of_vehicles = [
dynamic_routing_utils.Vehicle(origin, destination)
for _ in range(num_vehicles)
]
game = dynamic_routing.DynamicRoutingGame(
{
"max_num_time_step": max_time_step,
"time_step_length": time_step_length
},
network=graph,
vehicles=list_of_vehicles)
seq_game = pyspiel.convert_to_turn_based(game)
od_demand = [
dynamic_routing_utils.OriginDestinationDemand(origin, destination, 0,
num_vehicles)
]
mfg_game = mean_field_routing_game.MeanFieldRoutingGame(
{
"max_num_time_step": max_time_step,
"time_step_length": time_step_length
},
network=graph,
od_demand=od_demand)
return game, seq_game, mfg_game
def create_braess_network(capacity):
graph_dict = {
"A": {
"connection": {
"B": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0
}
},
"location": [0, 0]
},
"B": {
"connection": {
"C": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
},
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [1, 0]
},
"C": {
"connection": {
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.25
},
"E": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [2, 1]
},
"D": {
"connection": {
"E": {
"a": 1,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
}
},
"location": [2, -1]
},
"E": {
"connection": {
"F": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.0
}
},
"location": [3, 0]
},
"F": {
"connection": {},
"location": [4, 0]
}
}
adjacency_list = {
key: list(value["connection"].keys())
for key, value in graph_dict.items()
}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
for o_node, value_dict in graph_dict.items():
for d_node, section_dict in value_dict["connection"].items():
road_section = dynamic_routing_utils._nodes_to_road_section(
origin=o_node, destination=d_node)
bpr_a_coefficient[road_section] = section_dict["a"]
bpr_b_coefficient[road_section] = section_dict["b"]
capacity[road_section] = section_dict["capacity"]
free_flow_travel_time[road_section] = section_dict[
"free_flow_travel_time"]
node_position = {key: value["location"] for key, value in graph_dict.items()}
return dynamic_routing_utils.Network(
adjacency_list,
node_position=node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time)
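# A minimal usage sketch for the two helpers above. The origin/destination road
# sections ("A->B", "E->F") and the numbers are illustrative assumptions, not
# values taken from the experiments themselves.
def _braess_games_example():
  graph = create_braess_network(capacity=5)
  game, seq_game, mfg_game = create_games(
      origin="A->B",
      destination="E->F",
      num_vehicles=5,
      graph=graph,
      max_time_step=10,
      time_step_length=0.5)
  return game, seq_game, mfg_game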
def create_augmented_braess_network(capacity):
graph_dict = {
"A": {
"connection": {
"B": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0
}
},
"location": [0, 0]
},
"B": {
"connection": {
"C": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
},
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [1, 0]
},
"C": {
"connection": {
"D": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.25
},
"E": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 2.0
}
},
"location": [2, 1]
},
"D": {
"connection": {
"E": {
"a": 1,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 1.0
},
"G": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.0
}
},
"location": [2, -1]
},
"E": {
"connection": {
"F": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": 0.0
}
},
"location": [3, 0]
},
"F": {
"connection": {},
"location": [4, 0]
},
"G": {
"connection": {},
"location": [3, -1]
}
}
adjacency_list = {
key: list(value["connection"].keys())
for key, value in graph_dict.items()
}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
for o_node, value_dict in graph_dict.items():
for d_node, section_dict in value_dict["connection"].items():
road_section = dynamic_routing_utils._nodes_to_road_section(
origin=o_node, destination=d_node)
bpr_a_coefficient[road_section] = section_dict["a"]
bpr_b_coefficient[road_section] = section_dict["b"]
capacity[road_section] = section_dict["capacity"]
free_flow_travel_time[road_section] = section_dict[
"free_flow_travel_time"]
node_position = {key: value["location"] for key, value in graph_dict.items()}
return dynamic_routing_utils.Network(
adjacency_list,
node_position=node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time)
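# The per-edge coefficients above ("a", "b", "capacity", "free_flow_travel_time")
# follow a BPR-style volume-delay convention. The sketch below states the assumed
# formula; it is an assumption about dynamic_routing_utils.Network, not a copy of
# its implementation.
def _bpr_travel_time_sketch(volume, a, b, capacity, free_flow_travel_time):
  return free_flow_travel_time * (1.0 + a * (volume / capacity)**b)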
def create_series_parallel_network(num_network_in_series,
time_step_length=1,
capacity=1):
i = 0
origin = "A_0->B_0"
graph_dict = {}
while i < num_network_in_series:
tt_up = random.random() + time_step_length
tt_down = random.random() + time_step_length
graph_dict.update({
f"A_{i}": {
"connection": {
f"B_{i}": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [0 + 3 * i, 0]
},
f"B_{i}": {
"connection": {
f"C_{i}": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": tt_up
},
f"D_{i}": {
"a": 1.0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": tt_down
}
},
"location": [1 + 3 * i, 0]
},
f"C_{i}": {
"connection": {
f"A_{i+1}": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [2 + 3 * i, 1]
},
f"D_{i}": {
"connection": {
f"A_{i+1}": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [2 + 3 * i, -1]
}
})
i += 1
graph_dict[f"A_{i}"] = {
"connection": {
"END": {
"a": 0,
"b": 1.0,
"capacity": capacity,
"free_flow_travel_time": time_step_length
}
},
"location": [0 + 3 * i, 0]
}
graph_dict["END"] = {"connection": {}, "location": [1 + 3 * i, 0]}
time_horizon = int(3.0 * (num_network_in_series + 1) / time_step_length)
destination = f"A_{i}->END"
adjacency_list = {
key: list(value["connection"].keys())
for key, value in graph_dict.items()
}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
for o_node, value_dict in graph_dict.items():
for d_node, section_dict in value_dict["connection"].items():
road_section = dynamic_routing_utils._nodes_to_road_section(
origin=o_node, destination=d_node)
bpr_a_coefficient[road_section] = section_dict["a"]
bpr_b_coefficient[road_section] = section_dict["b"]
capacity[road_section] = section_dict["capacity"]
free_flow_travel_time[road_section] = section_dict[
"free_flow_travel_time"]
node_position = {key: value["location"] for key, value in graph_dict.items()}
return dynamic_routing_utils.Network(
adjacency_list,
node_position=node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time
), origin, destination, time_horizon
def create_sioux_falls_network():
sioux_falls_adjacency_list = {}
sioux_falls_node_position = {}
bpr_a_coefficient = {}
bpr_b_coefficient = {}
capacity = {}
free_flow_travel_time = {}
content = open("./SiouxFalls_node.csv", "r").read()
for line in content.split("\n")[1:]:
row = line.split(",")
sioux_falls_node_position[row[0]] = [int(row[1]) / 1e5, int(row[2]) / 1e5]
sioux_falls_node_position[f"bef_{row[0]}"] = [
int(row[1]) / 1e5, int(row[2]) / 1e5
]
sioux_falls_node_position[f"aft_{row[0]}"] = [
int(row[1]) / 1e5, int(row[2]) / 1e5
]
sioux_falls_adjacency_list[f"bef_{row[0]}"] = [row[0]]
sioux_falls_adjacency_list[row[0]] = [f"aft_{row[0]}"]
sioux_falls_adjacency_list[f"aft_{row[0]}"] = []
bpr_a_coefficient[f"{row[0]}->aft_{row[0]}"] = 0.0
bpr_b_coefficient[f"{row[0]}->aft_{row[0]}"] = 1.0
capacity[f"{row[0]}->aft_{row[0]}"] = 0.0
free_flow_travel_time[f"{row[0]}->aft_{row[0]}"] = 0.0
bpr_a_coefficient[f"bef_{row[0]}->{row[0]}"] = 0.0
bpr_b_coefficient[f"bef_{row[0]}->{row[0]}"] = 1.0
capacity[f"bef_{row[0]}->{row[0]}"] = 0.0
free_flow_travel_time[f"bef_{row[0]}->{row[0]}"] = 0.0
content = open("./SiouxFalls_net.csv", "r").read()
for l in content.split("\n")[1:-1]:
_, origin, destination, a0, a1, a2, a3, a4 = l.split(",")
assert all(int(x) == 0 for x in [a1, a2, a3])
sioux_falls_adjacency_list[origin].append(destination)
road_section = f"{origin}->{destination}"
bpr_a_coefficient[road_section] = float(a4)
bpr_b_coefficient[road_section] = 4.0
capacity[road_section] = 1.0
free_flow_travel_time[road_section] = float(a0)
sioux_falls_od_demand = []
content = open("./SiouxFalls_od.csv", "r").read()
for line in content.split("\n")[1:-1]:
row = line.split(",")
sioux_falls_od_demand.append(
dynamic_routing_utils.OriginDestinationDemand(
f"bef_{row[0]}->{row[0]}", f"{row[1]}->aft_{row[1]}", 0,
float(row[2])))
return dynamic_routing_utils.Network(
sioux_falls_adjacency_list,
node_position=sioux_falls_node_position,
bpr_a_coefficient=bpr_a_coefficient,
bpr_b_coefficient=bpr_b_coefficient,
capacity=capacity,
free_flow_travel_time=free_flow_travel_time), sioux_falls_od_demand
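# The Sioux Falls loader above splits every node N into bef_N -> N -> aft_N with
# zero-cost links, so demands can start on "bef_N->N" and end on "N->aft_N". A
# tiny sketch of that key convention (the helper and node id are illustrative).
def _sioux_falls_section_keys(node="1"):
  entry_section = f"bef_{node}->{node}"
  exit_section = f"{node}->aft_{node}"
  return entry_section, exit_section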
def plot_network_n_player_game(g: dynamic_routing_utils.Network,
vehicle_locations=None):
"""Plot the network.
Args:
g: network to plot
    vehicle_locations: list of vehicle locations to draw on the network
"""
_, ax = plt.subplots()
o_xs, o_ys, d_xs, d_ys = g.return_list_for_matplotlib_quiver()
ax.quiver(
o_xs,
o_ys,
np.subtract(d_xs, o_xs),
np.subtract(d_ys, o_ys),
color="b",
angles="xy",
scale_units="xy",
scale=1)
ax.set_xlim([
np.min(np.concatenate((o_xs, d_xs))) - 0.5,
np.max(np.concatenate((o_xs, d_xs))) + 0.5
])
ax.set_ylim([
np.min(np.concatenate((o_ys, d_ys))) - 0.5,
np.max(np.concatenate((o_ys, d_ys))) + 0.5
])
if vehicle_locations is not None:
num_vehicle = len(vehicle_locations)
dict_location = {}
for vehicle_location in vehicle_locations:
if vehicle_location not in dict_location:
dict_location[vehicle_location] = 0.0
dict_location[vehicle_location] += 0.3 / num_vehicle
for point, width in dict_location.items():
circle = plt.Circle(point, width, color="r")
ax.add_patch(circle)
def plot_network_mean_field_game(g: dynamic_routing_utils.Network,
distribution=None,
scaling=1):
"""Plot the network.
Args:
g: network to plot
    distribution: dictionary mapping road sections to the probability mass
      to plot at that location.
    scaling: scaling factor for plot rendering.
"""
_, ax = plt.subplots()
o_xs, o_ys, d_xs, d_ys = g.return_list_for_matplotlib_quiver()
ax.quiver(
o_xs,
o_ys,
np.subtract(d_xs, o_xs),
np.subtract(d_ys, o_ys),
color="b",
angles="xy",
scale_units="xy",
scale=1)
ax.set_xlim([
np.min(np.concatenate((o_xs, d_xs))) - 0.5,
np.max(np.concatenate((o_xs, d_xs))) + 0.5
])
ax.set_ylim([
np.min(np.concatenate((o_ys, d_ys))) - 0.5,
np.max(np.concatenate((o_ys, d_ys))) + 0.5
])
if distribution is not None:
for x, prob_of_position in distribution.items():
point = g.return_position_of_road_section(x)
width = 0.3 * scaling * prob_of_position
circle = plt.Circle(point, width, color="r")
ax.add_patch(circle)
def evolve_n_player_simultaneous_game(game, policy, graph):
state = game.new_initial_state()
i = 0
while not state.is_terminal():
i += 1
if state.is_chance_node():
# Sample a chance event outcome.
outcomes_with_probs = state.chance_outcomes()
action_list, prob_list = zip(*outcomes_with_probs)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
elif state.is_simultaneous_node():
# Simultaneous node: sample actions for all players.
chosen_actions = []
for i in range(game.num_players()):
legal_actions = state.legal_actions(i)
state_policy = policy(state, i)
assert len(legal_actions) == len(state_policy), (
f"{legal_actions} not same length than {state_policy}")
chosen_actions.append(
random.choices(legal_actions,
[state_policy[a] for a in legal_actions])[0])
state.apply_actions(chosen_actions)
else:
raise ValueError(
"State should either be simultaneous node or change node.")
plot_network_n_player_game(graph, [
graph.return_position_of_road_section(x)
for x in state.get_current_vehicle_locations()
])
print(f"Travel times: {[-x for x in state.returns()]}")
def evolve_n_player_sequential_game(seq_game, policy, graph, debug=False):
state = seq_game.new_initial_state()
while not state.is_terminal():
legal_actions = state.legal_actions()
if state.is_chance_node():
# Sample a chance event outcome.
outcomes_with_probs = state.chance_outcomes()
action_list, prob_list = zip(*outcomes_with_probs)
action = np.random.choice(action_list, p=prob_list)
if debug:
print("------------ Change node ------------")
print(
(f"Possible chance actions: {outcomes_with_probs}, the one taken: "
f"{action}."))
state.apply_action(action)
else:
if debug:
print("------------ Sequential action node ------------")
print(state.information_state_tensor())
print(state.observation_tensor())
print(state.information_state_string())
if policy is not None:
state_policy = policy(state)
vehicle_location = [
s.replace("'", "")
for s in str(state).split("[")[1].split("]")[0].split(", ")
]
if debug:
print((f"Policy for player {state.current_player()} at location "
f"{vehicle_location[state.current_player()]}: ") +
str([(str(graph.get_road_section_from_action_id(k)) +
f"with probability {v}")
for k, v in state_policy.items()]))
assert set(state_policy) == set(legal_actions)
action = random.choices(legal_actions,
[state_policy[a] for a in legal_actions])
assert len(action) == 1
action = action[0]
else:
action = random.choice(legal_actions)
state.apply_action(action)
vehicle_location = [
s.replace("'", "")
for s in str(state).split("[")[1].split("]")[0].split(", ")
]
if debug:
print(vehicle_location)
plot_network_n_player_game(
graph,
[graph.return_position_of_road_section(x) for x in vehicle_location])
if debug:
print(f"Travel times: {[-x for x in state.returns()]}")
def evolve_mean_field_game(mfg_game,
policy,
graph,
scaling=1,
frequency_printing=1):
distribution_mfg = distribution_module.DistributionPolicy(mfg_game, policy)
root_state = mfg_game.new_initial_state()
listing_states = [root_state]
# plot_network_mean_field_game(graph, {origin: 1})
i = 0
while not listing_states[0].is_terminal() and not all(
state._vehicle_without_legal_action for state in listing_states): # pylint:disable=protected-access
assert abs(sum(map(distribution_mfg.value, listing_states)) - 1) < 1e-4, (
f"{list(map(distribution_mfg.value, listing_states))}")
new_listing_states = []
list_of_state_seen = set()
# In case chance node:
if listing_states[0].current_player() == pyspiel.PlayerId.CHANCE:
for mfg_state in listing_states:
for action, _ in mfg_state.chance_outcomes():
new_mfg_state = mfg_state.child(action)
          # Do not append the same state twice.
if str(new_mfg_state) not in list_of_state_seen:
new_listing_states.append(new_mfg_state)
list_of_state_seen.add(str(new_mfg_state))
current_distribution = {}
for mfg_state in new_listing_states:
location = mfg_state._vehicle_location # pylint:disable=protected-access
if location not in current_distribution:
current_distribution[location] = 0
current_distribution[location] += distribution_mfg.value(mfg_state)
plot_network_mean_field_game(graph, current_distribution, scaling=scaling)
# In case mean field node:
elif listing_states[0].current_player() == pyspiel.PlayerId.MEAN_FIELD:
for mfg_state in listing_states:
dist_to_register = mfg_state.distribution_support()
def get_probability_for_state(str_state):
try:
return distribution_mfg.value_str(str_state)
except ValueError:
return 0
dist = [
get_probability_for_state(str_state)
for str_state in dist_to_register
]
new_mfg_state = mfg_state.clone()
new_mfg_state.update_distribution(dist)
        # Do not append the same state twice.
if str(new_mfg_state) not in list_of_state_seen:
new_listing_states.append(new_mfg_state)
list_of_state_seen.add(str(new_mfg_state))
# In case action node:
else:
assert (listing_states[0].current_player() ==
pyspiel.PlayerId.DEFAULT_PLAYER_ID), "The player id should be 0"
for mfg_state in listing_states:
for action, _ in policy.action_probabilities(mfg_state).items():
new_mfg_state = mfg_state.child(action)
          # Do not append the same state twice.
if str(new_mfg_state) not in list_of_state_seen:
new_listing_states.append(new_mfg_state)
list_of_state_seen.add(str(new_mfg_state))
current_distribution = {}
for mfg_state in new_listing_states:
location = mfg_state._vehicle_location # pylint:disable=protected-access
if location not in current_distribution:
current_distribution[location] = 0
current_distribution[location] += distribution_mfg.value(mfg_state)
assert abs(sum(current_distribution.values()) - 1) < 1e-4, (
f"{current_distribution}")
i += 1
if i % frequency_printing == 0:
plot_network_mean_field_game(
graph, current_distribution, scaling=scaling)
listing_states = new_listing_states
def uniform_policy_n_player(seq_game):
return policy_module.UniformRandomPolicy(seq_game)
def first_action_policy_n_player(seq_game):
return policy_module.FirstActionPolicy(seq_game)
def ficticious_play(seq_game, number_of_iterations, compute_metrics=False):
xfp_solver = fictitious_play.XFPSolver(seq_game)
tick_time = time.time()
for _ in range(number_of_iterations):
xfp_solver.iteration()
timing = time.time() - tick_time
# print('done')
# average_policies = xfp_solver.average_policy_tables()
tabular_policy = policy_module.TabularPolicy(seq_game)
if compute_metrics:
nash_conv = exploitability.nash_conv(seq_game, xfp_solver.average_policy())
average_policy_values = expected_game_score.policy_value(
seq_game.new_initial_state(), [tabular_policy])
return timing, tabular_policy, nash_conv, average_policy_values
return timing, tabular_policy
def counterfactual_regret_minimization(seq_game,
number_of_iterations,
compute_metrics=False):
# freq_iteration_printing = number_of_iterations // 10
cfr_solver = cfr.CFRSolver(seq_game)
tick_time = time.time()
# print("CFRSolver initialized.")
for _ in range(number_of_iterations):
cfr_solver.evaluate_and_update_policy()
# if i % freq_iteration_printing == 0:
# print(f"Iteration {i}")
timing = time.time() - tick_time
# print("Finish.")
if compute_metrics:
nash_conv = exploitability.nash_conv(seq_game, cfr_solver.average_policy())
return timing, cfr_solver.average_policy(), nash_conv
return timing, cfr_solver.average_policy()
def external_sampling_monte_carlo_counterfactual_regret_minimization(
seq_game, number_of_iterations, compute_metrics=False):
cfr_solver = external_mccfr.ExternalSamplingSolver(
seq_game, external_mccfr.AverageType.SIMPLE)
tick_time = time.time()
# print("CFRSolver initialized.")
for _ in range(number_of_iterations):
cfr_solver.iteration()
timing = time.time() - tick_time
# print("Finish.")
if compute_metrics:
nash_conv = exploitability.nash_conv(seq_game, cfr_solver.average_policy())
return timing, cfr_solver.average_policy(), nash_conv
return timing, cfr_solver.average_policy()
class NFSPPolicies(policy_module.Policy):
"""Joint policy to be evaluated."""
def __init__(self, env, nfsp_policies, mode):
game = env.game
num_players = env.num_players
player_ids = list(range(num_players))
super().__init__(game, player_ids)
self._policies = nfsp_policies
self._mode = mode
self._obs = {
"info_state": [None] * num_players,
"legal_actions": [None] * num_players
}
def action_probabilities(self, state, player_id=None):
del player_id
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
self._obs["current_player"] = cur_player
self._obs["info_state"][cur_player] = (
state.information_state_tensor(cur_player))
self._obs["legal_actions"][cur_player] = legal_actions
info_state = rl_environment.TimeStep(
observations=self._obs, rewards=None, discounts=None, step_type=None)
with self._policies[cur_player].temp_mode_as(self._mode):
p = self._policies[cur_player].step(info_state, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
def neural_ficticious_self_play(seq_game,
num_epoch,
sess,
compute_metrics=False):
env = rl_environment.Environment(seq_game)
# Parameters from the game.
num_players = env.num_players
num_actions = env.action_spec()["num_actions"]
info_state_size = env.observation_spec()["info_state"][0]
# Parameters for the algorithm.
hidden_layers_sizes = [int(l) for l in [128]]
kwargs = {
"replay_buffer_capacity": int(2e5),
"reservoir_buffer_capacity": int(2e6),
"min_buffer_size_to_learn": 1000,
"anticipatory_param": 0.1,
"batch_size": 128,
"learn_every": 64,
"rl_learning_rate": 0.01,
"sl_learning_rate": 0.01,
"optimizer_str": "sgd",
"loss_str": "mse",
"update_target_network_every": 19200,
"discount_factor": 1.0,
"epsilon_decay_duration": int(20e6),
"epsilon_start": 0.06,
"epsilon_end": 0.001,
}
# freq_epoch_printing = num_epoch // 10
agents = [
nfsp.NFSP(sess, idx, info_state_size, num_actions, hidden_layers_sizes,
**kwargs) for idx in range(num_players)
]
joint_avg_policy = NFSPPolicies(env, agents, nfsp.MODE.average_policy)
sess.run(tf.global_variables_initializer())
# print("TF initialized.")
tick_time = time.time()
for _ in range(num_epoch):
# if ep % freq_epoch_printing == 0:
# print(f"Iteration {ep}")
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
timing = time.time() - tick_time
# print("Finish.")
if compute_metrics:
tabular_policy = joint_avg_policy.TabularPolicy(seq_game)
average_policy_values = expected_game_score.policy_value(
seq_game.new_initial_state(), [tabular_policy])
nash_conv = exploitability.nash_conv(env.game, joint_avg_policy)
return timing, joint_avg_policy, average_policy_values, nash_conv
return timing, joint_avg_policy
def mean_field_uniform_policy(mfg_game,
number_of_iterations,
compute_metrics=False):
del number_of_iterations
uniform_policy = policy_module.UniformRandomPolicy(mfg_game)
if compute_metrics:
distribution_mfg = distribution_module.DistributionPolicy(
mfg_game, uniform_policy)
policy_value_ = policy_value.PolicyValue(mfg_game, distribution_mfg,
uniform_policy).value(
mfg_game.new_initial_state())
return uniform_policy, policy_value_
return uniform_policy
def mean_field_fictitious_play(mfg_game,
number_of_iterations,
compute_metrics=False):
fp = mean_field_fictitious_play_module.FictitiousPlay(mfg_game)
tick_time = time.time()
for _ in range(number_of_iterations):
fp.iteration()
timing = time.time() - tick_time
fp_policy = fp.get_policy()
# print('learning done')
if compute_metrics:
distribution_mfg = distribution_module.DistributionPolicy(
mfg_game, fp_policy)
# print('distribution done')
policy_value_ = policy_value.PolicyValue(mfg_game, distribution_mfg,
fp_policy).value(
mfg_game.new_initial_state())
nash_conv_fp = nash_conv_module.NashConv(mfg_game, fp_policy)
return timing, fp_policy, nash_conv_fp, policy_value_
return timing, fp_policy
def online_mirror_descent(mfg_game,
number_of_iterations,
compute_metrics=False,
return_policy=False,
md_p=None):
md = md_p if md_p else mirror_descent.MirrorDescent(mfg_game)
tick_time = time.time()
for _ in range(number_of_iterations):
md.iteration()
timing = time.time() - tick_time
md_policy = md.get_policy()
if compute_metrics:
distribution_mfg = distribution_module.DistributionPolicy(
mfg_game, md_policy)
# print('distribution done')
policy_value_ = policy_value.PolicyValue(mfg_game, distribution_mfg,
md_policy).value(
mfg_game.new_initial_state())
nash_conv_md = nash_conv_module.NashConv(mfg_game, md_policy)
if return_policy:
return timing, md_policy, nash_conv_md, policy_value_, md
return timing, md_policy, nash_conv_md, policy_value_
return timing, md_policy
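# A minimal usage sketch for online_mirror_descent above. The mean field game is
# assumed to come from create_games, and the iteration count is illustrative.
def _online_mirror_descent_example(mfg_game):
  timing, md_policy, nash_conv_md, policy_value_ = online_mirror_descent(
      mfg_game, number_of_iterations=5, compute_metrics=True)
  return timing, md_policy, nash_conv_md.nash_conv(), policy_value_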
class RandomPolicyDeviation:
def __init__(self):
self.policy_deviation = {}
def get_policy_deviation(self, state, player_id):
key = (str(state), player_id)
if key not in self.policy_deviation:
assert player_id == state.current_player()
action_probability = [random.random() for a in state.legal_actions()]
self.policy_deviation[key] = [
x / sum(action_probability) for x in action_probability
]
return self.policy_deviation[key]
def get_results_n_player_sequential_game(seq_game, policy):
state = seq_game.new_initial_state()
while not state.is_terminal():
legal_actions = state.legal_actions()
if state.is_chance_node():
outcomes_with_probs = state.chance_outcomes()
action_list, prob_list = zip(*outcomes_with_probs)
action = np.random.choice(action_list, p=prob_list)
else:
state_policy = policy(state)
assert set(state_policy) == set(legal_actions)
action = random.choices(legal_actions,
[state_policy[a] for a in legal_actions])
assert len(action) == 1
action = action[0]
state.apply_action(action)
return state.returns()
def get_list_results_n_player_game(seq_game, policy, num_sample=10):
return [
get_results_n_player_sequential_game(seq_game, policy)
for _ in range(num_sample)
]
def get_average_results_n_player_game(seq_game, policy, num_sample=10):
result_array = get_list_results_n_player_game(seq_game, policy, num_sample)
return sum([sum(i) / len(i) for i in zip(*result_array)]) / len(result_array)
def get_results_n_player_simultaneous_game(game, policy):
state = game.new_initial_state()
i = 0
while not state.is_terminal():
i += 1
if state.is_chance_node():
# Sample a chance event outcome.
outcomes_with_probs = state.chance_outcomes()
action_list, prob_list = zip(*outcomes_with_probs)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
elif state.is_simultaneous_node():
# Simultaneous node: sample actions for all players.
chosen_actions = []
for i in range(game.num_players()):
legal_actions = state.legal_actions(i)
state_policy = policy(state, player_id=i)
assert abs(sum([state_policy[a] for a in legal_actions]) - 1) < 1e-4
chosen_actions.append(
random.choices(legal_actions,
[state_policy[a] for a in legal_actions])[0])
state.apply_actions(chosen_actions)
else:
raise ValueError(
"State should either be simultaneous node or change node.")
return state.returns()
def get_list_results_n_player_simulataneous_game(game, policy, num_sample=10):
return [
get_results_n_player_simultaneous_game(game, policy)
for _ in range(num_sample)
]
def get_expected_value(seq_game, policy, num_sample, player=0):
results = get_list_results_n_player_game(
seq_game, policy, num_sample=num_sample)
expected_value = sum(x[player] for x in results) / num_sample
# num_vehicle = len(results[0])
# error_bar = abs(sum([x[1] for x in results]) - sum(
# [x[2] for x in results])) / num_sample_trajectories
# expected_value_policy = sum(sum(x[i] for x in results) for i in range(
# 1, BRAESS_NUM_VEHICLES)) / ((BRAESS_NUM_VEHICLES-1)*num_sample_trajectories)
return expected_value
def compute_regret_policy(game,
policy,
num_random_policy_tested=10,
num_sample=100):
time_tick = time.time()
expected_value_policy = get_expected_value(game, policy, num_sample)
worse_regret = 0
for _ in range(num_random_policy_tested):
noisy_n_policy = noisy_policy.NoisyPolicy(policy, player_id=0, alpha=1)
expected_value_noise = get_expected_value(
game, noisy_n_policy, num_sample, player=0)
approximate_regret = expected_value_noise - expected_value_policy
worse_regret = max(worse_regret, approximate_regret)
return worse_regret, time.time() - time_tick
def get_expected_value_sim_game(game, policy, num_sample, player=0):
results = get_list_results_n_player_simulataneous_game(
game, policy, num_sample=num_sample)
assert len(results) == num_sample
expected_value = sum(x[player] for x in results) / num_sample
# num_vehicle = len(results[0])
# error_bar = abs(sum([x[1] for x in results]) - sum(
# [x[2] for x in results])) / num_sample_trajectories
# expected_value_policy = sum(sum(x[i] for x in results) for i in range(
# 1, BRAESS_NUM_VEHICLES)) / ((BRAESS_NUM_VEHICLES-1)*num_sample_trajectories)
return expected_value
def compute_regret_policy_random_noise_sim_game(game,
policy,
num_random_policy_tested=10,
num_sample=100):
time_tick = time.time()
expected_value_policy = get_expected_value_sim_game(game, policy, num_sample)
worse_regret = 0
for _ in range(num_random_policy_tested):
noisy_n_policy = noisy_policy.NoisyPolicy(policy, player_id=0, alpha=1)
expected_value_noise = get_expected_value_sim_game(
game, noisy_n_policy, num_sample, player=0)
approximate_regret = expected_value_noise - expected_value_policy
worse_regret = max(worse_regret, approximate_regret)
return worse_regret, time.time() - time_tick
class PurePolicyResponse(policy_module.Policy):
def __init__(self, game, policy, player_id):
self.game = game
self.player_id = player_id
self.policy = policy
def pure_action(self, state):
raise NotImplementedError()
def action_probabilities(self, state, player_id=None):
assert player_id is not None
if player_id == self.player_id:
legal_actions = state.legal_actions(self.player_id)
if not legal_actions:
return {0: 1.0}
if len(legal_actions) == 1:
return {legal_actions[0]: 1.0}
answer = {action: 0.0 for action in legal_actions}
pure_a = self.pure_action(state)
assert pure_a in answer
answer[pure_a] = 1.0
return answer
return self.policy.action_probabilities(state, player_id)
class PathBCEResponse(PurePolicyResponse):
def pure_action(self, state):
location = state.get_current_vehicle_locations()[self.player_id].split(
"->")[1]
if location == "B":
return state.get_game().network.get_action_id_from_movement("B", "C")
if location == "C":
return state.get_game().network.get_action_id_from_movement("C", "E")
return 0
class PathBCDEResponse(PurePolicyResponse):
def pure_action(self, state):
location = state.get_current_vehicle_locations()[self.player_id].split(
"->")[1]
if location == "B":
return state.get_game().network.get_action_id_from_movement("B", "C")
if location == "C":
return state.get_game().network.get_action_id_from_movement("C", "D")
return 0
class PathBDEResponse(PurePolicyResponse):
def pure_action(self, state):
location = state.get_current_vehicle_locations()[self.player_id].split(
"->")[1]
if location == "B":
return state.get_game().network.get_action_id_from_movement("B", "D")
return 0
def compute_regret_policy_against_pure_policy_sim_game(game,
policy,
compute_true_value=False,
num_sample=100):
time_tick = time.time()
if compute_true_value:
expected_value_policy = expected_game_score.policy_value(
game.new_initial_state(), policy)[0]
else:
expected_value_policy = get_expected_value_sim_game(game, policy,
num_sample)
worse_regret = 0
policies = [
PathBCEResponse(game, policy, 0),
PathBCDEResponse(game, policy, 0),
PathBDEResponse(game, policy, 0)
]
for deviation_policy in policies:
if compute_true_value:
expected_value_noise = expected_game_score.policy_value(
game.new_initial_state(), deviation_policy)[0]
else:
expected_value_noise = get_expected_value_sim_game(
game, deviation_policy, num_sample, player=0)
approximate_regret = expected_value_noise - expected_value_policy
worse_regret = max(worse_regret, approximate_regret)
return worse_regret, time.time() - time_tick
def online_mirror_descent_sioux_falls(mfg_game,
number_of_iterations,
md_p=None):
nash_conv_dict = {}
md = md_p if md_p else mirror_descent.MirrorDescent(mfg_game)
tick_time = time.time()
for i in range(number_of_iterations):
md.iteration()
md_policy = md.get_policy()
nash_conv_md = nash_conv_module.NashConv(mfg_game, md_policy)
nash_conv_dict[i] = nash_conv_md.nash_conv()
print((f"Iteration {i}, Nash conv: {nash_conv_md.nash_conv()}, "
"time: {time.time() - tick_time}"))
timing = time.time() - tick_time
md_policy = md.get_policy()
distribution_mfg = distribution_module.DistributionPolicy(mfg_game, md_policy)
policy_value_ = policy_value.PolicyValue(
mfg_game, distribution_mfg, md_policy).value(mfg_game.new_initial_state())
nash_conv_md = nash_conv_module.NashConv(mfg_game, md_policy)
return timing, md_policy, nash_conv_md, policy_value_, md, nash_conv_dict
|
|
import logging
import os
from datetime import date, datetime
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.finders import find
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files.base import File
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import Count, Sum
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language, pgettext_lazy
from treebeard.mp_tree import MP_Node
from oscar.core.decorators import deprecated
from oscar.core.loading import get_class, get_classes, get_model
from oscar.core.utils import slugify
from oscar.core.validators import non_python_keyword
from oscar.models.fields import AutoSlugField, NullCharField
from oscar.models.fields.slugfield import SlugField
ProductManager, BrowsableProductManager = get_classes(
'catalogue.managers', ['ProductManager', 'BrowsableProductManager'])
ProductAttributesContainer = get_class(
'catalogue.product_attributes', 'ProductAttributesContainer')
Selector = get_class('partner.strategy', 'Selector')
@python_2_unicode_compatible
class AbstractProductClass(models.Model):
"""
Used for defining options and attributes for a subset of products.
E.g. Books, DVDs and Toys. A product can only belong to one product class.
At least one product class must be created when setting up a new
Oscar deployment.
Not necessarily equivalent to top-level categories but usually will be.
"""
name = models.CharField(_('Name'), max_length=128)
slug = AutoSlugField(_('Slug'), max_length=128, unique=True,
populate_from='name')
    #: Some product types don't require shipping (e.g. digital products) - we use
#: this field to take some shortcuts in the checkout.
requires_shipping = models.BooleanField(_("Requires shipping?"),
default=True)
#: Digital products generally don't require their stock levels to be
#: tracked.
track_stock = models.BooleanField(_("Track stock levels?"), default=True)
#: These are the options (set by the user when they add to basket) for this
#: item class. For instance, a product class of "SMS message" would always
#: require a message to be specified before it could be bought.
#: Note that you can also set options on a per-product level.
options = models.ManyToManyField(
'catalogue.Option', blank=True, verbose_name=_("Options"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['name']
verbose_name = _("Product class")
verbose_name_plural = _("Product classes")
def __str__(self):
return self.name
@property
def has_attributes(self):
return self.attributes.exists()
@python_2_unicode_compatible
class AbstractCategory(MP_Node):
"""
A product category. Merely used for navigational purposes; has no
    effect on business logic.
Uses django-treebeard.
"""
name = models.CharField(_('Name'), max_length=255, db_index=True)
description = models.TextField(_('Description'), blank=True)
image = models.ImageField(_('Image'), upload_to='categories', blank=True,
null=True, max_length=255)
slug = SlugField(_('Slug'), max_length=255, db_index=True)
_slug_separator = '/'
_full_name_separator = ' > '
def __str__(self):
return self.full_name
@property
def full_name(self):
"""
        Returns a string representation of the category and its ancestors,
e.g. 'Books > Non-fiction > Essential programming'.
It's rarely used in Oscar's codebase, but used to be stored as a
CharField and is hence kept for backwards compatibility. It's also
sufficiently useful to keep around.
"""
names = [category.name for category in self.get_ancestors_and_self()]
return self._full_name_separator.join(names)
@property
def full_slug(self):
"""
Returns a string of this category's slug concatenated with the slugs
        of its ancestors, e.g. 'books/non-fiction/essential-programming'.
Oscar used to store this as in the 'slug' model field, but this field
has been re-purposed to only store this category's slug and to not
        include its ancestors' slugs.
"""
slugs = [category.slug for category in self.get_ancestors_and_self()]
return self._slug_separator.join(slugs)
def generate_slug(self):
"""
Generates a slug for a category. This makes no attempt at generating
a unique slug.
"""
return slugify(self.name)
def ensure_slug_uniqueness(self):
"""
        Ensures that the category's slug is unique amongst its siblings.
This is inefficient and probably not thread-safe.
"""
unique_slug = self.slug
siblings = self.get_siblings().exclude(pk=self.pk)
next_num = 2
while siblings.filter(slug=unique_slug).exists():
unique_slug = '{slug}_{end}'.format(slug=self.slug, end=next_num)
next_num += 1
if unique_slug != self.slug:
self.slug = unique_slug
self.save()
def save(self, *args, **kwargs):
"""
Oscar traditionally auto-generated slugs from names. As that is
often convenient, we still do so if a slug is not supplied through
other means. If you want to control slug creation, just create
instances with a slug already set, or expose a field on the
appropriate forms.
"""
if self.slug:
# Slug was supplied. Hands off!
super(AbstractCategory, self).save(*args, **kwargs)
else:
self.slug = self.generate_slug()
super(AbstractCategory, self).save(*args, **kwargs)
# We auto-generated a slug, so we need to make sure that it's
# unique. As we need to be able to inspect the category's siblings
# for that, we need to wait until the instance is saved. We
# update the slug and save again if necessary.
self.ensure_slug_uniqueness()
def get_ancestors_and_self(self):
"""
Gets ancestors and includes itself. Use treebeard's get_ancestors
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_ancestors()) + [self]
def get_descendants_and_self(self):
"""
Gets descendants and includes itself. Use treebeard's get_descendants
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_descendants()) + [self]
def get_absolute_url(self):
"""
Our URL scheme means we have to look up the category's ancestors. As
that is a bit more expensive, we cache the generated URL. That is
safe even for a stale cache, as the default implementation of
ProductCategoryView does the lookup via primary key anyway. But if
you change that logic, you'll have to reconsider the caching
approach.
"""
current_locale = get_language()
cache_key = 'CATEGORY_URL_%s_%s' % (current_locale, self.pk)
url = cache.get(cache_key)
if not url:
url = reverse(
'catalogue:category',
kwargs={'category_slug': self.full_slug, 'pk': self.pk})
cache.set(cache_key, url)
return url
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['path']
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def has_children(self):
return self.get_num_children() > 0
def get_num_children(self):
return self.get_children().count()
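# Illustrative sketch (not part of Oscar): the slug de-duplication strategy used
# by AbstractCategory.ensure_slug_uniqueness() above, expressed over a plain
# list of sibling slugs. The helper name and signature are hypothetical.
def _example_unique_slug(slug, sibling_slugs):
    """Return `slug`, or 'slug_2', 'slug_3', ... until there is no clash."""
    unique_slug = slug
    next_num = 2
    while unique_slug in sibling_slugs:
        unique_slug = '{slug}_{end}'.format(slug=slug, end=next_num)
        next_num += 1
    return unique_slug
# e.g. _example_unique_slug('books', ['books', 'books_2']) == 'books_3'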
@python_2_unicode_compatible
class AbstractProductCategory(models.Model):
"""
Joining model between products and categories. Exists to allow customising.
"""
product = models.ForeignKey('catalogue.Product', verbose_name=_("Product"))
category = models.ForeignKey('catalogue.Category',
verbose_name=_("Category"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['product', 'category']
unique_together = ('product', 'category')
verbose_name = _('Product category')
verbose_name_plural = _('Product categories')
def __str__(self):
return u"<productcategory for product '%s'>" % self.product
@python_2_unicode_compatible
class AbstractProduct(models.Model):
"""
The base product object
    There are three kinds of products; they are distinguished by the structure
    field.
    - A stand-alone product. A regular product that lives by itself.
- A child product. All child products have a parent product. They're a
specific version of the parent.
- A parent product. It essentially represents a set of products.
An example could be a yoga course, which is a parent product. The different
times/locations of the courses would be associated with the child products.
"""
STANDALONE, PARENT, CHILD = 'standalone', 'parent', 'child'
STRUCTURE_CHOICES = (
(STANDALONE, _('Stand-alone product')),
(PARENT, _('Parent product')),
(CHILD, _('Child product'))
)
structure = models.CharField(
_("Product structure"), max_length=10, choices=STRUCTURE_CHOICES,
default=STANDALONE)
upc = NullCharField(
_("UPC"), max_length=64, blank=True, null=True, unique=True,
help_text=_("Universal Product Code (UPC) is an identifier for "
"a product which is not specific to a particular "
" supplier. Eg an ISBN for a book."))
parent = models.ForeignKey(
'self', null=True, blank=True, related_name='children',
verbose_name=_("Parent product"),
help_text=_("Only choose a parent product if you're creating a child "
"product. For example if this is a size "
"4 of a particular t-shirt. Leave blank if this is a "
"stand-alone product (i.e. there is only one version of"
" this product)."))
# Title is mandatory for canonical products but optional for child products
title = models.CharField(pgettext_lazy(u'Product title', u'Title'),
max_length=255, blank=True)
slug = models.SlugField(_('Slug'), max_length=255, unique=False)
description = models.TextField(_('Description'), blank=True)
#: "Kind" of product, e.g. T-Shirt, Book, etc.
#: None for child products, they inherit their parent's product class
product_class = models.ForeignKey(
'catalogue.ProductClass', null=True, blank=True, on_delete=models.PROTECT,
verbose_name=_('Product type'), related_name="products",
help_text=_("Choose what type of product this is"))
attributes = models.ManyToManyField(
'catalogue.ProductAttribute',
through='ProductAttributeValue',
verbose_name=_("Attributes"),
help_text=_("A product attribute is something that this product may "
"have, such as a size, as specified by its class"))
#: It's possible to have options product class-wide, and per product.
product_options = models.ManyToManyField(
'catalogue.Option', blank=True, verbose_name=_("Product options"),
help_text=_("Options are values that can be associated with a item "
"when it is added to a customer's basket. This could be "
"something like a personalised message to be printed on "
"a T-shirt."))
recommended_products = models.ManyToManyField(
'catalogue.Product', through='ProductRecommendation', blank=True,
verbose_name=_("Recommended products"),
help_text=_("These are products that are recommended to accompany the "
"main product."))
# Denormalised product rating - used by reviews app.
# Product has no ratings if rating is None
rating = models.FloatField(_('Rating'), null=True, editable=False)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
# This field is used by Haystack to reindex search
date_updated = models.DateTimeField(
_("Date updated"), auto_now=True, db_index=True)
categories = models.ManyToManyField(
'catalogue.Category', through='ProductCategory',
verbose_name=_("Categories"))
#: Determines if a product may be used in an offer. It is illegal to
#: discount some types of product (e.g. ebooks) and this field helps
    #: merchants avoid discounting such products.
#: Note that this flag is ignored for child products; they inherit from
#: the parent product.
is_discountable = models.BooleanField(
_("Is discountable?"), default=True, help_text=_(
"This flag indicates if this product can be used in an offer "
"or not"))
objects = ProductManager()
browsable = BrowsableProductManager()
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['-date_created']
verbose_name = _('Product')
verbose_name_plural = _('Products')
def __init__(self, *args, **kwargs):
super(AbstractProduct, self).__init__(*args, **kwargs)
self.attr = ProductAttributesContainer(product=self)
def __str__(self):
if self.title:
return self.title
if self.attribute_summary:
return u"%s (%s)" % (self.get_title(), self.attribute_summary)
else:
return self.get_title()
def get_absolute_url(self):
"""
Return a product's absolute url
"""
return reverse('catalogue:detail',
kwargs={'product_slug': self.slug, 'pk': self.id})
def clean(self):
"""
        Validate a product. These are the rules:
+---------------+-------------+--------------+--------------+
| | stand alone | parent | child |
+---------------+-------------+--------------+--------------+
| title | required | required | optional |
+---------------+-------------+--------------+--------------+
| product class | required | required | must be None |
+---------------+-------------+--------------+--------------+
| parent | forbidden | forbidden | required |
+---------------+-------------+--------------+--------------+
| stockrecords | 0 or more | forbidden | 0 or more |
+---------------+-------------+--------------+--------------+
| categories | 1 or more | 1 or more | forbidden |
+---------------+-------------+--------------+--------------+
| attributes | optional | optional | optional |
+---------------+-------------+--------------+--------------+
| rec. products | optional | optional | unsupported |
+---------------+-------------+--------------+--------------+
| options | optional | optional | forbidden |
+---------------+-------------+--------------+--------------+
Because the validation logic is quite complex, validation is delegated
to the sub method appropriate for the product's structure.
"""
getattr(self, '_clean_%s' % self.structure)()
if not self.is_parent:
self.attr.validate_attributes()
def _clean_standalone(self):
"""
Validates a stand-alone product
"""
if not self.title:
raise ValidationError(_("Your product must have a title."))
if not self.product_class:
raise ValidationError(_("Your product must have a product class."))
if self.parent_id:
raise ValidationError(_("Only child products can have a parent."))
def _clean_child(self):
"""
Validates a child product
"""
if not self.parent_id:
raise ValidationError(_("A child product needs a parent."))
if self.parent_id and not self.parent.is_parent:
raise ValidationError(
_("You can only assign child products to parent products."))
if self.product_class:
raise ValidationError(
_("A child product can't have a product class."))
if self.pk and self.categories.exists():
raise ValidationError(
_("A child product can't have a category assigned."))
# Note that we only forbid options on product level
if self.pk and self.product_options.exists():
raise ValidationError(
_("A child product can't have options."))
def _clean_parent(self):
"""
Validates a parent product.
"""
self._clean_standalone()
if self.has_stockrecords:
raise ValidationError(
_("A parent product can't have stockrecords."))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.get_title())
super(AbstractProduct, self).save(*args, **kwargs)
self.attr.save()
# Properties
@property
def is_standalone(self):
return self.structure == self.STANDALONE
@property
def is_parent(self):
return self.structure == self.PARENT
@property
def is_child(self):
return self.structure == self.CHILD
def can_be_parent(self, give_reason=False):
"""
        Helps decide if the product can be turned into a parent product.
"""
reason = None
if self.is_child:
reason = _('The specified parent product is a child product.')
if self.has_stockrecords:
reason = _(
"One can't add a child product to a product with stock"
" records.")
is_valid = reason is None
if give_reason:
return is_valid, reason
else:
return is_valid
@property
def options(self):
"""
Returns a set of all valid options for this product.
It's possible to have options product class-wide, and per product.
"""
pclass_options = self.get_product_class().options.all()
return set(pclass_options) or set(self.product_options.all())
@property
def is_shipping_required(self):
return self.get_product_class().requires_shipping
@property
def has_stockrecords(self):
"""
Test if this product has any stockrecords
"""
return self.stockrecords.exists()
@property
def num_stockrecords(self):
return self.stockrecords.count()
@property
def attribute_summary(self):
"""
Return a string of all of a product's attributes
"""
attributes = self.attribute_values.all()
pairs = [attribute.summary() for attribute in attributes]
return ", ".join(pairs)
# The two properties below are deprecated because determining minimum
# price is not as trivial as it sounds considering multiple stockrecords,
# currencies, tax, etc.
# The current implementation is very naive and only works for a limited
# set of use cases.
# At the very least, we should pass in the request and
# user. Hence, it's best done as an extension to a Strategy class.
# Once that is accomplished, these properties should be removed.
@property
@deprecated
def min_child_price_incl_tax(self):
"""
Return minimum child product price including tax.
"""
return self._min_child_price('incl_tax')
@property
@deprecated
def min_child_price_excl_tax(self):
"""
Return minimum child product price excluding tax.
        This is a very naive approach; see the deprecation notice above, and
        only use it for display purposes (e.g. "new Oscar shirt, prices
starting from $9.50").
"""
return self._min_child_price('excl_tax')
def _min_child_price(self, prop):
"""
Return minimum child product price.
This is for visual purposes only. It ignores currencies, most of the
Strategy logic for selecting stockrecords, knows nothing about the
current user or request, etc. It's only here to ensure
backwards-compatibility; the previous implementation wasn't any
better.
"""
strategy = Selector().strategy()
children_stock = strategy.select_children_stockrecords(self)
prices = [
strategy.pricing_policy(child, stockrecord)
for child, stockrecord in children_stock]
raw_prices = sorted([getattr(price, prop) for price in prices])
return raw_prices[0] if raw_prices else None
# Wrappers for child products
def get_title(self):
"""
        Return a product's title, or its parent's title if it has no title
"""
title = self.title
if not title and self.parent_id:
title = self.parent.title
return title
get_title.short_description = pgettext_lazy(u"Product title", u"Title")
def get_product_class(self):
"""
Return a product's item class. Child products inherit their parent's.
"""
if self.is_child:
return self.parent.product_class
else:
return self.product_class
get_product_class.short_description = _("Product class")
def get_is_discountable(self):
"""
At the moment, is_discountable can't be set individually for child
products; they inherit it from their parent.
"""
if self.is_child:
return self.parent.is_discountable
else:
return self.is_discountable
def get_categories(self):
"""
Return a product's categories or parent's if there is a parent product.
"""
if self.is_child:
return self.parent.categories
else:
return self.categories
get_categories.short_description = _("Categories")
# Images
def get_missing_image(self):
"""
Returns a missing image object.
"""
# This class should have a 'name' property so it mimics the Django file
# field.
return MissingProductImage()
def primary_image(self):
"""
Returns the primary image for a product. Usually used when one can
only display one product image, e.g. in a list of products.
"""
images = self.images.all()
ordering = self.images.model.Meta.ordering
if not ordering or ordering[0] != 'display_order':
# Only apply order_by() if a custom model doesn't use default
# ordering. Applying order_by() busts the prefetch cache of
# the ProductManager
images = images.order_by('display_order')
try:
return images[0]
except IndexError:
if self.is_child:
# By default, Oscar's dashboard doesn't support child images.
                # We just serve the parent's image instead.
                return self.parent.primary_image()
else:
# We return a dict with fields that mirror the key properties of
# the ProductImage class so this missing image can be used
# interchangeably in templates. Strategy pattern ftw!
return {
'original': self.get_missing_image(),
'caption': '',
'is_missing': True}
# Updating methods
def update_rating(self):
"""
Recalculate rating field
"""
self.rating = self.calculate_rating()
self.save()
update_rating.alters_data = True
def calculate_rating(self):
"""
Calculate rating value
"""
result = self.reviews.filter(
status=self.reviews.model.APPROVED
).aggregate(
sum=Sum('score'), count=Count('id'))
reviews_sum = result['sum'] or 0
reviews_count = result['count'] or 0
rating = None
if reviews_count > 0:
rating = float(reviews_sum) / reviews_count
return rating
def has_review_by(self, user):
if user.is_anonymous():
return False
return self.reviews.filter(user=user).exists()
def is_review_permitted(self, user):
"""
Determines whether a user may add a review on this product.
Default implementation respects OSCAR_ALLOW_ANON_REVIEWS and only
allows leaving one review per user and product.
Override this if you want to alter the default behaviour; e.g. enforce
that a user purchased the product to be allowed to leave a review.
"""
if user.is_authenticated() or settings.OSCAR_ALLOW_ANON_REVIEWS:
return not self.has_review_by(user)
else:
return False
@cached_property
def num_approved_reviews(self):
return self.reviews.approved().count()
class AbstractProductRecommendation(models.Model):
"""
'Through' model for product recommendations
"""
primary = models.ForeignKey(
'catalogue.Product', related_name='primary_recommendations',
verbose_name=_("Primary product"))
recommendation = models.ForeignKey(
'catalogue.Product', verbose_name=_("Recommended product"))
ranking = models.PositiveSmallIntegerField(
_('Ranking'), default=0,
help_text=_('Determines order of the products. A product with a higher'
' value will appear before one with a lower ranking.'))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['primary', '-ranking']
unique_together = ('primary', 'recommendation')
verbose_name = _('Product recommendation')
        verbose_name_plural = _('Product recommendations')
@python_2_unicode_compatible
class AbstractProductAttribute(models.Model):
"""
Defines an attribute for a product class. (For example, number_of_pages for
a 'book' class)
"""
product_class = models.ForeignKey(
'catalogue.ProductClass', related_name='attributes', blank=True,
null=True, verbose_name=_("Product type"))
name = models.CharField(_('Name'), max_length=128)
code = models.SlugField(
_('Code'), max_length=128,
validators=[
RegexValidator(
regex=r'^[a-zA-Z_][0-9a-zA-Z_]*$',
message=_(
"Code can only contain the letters a-z, A-Z, digits, "
"and underscores, and can't start with a digit.")),
non_python_keyword
])
# Attribute types
TEXT = "text"
INTEGER = "integer"
BOOLEAN = "boolean"
FLOAT = "float"
RICHTEXT = "richtext"
DATE = "date"
OPTION = "option"
ENTITY = "entity"
FILE = "file"
IMAGE = "image"
TYPE_CHOICES = (
(TEXT, _("Text")),
(INTEGER, _("Integer")),
(BOOLEAN, _("True / False")),
(FLOAT, _("Float")),
(RICHTEXT, _("Rich Text")),
(DATE, _("Date")),
(OPTION, _("Option")),
(ENTITY, _("Entity")),
(FILE, _("File")),
(IMAGE, _("Image")),
)
type = models.CharField(
choices=TYPE_CHOICES, default=TYPE_CHOICES[0][0],
max_length=20, verbose_name=_("Type"))
option_group = models.ForeignKey(
'catalogue.AttributeOptionGroup', blank=True, null=True,
verbose_name=_("Option Group"),
help_text=_('Select an option group if using type "Option"'))
required = models.BooleanField(_('Required'), default=False)
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['code']
verbose_name = _('Product attribute')
verbose_name_plural = _('Product attributes')
@property
def is_option(self):
return self.type == self.OPTION
@property
def is_file(self):
return self.type in [self.FILE, self.IMAGE]
def __str__(self):
return self.name
def save_value(self, product, value):
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
try:
value_obj = product.attribute_values.get(attribute=self)
except ProductAttributeValue.DoesNotExist:
            # FileField uses False to announce deletion of the file,
            # not the creation of a new value
delete_file = self.is_file and value is False
if value is None or value == '' or delete_file:
return
value_obj = ProductAttributeValue.objects.create(
product=product, attribute=self)
if self.is_file:
# File fields in Django are treated differently, see
# django.db.models.fields.FileField and method save_form_data
if value is None:
# No change
return
elif value is False:
# Delete file
value_obj.delete()
else:
# New uploaded file
value_obj.value = value
value_obj.save()
else:
if value is None or value == '':
value_obj.delete()
return
if value != value_obj.value:
value_obj.value = value
value_obj.save()
def validate_value(self, value):
validator = getattr(self, '_validate_%s' % self.type)
validator(value)
# Validators
def _validate_text(self, value):
if not isinstance(value, six.string_types):
raise ValidationError(_("Must be str or unicode"))
_validate_richtext = _validate_text
def _validate_float(self, value):
try:
float(value)
except ValueError:
raise ValidationError(_("Must be a float"))
def _validate_integer(self, value):
try:
int(value)
except ValueError:
raise ValidationError(_("Must be an integer"))
def _validate_date(self, value):
if not (isinstance(value, datetime) or isinstance(value, date)):
raise ValidationError(_("Must be a date or datetime"))
def _validate_boolean(self, value):
if not type(value) == bool:
raise ValidationError(_("Must be a boolean"))
def _validate_entity(self, value):
if not isinstance(value, models.Model):
raise ValidationError(_("Must be a model instance"))
def _validate_option(self, value):
if not isinstance(value, get_model('catalogue', 'AttributeOption')):
raise ValidationError(
_("Must be an AttributeOption model object instance"))
if not value.pk:
raise ValidationError(_("AttributeOption has not been saved yet"))
valid_values = self.option_group.options.values_list(
'option', flat=True)
if value.option not in valid_values:
raise ValidationError(
_("%(enum)s is not a valid choice for %(attr)s") %
{'enum': value, 'attr': self})
def _validate_file(self, value):
if value and not isinstance(value, File):
raise ValidationError(_("Must be a file field"))
_validate_image = _validate_file
@python_2_unicode_compatible
class AbstractProductAttributeValue(models.Model):
"""
The "through" model for the m2m relationship between catalogue.Product and
catalogue.ProductAttribute. This specifies the value of the attribute for
    a particular product.
For example: number_of_pages = 295
"""
attribute = models.ForeignKey(
'catalogue.ProductAttribute', verbose_name=_("Attribute"))
product = models.ForeignKey(
'catalogue.Product', related_name='attribute_values',
verbose_name=_("Product"))
value_text = models.TextField(_('Text'), blank=True, null=True)
value_integer = models.IntegerField(_('Integer'), blank=True, null=True)
value_boolean = models.NullBooleanField(_('Boolean'), blank=True)
value_float = models.FloatField(_('Float'), blank=True, null=True)
value_richtext = models.TextField(_('Richtext'), blank=True, null=True)
value_date = models.DateField(_('Date'), blank=True, null=True)
value_option = models.ForeignKey(
'catalogue.AttributeOption', blank=True, null=True,
verbose_name=_("Value option"))
value_file = models.FileField(
upload_to=settings.OSCAR_IMAGE_FOLDER, max_length=255,
blank=True, null=True)
value_image = models.ImageField(
upload_to=settings.OSCAR_IMAGE_FOLDER, max_length=255,
blank=True, null=True)
value_entity = GenericForeignKey(
'entity_content_type', 'entity_object_id')
entity_content_type = models.ForeignKey(
ContentType, null=True, blank=True, editable=False)
entity_object_id = models.PositiveIntegerField(
null=True, blank=True, editable=False)
def _get_value(self):
return getattr(self, 'value_%s' % self.attribute.type)
def _set_value(self, new_value):
if self.attribute.is_option and isinstance(new_value, six.string_types):
# Need to look up instance of AttributeOption
new_value = self.attribute.option_group.options.get(
option=new_value)
setattr(self, 'value_%s' % self.attribute.type, new_value)
value = property(_get_value, _set_value)
class Meta:
abstract = True
app_label = 'catalogue'
unique_together = ('attribute', 'product')
verbose_name = _('Product attribute value')
verbose_name_plural = _('Product attribute values')
def __str__(self):
return self.summary()
def summary(self):
"""
        Gets a string representation of both the attribute and its value,
        used e.g. in product summaries.
"""
return u"%s: %s" % (self.attribute.name, self.value_as_text)
@property
def value_as_text(self):
"""
Returns a string representation of the attribute's value. To customise
e.g. image attribute values, declare a _image_as_text property and
return something appropriate.
"""
property_name = '_%s_as_text' % self.attribute.type
return getattr(self, property_name, self.value)
@property
def _richtext_as_text(self):
return strip_tags(self.value)
@property
def _entity_as_text(self):
"""
Returns the unicode representation of the related model. You likely
want to customise this (and maybe _entity_as_html) if you use entities.
"""
return six.text_type(self.value)
@property
def value_as_html(self):
"""
        Returns an HTML representation of the attribute's value. To customise
e.g. image attribute values, declare a _image_as_html property and
return e.g. an <img> tag. Defaults to the _as_text representation.
"""
property_name = '_%s_as_html' % self.attribute.type
return getattr(self, property_name, self.value_as_text)
@property
def _richtext_as_html(self):
return mark_safe(self.value)
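# Illustrative sketch (not part of Oscar): the getattr-based fallback used by
# value_as_text/value_as_html above. A per-type '_<type>_as_text' hook is looked
# up on the instance; if it is missing, the raw value is returned unchanged.
# The class below is hypothetical and only mirrors the dispatch pattern.
class _ExampleAttributeValue(object):
    def __init__(self, attr_type, value):
        self.attr_type = attr_type
        self.value = value
    @property
    def _richtext_as_text(self):
        return strip_tags(self.value)
    @property
    def value_as_text(self):
        return getattr(self, '_%s_as_text' % self.attr_type, self.value)
# _ExampleAttributeValue('richtext', '<p>Hi</p>').value_as_text == 'Hi'
# _ExampleAttributeValue('text', 'Hi').value_as_text == 'Hi'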
@python_2_unicode_compatible
class AbstractAttributeOptionGroup(models.Model):
"""
Defines a group of options that collectively may be used as an
    attribute type.
    For example: Language.
"""
name = models.CharField(_('Name'), max_length=128)
def __str__(self):
return self.name
class Meta:
abstract = True
app_label = 'catalogue'
verbose_name = _('Attribute option group')
verbose_name_plural = _('Attribute option groups')
@property
def option_summary(self):
options = [o.option for o in self.options.all()]
return ", ".join(options)
@python_2_unicode_compatible
class AbstractAttributeOption(models.Model):
"""
    Provides an option within an option group for an attribute type.
    Examples: in a Language group: English, Greek, French.
"""
group = models.ForeignKey(
'catalogue.AttributeOptionGroup', related_name='options',
verbose_name=_("Group"))
option = models.CharField(_('Option'), max_length=255)
def __str__(self):
return self.option
class Meta:
abstract = True
app_label = 'catalogue'
unique_together = ('group', 'option')
verbose_name = _('Attribute option')
verbose_name_plural = _('Attribute options')
@python_2_unicode_compatible
class AbstractOption(models.Model):
"""
An option that can be selected for a particular item when the product
is added to the basket.
For example, a list ID for an SMS message send, or a personalised message
to print on a T-shirt.
This is not the same as an 'attribute' as options do not have a fixed value
    for a particular item. Instead, options need to be specified by a customer
when they add the item to their basket.
"""
name = models.CharField(_("Name"), max_length=128)
code = AutoSlugField(_("Code"), max_length=128, unique=True,
populate_from='name')
REQUIRED, OPTIONAL = ('Required', 'Optional')
TYPE_CHOICES = (
(REQUIRED, _("Required - a value for this option must be specified")),
(OPTIONAL, _("Optional - a value for this option can be omitted")),
)
type = models.CharField(_("Status"), max_length=128, default=REQUIRED,
choices=TYPE_CHOICES)
class Meta:
abstract = True
app_label = 'catalogue'
verbose_name = _("Option")
verbose_name_plural = _("Options")
def __str__(self):
return self.name
@property
def is_required(self):
return self.type == self.REQUIRED
class MissingProductImage(object):
"""
Mimics a Django file field by having a name property.
    sorl-thumbnail requires all its images to be in MEDIA_ROOT. This class
    tries symlinking the default "missing image" image in STATIC_ROOT
    into MEDIA_ROOT for convenience, as that is necessary every time an Oscar
    project is set up. This avoids the less helpful NotFound IOError that would
    be raised when sorl-thumbnail tries to access it.
"""
def __init__(self, name=None):
self.name = name if name else settings.OSCAR_MISSING_IMAGE_URL
media_file_path = os.path.join(settings.MEDIA_ROOT, self.name)
# don't try to symlink if MEDIA_ROOT is not set (e.g. running tests)
if settings.MEDIA_ROOT and not os.path.exists(media_file_path):
self.symlink_missing_image(media_file_path)
def symlink_missing_image(self, media_file_path):
static_file_path = find('oscar/img/%s' % self.name)
if static_file_path is not None:
try:
os.symlink(static_file_path, media_file_path)
except OSError:
raise ImproperlyConfigured((
"Please copy/symlink the "
"'missing image' image at %s into your MEDIA_ROOT at %s. "
"This exception was raised because Oscar was unable to "
"symlink it for you.") % (media_file_path,
settings.MEDIA_ROOT))
else:
logging.info((
"Symlinked the 'missing image' image at %s into your "
"MEDIA_ROOT at %s") % (media_file_path,
settings.MEDIA_ROOT))
@python_2_unicode_compatible
class AbstractProductImage(models.Model):
"""
An image of a product
"""
product = models.ForeignKey(
'catalogue.Product', related_name='images', verbose_name=_("Product"))
original = models.ImageField(
_("Original"), upload_to=settings.OSCAR_IMAGE_FOLDER, max_length=255)
caption = models.CharField(_("Caption"), max_length=200, blank=True)
#: Use display_order to determine which is the "primary" image
display_order = models.PositiveIntegerField(
_("Display order"), default=0,
help_text=_("An image with a display order of zero will be the primary"
" image for a product"))
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
class Meta:
abstract = True
app_label = 'catalogue'
# Any custom models should ensure that this ordering is unchanged, or
# your query count will explode. See AbstractProduct.primary_image.
ordering = ["display_order"]
unique_together = ("product", "display_order")
verbose_name = _('Product image')
verbose_name_plural = _('Product images')
def __str__(self):
return u"Image of '%s'" % self.product
def is_primary(self):
"""
        Return whether the image's display order is 0, i.e. it is the primary image.
"""
return self.display_order == 0
def delete(self, *args, **kwargs):
"""
Always keep the display_order as consecutive integers. This avoids
issue #855.
"""
super(AbstractProductImage, self).delete(*args, **kwargs)
for idx, image in enumerate(self.product.images.all()):
image.display_order = idx
image.save()
|
|
"""
Support for Pioneer Network Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pioneer/
"""
import logging
import telnetlib
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_TIMEOUT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Pioneer AVR'
DEFAULT_PORT = 23 # telnet default. Some Pioneer AVRs use 8102
DEFAULT_TIMEOUT = None
SUPPORT_PIONEER = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
MAX_VOLUME = 185
MAX_SOURCE_NUMBERS = 60
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.socket_timeout,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pioneer platform."""
pioneer = PioneerDevice(
config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_TIMEOUT))
if pioneer.update():
add_entities([pioneer])
class PioneerDevice(MediaPlayerDevice):
"""Representation of a Pioneer device."""
def __init__(self, name, host, port, timeout):
"""Initialize the Pioneer device."""
self._name = name
self._host = host
self._port = port
self._timeout = timeout
self._pwstate = 'PWR1'
self._volume = 0
self._muted = False
self._selected_source = ''
self._source_name_to_number = {}
self._source_number_to_name = {}
@classmethod
def telnet_request(cls, telnet, command, expected_prefix):
"""Execute `command` and return the response."""
try:
telnet.write(command.encode("ASCII") + b"\r")
except telnetlib.socket.timeout:
_LOGGER.debug("Pioneer command %s timed out", command)
return None
        # The receiver will randomly send state change updates; make sure
        # we get the response we are looking for.
for _ in range(3):
result = telnet.read_until(b"\r\n", timeout=0.2).decode("ASCII") \
.strip()
if result.startswith(expected_prefix):
return result
return None
def telnet_command(self, command):
"""Establish a telnet connection and sends command."""
try:
try:
telnet = telnetlib.Telnet(
self._host, self._port, self._timeout)
except (ConnectionRefusedError, OSError):
_LOGGER.warning("Pioneer %s refused connection", self._name)
return
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
except telnetlib.socket.timeout:
_LOGGER.debug(
"Pioneer %s command %s timed out", self._name, command)
def update(self):
"""Get the latest details from the device."""
try:
telnet = telnetlib.Telnet(self._host, self._port, self._timeout)
except (ConnectionRefusedError, OSError):
_LOGGER.warning("Pioneer %s refused connection", self._name)
return False
pwstate = self.telnet_request(telnet, "?P", "PWR")
if pwstate:
self._pwstate = pwstate
volume_str = self.telnet_request(telnet, "?V", "VOL")
self._volume = int(volume_str[3:]) / MAX_VOLUME if volume_str else None
muted_value = self.telnet_request(telnet, "?M", "MUT")
self._muted = (muted_value == "MUT0") if muted_value else None
# Build the source name dictionaries if necessary
if not self._source_name_to_number:
for i in range(MAX_SOURCE_NUMBERS):
result = self.telnet_request(
telnet, "?RGB" + str(i).zfill(2), "RGB")
if not result:
continue
source_name = result[6:]
source_number = str(i).zfill(2)
self._source_name_to_number[source_name] = source_number
self._source_number_to_name[source_number] = source_name
source_number = self.telnet_request(telnet, "?F", "FN")
if source_number:
self._selected_source = self._source_number_to_name \
.get(source_number[2:])
else:
self._selected_source = None
telnet.close()
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._pwstate == "PWR1":
return STATE_OFF
if self._pwstate == "PWR0":
return STATE_ON
return None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_PIONEER
@property
def source(self):
"""Return the current input source."""
return self._selected_source
@property
def source_list(self):
"""List of available input sources."""
return list(self._source_name_to_number.keys())
@property
def media_title(self):
"""Title of current playing media."""
return self._selected_source
def turn_off(self):
"""Turn off media player."""
self.telnet_command("PF")
def volume_up(self):
"""Volume up media player."""
self.telnet_command("VU")
def volume_down(self):
"""Volume down media player."""
self.telnet_command("VD")
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
# 60dB max
self.telnet_command(str(round(volume * MAX_VOLUME)).zfill(3) + "VL")
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.telnet_command("MO" if mute else "MF")
def turn_on(self):
"""Turn the media player on."""
self.telnet_command("PO")
def select_source(self, source):
"""Select input source."""
self.telnet_command(self._source_name_to_number.get(source) + "FN")
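# Illustrative sketch (not part of Home Assistant): the volume translation used
# above, mapping the receiver's 0..MAX_VOLUME scale (reported as 'VOLxxx') to
# the 0..1 fraction Home Assistant expects, and back to the 'xxxVL' command.
# Function names are hypothetical.
def _volume_from_response(volume_str):
    """'VOL121' -> 121 / MAX_VOLUME, or None when there was no response."""
    return int(volume_str[3:]) / MAX_VOLUME if volume_str else None
def _volume_to_command(volume):
    """0.65 -> '120VL' (rounded to the nearest receiver step)."""
    return str(round(volume * MAX_VOLUME)).zfill(3) + "VL"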
|
|
"""Helper for aiohttp webclient stuff."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable, Callable
from contextlib import suppress
from ssl import SSLContext
import sys
from types import MappingProxyType
from typing import Any, cast
import aiohttp
from aiohttp import web
from aiohttp.hdrs import CONTENT_TYPE, USER_AGENT
from aiohttp.web_exceptions import HTTPBadGateway, HTTPGatewayTimeout
import async_timeout
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE, __version__
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.loader import bind_hass
from homeassistant.util import ssl as ssl_util
from .frame import warn_use
DATA_CONNECTOR = "aiohttp_connector"
DATA_CONNECTOR_NOTVERIFY = "aiohttp_connector_notverify"
DATA_CLIENTSESSION = "aiohttp_clientsession"
DATA_CLIENTSESSION_NOTVERIFY = "aiohttp_clientsession_notverify"
SERVER_SOFTWARE = "HomeAssistant/{0} aiohttp/{1} Python/{2[0]}.{2[1]}".format(
__version__, aiohttp.__version__, sys.version_info
)
WARN_CLOSE_MSG = "closes the Home Assistant aiohttp session"
@callback
@bind_hass
def async_get_clientsession(
hass: HomeAssistant, verify_ssl: bool = True
) -> aiohttp.ClientSession:
"""Return default aiohttp ClientSession.
This method must be run in the event loop.
"""
key = DATA_CLIENTSESSION if verify_ssl else DATA_CLIENTSESSION_NOTVERIFY
if key not in hass.data:
hass.data[key] = _async_create_clientsession(
hass,
verify_ssl,
auto_cleanup_method=_async_register_default_clientsession_shutdown,
)
return cast(aiohttp.ClientSession, hass.data[key])
@callback
@bind_hass
def async_create_clientsession(
hass: HomeAssistant,
verify_ssl: bool = True,
auto_cleanup: bool = True,
**kwargs: Any,
) -> aiohttp.ClientSession:
"""Create a new ClientSession with kwargs, i.e. for cookies.
If auto_cleanup is False, you need to call detach() after the session
    returned is no longer used. The default is True: the session will be
    detached automatically on homeassistant_stop, or, when created during
    config entry setup, when that config entry is unloaded.
This method must be run in the event loop.
"""
auto_cleanup_method = None
if auto_cleanup:
auto_cleanup_method = _async_register_clientsession_shutdown
clientsession = _async_create_clientsession(
hass,
verify_ssl,
auto_cleanup_method=auto_cleanup_method,
**kwargs,
)
return clientsession
@callback
def _async_create_clientsession(
hass: HomeAssistant,
verify_ssl: bool = True,
auto_cleanup_method: Callable[[HomeAssistant, aiohttp.ClientSession], None]
| None = None,
**kwargs: Any,
) -> aiohttp.ClientSession:
"""Create a new ClientSession with kwargs, i.e. for cookies."""
clientsession = aiohttp.ClientSession(
connector=_async_get_connector(hass, verify_ssl),
**kwargs,
)
# Prevent packages accidentally overriding our default headers
# It's important that we identify as Home Assistant
# If a package requires a different user agent, override it by passing a headers
# dictionary to the request method.
# pylint: disable=protected-access
clientsession._default_headers = MappingProxyType({USER_AGENT: SERVER_SOFTWARE}) # type: ignore
clientsession.close = warn_use(clientsession.close, WARN_CLOSE_MSG) # type: ignore
if auto_cleanup_method:
auto_cleanup_method(hass, clientsession)
return clientsession
@bind_hass
async def async_aiohttp_proxy_web(
hass: HomeAssistant,
request: web.BaseRequest,
web_coro: Awaitable[aiohttp.ClientResponse],
buffer_size: int = 102400,
timeout: int = 10,
) -> web.StreamResponse | None:
"""Stream websession request to aiohttp web response."""
try:
async with async_timeout.timeout(timeout):
req = await web_coro
except asyncio.CancelledError:
# The user cancelled the request
return None
except asyncio.TimeoutError as err:
# Timeout trying to start the web request
raise HTTPGatewayTimeout() from err
except aiohttp.ClientError as err:
# Something went wrong with the connection
raise HTTPBadGateway() from err
try:
return await async_aiohttp_proxy_stream(
hass, request, req.content, req.headers.get(CONTENT_TYPE)
)
finally:
req.close()
@bind_hass
async def async_aiohttp_proxy_stream(
hass: HomeAssistant,
request: web.BaseRequest,
stream: aiohttp.StreamReader,
content_type: str | None,
buffer_size: int = 102400,
timeout: int = 10,
) -> web.StreamResponse:
"""Stream a stream to aiohttp web response."""
response = web.StreamResponse()
if content_type is not None:
response.content_type = content_type
await response.prepare(request)
    # Suppress errors from fetching data, e.g. a closed connection
with suppress(asyncio.TimeoutError, aiohttp.ClientError):
while hass.is_running:
async with async_timeout.timeout(timeout):
data = await stream.read(buffer_size)
if not data:
break
await response.write(data)
return response
@callback
def _async_register_clientsession_shutdown(
hass: HomeAssistant, clientsession: aiohttp.ClientSession
) -> None:
"""Register ClientSession close on Home Assistant shutdown or config entry unload.
This method must be run in the event loop.
"""
@callback
def _async_close_websession(*_: Any) -> None:
"""Close websession."""
clientsession.detach()
unsub = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_CLOSE, _async_close_websession
)
if not (config_entry := config_entries.current_entry.get()):
return
config_entry.async_on_unload(unsub)
config_entry.async_on_unload(_async_close_websession)
@callback
def _async_register_default_clientsession_shutdown(
hass: HomeAssistant, clientsession: aiohttp.ClientSession
) -> None:
"""Register default ClientSession close on Home Assistant shutdown.
This method must be run in the event loop.
"""
@callback
def _async_close_websession(event: Event) -> None:
"""Close websession."""
clientsession.detach()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_close_websession)
@callback
def _async_get_connector(
hass: HomeAssistant, verify_ssl: bool = True
) -> aiohttp.BaseConnector:
"""Return the connector pool for aiohttp.
This method must be run in the event loop.
"""
key = DATA_CONNECTOR if verify_ssl else DATA_CONNECTOR_NOTVERIFY
if key in hass.data:
return cast(aiohttp.BaseConnector, hass.data[key])
if verify_ssl:
ssl_context: bool | SSLContext = ssl_util.client_context()
else:
ssl_context = False
connector = aiohttp.TCPConnector(enable_cleanup_closed=True, ssl=ssl_context)
hass.data[key] = connector
async def _async_close_connector(event: Event) -> None:
"""Close connector pool."""
await connector.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_close_connector)
return connector
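# Illustrative sketch (not part of this helper): typical integration-side usage
# of the shared session returned by async_get_clientsession(). The coroutine
# name and the URL parameter are hypothetical; the calls themselves
# (session.get, raise_for_status, json) are standard aiohttp APIs.
async def _example_fetch_json(hass: HomeAssistant, url: str) -> Any:
    """Fetch JSON from `url` with the shared, correctly configured session."""
    session = async_get_clientsession(hass)
    async with async_timeout.timeout(10), session.get(url) as resp:
        resp.raise_for_status()
        return await resp.json()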
|
|
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.views.main import ChangeList, SEARCH_VAR, ALL_VAR
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats
from django.utils import six
from .admin import (ChildAdmin, QuartetAdmin, BandAdmin, ChordsBandAdmin,
GroupAdmin, ParentAdmin, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, CustomPaginationAdmin,
FilteredChildAdmin, CustomPaginator, site as custom_site,
SwallowAdmin, DynamicListFilterChildAdmin, InvitationAdmin,
DynamicSearchFieldsChildAdmin, NoListDisplayLinksParentAdmin)
from .models import (Event, Child, Parent, Genre, Band, Musician, Group,
Quartet, Membership, ChordsMusician, ChordsBand, Invitation, Swallow,
UnorderedObject, OrderedObject, CustomIdUser)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
ia.list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
ia.list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">(None)</td></tr></tbody>' % link
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
        Verifies that the inclusion tag result_list generates a table with
        default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
        Also a regression test for #13599: verifies that hidden fields,
        when list_editable is enabled, are rendered in a div outside the
        table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. The model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
        Regression tests for #15819: If a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
        Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
        Regression tests for #12893: Pagination in the admin's changelist
        doesn't use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get('/admin/admin_changelist/event/')
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, admin.site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, admin.site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(
origin='Africa', load='12.34', speed='22.2')
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views are correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
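# A minimal sketch of a helper that could factor out the repeated ChangeList
# construction used in the tests above (hypothetical, not part of the original
# test module; it simply mirrors the constructor arguments the tests pass).
def build_changelist(request, model, model_admin):
    # Collect the ModelAdmin options in the same order the tests pass them.
    return ChangeList(request, model, model_admin.list_display,
        model_admin.list_display_links, model_admin.list_filter,
        model_admin.date_hierarchy, model_admin.search_fields,
        model_admin.list_select_related, model_admin.list_per_page,
        model_admin.list_max_show_all, model_admin.list_editable, model_admin)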
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['users.json']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_add_row_selection(self):
"""
Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/auth/user/'))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
|
from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
r"""
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
:func:`~mpmath.polyval` evaluates the polynomial
.. math ::
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
tuple `(P(x), P'(x))`.
>>> from mpmath import *
>>> mp.pretty = True
>>> polyval([3, 0, 2], 0.5)
2.75
>>> polyval([3, 0, 2], 0.5, derivative=True)
(2.75, 3.0)
The coefficients and the evaluation point may be any combination
of real or complex numbers.
"""
if not coeffs:
return ctx.zero
p = ctx.convert(coeffs[0])
q = ctx.zero
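# Horner's scheme: p accumulates P(x); when derivative=True, q accumulates
# P'(x) via the product rule, using the recurrences q <- p + x*q, p <- c + x*p.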
for c in coeffs[1:]:
if derivative:
q = p + x*q
p = c + x*p
if derivative:
return p, q
else:
return p
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
error=False, roots_init=None):
"""
Computes all roots (real or complex) of a given polynomial.
The roots are returned as a sorted list, where real roots appear first
followed by complex conjugate roots as adjacent elements. The polynomial
should be given as a list of coefficients, in the format used by
:func:`~mpmath.polyval`. The leading coefficient must be nonzero.
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
where *err* is an estimate of the maximum error among the computed roots.
**Examples**
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(polyroots([1,-1,-14,24]), 4)
[-4.0, 2.0, 3.0]
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
error estimate::
>>> roots, err = polyroots([4,3,2], error=True)
>>> for r in roots:
... print(r)
...
(-0.375 + 0.59947894041409j)
(-0.375 - 0.59947894041409j)
>>>
>>> err
2.22044604925031e-16
>>>
>>> polyval([4,3,2], roots[0])
(2.22044604925031e-16 + 0.0j)
>>> polyval([4,3,2], roots[1])
(2.22044604925031e-16 + 0.0j)
The following example computes all the 5th roots of unity; that is,
the roots of `x^5 - 1`::
>>> mp.dps = 20
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
... print(r)
...
1.0
(-0.8090169943749474241 + 0.58778525229247312917j)
(-0.8090169943749474241 - 0.58778525229247312917j)
(0.3090169943749474241 + 0.95105651629515357212j)
(0.3090169943749474241 - 0.95105651629515357212j)
**Precision and conditioning**
The roots are computed to the current working precision accuracy. If this
accuracy cannot be achieved in `maxsteps` steps, then a `NoConvergence`
exception is raised. Internally, the algorithm uses the current working
precision extended by `extraprec`. A `NoConvergence` exception therefore means
either that the extra precision was not enough to achieve convergence
(in which case increasing `extraprec` should fix the problem), that `maxsteps`
is too low (in which case increasing `maxsteps` should fix the problem),
or a combination of both.
The user should always do a convergence study with respect to `extraprec`
to ensure accurate results; with too little extra precision it is possible
to converge to a wrong answer.
Provided there are no repeated roots, :func:`~mpmath.polyroots` can
typically compute all roots of an arbitrary polynomial to high precision::
>>> mp.dps = 60
>>> for r in polyroots([1, 0, -10, 0, 1]):
... print(r)
...
-3.14626436994197234232913506571557044551247712918732870123249
-0.317837245195782244725757617296174288373133378433432554879127
0.317837245195782244725757617296174288373133378433432554879127
3.14626436994197234232913506571557044551247712918732870123249
>>>
>>> sqrt(3) + sqrt(2)
3.14626436994197234232913506571557044551247712918732870123249
>>> sqrt(3) - sqrt(2)
0.317837245195782244725757617296174288373133378433432554879127
**Algorithm**
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
uses complex arithmetic to locate all roots simultaneously.
The Durand-Kerner method can be viewed as approximately performing
simultaneous Newton iteration for all the roots. In particular,
the convergence to simple roots is quadratic, just like Newton's
method.
Although all roots are internally calculated using complex arithmetic, any
root found to have an imaginary part smaller than the estimated numerical
error is truncated to a real number (small real parts are also chopped).
Real roots are placed first in the returned list, sorted by value. The
remaining complex roots are sorted by their real parts so that conjugate
roots end up next to each other.
**References**
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
"""
if len(coeffs) <= 1:
if not coeffs or not coeffs[0]:
raise ValueError("Input to polyroots must not be the zero polynomial")
# Constant polynomial with no roots
return []
orig = ctx.prec
tol = +ctx.eps
with ctx.extraprec(extraprec):
deg = len(coeffs) - 1
# Must be monic
lead = ctx.convert(coeffs[0])
if lead == 1:
coeffs = [ctx.convert(c) for c in coeffs]
else:
coeffs = [c/lead for c in coeffs]
f = lambda x: ctx.polyval(coeffs, x)
if roots_init is None:
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
else:
roots = [None]*deg
deg_init = min(deg, len(roots_init))
roots[:deg_init] = list(roots_init[:deg_init])
roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
in xrange(deg_init,deg)]
err = [ctx.one for n in xrange(deg)]
# Durand-Kerner iteration until convergence
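# Each estimate is updated with the Weierstrass correction
#   W_i = f(p_i) / prod_{j != i} (p_i - p_j),   p_i <- p_i - W_i,
# and |W_i| is used as the per-root error estimate.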
for step in xrange(maxsteps):
if abs(max(err)) < tol:
break
for i in xrange(deg):
p = roots[i]
x = f(p)
for j in range(deg):
if i != j:
try:
x /= (p-roots[j])
except ZeroDivisionError:
continue
roots[i] = p - x
err[i] = abs(x)
if abs(max(err)) >= tol:
raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
% maxsteps)
# Remove small real or imaginary parts
if cleanup:
for i in xrange(deg):
if abs(roots[i]) < tol:
roots[i] = ctx.zero
elif abs(ctx._im(roots[i])) < tol:
roots[i] = roots[i].real
elif abs(ctx._re(roots[i])) < tol:
roots[i] = roots[i].imag * 1j
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
if error:
err = max(err)
err = max(err, ctx.ldexp(1, -orig+1))
return [+r for r in roots], +err
else:
return [+r for r in roots]
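# A minimal convergence-study sketch, as recommended above (assumed usage,
# not part of the library): recompute the roots with increasing `extraprec`
# and only trust the digits that agree between runs.
#
#   >>> from mpmath import mp, polyroots, nprint
#   >>> mp.dps = 30
#   >>> for prec in (10, 50, 100):
#   ...     nprint(polyroots([1, 0, -10, 0, 1], extraprec=prec), 15)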
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class ReplicationsOperations(object):
"""ReplicationsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The client API version. Constant value: "2017-06-01-preview".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-06-01-preview"
self.config = config
def get(
self, resource_group_name, registry_name, replication_name, custom_headers=None, raw=False, **operation_config):
"""Gets the properties of the specified replication.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Replication
<azure.mgmt.containerregistry.v2017_06_01_preview.models.Replication>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'),
'replicationName': self._serialize.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Replication', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, registry_name, replication_name, location, tags=None, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a replication for a container registry with the
specified parameters.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param location: The location of the resource. This cannot be changed
after the resource is created.
:type location: str
:param tags: The tags of the resource.
:type tags: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Replication
<azure.mgmt.containerregistry.v2017_06_01_preview.models.Replication>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
replication = models.Replication(location=location, tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'),
'replicationName': self._serialize.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(replication, 'Replication')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Replication', response)
if response.status_code == 201:
deserialized = self._deserialize('Replication', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, registry_name, replication_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a replication from a container registry.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'),
'replicationName': self._serialize.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, registry_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the replications for the specified container registry.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ReplicationPaged
<azure.mgmt.containerregistry.v2017_06_01_preview.models.ReplicationPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ReplicationPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ReplicationPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
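# A minimal usage sketch (assumptions: the containing management client exposes
# this operations class as `client.replications`, and the resource names below
# are placeholders, not real identifiers).
def _example_get_replication(client):
    # Returns a deserialized Replication model, or raises CloudError on a
    # non-200 response, exactly as documented in `get` above.
    return client.replications.get(
        resource_group_name='myResourceGroup',
        registry_name='myregistry',
        replication_name='myreplication')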
|
|
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The Flowercoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to regtest genesis time + (201 * 156)
global MOCKTIME
MOCKTIME = 1495303202 + (201 * 156)
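# i.e. 201 blocks * 156 seconds apart = 31356 seconds, so
# MOCKTIME = 1495303202 + 31356 = 1495334558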
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this call is made to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy: a convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
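# Example (assumed values): proxy = get_rpc_proxy(rpc_url(0), 0, timeout=30),
# then e.g. proxy.getblockcount() issues the RPC, with the call logged to the
# coverage file when COVERAGE_DIR is set.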
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
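# 20000000.00000003 BTC * 1e8 = 2000000000000003 satoshis; if the float
# round-trip through JSON drops the trailing "3", precision was lost.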
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_masternodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "flowercoin.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for flowercoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if flowercoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('flowercoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run flowercoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("FLOWERCOIND", "flowercoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: flowercoind started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
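# (2 passes x 4 nodes x 25 blocks = 200 blocks in total; the 100 blocks from
# the first pass become mature once the second pass of 100 is mined, and the
# final block's timestamp ends up two 156-second intervals before MOCKTIME.)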
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in flowercoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
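# Examples (illustrative values only):
#   '127.0.0.1:18332' -> ['-rpcconnect=127.0.0.1', '-rpcport=18332']
#   '[::1]:18332'     -> ['-rpcconnect=::1', '-rpcport=18332']
#   'somehost'        -> ['-rpcconnect=somehost']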
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a flowercoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("FLOWERCOIND", "flowercoind")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: flowercoind started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: RPC succesfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple flowercoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
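# Worked example (illustrative numbers): amount_in=10, amount_out=2, fee=0.001
# gives amount=2.001 and change=7.999; since 7.999 > 2*2.001 the change is
# split, so outputs receives two change entries of 3.9995 each.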
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
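# Size check: script_pubkey is "6a4d0200" (OP_RETURN, OP_PUSHDATA2, 512) plus
# 512 "01" bytes = 516 bytes, and "fd0402" is the compact-size encoding of 516.
# "81" (129) is the new output count: 128 OP_RETURN outputs plus the original
# change output of the transaction these get spliced into.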
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
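# rawtx[0:92] covers the 4-byte version, 1-byte input count and the single
# input (32-byte txid + 4-byte vout + empty scriptSig length + 4-byte
# sequence) = 46 bytes = 92 hex chars; rawtx[92:94] is the 1-byte output
# count ('01'), which is replaced by the '81' at the start of txouts.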
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
|
|
from unittest import TestCase
from string_utils import prettify, is_email
class PrettifyTestCase(TestCase):
def test_cannot_handle_non_string_objects(self):
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify(None)
self.assertEqual(str(raised.exception), 'Expected "str", received "NoneType"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify(False)
self.assertEqual(str(raised.exception), 'Expected "str", received "bool"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify(0)
self.assertEqual(str(raised.exception), 'Expected "str", received "int"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify([])
self.assertEqual(str(raised.exception), 'Expected "str", received "list"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
prettify({'a': 1})
self.assertEqual(str(raised.exception), 'Expected "str", received "dict"')
def test_should_return_empty_string_from_empty_string_or_space_only_string(self):
self.assertEqual('', prettify(''))
self.assertEqual('', prettify(' '))
def test_should_uppercase_first_letter(self):
self.assertEqual('Hello world', prettify('hello world'))
def test_should_strip_string(self):
self.assertEqual('Hello world', prettify(' hello world '))
def test_should_strip_empty_lines(self):
self.assertEqual('Hello world', prettify('''
hello world
'''))
def test_should_replace_multiple_brackets_with_single_ones(self):
self.assertEqual('(foo)', prettify('((foo)'))
self.assertEqual('(foo)', prettify('(foo))'))
self.assertEqual('(foo)', prettify('((foo))'))
self.assertEqual('(foo)', prettify('((((((((foo)))'))
self.assertEqual('[foo]', prettify('[[foo]'))
self.assertEqual('[foo]', prettify('[foo]]'))
self.assertEqual('[foo]', prettify('[[foo]]'))
self.assertEqual('[foo]', prettify('[[[[[[[[foo]]]'))
self.assertEqual('{foo}', prettify('{{foo}'))
self.assertEqual('{foo}', prettify('{foo}}'))
self.assertEqual('{foo}', prettify('{{foo}}'))
self.assertEqual('{foo}', prettify('{{{{{{{{foo}}}'))
def test_should_remove_internal_spaces_in_brackets(self):
self.assertEqual('(foo)', prettify('( foo)'))
self.assertEqual('(foo)', prettify('(foo )'))
self.assertEqual('(foo)', prettify('( foo )'))
def test_should_add_spaces_outside_brackets(self):
self.assertEqual('Boo (bar) baz', prettify('boo(bar)baz'))
def test_should_not_add_right_space_after_bracket_if_followed_by_punctuation(self):
self.assertEqual('Foo (bar)? Yes!', prettify('Foo(bar)? Yes!'))
self.assertEqual('Foo (bar): Yes!', prettify('Foo(bar): Yes!'))
self.assertEqual('Foo (bar). Yes!', prettify('Foo(bar). Yes!'))
self.assertEqual('Foo (bar); yes!', prettify('Foo(bar); yes!'))
self.assertEqual('Foo (bar), yes!', prettify('Foo(bar), yes!'))
def test_should_replace_multiple_commas_with_single_ones(self):
self.assertEqual('Hello, world', prettify('Hello,,, world'))
self.assertEqual('Hello, world, banana', prettify('Hello,,, world,, banana'))
def test_should_replace_multiple_colons_with_single_ones(self):
self.assertEqual('Hello: world', prettify('Hello::: world'))
self.assertEqual('Hello: world: banana', prettify('Hello::: world:: banana'))
def test_should_replace_multiple_semicolons_with_single_ones(self):
self.assertEqual('Hello; world', prettify('Hello;;; world'))
self.assertEqual('Hello; world; banana', prettify('Hello;;; world;; banana'))
def test_should_replace_multiple_double_quotes_with_single_ones(self):
self.assertEqual('"hello" world', prettify('""hello"" world'))
self.assertEqual('"hello" world', prettify('""hello" world'))
self.assertEqual('"hello" world', prettify('"hello"" world'))
self.assertEqual('"hello" world', prettify('""""""hello""""" world'))
def test_should_add_spaces_for_double_quotes(self):
self.assertEqual('Foo "bar" baz', prettify('foo"bar"baz'))
self.assertEqual('Foo "bar" baz', prettify('foo"bar" baz'))
self.assertEqual('Foo "bar" baz', prettify('foo "bar"baz'))
def test_should_trim_spaces_inside_double_quotes(self):
self.assertEqual('Foo "bar" baz', prettify('foo " bar " baz'))
self.assertEqual('Foo "bar" baz', prettify('foo "bar " baz'))
self.assertEqual('Foo "bar" baz', prettify('foo " bar" baz'))
def test_should_not_add_right_space_after_double_quotes_if_followed_by_punctuation(self):
self.assertEqual('Foo "bar"? Yes!', prettify('Foo"bar"? Yes!'))
self.assertEqual('Foo "bar": Yes!', prettify('Foo"bar": Yes!'))
self.assertEqual('Foo "bar". Yes!', prettify('Foo"bar". Yes!'))
self.assertEqual('Foo "bar"; yes!', prettify('Foo"bar"; yes!'))
self.assertEqual('Foo "bar", yes!', prettify('Foo"bar", yes!'))
def test_should_replace_multiple_single_quotes_with_single_ones(self):
self.assertEqual('Dave\'s job', prettify("Dave''s job"))
self.assertEqual("'destiny'", prettify("'''destiny'''"))
def test_should_fix_saxon_genitive_spaces(self):
self.assertEqual("Dave's dog", prettify("Dave' s dog"))
self.assertEqual("Dave's dog", prettify("Dave 's dog"))
self.assertEqual("Dave's dog", prettify("Dave 'sdog"))
def test_should_replace_multiple_percentage_with_single_ones(self):
self.assertEqual('%', prettify('%%%'))
self.assertEqual('A % b % c', prettify('a %% b %%%%%% c'))
def test_should_add_space_after_comma_if_missing(self):
self.assertEqual('One, two, three', prettify('one,two,three'))
def test_should_not_add_right_space_after_comma_for_numbers(self):
self.assertEqual('12,55', prettify('12,55'))
def test_should_remove_space_before_comma(self):
self.assertEqual('One, two, three', prettify('one , two , three'))
def test_should_uppercase_first_letter_after_period(self):
self.assertEqual('Foo. Bar', prettify('Foo. bar'))
def test_should_add_space_after_period_if_missing(self):
self.assertEqual('One. Two. Three', prettify('one.two.three'))
def test_should_not_add_right_space_after_dot_for_numbers(self):
self.assertEqual('12.55', prettify('12.55'))
def test_should_remove_space_before_period(self):
self.assertEqual('One. Two. Three', prettify('one . two . three'))
def test_should_add_space_after_colon_if_missing(self):
self.assertEqual('Test: this', prettify('Test:this'))
def test_should_remove_space_before_colon(self):
self.assertEqual('Test: this', prettify('Test :this'))
self.assertEqual('Test:', prettify('Test :'))
def test_should_add_space_after_semicolon_if_missing(self):
self.assertEqual('Test; this', prettify('Test;this'))
def test_should_remove_space_before_semicolon(self):
self.assertEqual('Test; this', prettify('Test ;this'))
self.assertEqual('Test;', prettify('Test ;'))
def test_should_uppercase_first_letter_after_exclamation(self):
self.assertEqual('Foo! Bar', prettify('Foo! bar'))
def test_should_add_space_after_exclamation_if_missing(self):
self.assertEqual('Test! This', prettify('Test!this'))
def test_should_remove_space_before_exclamation(self):
self.assertEqual('Test! This', prettify('Test !this'))
self.assertEqual('Test!', prettify('Test !'))
def test_should_uppercase_first_letter_after_question(self):
self.assertEqual('Foo? Bar', prettify('Foo? bar'))
def test_should_add_space_after_question_if_missing(self):
self.assertEqual('Test? This', prettify('Test?this'))
def test_should_remove_space_before_question(self):
self.assertEqual('Test? This', prettify('Test ?this'))
self.assertEqual('Test?', prettify('Test ?'))
def test_should_remove_space_before_dot(self):
self.assertEqual('Test. This', prettify('Test . This'))
self.assertEqual('Test.', prettify('Test .'))
def test_should_remove_space_after_number_if_followed_by_percentage(self):
self.assertEqual('100% python', prettify('100 % python'))
self.assertEqual('100%', prettify('100 %'))
def test_should_add_space_after_percentage_if_missing(self):
self.assertEqual('100% python code', prettify('100%python code'))
def test_should_add_spaces_around_plus_if_missing(self):
self.assertEqual('5 + 2', prettify('5 +2'))
self.assertEqual('5 + 2', prettify('5+ 2'))
self.assertEqual('5 + 2', prettify('5+2'))
def test_should_add_spaces_around_minus_if_missing(self):
self.assertEqual('5 - 2', prettify('5 -2'))
self.assertEqual('5 - 2', prettify('5- 2'))
self.assertEqual('5 - 2', prettify('5-2'))
def test_should_add_spaces_around_equal_if_missing(self):
self.assertEqual('5 - 2 = 3', prettify('5 - 2=3'))
self.assertEqual('5 - 2 = 3', prettify('5 - 2 =3'))
self.assertEqual('5 - 2 = 3', prettify('5 - 2= 3'))
def test_should_add_spaces_around_division_if_missing(self):
self.assertEqual('5 / 2 = 2.5', prettify('5/ 2 = 2.5'))
self.assertEqual('5 / 2 = 2.5', prettify('5 /2 = 2.5'))
self.assertEqual('5 / 2 = 2.5', prettify('5/2 = 2.5'))
def test_should_add_spaces_around_multiplication_if_missing(self):
self.assertEqual('5 * 2 = 10', prettify('5* 2 = 10'))
self.assertEqual('5 * 2 = 10', prettify('5 *2 = 10'))
self.assertEqual('5 * 2 = 10', prettify('5*2 = 10'))
def test_triple_dot_preserved(self):
self.assertEqual('Test...', prettify('Test...'))
self.assertEqual('Test... This', prettify('Test...This'))
def test_triple_exclamation_preserved(self):
self.assertEqual('Test!!!', prettify('Test!!!'))
self.assertEqual('Test!!! This', prettify('Test!!!This'))
def test_triple_question_preserved(self):
self.assertEqual('Test???', prettify('Test???'))
self.assertEqual('Test??? This', prettify('Test???This'))
def test_should_prettify_string_as_expected(self):
original = ' unprettified string ,, like this one,will be"prettified" .it\' s awesome!( like python)) '
pretty = 'Unprettified string, like this one, will be "prettified". It\'s awesome! (like python)'
self.assertEqual(pretty, prettify(original))
def test_should_work_as_expected_for_multiple_lines_string(self):
original = '''
unprettified string ,,
like this one,will be"prettified"
.it' s awesome!( like python))
'''
pretty = 'Unprettified string, like this one, will be "prettified". It\'s awesome! (like python)'
self.assertEqual(pretty, prettify(original))
def test_does_not_try_to_format_email(self):
email = 'my.email_name@gmail.com'
self.assertTrue(is_email(email))
self.assertEqual(email, prettify(email))
self.assertEqual('This is the email: {}'.format(email), prettify('this is the email : {}'.format(email)))
multiple_emails = ['mail.one@gmail.com', 'mail.two@gmail.com', 'mail.three@gmail.com']
self.assertEqual(prettify(','.join(multiple_emails)), ', '.join(multiple_emails))
def test_does_not_try_to_format_url(self):
url = 'https://www.mysite.com/path/page.php?query=foo'
self.assertEqual(url, prettify(url))
self.assertEqual('This is the url: {}'.format(url), prettify('this is the url : {}'.format(url)))
multiple_urls = ['http://www.site1.com', 'http://foo.com', 'https://www.something.it']
self.assertEqual(prettify(','.join(multiple_urls)), ', '.join(multiple_urls))
def test_does_not_try_to_format_ip(self):
ip = '127.0.0.1'
self.assertEqual(ip, prettify(ip))
self.assertEqual('This is the ip: {}'.format(ip), prettify('this is the ip : {}'.format(ip)))
multiple_ip = ['255.255.10.1', '255.255.10.2', '255.255.10.3']
self.assertEqual(prettify(' '.join(multiple_ip)), ' '.join(multiple_ip))
# Python import
from functools import partial
from time import time
import os
import stat
# PySide import
from PySide.QtGui import *
from PySide.QtCore import (
Qt, QSize, QRegExp, QPoint, QRect, QModelIndex, QFileSystemWatcher)
# Maya import
import __main__
from maya import mel, cmds, OpenMaya as om
from maya.OpenMaya import MSceneMessage as sceneMsg
# Custom import
import mttResources
import mttViewer
import mttModel
import mttDelegate
import mttProxy
import mttCmdUi
from mttConfig import (
MTTSettings,
WINDOW_NAME, WINDOW_TITLE, WINDOW_ICON, VIEWER_TITLE, VIEWER_DOCK_NAME,
DEFAULT_VALUES, VIEW_COLUMN_SIZE, VIEW_COLUMN_CONTEXT,
TAG, NODE_NAME, NODE_FILE, COLUMN_COUNT, PROMPT_INSTANCE_SESSION, THEMES,
PROMPT_INSTANCE_WAIT_DURATION, PROMPT_INSTANCE_STATE, PROMPT_INSTANCE_ALWAYS,
PROMPT_INSTANCE_WAIT
)
from mttCmd import (
convert_to_relative_path, get_source_file,
check_editor_preferences, mtt_log, set_attr
)
from mttCmdUi import get_maya_window
from mttCustomWidget import RightPushButton, MessageBoxWithCheckbox
from mttDecorators import wait_cursor
from mttSettingsMenu import MTTSettingsMenu
from mttViewStatusLine import MTTStatusLine
# avoid inspection error
from mttSourceControlTemplate import checkout, submit, revert
class MTTDockFrame(QFrame):
""" Workaround to restore DockWidget size """
def __init__(self, parent=None, w=256, h=256):
super(MTTDockFrame, self).__init__(parent)
self.custom_width = w
self.custom_height = h
def sizeHint(self, *args, **kwargs):
return QSize(self.custom_width, self.custom_height)
class MTTDockWidget(QDockWidget):
def __init__(self, title):
super(MTTDockWidget, self).__init__()
self.setWindowTitle(title)
def closeEvent(self, event):
MTTSettings.set_value('viewerState', False)
MTTSettings.set_value('Viewer/windowGeometry', self.saveGeometry())
super(MTTDockWidget, self).closeEvent(event)
# ------------------------------------------------------------------------------
# MAIN WINDOW
# ------------------------------------------------------------------------------
class MTTView(QMainWindow):
""" Maya Texture Manager Main UI """
def __init__(self, parent=None):
super(MTTView, self).__init__(parent)
mttResources.qInitResources()
self.setObjectName(WINDOW_NAME)
self.setWindowTitle(WINDOW_TITLE)
# Callbacks variables
self.is_callbacks_created = False
self.is_batching_change_attr = False
self.scene_callbacks_ids = []
self.selection_callback_id = 0
self.new_callback_id = 0
self.open_callback_id = 0
self.rename_node_callback_id = 0
self.add_node_callback_id = 0
self.remove_node_callback_id = 0
self.attribute_callback_id = dict()
# UI variables
self.viewer_dock = None
self.viewer_view = None
self.image_editor_name = self.__get_image_editor_name()
self.header_menu = None
self.filter_reset_btn = None
self.filter_line_edit = None
self.filter_re_btn = None
self.filter_combo = None
self.table_view = None
self.table_view_selection_model = None
self.quick_action_layout = None
self.quick_reload_btn = None
self.quick_edit_btn = None
self.dock_side_data = dict()
self.dock_side_data['Left'] = Qt.LeftDockWidgetArea
self.dock_side_data['Top'] = Qt.TopDockWidgetArea
self.dock_side_data['Right'] = Qt.RightDockWidgetArea
self.dock_side_data['Bottom'] = Qt.BottomDockWidgetArea
self.supported_format_dict = dict(
[(nodeType, nodeAttrName)
for nodeType, nice, nodeAttrName in MTTSettings.SUPPORTED_TYPE])
# clean old pref
suspend_callback_value = DEFAULT_VALUES['suspendCallbacks']
MTTSettings.remove('suspendCallbacks')
cmds.optionVar(intValue=('suspendCallbacks', suspend_callback_value))
cmds.optionVar(stringValue=('filtered_instances', ''))
# main UI variables
self.file_watcher = QFileSystemWatcher()
self.model = mttModel.MTTModel(watcher=self.file_watcher)
self.delegate = mttDelegate.MTTDelegate()
self.proxy = mttProxy.MTTProxy()
# user completion
self.completion_model = QStringListModel(
self.get_filter_completion_words(), self)
self.quick_filter_words_init = MTTSettings.value(
'defaultQuickFilterWords')
self.quick_filter_words = self.get_filter_quick_words()
# create UI
self.__create_ui()
self.__init_ui()
# create callbacks
self.__create_callbacks()
# -------------------------------------------------------------------------
# UI CREATION
def __create_ui(self):
""" Create main UI """
main_layout = QVBoxLayout(self)
main_layout.setSpacing(1)
main_layout.setContentsMargins(2, 2, 2, 2)
self.settings_menu = MTTSettingsMenu(self)
self.status_line_ui = MTTStatusLine(
self.settings_menu, self.model, self.proxy)
self.status_line_ui.viewerToggled.connect(self.on_toggle_viewer)
self.status_line_ui.pinModeToggled.connect(self.on_pin_toggle)
self.status_line_ui.externalVizToggled.connect(self._update_workspace)
self.status_line_ui.filterSelectionToggled.connect(
self.update_selection_change_callback_state)
main_layout.addLayout(self.status_line_ui)
main_layout.addLayout(self.__create_filter_ui())
main_layout.addWidget(self.__create_table_ui())
main_layout.addLayout(self.__create_action_ui())
central = QWidget()
central.setLayout(main_layout)
self.setCentralWidget(central)
if MTTSettings.value('viewerState'):
self.on_toggle_viewer()
def __init_ui(self):
# restore geometry
self.restoreGeometry(MTTSettings.value('windowGeometry'))
self.centralWidget().setGeometry(
MTTSettings.value('centralGeometry', QRect(0, 0, 400, 200))
)
# update delegate workspace
self._update_workspace()
# restore table header width
if not self.table_view.horizontalHeader().restoreState(
MTTSettings.value('columnsSize')):
# init some UI with default value when no user pref
for columnId, sizeValue in VIEW_COLUMN_SIZE.iteritems():
self.table_view.setColumnWidth(columnId, sizeValue)
# manage focus to avoid hotkey capture
# when tool is called with shortcut key
if MTTSettings.value('filterFocus'):
self.filter_line_edit.setFocus()
else:
self.setFocus()
# update node/file count
self.__update_node_file_count_ui()
# apply theme
self.on_choose_theme(MTTSettings.value('theme', 'Default'))
self.setWindowIcon(QIcon(WINDOW_ICON))
def __create_filter_ui(self):
""" Create filter widgets """
filter_layout = QHBoxLayout()
filter_layout.setSpacing(1)
filter_layout.setContentsMargins(0, 0, 0, 0)
self.filter_reset_btn = QPushButton()
icon = QIcon(':/filtersOff.png')
self.filter_reset_btn.setIcon(icon)
self.filter_reset_btn.setIconSize(QSize(22, 22))
self.filter_reset_btn.setFixedSize(24, 24)
self.filter_reset_btn.setToolTip('Reset filter')
self.filter_reset_btn.setFlat(True)
self.filter_reset_btn.clicked.connect(
partial(self.on_filter_set_text, ''))
self.filter_line_edit = QLineEdit()
self.filter_line_edit.setContextMenuPolicy(Qt.CustomContextMenu)
self.filter_line_edit.customContextMenuRequested.connect(
self.on_filter_quick_filter_menu)
self.filter_line_edit.textChanged.connect(self.on_filter_text_changed)
self.filter_line_edit.editingFinished.connect(
self.on_filter_add_completion_item)
completer = QCompleter(self)
completer.setCaseSensitivity(Qt.CaseInsensitive)
completer.setModel(self.completion_model)
self.filter_line_edit.setCompleter(completer)
self.filter_re_btn = mttCmdUi.create_status_button(
':/fb_regularExpression',
'Use regular expression',
self.on_filter_toggle_re,
True)
self.filter_re_btn.setChecked(MTTSettings.value('filterRE'))
self.filter_combo = QComboBox()
self.filter_combo.addItems(['Nodes', 'Files'])
self.filter_combo.setCurrentIndex(MTTSettings.value('filterType'))
self.filter_combo.currentIndexChanged.connect(
self.on_filter_index_changed)
filter_layout.addWidget(self.filter_reset_btn)
filter_layout.addWidget(self.filter_line_edit)
filter_layout.addWidget(self.filter_re_btn)
filter_layout.addWidget(self.filter_combo)
return filter_layout
def __create_table_ui(self):
""" Create QTableView widget """
self.table_view = QTableView()
self.table_view.setItemDelegate(self.delegate)
self.model.set_table_view(self.table_view)
self.proxy.setSourceModel(self.model)
self.table_view.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table_view.setShowGrid(False)
self.table_view.setAlternatingRowColors(True)
self.table_view.verticalHeader().setVisible(False)
self.table_view.verticalHeader().setDefaultSectionSize(17)
self.table_view.horizontalHeader().setStretchLastSection(True)
self.table_view.horizontalHeader().setMinimumSectionSize(10)
self.table_view.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
self.table_view.setSortingEnabled(True)
# self.proxy.setDynamicSortFilter(True)
self.on_filter_index_changed(MTTSettings.value('filterType'))
# add context menu to show/hide columns
self.table_view.horizontalHeader().setContextMenuPolicy(
Qt.CustomContextMenu)
self.table_view.horizontalHeader().customContextMenuRequested.connect(
self.on_column_header_context_menu)
# add context menu
self.table_view.setContextMenuPolicy(Qt.CustomContextMenu)
self.table_view.customContextMenuRequested.connect(
self.on_table_view_context_menu)
self.table_view.setModel(self.proxy)
self.table_view_selection_model = self.table_view.selectionModel()
self.table_view_selection_model.selectionChanged.connect(
self.on_auto_select_cb)
return self.table_view
def __create_action_ui(self):
""" Create main button widget """
self.quick_action_layout = QHBoxLayout()
self.quick_action_layout.setSpacing(2)
self.quick_action_layout.setContentsMargins(0, 0, 0, 0)
self.quick_reload_btn = self.__create_quick_action_button(
label=r'&Reload',
tooltip="<p style='white-space:pre'>"
"<b>LMB</b> Reload selected files <i>R</i><br>"
"<b>RMB</b> Reload all files <i>Ctrl+Alt+R</i></p>",
help_txt='Reload files (shortcut: R)',
action=self.on_reload_files,
right_action=self.on_reload_all_files)
self.quick_action_layout.addWidget(self.quick_reload_btn)
self.quick_action_layout.addWidget(
self.__create_quick_action_button(
label='&Select',
tooltip="<p style='white-space:pre'>"
"<b>LMB</b> Select nodes <i>S</i><br>"
"<b>RMB</b> Open node in AE <i>Ctrl+Alt+S</i></p>",
help_txt='Select nodes (shortcut: S)',
action=self.on_select_nodes,
right_action=self.on_open_node_in_attribute_editor)
)
self.quick_action_layout.addWidget(
self.__create_quick_action_button(
label='Re&name',
tooltip="<p style='white-space:pre'>"
"<b>LMB</b> Rename nodes with filename <i>N</i><br>"
"<b>RMB</b> Rename all nodes with filename "
"<i>Ctrl+Alt+N</i></p>",
help_txt='Rename nodes (shortcut: N)',
action=self.on_rename_nodes,
right_action=self.on_rename_all_nodes)
)
self.quick_action_layout.addWidget(
self.__create_quick_action_button(
label='&View',
tooltip="<p style='white-space:pre'>"
"<b>LMB</b> Open files in Viewer <i>V</i><br>"
"<b>RMB</b> Open Viewer <i>Ctrl+Alt+V</i></p>",
help_txt='View files in default viewer (shortcut: V)',
action=self.on_view_files,
right_action=self.on_toggle_viewer)
)
self.quick_edit_btn = self.__create_quick_action_button(
label='&Edit',
tooltip='',
help_txt='',
action=self.on_quick_edit)
self.quick_action_layout.addWidget(self.quick_edit_btn)
self.on_set_source_edit_menu(MTTSettings.value('switchEdit'))
return self.quick_action_layout
@staticmethod
def __create_quick_action_button(label='button', tooltip=None, help_txt=None, action=None, right_action=None):
""" Create right click aware button """
new_button = RightPushButton()
new_button.setText(label)
new_button.setToolTip(tooltip)
if help_txt is None:
help_txt = tooltip
new_button.setStatusTip(help_txt)
new_button.clicked.connect(action)
if right_action is not None:
new_button.rightClick.connect(right_action)
return new_button
def __update_node_file_count_ui(self):
self.status_line_ui.update_node_file_count()
# --------------------------------------------------------------------------
# UI LOGIC
def _layout_changed(self):
cmds.optionVar(stringValue=('filtered_instances', ''))
self.model.layoutChanged.emit()
self.__update_node_file_count_ui()
def _update_workspace(self):
workspace_root = os.path.normpath(cmds.workspace(q=True, rd=True))
self.delegate.ws_path = workspace_root
def on_pin_toggle(self, state):
self.model.layoutAboutToBeChanged.emit()
nodes = '' if not state else ';'.join(
[node.data() for node in self.get_selected_table_nodes()])
MTTSettings.set_value('pinnedNode', nodes)
self._layout_changed()
def on_filter_set_text(self, text=''):
""" Set text in filter field """
self.filter_line_edit.setText(text)
def on_filter_text_changed(self, text):
""" Apply filter string """
cmds.optionVar(stringValue=('filtered_instances', ''))
if len(text):
icon = QIcon(':/filtersOn.png')
self.filter_reset_btn.setIcon(icon)
else:
icon = QIcon(':/filtersOff.png')
self.filter_reset_btn.setIcon(icon)
if self.filter_re_btn.isChecked():
search = QRegExp(text, Qt.CaseInsensitive, QRegExp.RegExp)
else:
search = QRegExp(text, Qt.CaseInsensitive, QRegExp.Wildcard)
self.proxy.setFilterRegExp(search)
self.__update_node_file_count_ui()
def on_filter_quick_filter_menu(self, point):
""" Create Quick Filter context menu """
history_menu = QMenu(self)
items = self.quick_filter_words
if items:
for item in items:
item_action = QAction(item, self)
item_action.triggered.connect(partial(self.on_filter_set_text, item))
history_menu.addAction(item_action)
else:
empty = QAction('No Quick Filter', self)
empty.setEnabled(False)
history_menu.addAction(empty)
history_menu.popup(self.filter_line_edit.mapToGlobal(point))
def on_filter_add_completion_item(self):
""" Add new entry to completion cache """
filter_text = self.filter_line_edit.text()
if len(filter_text) < 2:
return
if MTTSettings.value('filterRE'):
setting_name = 'filterCompletionRegExp'
else:
setting_name = 'filterCompletionWildcard'
items = self.get_filter_completion_words()
if items:
if filter_text not in items:
items.append(filter_text)
items.sort()
self.completion_model.setStringList(items)
MTTSettings.set_value(setting_name, ';;'.join(items))
else:
self.completion_model.setStringList([filter_text])
MTTSettings.set_value(setting_name, filter_text)
def on_filter_toggle_re(self):
""" Toggle Regular Expression Filter """
MTTSettings.set_value('filterRE', self.filter_re_btn.isChecked())
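# clear and re-apply the current filter text so it is re-evaluated with the new RegExp/Wildcard mode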
filter_text = self.filter_line_edit.text()
self.filter_line_edit.textChanged.disconnect(self.on_filter_text_changed)
self.filter_line_edit.setText('')
self.filter_line_edit.textChanged.connect(self.on_filter_text_changed)
self.filter_line_edit.setText(filter_text)
self.completion_model.setStringList(self.get_filter_completion_words())
self.quick_filter_words = self.get_filter_quick_words()
def on_filter_index_changed(self, index):
""" Change column filter """
if index == 0:
self.proxy.setFilterKeyColumn(NODE_NAME)
elif index == 1:
self.proxy.setFilterKeyColumn(NODE_FILE)
def on_column_header_context_menu(self, point):
""" Create context menu for header visibility """
if self.header_menu is not None and self.header_menu.isTearOffMenuVisible():
return
self.header_menu = QMenu(self)
self.header_menu.setTearOffEnabled(True)
self.header_menu.setWindowTitle(TAG)
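# when only one column is left visible, disable its entry so the table always keeps at least one column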
is_last_item = self.table_view.horizontalHeader().hiddenSectionCount() == COLUMN_COUNT - 1
for columnId in range(COLUMN_COUNT):
state = MTTSettings.value('columnVisibility_%s' % columnId, True)
current_action = QAction(VIEW_COLUMN_CONTEXT[columnId], self)
current_action.setCheckable(True)
current_action.setChecked(state)
current_action.setEnabled(not (state & is_last_item))
current_action.triggered.connect(partial(self.on_column_header_show_column, columnId))
self.header_menu.addAction(current_action)
self.header_menu.popup(self.table_view.horizontalHeader().mapToGlobal(point))
def on_table_view_context_menu(self, point):
""" Create table context menu """
table_menu = QMenu(self)
edit_image_action = QAction('Open Files in %s' % self.image_editor_name, self)
edit_image_action.triggered.connect(self.on_edit_files)
table_menu.addAction(edit_image_action)
edit_source_image_action = QAction('Open Source Files in %s' % self.image_editor_name, self)
edit_source_image_action.triggered.connect(self.on_edit_source_files)
table_menu.addAction(edit_source_image_action)
table_menu.addSeparator()
open_file_folder_action = QAction('Open Folders', self)
open_file_folder_action.triggered.connect(self.on_open_file_folder)
table_menu.addAction(open_file_folder_action)
table_menu.addSeparator()
select_objects_action = QAction('Select Objects Using Texture Nodes', self)
select_objects_action.triggered.connect(self.on_select_objects_with_shaders)
table_menu.addAction(select_objects_action)
select_objects_action = QAction('Select Objects Using Texture Files', self)
select_objects_action.triggered.connect(self.on_select_objects_with_textures)
table_menu.addAction(select_objects_action)
table_menu.addSeparator()
convert_to_relative_action = QAction('Convert to Relative Path', self)
convert_to_relative_action.triggered.connect(self.on_convert_to_relative_path)
table_menu.addAction(convert_to_relative_action)
convert_to_absolute_action = QAction('Convert to Absolute Path', self)
convert_to_absolute_action.triggered.connect(self.on_convert_to_absolute_path)
table_menu.addAction(convert_to_absolute_action)
custom_path_action = QAction('Convert to Custom Path', self)
custom_path_action.triggered.connect(self.on_set_custom_path)
table_menu.addAction(custom_path_action)
table_menu.addSeparator()
sourceimages_folder = os.path.basename(self.model.get_sourceimages_path())
copy_to_workspace_action = QAction('Copy Files to "%s"' % sourceimages_folder, self)
copy_to_workspace_action.triggered.connect(self.on_copy_files_to_workspace)
table_menu.addAction(copy_to_workspace_action)
table_menu.addSeparator()
rename_with_node_name_action = QAction('Rename Files with Node Name', self)
rename_with_node_name_action.triggered.connect(self.on_rename_file_with_node_name)
table_menu.addAction(rename_with_node_name_action)
rename_with_custom_name_action = QAction('Rename Files with Custom Name', self)
rename_with_custom_name_action.triggered.connect(self.on_rename_file_with_custom_name)
table_menu.addAction(rename_with_custom_name_action)
if MTTSettings.VCS:
table_menu.addSeparator()
if 'checkout' in MTTSettings.VCS:
check_out_action = QAction('Checkout', self)
check_out_action.triggered.connect(self.on_checkout)
table_menu.addAction(check_out_action)
if 'submit' in MTTSettings.VCS:
check_in_action = QAction('Submit', self)
check_in_action.triggered.connect(self.on_submit)
table_menu.addAction(check_in_action)
if 'revert' in MTTSettings.VCS:
revert_action = QAction('Revert', self)
revert_action.triggered.connect(self.on_revert)
table_menu.addAction(revert_action)
if MTTSettings.value('powerUser'):
table_menu.addSeparator()
toggle_readonly = QAction('Toggle Read-Only', self)
toggle_readonly.triggered.connect(self.on_toggle_readonly)
table_menu.addAction(toggle_readonly)
offset = QPoint(0, self.table_view.horizontalHeader().height())
table_menu.popup(self.table_view.mapToGlobal(point) + offset)
def on_column_header_show_column(self, column_id):
""" Hide/Show table column """
state = not MTTSettings.value('columnVisibility_%s' % column_id, True)
self.table_view.setColumnHidden(column_id, not state)
MTTSettings.set_value('columnVisibility_%s' % column_id, state)
@wait_cursor
def on_reload_files(self, all_node=False):
""" Reload selected files """
nodes = self.get_all_table_nodes() if all_node else self.get_selected_table_nodes()
if nodes:
reloaded_files = []
reloaded_files_count = 0
self.model.is_reloading_file = True
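# re-applying the current file path on each node makes Maya re-read the texture from disk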
for node in [data.data() for data in nodes]:
node_attr_name = self.supported_format_dict[cmds.nodeType(node)]
node_attr_value = cmds.getAttr('%s.%s' % (node, node_attr_name))
if node_attr_value not in reloaded_files:
reloaded_files.append(node_attr_value)
if set_attr(node, node_attr_name, node_attr_value, attr_type="string"):
reloaded_files_count += 1
self.model.is_reloading_file = False
mtt_log('%d/%d texture%s reloaded' % (
reloaded_files_count, len(nodes),
('s' if reloaded_files_count > 1 else '')))
else:
mtt_log('Nothing selected... nothing to reload')
def on_reload_all_files(self):
self.on_reload_files(all_node=True)
@wait_cursor
def on_select_nodes(self):
nodes = self.get_selected_table_nodes()
if nodes:
cmds.select([data.data() for data in nodes], replace=True)
mtt_log('%d node%s selected' % (len(nodes), ('s' if len(nodes) > 1 else '')))
else:
mtt_log('Nothing selected... nothing to select')
def on_open_node_in_attribute_editor(self):
nodes = self.get_selected_table_nodes()
if not nodes:
    mtt_log('Nothing selected... nothing to open')
    return
mel.eval('showEditorExact("' + nodes[0].data() + '")')
@wait_cursor
def on_rename_nodes(self, all_node=False):
nodes = self.get_all_table_nodes() if all_node else self.get_selected_table_nodes()
if nodes:
rename_count = 0
for nodeName in [mID.data() for mID in nodes]:
wanted_name = self.model.get_node_file_basename(nodeName)
if len(wanted_name):
new_name = self.model.rename_maya_node(nodeName, wanted_name)
if new_name != nodeName:
rename_count += 1
mtt_log(
'%d/%d node%s renamed with filename' % (rename_count, len(nodes), ('s' if len(nodes) > 1 else '')),
verbose=False
)
else:
mtt_log('Nothing selected... nothing to rename')
def on_rename_all_nodes(self):
self.on_rename_nodes(all_node=True)
def on_view_files(self, edit=False):
nodes = self.get_selected_table_nodes()
if nodes:
viewed_image = []
for node in nodes:
node_name = node.data()
absolute_path = self.model.get_node_file_fullpath(node_name)
if absolute_path not in viewed_image:
viewed_image.append(absolute_path)
if os.path.isfile(absolute_path):
if edit:
cmds.launchImageEditor(editImageFile=absolute_path)
else:
cmds.launchImageEditor(viewImageFile=absolute_path)
else:
filename = os.path.basename(absolute_path)
if filename != '.':
mtt_log('File "%s" not found' % filename, verbose=False)
else:
mtt_log('Nothing selected... nothing to show')
def on_edit_files(self):
self.on_view_files(edit=True)
def on_quick_edit(self):
if MTTSettings.value('switchEdit'):
self.on_edit_source_files()
else:
self.on_edit_files()
def on_set_source_edit_menu(self, state):
if state:
self.quick_edit_btn.setText('Source')
self.quick_edit_btn.setToolTip("<p style='white-space:pre'>Edit source files in %s <i>E</i></p>" % self.image_editor_name)
self.quick_edit_btn.setStatusTip('Edit source files in %s (shortcut: E)' % self.image_editor_name)
else:
self.quick_edit_btn.setText('&Edit')
self.quick_edit_btn.setToolTip("<p style='white-space:pre'>Edit files in %s <i>E</i></p>" % self.image_editor_name)
self.quick_edit_btn.setStatusTip('Edit files in %s (shortcut: E)' % self.image_editor_name)
def on_toggle_viewer(self):
""" Toggle Viewer """
self.table_view_selection_model = self.table_view.selectionModel()
if self.viewer_dock is None:
# init value
MTTSettings.set_value('viewerState', True)
# get default values
default_size = QRect(0, 0, 256, 256)
dock_size = MTTSettings.value('Viewer/dockGeometry', default_size)
dock_is_floating = MTTSettings.value('Viewer/isFloating')
# create widgets
self.viewer_dock = MTTDockWidget(VIEWER_TITLE)
self.viewer_view = mttViewer.MTTViewer()
dock_frame = MTTDockFrame(
self, dock_size.width(), dock_size.height())
# layout widgets
dock_frame_layout = QHBoxLayout()
dock_frame_layout.setContentsMargins(0, 0, 0, 0)
dock_frame_layout.addWidget(self.viewer_view)
dock_frame.setLayout(dock_frame_layout)
self.viewer_dock.setObjectName(VIEWER_DOCK_NAME)
self.viewer_dock.setWidget(dock_frame)
self.addDockWidget(
self.dock_side_data[MTTSettings.value('Viewer/side', 'Right')],
self.viewer_dock
)
# init callback
self.viewer_dock.topLevelChanged.connect(
self.on_viewer_top_level_changed)
self.table_view_selection_model.selectionChanged.connect(
self.on_auto_show_texture)
# update
self.viewer_dock.setFloating(dock_is_floating)
self.viewer_dock.setGeometry(dock_size)
self.viewer_dock.setVisible(True)
self.display_current_texture()
else:
state = not self.viewer_dock.isVisible()
self.viewer_dock.setVisible(state)
MTTSettings.set_value('viewerState', state)
if state:
self.table_view_selection_model.selectionChanged.connect(self.on_auto_show_texture)
self.display_current_texture()
else:
self.table_view_selection_model.selectionChanged.disconnect(self.on_auto_show_texture)
def on_viewer_top_level_changed(self, is_floating):
if is_floating:
self.viewer_dock.setWindowFlags(Qt.Window)
self.viewer_dock.show()
@staticmethod
def on_choose_instance_delay(delay_id, result=-1, prompt=True):
msg = ('When texture paths are modified,\n'
'do you want to apply changes to all instances?')
if prompt:
message_box = QMessageBox()
message_box.setWindowTitle(WINDOW_TITLE)
message_box.setIcon(QMessageBox.Question)
message_box.setText(msg)
message_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
message_box.setEscapeButton(QMessageBox.Cancel)
message_box.setDefaultButton(QMessageBox.Yes)
ret = message_box.exec_()
if ret == QMessageBox.Yes:
result = 1
elif ret == QMessageBox.No:
result = 0
else:
return
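# remember the answer and its scope: the chosen delay, the time of the choice and the yes/no result are stored in optionVars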
__main__.mtt_prompt_session = delay_id == PROMPT_INSTANCE_SESSION
cmds.optionVar(iv=['MTT_prompt_instance_state', delay_id])
cmds.optionVar(fv=['MTT_prompt_instance_suspend', time()])
cmds.optionVar(iv=['MTT_prompt_instance_value', result])
def on_choose_theme(self, theme_name):
theme_name = theme_name if theme_name in THEMES else 'Maya Theme'
MTTSettings.set_value('theme', theme_name)
btn_default_bg_color = QApplication.palette().button().color().name()
btn_default_text_color = QApplication.palette().buttonText().color().name()
custom_buttons = self.findChildren(RightPushButton, QRegExp('.*'))
for i in range(len(custom_buttons)):
# select right color
if theme_name == 'Maya Theme':
current_bg_color = btn_default_bg_color
current_text_color = btn_default_text_color
else:
current_bg_color = THEMES[theme_name][i]
# get background luminance
bg_color = QColor(current_bg_color)
photometric_lum = (0.2126 * bg_color.red()) + (0.7152 * bg_color.green()) + (0.0722 * bg_color.blue())
# perceivedLum = (0.299 * bgColor.red()) + (0.587 * bgColor.green()) + (0.114 * bgColor.blue())
# print perceivedLum
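# pick dark text on light backgrounds and light text on dark ones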
current_text_color = '#363636' if photometric_lum > 130 else '#C8C8C8'
# set color
custom_buttons[i].setStyleSheet(
"RightPushButton {background-color: %s; color: %s};" %
(current_bg_color, current_text_color)
)
# noinspection PyUnusedLocal
def on_auto_show_texture(self, selected, deselected):
if cmds.optionVar(query='suspendCallbacks'):
return
self.display_current_texture()
# current_model_id = self.table_view.selectionModel().currentIndex()
# current_node_name = (
# current_model_id.data()
# if current_model_id.column() == 0
# else current_model_id.sibling(current_model_id.row(), NODE_NAME).data()
# )
# file_path = self.model.get_node_file_fullpath(current_node_name)
# self.viewer_view.show_image(file_path)
def on_auto_select_cb(self, selected, deselected):
if MTTSettings.value('autoSelect'):
cmds.optionVar(intValue=('suspendCallbacks', True))
nodes = []
for node in self.get_selected_table_nodes():
nodes.append(node.data())
if nodes:
cmds.select(nodes, replace=True)
cmds.optionVar(intValue=('suspendCallbacks', False))
def on_rename_node(self, node_name):
wanted_name = self.model.get_node_file_basename(node_name)
if wanted_name:
self.model.rename_maya_node(node_name, wanted_name, deferred=True)
def on_edit_source_files(self):
source_files = set()
user_choice_files = set()
missing_files = set()
self._update_workspace()
nodes = self.get_selected_table_nodes()
if not nodes:
mtt_log('Nothing selected... nothing to show')
return
# parse all selected nodes
for node in nodes:
node_name = node.data()
absolute_path = self.model.get_node_file_fullpath(node_name)
# avoid extra processing/request for already scanned files
if absolute_path in source_files:
continue
# get source file for current node
source_file = get_source_file(absolute_path)
# store files without source file
if not source_file:
missing_files.add(absolute_path)
continue
is_external = not source_file.startswith(self.delegate.ws_path)
is_writable = os.access(source_file, os.W_OK)
# finally sort writable and internal source files from others
if is_writable and not is_external:
source_files.add(source_file)
else:
user_choice_files.add((source_file, is_writable, is_external))
# open writable source files from current workspace
for source in source_files:
cmds.launchImageEditor(editImageFile=source)
mtt_log('Opening "%s"' % source, verbose=False)
# open non writable source files if user want it
for source, is_writable, is_external in user_choice_files:
if self.__prompt_to_open_file(source, is_writable, is_external):
cmds.launchImageEditor(editImageFile=source)
mtt_log('Opening "%s"' % source, verbose=False)
else:
mtt_log('Opening Aborted for "%s"' % source, verbose=False)
# log missing source files
for source in missing_files:
mtt_log('No PSD found for "%s"' % source,
verbose=False, msg_type='warning')
def on_open_file_folder(self):
nodes = self.get_selected_table_nodes()
if nodes:
opened_folder = []
for node in nodes:
node_name = node.data()
folder_path = os.path.dirname(self.model.get_node_file_fullpath(node_name))
if folder_path not in opened_folder:
opened_folder.append(folder_path)
if os.path.isdir(folder_path):
os.startfile(folder_path)
def on_select_objects_with_shaders(self):
nodes = self.get_selected_table_nodes()
objects = []
if nodes:
shading_groups = self.get_shading_group([node.data() for node in nodes])
if shading_groups:
objects = cmds.sets(shading_groups, query=True)
if objects:
cmds.select(objects, replace=True)
else:
cmds.select(clear=True)
def on_select_objects_with_textures(self):
nodes = []
objects = []
tmp_nodes = self.get_selected_table_nodes()
for tmpNode in tmp_nodes:
node_name = tmpNode.data()
if self.model.get_node_instance_count(node_name) > 1:
for iNode in self.model.get_node_instances_model_id(node_name):
if iNode not in nodes:
nodes.append(iNode)
else:
nodes.append(tmpNode)
if nodes:
shading_groups = self.get_shading_group([node.data() for node in nodes])
if shading_groups:
objects = cmds.sets(shading_groups, query=True)
if objects:
cmds.select(objects, replace=True)
else:
cmds.select(clear=True)
@wait_cursor
def on_convert_to_relative_path(self):
nodes = self.get_selected_table_nodes(is_instance_aware=True)
self.model.suspend_force_sort = True
self.is_batching_change_attr = True
for node in nodes:
node_name = node.data()
if node_name:
if not cmds.lockNode(node_name, query=True, lock=True)[0]:
node_attr_value = self.model.get_node_attribute(node_name)
relative_path = convert_to_relative_path(node_attr_value)
self.model.set_database_node_and_attribute(node_name, relative_path)
self.model.suspend_force_sort = False
self.is_batching_change_attr = False
self.model.request_sort()
@wait_cursor
def on_convert_to_absolute_path(self):
nodes = self.get_selected_table_nodes(is_instance_aware=True)
self.model.suspend_force_sort = True
self.is_batching_change_attr = True
if nodes:
for node in nodes:
node_name = node.data()
if node_name:
if not cmds.lockNode(node_name, query=True, lock=True)[0]:
node_attr_value = self.model.get_node_attribute(node_name)
absolute_path = self.model.get_node_file_fullpath(node_name)
if absolute_path != node_attr_value:
self.model.set_database_node_and_attribute(node_name, absolute_path)
self.model.suspend_force_sort = False
self.is_batching_change_attr = False
self.model.request_sort()
@wait_cursor
def on_set_custom_path(self):
QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
custom_path = cmds.fileDialog2(
caption='Select image directory',
# startingDirectory=os.path.expandvars('%ProgramFiles%'),
okCaption='Select',
fileMode=3)
QApplication.restoreOverrideCursor()
if custom_path:
nodes = self.get_selected_table_nodes(is_instance_aware=True)
self.model.suspend_force_sort = True
self.is_batching_change_attr = True
if nodes:
for node in nodes:
node_name = node.data()
if node_name:
if not cmds.lockNode(node_name, query=True, lock=True)[0]:
node_attr_name = self.supported_format_dict[cmds.nodeType(node_name)]
node_attr_value = self.model.get_node_attribute(node_name)
new_path = os.path.normpath(os.path.join(custom_path[0], os.path.basename(node_attr_value)))
new_path = new_path.replace('\\', '/')
set_attr(node_name, node_attr_name, new_path, attr_type="string")
self.model.suspend_force_sort = False
self.is_batching_change_attr = False
self.model.request_sort()
@wait_cursor
def on_copy_files_to_workspace(self):
self.model.suspend_force_sort = True
self.is_batching_change_attr = True
nodes = self.get_selected_table_nodes(is_instance_aware=True)
if nodes:
file_history = dict()
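# file_history maps each source path to its destination (or None) so a file shared by several nodes is only copied once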
sourceimages_path = self.model.get_sourceimages_path()
for node in nodes:
node_name = node.data()
if not node_name:
continue
if not cmds.lockNode(node_name, query=True, lock=True)[0]:
file_fullpath = self.model.get_node_file_fullpath(node_name)
node_attr_name = self.supported_format_dict[cmds.nodeType(node_name)]
if file_fullpath not in file_history:
if not os.path.isfile(file_fullpath) or os.path.commonprefix([sourceimages_path, file_fullpath]) == sourceimages_path:
continue
destination_path = (os.path.join(sourceimages_path, os.path.basename(file_fullpath))).replace('\\', '/')
if destination_path == file_fullpath.replace('\\', '/'):
file_history[file_fullpath] = None
continue
else:
file_history[file_fullpath] = destination_path
if os.path.isfile(destination_path):
is_readonly = self.model.get_file_state(destination_path) < 1
if not self.__prompt_for_override_file(os.path.basename(destination_path), is_readonly):
continue
if is_readonly:
os.chmod(destination_path, stat.S_IWRITE)
if cmds.sysFile(file_fullpath, copy=destination_path):
mtt_log('%s copied.' % os.path.basename(destination_path), verbose=False)
os.chmod(destination_path, stat.S_IWRITE)
set_attr(node_name, node_attr_name, destination_path, attr_type="string")
else:
mtt_log('%s copy failed.' % os.path.basename(destination_path), msg_type='warning', verbose=False)
else:
if file_history[file_fullpath]:
set_attr(node_name, node_attr_name, file_history[file_fullpath], attr_type="string")
self.model.suspend_force_sort = False
self.is_batching_change_attr = False
self.model.request_sort()
def on_rename_file(self, custom_name=False):
self.model.suspend_force_sort = True
self.is_batching_change_attr = True
nodes = self.get_selected_table_nodes(is_instance_aware=True)
if nodes:
file_history = dict()
for node in nodes:
node_name = node.data()
if node_name:
if cmds.lockNode(node_name, query=True, lock=True)[0]:
continue
file_fullpath = self.model.get_node_file_fullpath(node_name)
node_attr_name = self.supported_format_dict[cmds.nodeType(node_name)]
if file_fullpath not in file_history:
if file_fullpath == '.' or not os.path.isfile(file_fullpath):
continue
file_path = os.path.dirname(file_fullpath)
filename, file_ext = os.path.splitext(os.path.basename(file_fullpath))
if custom_name:
new_path, ok = QInputDialog.getText(
self,
WINDOW_TITLE,
'Enter new name for "%s":' % filename,
QLineEdit.Normal,
filename)
filename = new_path
new_path = os.path.join(file_path, '%s%s' % (new_path, file_ext))
else:
new_path = os.path.join(file_path, '%s%s' % (node_name.replace(':', '_'), file_ext))
if node_name == filename and not custom_name:
file_history[file_fullpath] = None
continue
else:
file_history[file_fullpath] = new_path
if self.model.get_file_state(file_fullpath) == 1:
if cmds.sysFile(file_fullpath, rename=new_path):
set_attr(node_name, node_attr_name, new_path, attr_type="string")
else:
mtt_log('%s rename failed.' % filename, msg_type='warning', verbose=False)
else:
mtt_log('%s rename aborted (read-only).' % filename, msg_type='warning', verbose=False)
else:
if file_history[file_fullpath]:
set_attr(node_name, node_attr_name, file_history[file_fullpath], attr_type="string")
self.model.suspend_force_sort = False
self.is_batching_change_attr = False
self.model.request_sort()
@wait_cursor
def on_rename_file_with_node_name(self):
if self.__prompt_for_rename_without_undo():
undo_state = cmds.undoInfo(query=True, state=True)
try:
cmds.undoInfo(stateWithoutFlush=False)
self.on_rename_file(custom_name=False)
finally:
cmds.undoInfo(stateWithoutFlush=undo_state)
@wait_cursor
def on_rename_file_with_custom_name(self):
if self.__prompt_for_rename_without_undo():
undo_state = cmds.undoInfo(query=True, state=True)
try:
cmds.undoInfo(stateWithoutFlush=False)
self.on_rename_file(custom_name=True)
finally:
cmds.undoInfo(stateWithoutFlush=undo_state)
def on_checkout(self, files=None):
if not files:
nodes = self.get_selected_table_nodes()
files = [self.model.get_node_file_fullpath(n.data()) for n in nodes]
exec MTTSettings.VCS['checkout']
checkout(set(files))
def on_submit(self):
nodes = self.get_selected_table_nodes()
files = [self.model.get_node_file_fullpath(n.data()) for n in nodes]
exec MTTSettings.VCS['submit']
submit(set(files))
def on_revert(self):
nodes = self.get_selected_table_nodes()
files = [self.model.get_node_file_fullpath(n.data()) for n in nodes]
exec MTTSettings.VCS['revert']
revert(set(files))
@wait_cursor
def on_toggle_readonly(self):
nodes = self.get_selected_table_nodes()
if nodes:
toggled_files = []
for node in nodes:
node_name = node.data()
file_fullpath = self.model.get_node_file_fullpath(node_name)
if not os.path.isfile(file_fullpath) or file_fullpath in toggled_files:
continue
is_readonly = self.model.get_file_state(file_fullpath) < 1
os.chmod(file_fullpath, (stat.S_IWRITE if is_readonly else stat.S_IREAD))
toggled_files.append(file_fullpath)
# --------------------------------------------------------------------------
# TOOLS METHODS
@staticmethod
def __prompt_for_override_file(filename, is_readonly):
msg = '<b>%s</b> already exists' % filename
if is_readonly:
msg += ' and is a read-only file'
msg += '.'
message_box = QMessageBox()
message_box.setWindowTitle(WINDOW_TITLE)
message_box.setIcon(QMessageBox.Question)
message_box.setText(msg)
message_box.setInformativeText('Do you want to <b>overwrite</b> this file?')
message_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
message_box.setDefaultButton(QMessageBox.Yes)
message_box.setEscapeButton(QMessageBox.No)
ret = message_box.exec_()
if ret == QMessageBox.Yes:
return True
elif ret == QMessageBox.No:
return False
def __prompt_to_open_file(self, filename, is_writable, is_external):
checkout_btn = None
msg = ''
# add non writable comment
if not is_writable:
msg += '<b>%s</b> is a read-only file.' % os.path.basename(filename)
msg += '<br/>'
# add external comment
if is_external:
msg += '<br/>This file is not in the current workspace:'
msg += '<br/>{}'.format(filename)
message_box = QMessageBox()
message_box.setWindowTitle(WINDOW_TITLE)
message_box.setIcon(QMessageBox.Question)
message_box.setText(msg)
message_box.setInformativeText('Do you want to <b>open</b> this file anyway?')
# create buttons
yes_btn = message_box.addButton('Yes', QMessageBox.YesRole)
if not is_external and 'checkout' in MTTSettings.VCS:
checkout_btn = message_box.addButton(
'Yes && Checkout', QMessageBox.AcceptRole)
no_btn = message_box.addButton('No', QMessageBox.DestructiveRole)
# set default buttons
message_box.setDefaultButton(yes_btn)
message_box.setEscapeButton(no_btn)
# show dialog
message_box.exec_()
pressed_btn = message_box.clickedButton()
# result
if pressed_btn == yes_btn:
return True
elif pressed_btn == no_btn:
return False
elif pressed_btn == checkout_btn:
self.on_checkout([filename])
return True
@staticmethod
def __prompt_for_rename_without_undo():
message_box = QMessageBox()
message_box.setWindowTitle(WINDOW_TITLE)
message_box.setIcon(QMessageBox.Question)
message_box.setText('This operation can\'t be undone.')
message_box.setInformativeText('Do you want to <b>continue</b> anyway?')
message_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
message_box.setDefaultButton(QMessageBox.Yes)
message_box.setEscapeButton(QMessageBox.No)
ret = message_box.exec_()
if ret == QMessageBox.Yes:
return True
elif ret == QMessageBox.No:
return False
def __prompt_for_instance_propagation(self, show_cancel_button=True):
prompt_instance_state = cmds.optionVar(query='MTT_prompt_instance_state')
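# reuse the previously stored answer while the chosen suspend mode is still in effect (timed wait, current session or always)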
if prompt_instance_state:
if prompt_instance_state == PROMPT_INSTANCE_WAIT:
current_time = time()
if current_time - cmds.optionVar(query='MTT_prompt_instance_suspend') < PROMPT_INSTANCE_WAIT_DURATION:
return cmds.optionVar(query='MTT_prompt_instance_value')
elif prompt_instance_state == PROMPT_INSTANCE_SESSION:
if 'mtt_prompt_session' in __main__.__dict__ and __main__.mtt_prompt_session:
return cmds.optionVar(query='MTT_prompt_instance_value')
elif prompt_instance_state == PROMPT_INSTANCE_ALWAYS:
return cmds.optionVar(query='MTT_prompt_instance_value')
QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
message_box = MessageBoxWithCheckbox()
message_box.setWindowTitle(WINDOW_TITLE)
message_box.setIcon(QMessageBox.Question)
message_box.setText('-- Nodes with the same texture found. --\n\nDo you want to change all instances?')
message_box.instance_state_widget.addItems(PROMPT_INSTANCE_STATE.values())
if show_cancel_button:
message_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
message_box.setEscapeButton(QMessageBox.Cancel)
else:
message_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
message_box.setEscapeButton(QMessageBox.No)
message_box.setDefaultButton(QMessageBox.Yes)
ret, suspend = message_box.exec_()
result = -1
if ret == QMessageBox.Yes:
result = 1
elif ret == QMessageBox.No:
result = 0
self.on_choose_instance_delay(suspend, result, False)
QApplication.restoreOverrideCursor()
return result
def __get_image_editor_name(self):
if cmds.optionVar(exists='EditImageDir'):
app_path = cmds.optionVar(query='EditImageDir')
app_name = os.path.splitext(os.path.basename(app_path))[0]
self.image_editor_name = app_name
return app_name
return 'Image Editor'
def keyPressEvent(self, event):
""" Capture keyPress to prevent Maya Shortcut """
if event.isAutoRepeat():
return
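# give the viewer a chance to consume the key event first when it is visible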
if self.viewer_dock:
if self.viewer_dock.isVisible():
self.viewer_view.is_mtt_sender = True
if self.viewer_view.keyPressEvent(event):
self.viewer_view.is_mtt_sender = False
return
self.viewer_view.is_mtt_sender = False
if event.key() == Qt.Key_F and event.modifiers() == Qt.ControlModifier:
self.filter_line_edit.setFocus()
elif event.key() == Qt.Key_R and event.modifiers() == Qt.NoModifier:
self.on_reload_files()
elif event.key() == Qt.Key_R and event.modifiers() == Qt.ControlModifier | Qt.AltModifier:
self.on_reload_all_files()
elif event.key() == Qt.Key_S and event.modifiers() == Qt.NoModifier:
self.on_select_nodes()
elif event.key() == Qt.Key_S and event.modifiers() == Qt.ControlModifier | Qt.AltModifier:
self.on_open_node_in_attribute_editor()
elif event.key() == Qt.Key_N and event.modifiers() == Qt.NoModifier:
self.on_rename_nodes()
elif event.key() == Qt.Key_N and event.modifiers() == Qt.ControlModifier | Qt.AltModifier:
self.on_rename_all_nodes()
elif event.key() == Qt.Key_V and event.modifiers() == Qt.NoModifier:
self.on_view_files()
elif event.key() == Qt.Key_V and event.modifiers() == Qt.ControlModifier | Qt.AltModifier:
self.on_toggle_viewer()
elif event.key() == Qt.Key_E and event.modifiers() == Qt.NoModifier:
self.on_quick_edit()
elif event.key() in [Qt.Key_Left, Qt.Key_Up, Qt.Key_Right, Qt.Key_Down]:
return
else:
super(MTTView, self).keyPressEvent(event)
def keyReleaseEvent(self, event):
if event.isAutoRepeat():
return
if self.viewer_dock:
if self.viewer_dock.isVisible():
self.viewer_view.keyReleaseEvent(event)
return super(MTTView, self).keyReleaseEvent(event)
def get_shading_group(self, nodes):
""" Return ShadingEngine node attach to nodes """
shading_groups = []
shading_nodes = cmds.listHistory(nodes, future=True, pruneDagObjects=True)
if shading_nodes:
for futureNode in shading_nodes[:]:
asset_name = cmds.container(query=True, findContainer=[futureNode])
if asset_name:
self.callback_selection_changed_recursive(shading_nodes, asset_name, True)
shading_groups = cmds.ls(list(set(shading_nodes)), exactType='shadingEngine')
return shading_groups
def get_selected_table_nodes(self, is_instance_aware=False):
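# with is_instance_aware, nodes sharing the same texture file are also collected after prompting the user once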
nodes = []
nodes_name = []
is_already_prompted = False
collect_instance = False
for index in self.table_view.selectionModel().selectedRows(NODE_NAME):
node_name = index.data()
if node_name not in nodes_name:
nodes_name.append(node_name)
nodes.append(index)
if is_instance_aware:
if self.model.get_node_instance_count(node_name) > 1:
if collect_instance:
result = 1
else:
if not is_already_prompted:
result = self.__prompt_for_instance_propagation()
is_already_prompted = True
if result == -1:
return []
elif result == 1:
collect_instance = True
for instanceIndex in self.model.get_node_instances_model_id(node_name):
instance_index_name = instanceIndex.data()
if instance_index_name not in nodes_name:
nodes_name.append(instance_index_name)
nodes.append(instanceIndex)
return nodes
def get_all_table_nodes(self):
nodes = []
for rowId in xrange(self.proxy.rowCount()):
midx = self.proxy.index(rowId, 0, QModelIndex())
node = midx.data()
if cmds.objExists(node):
nodes.append(midx)
return nodes
@staticmethod
def get_filter_completion_words():
if MTTSettings.value('filterRE'):
item_str = MTTSettings.value('filterCompletionRegExp')
else:
item_str = MTTSettings.value('filterCompletionWildcard')
return item_str.split(';;') if item_str else []
def get_filter_quick_words(self):
if self.quick_filter_words_init:
self.quick_filter_words_init = False
MTTSettings.set_value('defaultQuickFilterWords', False)
MTTSettings.set_value('filterQuickWordsRegExp', r'_DIF$;;_NOR$;;_SPE$;;HEAD;;BODY;;^HEAD\w*DIF$;;^HEAD.*NOR')
MTTSettings.set_value('filterQuickWordsWildcard', '_DIF;;_NOR;;_SPE;;HEAD;;BODY;;HEAD*_DIF;;HEAD*_NOR')
if MTTSettings.value('filterRE'):
item_str = MTTSettings.value('filterQuickWordsRegExp')
else:
item_str = MTTSettings.value('filterQuickWordsWildcard')
return item_str.split(';;') if item_str else []
def display_current_texture(self):
""" Display in viewer the first selected row """
if not self.viewer_dock:
return
if self.viewer_dock.isVisible():
current_model_id = self.table_view.selectionModel().currentIndex()
if current_model_id:
current_node_name = (
current_model_id.data()
if current_model_id.column() == 0
else current_model_id.sibling(current_model_id.row(), NODE_NAME).data()
)
if current_node_name:
file_path = self.model.get_node_file_fullpath(current_node_name)
self.viewer_view.show_image(file_path)
@staticmethod
def callback_open_scene(clientData=None):
cmds.optionVar(intValue=('suspendCallbacks', True))
def callback_rename_node(self, node, old_name, clientData=None):
if cmds.optionVar(query='suspendCallbacks') \
or not old_name \
or MTTSettings.value('suspendRenameCallbacks'):
return
dep_node = om.MFnDependencyNode(node)
if dep_node.typeName() in self.supported_format_dict:
new_name = dep_node.name()
if new_name != old_name:
self.model.rename_database_node(old_name, new_name)
if self.proxy.selected_texture_nodes is not None:
if old_name in self.proxy.selected_texture_nodes:
self.proxy.selected_texture_nodes.remove(old_name)
self.proxy.selected_texture_nodes.add(new_name)
self.model.request_sort()
self.attribute_callback_id[new_name] = self.attribute_callback_id.pop(old_name)
def callback_add_node(self, node, clientData=None):
if cmds.optionVar(query='suspendCallbacks'):
return
new_node_name = om.MFnDependencyNode(node).name()
if cmds.nodeType(new_node_name) in self.supported_format_dict:
self.model.database_add_new_node(new_node_name)
self.model.request_sort()
self.create_attribute_callback(new_node_name)
self.__update_node_file_count_ui()
def callback_remove_node(self, node, clientData=None):
if cmds.optionVar(query='suspendCallbacks'):
return
dep_node = om.MFnDependencyNode(node)
if dep_node.typeName() in self.supported_format_dict:
self.model.database_remove_node(dep_node.name())
self.model.request_sort()
self.remove_attribute_callback(dep_node.name())
self.__update_node_file_count_ui()
def callback_attribute_changed(self, node_msg, plug, otherPlug, clientData=None):
if cmds.optionVar(query='suspendCallbacks'):
return
node, attr = plug.name().split('.')
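# only react when the file path attribute of a supported node type is actually set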
if node_msg & om.MNodeMessage.kAttributeSet:
if attr == self.supported_format_dict[cmds.nodeType(node)]:
new_path = cmds.getAttr(plug.name())
extra_nodes = []
if not self.is_batching_change_attr and not self.model.is_reloading_file:
if self.model.get_node_instance_count(node) > 1:
if self.__prompt_for_instance_propagation(show_cancel_button=False) == 1:
instance_nodes = self.model.get_node_instances_model_id(node)
for instanceNode in instance_nodes:
extra_node = instanceNode.data()
if extra_node != node:
extra_nodes.append(extra_node)
if self.model.change_node_attribute(node, new_path):
is_auto_rename_activated = MTTSettings.value('autoRename')
if is_auto_rename_activated:
self.on_rename_node(node)
for extra_node in extra_nodes:
cmds.optionVar(intValue=('suspendCallbacks', True))
node_attr_name = self.supported_format_dict[cmds.nodeType(extra_node)]
set_attr(extra_node, node_attr_name, new_path, attr_type="string")
if self.model.change_node_attribute(extra_node, new_path):
if is_auto_rename_activated:
self.on_rename_node(extra_node)
cmds.optionVar(intValue=('suspendCallbacks', False))
self.model.request_sort()
self.__update_node_file_count_ui()
def callback_selection_changed_recursive(self, shading_nodes, asset_node, do_future):
shading_nodes.extend(cmds.container(asset_node, query=True, nodeList=True))
if do_future:
new_nodes_list = [nodeAttr.split('.')[0] for nodeAttr in cmds.container(asset_node, query=True, connectionList=True)]
else:
new_nodes_list = cmds.listHistory(asset_node)
for node in new_nodes_list[:]:
if node not in shading_nodes:
asset_name = cmds.container(query=True, findContainer=[node])
if asset_name:
self.callback_selection_changed_recursive(shading_nodes, asset_name, do_future)
shading_nodes.extend(new_nodes_list)
shading_nodes.extend(cmds.listHistory(new_nodes_list, future=do_future))
def callback_selection_changed(self, clientData=None):
if cmds.optionVar(query='suspendCallbacks'):
return
current_selection = cmds.ls(selection=True, objectsOnly=True)
current_shading_group = []
supported_format = self.supported_format_dict.keys()
if current_selection:
# create SG list
shading_nodes = cmds.listHistory(current_selection, future=True, pruneDagObjects=True)
if shading_nodes is not None:
for futureNode in shading_nodes[:]:
asset_name = cmds.container(query=True, findContainer=[futureNode])
if asset_name:
self.callback_selection_changed_recursive(shading_nodes, asset_name, True)
current_shading_group = cmds.ls(list(set(shading_nodes)), exactType='shadingEngine')
if current_shading_group:
nodes = []
# parse SG
for SG in current_shading_group:
shading_nodes = cmds.listHistory(SG, pruneDagObjects=True)
if shading_nodes is not None:
for nodeName in shading_nodes[:]:
asset_name = cmds.container(query=True, findContainer=[nodeName])
if asset_name:
self.callback_selection_changed_recursive(shading_nodes, asset_name, False)
#shadingNodes.extend(cmds.container(assetName, query=True, nodeList=True))
#shadingNodes.extend(cmds.listHistory(assetName))
nodes.extend(cmds.ls(list(set(shading_nodes)), exactType=supported_format))
self.proxy.selected_texture_nodes = set(nodes)
self.model.request_sort()
self.__update_node_file_count_ui()
return
else:
self.proxy.selected_texture_nodes = None
self.model.request_sort()
# if no selection
else:
self.proxy.selected_texture_nodes = None
self.model.request_sort()
self.__update_node_file_count_ui()
def reset_mtt(self, clientData=None):
cmds.optionVar(stringValue=('filtered_instances', ''))
self.status_line_ui.pin_btn.setChecked(False)
MTTSettings.remove('pinnedNode')
self._update_workspace()
self.clear_all_attribute_callbacks()
self.model.file_watch_remove_all()
self.model.database_reset()
suspend_callback_value = DEFAULT_VALUES['suspendCallbacks']
cmds.optionVar(intValue=('suspendCallbacks', suspend_callback_value))
self.apply_attribute_change_callback()
self.__update_node_file_count_ui()
# --------------------------------------------------------------------------
# MANAGE CALLBACKS
def apply_attribute_change_callback(self):
nodes = self.model.get_all_nodes_name()
for nodeName in nodes:
self.create_attribute_callback(nodeName[0])
def create_attribute_callback(self, node_name):
# get MObject of node_name
sel = om.MSelectionList()
sel.add(node_name)
m_node = om.MObject()
sel.getDependNode(0, m_node)
# create callbacks for this node
self.attribute_callback_id[node_name] = om.MNodeMessage.addAttributeChangedCallback(m_node, self.callback_attribute_changed)
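    # Illustrative note (not in the original file): the id returned by
    # addAttributeChangedCallback() is what removeCallback() expects later on,
    # roughly:
    #
    #   cb_id = om.MNodeMessage.addAttributeChangedCallback(m_node, handler)
    #   om.MNodeMessage.removeCallback(cb_id)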
def remove_attribute_callback(self, node_name):
om.MNodeMessage.removeCallback(self.attribute_callback_id[node_name])
self.attribute_callback_id.pop(node_name)
def clear_all_attribute_callbacks(self):
for aCBId in self.attribute_callback_id.itervalues():
om.MNodeMessage.removeCallback(aCBId)
self.attribute_callback_id.clear()
def update_selection_change_callback_state(self, state):
if state:
self.selection_callback_id = om.MEventMessage.addEventCallback('SelectionChanged', self.callback_selection_changed)
self.callback_selection_changed()
else:
            if self.selection_callback_id != 0:
sceneMsg.removeCallback(self.selection_callback_id)
self.selection_callback_id = 0
self.proxy.selected_texture_nodes = None
self.model.request_sort()
self.__update_node_file_count_ui()
def __create_callbacks(self):
""" Create callbacks """
def add_callback(cb_type, func):
self.scene_callbacks_ids.append(
sceneMsg.addCallback(cb_type, func)
)
self.is_callbacks_created = True
self.new_callback_id = sceneMsg.addCallback(sceneMsg.kAfterNew, self.reset_mtt)
add_callback(sceneMsg.kBeforeOpen, self.callback_open_scene)
add_callback(sceneMsg.kAfterOpen, self.reset_mtt)
add_callback(sceneMsg.kBeforeImport, self.callback_open_scene)
add_callback(sceneMsg.kAfterImport, self.reset_mtt)
add_callback(sceneMsg.kBeforeRemoveReference, self.callback_open_scene)
add_callback(sceneMsg.kAfterRemoveReference, self.reset_mtt)
add_callback(sceneMsg.kBeforeImportReference, self.callback_open_scene)
add_callback(sceneMsg.kAfterImportReference, self.reset_mtt)
add_callback(sceneMsg.kBeforeUnloadReference, self.callback_open_scene)
add_callback(sceneMsg.kAfterUnloadReference, self.reset_mtt)
add_callback(sceneMsg.kBeforeLoadReference, self.callback_open_scene)
add_callback(sceneMsg.kAfterLoadReference, self.reset_mtt)
add_callback(sceneMsg.kBeforeCreateReference, self.callback_open_scene)
add_callback(sceneMsg.kAfterCreateReference, self.reset_mtt)
self.rename_node_callback_id = om.MNodeMessage.addNameChangedCallback(om.MObject(), self.callback_rename_node)
self.add_node_callback_id = om.MDGMessage.addNodeAddedCallback(self.callback_add_node)
self.remove_node_callback_id = om.MDGMessage.addNodeRemovedCallback(self.callback_remove_node)
self.apply_attribute_change_callback()
self.update_selection_change_callback_state(MTTSettings.value('onlySelectionState'))
def __remove_callbacks(self):
""" Remove callbacks """
if not self.is_callbacks_created:
return
sceneMsg.removeCallback(self.new_callback_id)
for callbackID in self.scene_callbacks_ids:
sceneMsg.removeCallback(callbackID)
sceneMsg.removeCallback(self.rename_node_callback_id)
sceneMsg.removeCallback(self.add_node_callback_id)
sceneMsg.removeCallback(self.remove_node_callback_id)
self.clear_all_attribute_callbacks()
self.update_selection_change_callback_state(False)
def __remove_filewatch(self):
self.model.file_watch_remove_all()
#-------------------------------------------------------------------------------------------------------------------
# CLEAN EXIT
def __save_dock_settings(self):
if not self.viewer_dock:
return
is_floating = self.viewer_dock.isFloating()
dock_geometry = self.viewer_dock.geometry()
central_geometry = self.centralWidget().geometry()
if not is_floating:
delta_x = central_geometry.x() - dock_geometry.x()
delta_y = central_geometry.y() - dock_geometry.y()
if delta_x > 0 and delta_y == 0:
MTTSettings.set_value('Viewer/side', 'Left')
elif delta_x == 0 and delta_y > 0:
MTTSettings.set_value('Viewer/side', 'Top')
elif delta_x < 0 and delta_y == 0:
MTTSettings.set_value('Viewer/side', 'Right')
elif delta_x == 0 and delta_y < 0:
MTTSettings.set_value('Viewer/side', 'Bottom')
MTTSettings.set_value('Viewer/isFloating', is_floating)
MTTSettings.set_value('Viewer/dockGeometry', dock_geometry)
def __save_settings(self):
""" Save settings to QSettings """
if self.table_view is None:
return
MTTSettings.set_value('windowGeometry', self.saveGeometry())
MTTSettings.set_value('centralGeometry', self.centralWidget().geometry())
MTTSettings.set_value('columnsSize', self.table_view.horizontalHeader().saveState())
MTTSettings.set_value('filterRE', self.filter_re_btn.isChecked())
MTTSettings.set_value('filterType', self.filter_combo.currentIndex())
self.status_line_ui.save_states()
# remove temp variable
MTTSettings.remove('browserFirstStart')
def closeEvent(self, event):
""" closeEvent override to save preferences and close callbacks """
if self.table_view is not None:
# prevent crash when file path editor is open
self.table_view.setFocus()
# save user pref
self.__save_settings()
self.__save_dock_settings()
# remove callbacks
self.__remove_callbacks()
# remove file watch
self.__remove_filewatch()
# delete memory database
self.model.database_close()
# clean widget
self.deleteLater()
event.accept()
def show_ui(toggle=True):
""" INIT TOOL AND SHOW UI
    @param toggle: destroy and recreate the window when it is set to False
"""
# delete UI if exists
if cmds.control(WINDOW_NAME, exists=True):
cmds.deleteUI(WINDOW_NAME, window=True)
if toggle:
return
check_editor_preferences()
dialog = MTTView(parent=get_maya_window())
dialog.show()
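# Illustrative usage (the module name below is hypothetical; it depends on how
# this file is installed in Maya's script path):
#
#   import mtt_view
#   mtt_view.show_ui(toggle=False)  # always rebuild the window
#
# With the default toggle=True, calling show_ui() while the window already
# exists simply deletes it (toggle behaviour).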
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for cinder.db.api."""
import datetime
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import uuidutils
from cinder.quota import ReservableResource
from cinder import test
CONF = cfg.CONF
def _quota_reserve(context, project_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
    Returns reservation uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, session):
return {resource: usage}
return sync
quotas = {}
resources = {}
deltas = {}
for i in range(3):
resource = 'res%d' % i
quotas[resource] = db.quota_create(context, project_id, resource, i)
resources[resource] = ReservableResource(
resource,
get_sync(resource, i), 'quota_res_%d' % i)
deltas[resource] = i
return db.quota_reserve(
context, resources, quotas, deltas,
datetime.datetime.utcnow(), datetime.datetime.utcnow(),
datetime.timedelta(days=1), project_id
)
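# Illustrative sketch (not part of the original tests): the uuids returned by
# _quota_reserve() are meant to be handed back to the reservation helpers that
# the test cases below exercise; ctxt stands for an admin context such as
# context.get_admin_context():
#
#   reservations = _quota_reserve(ctxt, 'project1')
#   db.reservation_commit(ctxt, reservations, 'project1')
#   # ...or db.reservation_rollback(ctxt, reservations, 'project1')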
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return dict([(k, v) for k, v in obj.iteritems()
if k not in ignored_keys])
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1), len(obj2))
for key, value in obj1.iteritems():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
self.assertEqual(len(objs1), len(objs2))
objs2 = dict([(o['id'], o) for o in objs2])
for o1 in objs1:
self._assertEqualObjects(o1, objs2[o1['id']], ignored_keys)
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
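# Usage sketch for the mixin above (illustrative; ignored_keys is whatever set
# of volatile columns a given test wants to skip):
#
#   self._assertEqualObjects(expected, actual,
#                            ignored_keys=['id', 'created_at', 'updated_at'])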
class BaseTest(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(BaseTest, self).setUp()
self.ctxt = context.get_admin_context()
class DBAPIServiceTestCase(BaseTest):
"""Unit tests for cinder.db.api.service_*."""
def _get_base_values(self):
return {
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
        self.assertIsNotNone(service['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, service[key])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2)
def test_service_update(self):
service = self._create_service({})
new_values = {
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.iteritems():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_get(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1)
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
service2 = self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_host(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_args(self):
values = [
{'host': 'host1', 'binary': 'a'},
{'host': 'host2', 'binary': 'b'}
]
services = [self._create_service(vals) for vals in values]
service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
self._assertEqualObjects(services[0], service1)
service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
self._assertEqualObjects(services[1], service2)
def test_service_get_by_args_not_found_exception(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_args,
self.ctxt, 'non-exists-host', 'a')
def test_service_get_all_volume_sorted(self):
values = [
({'host': 'h1', 'binary': 'a', 'topic': CONF.volume_topic},
100),
({'host': 'h2', 'binary': 'b', 'topic': CONF.volume_topic},
200),
({'host': 'h3', 'binary': 'b', 'topic': CONF.volume_topic},
300)]
services = []
for vals, size in values:
services.append(self._create_service(vals))
db.volume_create(self.ctxt, {'host': vals['host'], 'size': size})
for service, size in db.service_get_all_volume_sorted(self.ctxt):
self._assertEqualObjects(services.pop(0), service)
self.assertEqual(values.pop(0)[1], size)
class DBAPIVolumeTestCase(BaseTest):
"""Unit tests for cinder.db.api.volume_*."""
def test_volume_create(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
self.assertTrue(uuidutils.is_uuid_like(volume['id']))
self.assertEqual(volume.host, 'host1')
def test_volume_allocate_iscsi_target_no_more_targets(self):
self.assertRaises(db.NoMoreTargets,
db.volume_allocate_iscsi_target,
self.ctxt, 42, 'host1')
def test_volume_allocate_iscsi_target(self):
host = 'host1'
volume = db.volume_create(self.ctxt, {'host': host})
db.iscsi_target_create_safe(self.ctxt, {'host': host,
'target_num': 42})
target_num = db.volume_allocate_iscsi_target(self.ctxt, volume['id'],
host)
self.assertEqual(target_num, 42)
def test_volume_attached_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt,
42, 'invalid-uuid', None, '/tmp')
def test_volume_attached_to_instance(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
db.volume_attached(self.ctxt, volume['id'],
instance_uuid, None, '/tmp')
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual(volume['status'], 'in-use')
self.assertEqual(volume['mountpoint'], '/tmp')
self.assertEqual(volume['attach_status'], 'attached')
self.assertEqual(volume['instance_uuid'], instance_uuid)
self.assertEqual(volume['attached_host'], None)
def test_volume_attached_to_host(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
host_name = 'fake_host'
db.volume_attached(self.ctxt, volume['id'],
None, host_name, '/tmp')
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual(volume['status'], 'in-use')
self.assertEqual(volume['mountpoint'], '/tmp')
self.assertEqual(volume['attach_status'], 'attached')
self.assertEqual(volume['instance_uuid'], None)
self.assertEqual(volume['attached_host'], host_name)
def test_volume_data_get_for_host(self):
for i in xrange(3):
for j in xrange(3):
db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
for i in xrange(3):
self.assertEqual((3, 300),
db.volume_data_get_for_host(
self.ctxt, 'h%d' % i))
def test_volume_data_get_for_project(self):
for i in xrange(3):
for j in xrange(3):
db.volume_create(self.ctxt, {'project_id': 'p%d' % i,
'size': 100,
'host': 'h-%d-%d' % (i, j),
})
for i in xrange(3):
self.assertEqual((3, 300),
db.volume_data_get_for_project(
self.ctxt, 'p%d' % i))
def test_volume_detached_from_instance(self):
volume = db.volume_create(self.ctxt, {})
db.volume_attached(self.ctxt, volume['id'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None, '/tmp')
db.volume_detached(self.ctxt, volume['id'])
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual('available', volume['status'])
self.assertEqual('detached', volume['attach_status'])
self.assertIsNone(volume['mountpoint'])
self.assertIsNone(volume['instance_uuid'])
self.assertIsNone(volume['attached_host'])
def test_volume_detached_from_host(self):
volume = db.volume_create(self.ctxt, {})
db.volume_attached(self.ctxt, volume['id'],
None, 'fake_host', '/tmp')
db.volume_detached(self.ctxt, volume['id'])
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual('available', volume['status'])
self.assertEqual('detached', volume['attach_status'])
self.assertIsNone(volume['mountpoint'])
self.assertIsNone(volume['instance_uuid'])
self.assertIsNone(volume['attached_host'])
def test_volume_get(self):
volume = db.volume_create(self.ctxt, {})
self._assertEqualObjects(volume, db.volume_get(self.ctxt,
volume['id']))
def test_volume_destroy(self):
volume = db.volume_create(self.ctxt, {})
db.volume_destroy(self.ctxt, volume['id'])
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.ctxt, volume['id'])
def test_volume_get_all(self):
volumes = [db.volume_create(self.ctxt,
{'host': 'h%d' % i, 'size': i})
for i in xrange(3)]
self._assertEqualListsOfObjects(volumes, db.volume_get_all(
self.ctxt, None, None, 'host', None))
def test_volume_get_all_marker_passed(self):
volumes = [
db.volume_create(self.ctxt, {'id': 1}),
db.volume_create(self.ctxt, {'id': 2}),
db.volume_create(self.ctxt, {'id': 3}),
db.volume_create(self.ctxt, {'id': 4}),
]
self._assertEqualListsOfObjects(volumes[2:], db.volume_get_all(
self.ctxt, 2, 2, 'id', None))
def test_volume_get_all_by_host(self):
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_host(
self.ctxt, 'h%d' % i))
def test_volume_get_all_by_instance_uuid(self):
instance_uuids = []
volumes = []
for i in xrange(3):
instance_uuid = str(uuidutils.uuid.uuid1())
instance_uuids.append(instance_uuid)
volumes.append([db.volume_create(self.ctxt,
{'instance_uuid': instance_uuid})
for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_instance_uuid(
self.ctxt, instance_uuids[i]))
def test_volume_get_all_by_project(self):
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {
'project_id': 'p%d' % i}) for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_project(
self.ctxt, 'p%d' % i, None,
None, 'host', None))
def test_volume_get_iscsi_target_num(self):
target = db.iscsi_target_create_safe(self.ctxt, {'volume_id': 42,
'target_num': 43})
self.assertEqual(43, db.volume_get_iscsi_target_num(self.ctxt, 42))
def test_volume_get_iscsi_target_num_nonexistent(self):
self.assertRaises(exception.ISCSITargetNotFoundForVolume,
db.volume_get_iscsi_target_num, self.ctxt, 42)
def test_volume_update(self):
volume = db.volume_create(self.ctxt, {'host': 'h1'})
db.volume_update(self.ctxt, volume['id'], {'host': 'h2'})
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual('h2', volume['host'])
def test_volume_update_nonexistent(self):
self.assertRaises(exception.VolumeNotFound, db.volume_update,
self.ctxt, 42, {})
def test_volume_metadata_get(self):
metadata = {'a': 'b', 'c': 'd'}
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata})
        self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1))
def test_volume_metadata_update(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = {'a': '3', 'c': '2', 'd': '5'}
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
db.volume_metadata_update(self.ctxt, 1, metadata2, False)
        self.assertEqual(should_be, db.volume_metadata_get(self.ctxt, 1))
def test_volume_metadata_update_delete(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '4'}
should_be = metadata2
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
db.volume_metadata_update(self.ctxt, 1, metadata2, True)
        self.assertEqual(should_be, db.volume_metadata_get(self.ctxt, 1))
class DBAPISnapshotTestCase(BaseTest):
def test_snapshot_metadata_get(self):
metadata = {'a': 'b', 'c': 'd'}
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata})
        self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1))
def test_snapshot_metadata_update(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = {'a': '3', 'c': '2', 'd': '5'}
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata1})
db.snapshot_metadata_update(self.ctxt, 1, metadata2, False)
        self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
def test_snapshot_metadata_update_delete(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = metadata2
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata1})
db.snapshot_metadata_update(self.ctxt, 1, metadata2, True)
        self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
def test_snapshot_metadata_delete(self):
metadata = {'a': '1', 'c': '2'}
should_be = {'a': '1'}
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata})
db.snapshot_metadata_delete(self.ctxt, 1, 'c')
        self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
class DBAPIReservationTestCase(BaseTest):
"""Tests for db.api.reservation_* methods."""
def setUp(self):
super(DBAPIReservationTestCase, self).setUp()
self.values = {
'uuid': 'sample-uuid',
'project_id': 'project1',
'resource': 'resource',
'delta': 42,
'expire': (datetime.datetime.utcnow() +
datetime.timedelta(days=1)),
'usage': {'id': 1}
}
def test_reservation_create(self):
reservation = db.reservation_create(self.ctxt, **self.values)
self._assertEqualObjects(self.values, reservation, ignored_keys=(
'deleted', 'updated_at',
'deleted_at', 'id',
'created_at', 'usage',
'usage_id'))
self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
def test_reservation_get(self):
reservation = db.reservation_create(self.ctxt, **self.values)
reservation_db = db.reservation_get(self.ctxt, self.values['uuid'])
self._assertEqualObjects(reservation, reservation_db)
def test_reservation_get_nonexistent(self):
self.assertRaises(exception.ReservationNotFound,
db.reservation_get,
self.ctxt,
                          'non-existent-reservation-uuid')
def test_reservation_commit(self):
reservations = _quota_reserve(self.ctxt, 'project1')
expected = {'project_id': 'project1',
'res0': {'reserved': 0, 'in_use': 0},
'res1': {'reserved': 1, 'in_use': 1},
'res2': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt, 'project1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_commit(self.ctxt, reservations, 'project1')
self.assertRaises(exception.ReservationNotFound,
db.reservation_get,
self.ctxt,
reservations[0])
expected = {'project_id': 'project1',
'res0': {'reserved': 0, 'in_use': 0},
'res1': {'reserved': 0, 'in_use': 2},
'res2': {'reserved': 0, 'in_use': 4}}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
def test_reservation_rollback(self):
reservations = _quota_reserve(self.ctxt, 'project1')
expected = {'project_id': 'project1',
'res0': {'reserved': 0, 'in_use': 0},
'res1': {'reserved': 1, 'in_use': 1},
'res2': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_rollback(self.ctxt, reservations, 'project1')
self.assertRaises(exception.ReservationNotFound,
db.reservation_get,
self.ctxt,
reservations[0])
expected = {'project_id': 'project1',
'res0': {'reserved': 0, 'in_use': 0},
'res1': {'reserved': 0, 'in_use': 1},
'res2': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
def test_reservation_expire(self):
self.values['expire'] = datetime.datetime.utcnow() + \
datetime.timedelta(days=1)
reservations = _quota_reserve(self.ctxt, 'project1')
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1',
'res0': {'reserved': 0, 'in_use': 0},
'res1': {'reserved': 0, 'in_use': 1},
'res2': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
class DBAPIQuotaTestCase(BaseTest):
"""Tests for db.api.reservation_* methods."""
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'res%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'res0': 0,
'res1': 1,
'res2': 2})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update,
self.ctxt,
'project1',
'resource1',
42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get,
self.ctxt,
'project1',
'resource1')
def test_quota_reserve(self):
reservations = _quota_reserve(self.ctxt, 'project1')
self.assertEqual(len(reservations), 3)
res_names = ['res0', 'res1', 'res2']
for uuid in reservations:
reservation = db.reservation_get(self.ctxt, uuid)
self.assertTrue(reservation.resource in res_names)
res_names.remove(reservation.resource)
def test_quota_destroy_all_by_project(self):
reservations = _quota_reserve(self.ctxt, 'project1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
{'project_id': 'project1'})
self.assertEqual(db.quota_usage_get_all_by_project(self.ctxt,
'project1'),
{'project_id': 'project1'})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
db.reservation_get,
self.ctxt,
r)
def test_quota_usage_get_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound,
db.quota_usage_get,
self.ctxt,
'p1',
                          'nonexistent_resource')
def test_quota_usage_get(self):
reservations = _quota_reserve(self.ctxt, 'p1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'res0')
expected = {'resource': 'res0', 'project_id': 'p1',
'in_use': 0, 'reserved': 0, 'total': 0}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_usage_get_all_by_project(self):
reservations = _quota_reserve(self.ctxt, 'p1')
expected = {'project_id': 'p1',
'res0': {'in_use': 0, 'reserved': 0},
'res1': {'in_use': 1, 'reserved': 1},
'res2': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project(
self.ctxt, 'p1'))
class DBAPIIscsiTargetTestCase(BaseTest):
"""Unit tests for cinder.db.api.iscsi_target_*."""
def _get_base_values(self):
return {'target_num': 10, 'host': 'fake_host'}
def test_iscsi_target_create_safe(self):
target = db.iscsi_target_create_safe(self.ctxt,
self._get_base_values())
self.assertTrue(target['id'])
self.assertEqual(target['host'], 'fake_host')
self.assertEqual(target['target_num'], 10)
def test_iscsi_target_count_by_host(self):
for i in range(3):
values = self._get_base_values()
values['target_num'] += i
db.iscsi_target_create_safe(self.ctxt, values)
self.assertEqual(db.iscsi_target_count_by_host(self.ctxt, 'fake_host'),
3)
@test.testtools.skip("bug 1187367")
def test_integrity_error(self):
db.iscsi_target_create_safe(self.ctxt, self._get_base_values())
self.assertFalse(db.iscsi_target_create_safe(self.ctxt,
self._get_base_values()))
class DBAPIBackupTestCase(BaseTest):
"""Tests for db.api.backup_* methods."""
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(DBAPIBackupTestCase, self).setUp()
self.created = [db.backup_create(self.ctxt, values)
for values in self._get_values()]
def _get_values(self, one=False):
base_values = {
'user_id': 'user',
'project_id': 'project',
'volume_id': 'volume',
'host': 'host',
'availability_zone': 'zone',
'display_name': 'display',
'display_description': 'description',
'container': 'container',
'status': 'status',
'fail_reason': 'test',
'service_metadata': 'metadata',
'service': 'service',
'size': 1000,
'object_count': 100}
if one:
return base_values
def compose(val, step):
if isinstance(val, str):
step = str(step)
return val + step
return [dict([(k, compose(v, i)) for k, v in base_values.items()])
for i in range(1, 4)]
def test_backup_create(self):
values = self._get_values()
for i, backup in enumerate(self.created):
self.assertTrue(backup['id'])
self._assertEqualObjects(values[i], backup, self._ignored_keys)
def test_backup_get(self):
for backup in self.created:
backup_get = db.backup_get(self.ctxt, backup['id'])
self._assertEqualObjects(backup, backup_get)
def tests_backup_get_all(self):
all_backups = db.backup_get_all(self.ctxt)
self._assertEqualListsOfObjects(self.created, all_backups)
def test_backup_get_all_by_host(self):
byhost = db.backup_get_all_by_host(self.ctxt,
self.created[1]['host'])
self._assertEqualObjects(self.created[1], byhost[0])
def test_backup_get_all_by_project(self):
byproj = db.backup_get_all_by_project(self.ctxt,
self.created[1]['project_id'])
self._assertEqualObjects(self.created[1], byproj[0])
def test_backup_update(self):
updated_values = self._get_values(one=True)
update_id = self.created[1]['id']
updated_backup = db.backup_update(self.ctxt, update_id,
updated_values)
self._assertEqualObjects(updated_values, updated_backup,
self._ignored_keys)
def test_backup_destroy(self):
for backup in self.created:
db.backup_destroy(self.ctxt, backup['id'])
self.assertFalse(db.backup_get_all(self.ctxt))
def test_backup_not_found(self):
self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt,
'notinbase')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Tool to copy a flickr stream to Commons.
# Get a set to work on (start with just a username).
# * Make it possible to delimit the set (from/to)
#For each image
#*Check the license
#*Check if it isn't already on Commons
#*Build suggested filename
#**Check for name collision and maybe alter it
#*Pull description from Flinfo
#*Show image and description to user
#**Add a nice hotcat lookalike for the adding of categories
#**Filter the categories
#*Upload the image
Todo:
*Check if the image is already uploaded (SHA hash)
*Check and prevent filename collisions
**Initial suggestion
**User input
*Filter the categories
"""
#
# (C) Multichill, 2009
# (C) Pywikibot team, 2009-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import base64
import hashlib
import io
import re
import sys
import time
if sys.version_info[0] > 2:
from urllib.parse import urlencode
else:
from urllib import urlencode
try:
import flickrapi # see: http://stuvel.eu/projects/flickrapi
except ImportError as e:
print('This script requires the python flickrapi module. \n'
'See: http://stuvel.eu/projects/flickrapi')
print(e)
sys.exit(1)
import pywikibot
from pywikibot import config, textlib
from pywikibot.comms.http import fetch
from scripts import upload
try:
from pywikibot.userinterfaces.gui import Tkdialog
except ImportError as _tk_error:
Tkdialog = None
flickr_allowed_license = {
0: False, # All Rights Reserved
1: False, # Creative Commons Attribution-NonCommercial-ShareAlike License
2: False, # Creative Commons Attribution-NonCommercial License
3: False, # Creative Commons Attribution-NonCommercial-NoDerivs License
4: True, # Creative Commons Attribution License
5: True, # Creative Commons Attribution-ShareAlike License
6: False, # Creative Commons Attribution-NoDerivs License
7: True, # No known copyright restrictions
8: True, # United States Government Work
}
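# For illustration, isAllowedLicense() below simply looks the numeric license
# id up in this table:
#
#   flickr_allowed_license[4]   # True  -> CC-BY photos are transferred
#   flickr_allowed_license[0]   # False -> "All Rights Reserved" is skipped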
def getPhoto(flickr=None, photo_id=''):
"""
Get the photo info and the photo sizes so we can use these later on.
TODO: Add exception handling
"""
while True:
try:
photoInfo = flickr.photos_getInfo(photo_id=photo_id)
# xml.etree.ElementTree.dump(photoInfo)
photoSizes = flickr.photos_getSizes(photo_id=photo_id)
# xml.etree.ElementTree.dump(photoSizes)
return photoInfo, photoSizes
except flickrapi.exceptions.FlickrError:
pywikibot.output(u'Flickr api problem, sleeping')
time.sleep(30)
def isAllowedLicense(photoInfo=None):
"""
Check if the image contains the right license.
TODO: Maybe add more licenses
"""
license = photoInfo.find('photo').attrib['license']
if flickr_allowed_license[int(license)]:
return True
else:
return False
def getPhotoUrl(photoSizes=None):
"""Get the url of the jpg file with the highest resolution."""
url = ''
# The assumption is that the largest image is last
for size in photoSizes.find('sizes').findall('size'):
url = size.attrib['source']
return url
def downloadPhoto(photoUrl=''):
"""
Download the photo and store it in a io.BytesIO object.
TODO: Add exception handling
"""
imageFile = fetch(photoUrl).raw
return io.BytesIO(imageFile)
def findDuplicateImages(photo, site=None):
"""Find duplicate images.
Take the photo, calculate the SHA1 hash and ask the MediaWiki api
for a list of duplicates.
TODO: Add exception handling.
@param photo: Photo
@type photo: io.BytesIO
@param site: Site to search for duplicates.
Defaults to using Wikimedia Commons if not supplied.
@type site: APISite or None
"""
if not site:
site = pywikibot.Site('commons', 'commons')
hashObject = hashlib.sha1()
hashObject.update(photo.getvalue())
return site.getFilesFromAnHash(base64.b16encode(hashObject.digest()))
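# Rough illustration of the hash passed to the API above (assumption: MediaWiki
# expects the hex/base16-encoded SHA-1 of the raw file bytes):
#
#   import base64, hashlib, io
#   photo = io.BytesIO(raw_jpeg_bytes)
#   sha1_hex = base64.b16encode(hashlib.sha1(photo.getvalue()).digest())
#   site.getFilesFromAnHash(sha1_hex)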
def getTags(photoInfo=None):
"""Get all the tags on a photo."""
result = []
for tag in photoInfo.find('photo').find('tags').findall('tag'):
result.append(tag.text.lower())
return result
def getFlinfoDescription(photo_id=0):
"""
Get the description from http://wikipedia.ramselehof.de/flinfo.php.
TODO: Add exception handling, try a couple of times
"""
parameters = urlencode({'id': photo_id, 'raw': 'on'})
return fetch(
'http://wikipedia.ramselehof.de/flinfo.php?%s' % parameters).content
def getFilename(photoInfo=None, site=None, project=u'Flickr'):
"""Build a good filename for the upload based on the username and title.
Prevents naming collisions.
"""
if not site:
site = pywikibot.Site(u'commons', u'commons')
username = photoInfo.find('photo').find('owner').attrib['username']
title = photoInfo.find('photo').find('title').text
if title:
title = cleanUpTitle(title)
if not title:
# find the max length for a mw title
maxBytes = 240 - len(project.encode('utf-8')) \
- len(username.encode('utf-8'))
description = photoInfo.find('photo').find('description').text
if description:
descBytes = len(description.encode('utf-8'))
if descBytes > maxBytes:
                # this may cut more than needed, but that is fine
items = max(min(len(description), maxBytes // 4),
len(description) - descBytes + maxBytes)
description = description[:items]
title = cleanUpTitle(description)
else:
title = u''
# Should probably have the id of the photo as last resort.
if pywikibot.Page(site, u'File:%s - %s - %s.jpg'
% (title, project, username)).exists():
i = 1
while True:
name = '%s - %s - %s (%d).jpg' % (title, project, username, i)
if pywikibot.Page(site, 'File:' + name).exists():
i += 1
else:
return name
else:
return u'%s - %s - %s.jpg' % (title, project, username)
def cleanUpTitle(title):
"""Clean up the title of a potential MediaWiki page.
Otherwise the title of the page might not be allowed by the software.
"""
title = title.strip()
title = re.sub(u"[<{\\[]", u"(", title)
title = re.sub(u"[>}\\]]", u")", title)
title = re.sub(u"[ _]?\\(!\\)", u"", title)
title = re.sub(u",:[ _]", u", ", title)
title = re.sub(u"[;:][ _]", u", ", title)
title = re.sub(u"[\t\n ]+", u" ", title)
title = re.sub(u"[\r\n ]+", u" ", title)
title = re.sub(u"[\n]+", u"", title)
title = re.sub(u"[?!]([.\"]|$)", u"\\1", title)
title = re.sub(u"[&#%?!]", u"^", title)
title = re.sub(u"[;]", u",", title)
title = re.sub(u"[/+\\\\:]", u"-", title)
title = re.sub(u"--+", u"-", title)
title = re.sub(u",,+", u",", title)
title = re.sub(u"[-,^]([.]|$)", u"\\1", title)
title = title.replace(u" ", u"_")
title = title.strip(u"_")
return title
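# Approximate example of what cleanUpTitle() produces, derived by reading the
# substitutions above:
#
#   cleanUpTitle(u'Foo: Bar/Baz?')  ->  u'Foo,_Bar-Baz'
#   (":" becomes ", ", "/" becomes "-", the trailing "?" is dropped and
#    spaces are replaced by underscores)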
def buildDescription(flinfoDescription=u'', flickrreview=False, reviewer=u'',
override=u'', addCategory=u'', removeCategories=False):
"""Build the final description for the image.
The description is based on the info from flickrinfo and improved.
"""
description = u'== {{int:filedesc}} ==\n%s' % flinfoDescription
if removeCategories:
description = textlib.removeCategoryLinks(description,
pywikibot.Site(
'commons', 'commons'))
if override:
description = description.replace(u'{{cc-by-sa-2.0}}\n', u'')
description = description.replace(u'{{cc-by-2.0}}\n', u'')
description = description.replace(u'{{flickrreview}}\n', u'')
description = description.replace(
'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not '
'a free license --~~~~}}\n',
'')
description = description.replace(u'=={{int:license}}==',
u'=={{int:license}}==\n' + override)
elif flickrreview:
if reviewer:
description = description.replace(
'{{flickrreview}}',
'{{flickrreview|' + reviewer +
'|{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-{{subst:CURRENTDAY2}}}}')
if addCategory:
description = description.replace(u'{{subst:unc}}\n', u'')
description = description + u'\n[[Category:' + addCategory + ']]\n'
description = description.replace(u'\r\n', u'\n')
return description
def processPhoto(flickr=None, photo_id=u'', flickrreview=False, reviewer=u'',
override=u'', addCategory=u'', removeCategories=False,
autonomous=False):
"""Process a single Flickr photo."""
if photo_id:
pywikibot.output(str(photo_id))
(photoInfo, photoSizes) = getPhoto(flickr, photo_id)
if isAllowedLicense(photoInfo) or override:
# Get the url of the largest photo
photoUrl = getPhotoUrl(photoSizes)
# Should download the photo only once
photo = downloadPhoto(photoUrl)
# Don't upload duplicate images, should add override option
duplicates = findDuplicateImages(photo)
if duplicates:
pywikibot.output(u'Found duplicate image at %s' % duplicates.pop())
else:
filename = getFilename(photoInfo)
flinfoDescription = getFlinfoDescription(photo_id)
photoDescription = buildDescription(flinfoDescription,
flickrreview, reviewer,
override, addCategory,
removeCategories)
# pywikibot.output(photoDescription)
if Tkdialog is not None and not autonomous:
try:
(newPhotoDescription, newFilename, skip) = Tkdialog(
photoDescription, photo, filename).show_dialog()
except ImportError as e:
pywikibot.warning(e)
pywikibot.warning('Switching to autonomous mode.')
autonomous = True
elif not autonomous:
pywikibot.warning('Switching to autonomous mode because GUI '
'interface cannot be used')
pywikibot.warning(_tk_error)
autonomous = True
if autonomous:
newPhotoDescription = photoDescription
newFilename = filename
skip = False
# pywikibot.output(newPhotoDescription)
# if (pywikibot.Page(title=u'File:'+ filename, site=pywikibot.Site()).exists()):
# TODO: Check if the hash is the same and if not upload it under a different name
# pywikibot.output(u'File:' + filename + u' already exists!')
# else:
# Do the actual upload
# Would be nice to check before I upload if the file is already at Commons
# Not that important for this program, but maybe for derived programs
if not skip:
bot = upload.UploadRobot(photoUrl,
description=newPhotoDescription,
useFilename=newFilename,
keepFilename=True,
verifyDescription=False)
bot.upload_image(debug=False)
return 1
else:
pywikibot.output(u'Invalid license')
return 0
def getPhotos(flickr=None, user_id=u'', group_id=u'', photoset_id=u'',
start_id='', end_id='', tags=u''):
"""Loop over a set of Flickr photos."""
found_start_id = not start_id
# https://www.flickr.com/services/api/flickr.groups.pools.getPhotos.html
# Get the photos in a group
if group_id:
        # First get the total number of photos in the group
photos = flickr.groups_pools_getPhotos(group_id=group_id,
user_id=user_id, tags=tags,
per_page='100', page='1')
pages = photos.find('photos').attrib['pages']
gen = lambda i: flickr.groups_pools_getPhotos(
group_id=group_id, user_id=user_id, tags=tags,
per_page='100', page=i
).find('photos').getchildren()
# https://www.flickr.com/services/api/flickr.photosets.getPhotos.html
# Get the photos in a photoset
elif photoset_id:
photos = flickr.photosets_getPhotos(photoset_id=photoset_id,
per_page='100', page='1')
pages = photos.find('photoset').attrib['pages']
gen = lambda i: flickr.photosets_getPhotos(
photoset_id=photoset_id, per_page='100', page=i
).find('photoset').getchildren()
# https://www.flickr.com/services/api/flickr.people.getPublicPhotos.html
# Get the (public) photos uploaded by a user
elif user_id:
photos = flickr.people_getPublicPhotos(user_id=user_id,
per_page='100', page='1')
pages = photos.find('photos').attrib['pages']
gen = lambda i: flickr.people_getPublicPhotos(
user_id=user_id, per_page='100', page=i
).find('photos').getchildren()
for i in range(1, int(pages) + 1):
gotPhotos = False
while not gotPhotos:
try:
for photo in gen(i):
gotPhotos = True
if photo.attrib['id'] == start_id:
found_start_id = True
if found_start_id:
if photo.attrib['id'] == end_id:
pywikibot.output('Found end_id')
return
else:
yield photo.attrib['id']
except flickrapi.exceptions.FlickrError:
gotPhotos = False
pywikibot.output(u'Flickr api problem, sleeping')
time.sleep(30)
return
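# Consumption sketch (mirrors the loop in main() below; the user_id value is
# just an example):
#
#   for photo_id in getPhotos(flickr, user_id=u'12345678@N00'):
#       processPhoto(flickr, photo_id)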
def usage():
"""
Print usage information.
TODO : Need more.
"""
pywikibot.output(
u"Flickrripper is a tool to transfer flickr photos to Wikimedia Commons")
pywikibot.output(u"-group_id:<group_id>\n")
pywikibot.output(u"-photoset_id:<photoset_id>\n")
pywikibot.output(u"-user_id:<user_id>\n")
pywikibot.output(u"-tags:<tag>\n")
return
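# Illustrative invocation (assumes the standard pywikibot wrapper script):
#
#   python pwb.py flickrripper -user_id:12345678@N00 -flickrreview -autonomous
#
# One of -group_id / -photoset_id / -user_id selects the set of photos to
# process; the remaining options mirror the argument parsing in main() below.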
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
local_args = pywikibot.handle_args(args)
# Get the api key
if not config.flickr['api_key']:
pywikibot.output('Flickr api key not found! Get yourself an api key')
pywikibot.output(
'Any flickr user can get a key at https://www.flickr.com/services/api/keys/apply/')
return
if 'api_secret' in config.flickr and config.flickr['api_secret']:
flickr = flickrapi.FlickrAPI(config.flickr['api_key'], config.flickr['api_secret'])
(token, frob) = flickr.get_token_part_one(perms='read')
if not token:
            # The user still hasn't authorised this app yet; get_token_part_one()
            # will have spawned a browser window.
pywikibot.input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))
else:
pywikibot.output('Accessing public content only')
flickr = flickrapi.FlickrAPI(config.flickr['api_key'])
group_id = u''
photoset_id = u''
user_id = u''
start_id = u''
end_id = u''
tags = u''
addCategory = u''
removeCategories = False
autonomous = False
totalPhotos = 0
uploadedPhotos = 0
# Do we mark the images as reviewed right away?
if config.flickr['review']:
flickrreview = config.flickr['review']
else:
flickrreview = False
# Set the Flickr reviewer
if config.flickr['reviewer']:
reviewer = config.flickr['reviewer']
elif 'commons' in config.sysopnames['commons']:
pywikibot.output(config.sysopnames['commons'])
reviewer = config.sysopnames['commons']['commons']
elif 'commons' in config.usernames['commons']:
reviewer = config.usernames['commons']['commons']
else:
reviewer = u''
# Should be renamed to overrideLicense or something like that
override = u''
for arg in local_args:
if arg.startswith('-group_id'):
if len(arg) == 9:
group_id = pywikibot.input(u'What is the group_id of the pool?')
else:
group_id = arg[10:]
elif arg.startswith('-photoset_id'):
if len(arg) == 12:
photoset_id = pywikibot.input(u'What is the photoset_id?')
else:
photoset_id = arg[13:]
elif arg.startswith('-user_id'):
if len(arg) == 8:
user_id = pywikibot.input(
u'What is the user_id of the flickr user?')
else:
user_id = arg[9:]
elif arg.startswith('-start_id'):
if len(arg) == 9:
start_id = pywikibot.input(
u'What is the id of the photo you want to start at?')
else:
start_id = arg[10:]
elif arg.startswith('-end_id'):
if len(arg) == 7:
end_id = pywikibot.input(
u'What is the id of the photo you want to end at?')
else:
end_id = arg[8:]
elif arg.startswith('-tags'):
if len(arg) == 5:
tags = pywikibot.input(
u'What is the tag you want to filter out (currently only one supported)?')
else:
tags = arg[6:]
elif arg == '-flickrreview':
flickrreview = True
elif arg.startswith('-reviewer'):
if len(arg) == 9:
reviewer = pywikibot.input(u'Who is the reviewer?')
else:
reviewer = arg[10:]
elif arg.startswith('-override'):
if len(arg) == 9:
override = pywikibot.input(u'What is the override text?')
else:
override = arg[10:]
elif arg.startswith('-addcategory'):
if len(arg) == 12:
addCategory = pywikibot.input(
u'What category do you want to add?')
else:
addCategory = arg[13:]
elif arg == '-removecategories':
removeCategories = True
elif arg == '-autonomous':
autonomous = True
if user_id or group_id or photoset_id:
for photo_id in getPhotos(flickr, user_id, group_id, photoset_id,
start_id, end_id, tags):
uploadedPhotos += processPhoto(flickr, photo_id, flickrreview,
reviewer, override, addCategory,
removeCategories, autonomous)
totalPhotos += 1
else:
usage()
pywikibot.output(u'Finished running')
pywikibot.output(u'Total photos: ' + str(totalPhotos))
pywikibot.output(u'Uploaded photos: ' + str(uploadedPhotos))
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows specific tests. These are implicitly run by test_psutil.py."""
import errno
import os
import platform
import signal
import subprocess
import sys
import time
import traceback
from test_psutil import WINDOWS, get_test_subprocess, reap_children, unittest
import mock
try:
import wmi
except ImportError:
wmi = None
try:
import win32api
import win32con
except ImportError:
win32api = win32con = None
from psutil._compat import PY3, callable, long
import psutil
cext = psutil._psplatform.cext
def wrap_exceptions(fun):
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
from psutil._pswindows import ACCESS_DENIED_SET
if err.errno in ACCESS_DENIED_SET:
raise psutil.AccessDenied(None, None)
if err.errno == errno.ESRCH:
raise psutil.NoSuchProcess(None, None)
raise
return wrapper
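# Usage sketch: wrap_exceptions() is applied below to the raw C-extension
# functions so that Windows errors surface as psutil exceptions, e.g.:
#
#   meth = wrap_exceptions(getattr(cext, 'proc_create_time'))
#   meth(pid)  # may raise psutil.AccessDenied or psutil.NoSuchProcess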
@unittest.skipUnless(WINDOWS, "not a Windows system")
class WindowsSpecificTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_issue_24(self):
p = psutil.Process(0)
self.assertRaises(psutil.AccessDenied, p.kill)
def test_special_pid(self):
p = psutil.Process(4)
self.assertEqual(p.name(), 'System')
# use __str__ to access all common Process properties to check
# that nothing strange happens
str(p)
p.username()
self.assertTrue(p.create_time() >= 0.0)
try:
rss, vms = p.memory_info()
except psutil.AccessDenied:
# expected on Windows Vista and Windows 7
            if platform.uname()[1] not in ('vista', 'win-7', 'win7'):
raise
else:
self.assertTrue(rss > 0)
def test_send_signal(self):
p = psutil.Process(self.pid)
self.assertRaises(ValueError, p.send_signal, signal.SIGINT)
def test_nic_names(self):
p = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE)
out = p.communicate()[0]
if PY3:
out = str(out, sys.stdout.encoding)
nics = psutil.net_io_counters(pernic=True).keys()
for nic in nics:
if "pseudo-interface" in nic.replace(' ', '-').lower():
continue
if nic not in out:
self.fail(
"%r nic wasn't found in 'ipconfig /all' output" % nic)
def test_exe(self):
for p in psutil.process_iter():
try:
self.assertEqual(os.path.basename(p.exe()), p.name())
except psutil.Error:
pass
# --- Process class tests
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_name(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(p.name(), w.Caption)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_exe(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
# Note: wmi reports the exe as a lower case string.
# Being Windows paths case-insensitive we ignore that.
self.assertEqual(p.exe().lower(), w.ExecutablePath.lower())
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_cmdline(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(' '.join(p.cmdline()),
w.CommandLine.replace('"', ''))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_username(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
domain, _, username = w.GetOwner()
username = "%s\\%s" % (domain, username)
self.assertEqual(p.username(), username)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_rss_memory(self):
time.sleep(0.1)
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
rss = p.memory_info().rss
self.assertEqual(rss, int(w.WorkingSetSize))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_vms_memory(self):
time.sleep(0.1)
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
vms = p.memory_info().vms
# http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx
# ...claims that PageFileUsage is represented in Kilo
# bytes but funnily enough on certain platforms bytes are
# returned instead.
wmi_usage = int(w.PageFileUsage)
if (vms != wmi_usage) and (vms != wmi_usage * 1024):
self.fail("wmi=%s, psutil=%s" % (wmi_usage, vms))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_create_time(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
wmic_create = str(w.CreationDate.split('.')[0])
psutil_create = time.strftime("%Y%m%d%H%M%S",
time.localtime(p.create_time()))
self.assertEqual(wmic_create, psutil_create)
# --- psutil namespace functions and constants tests
@unittest.skipUnless('NUMBER_OF_PROCESSORS' in os.environ,
'NUMBER_OF_PROCESSORS env var is not available')
def test_cpu_count(self):
num_cpus = int(os.environ['NUMBER_OF_PROCESSORS'])
self.assertEqual(num_cpus, psutil.cpu_count())
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_total_phymem(self):
w = wmi.WMI().Win32_ComputerSystem()[0]
self.assertEqual(int(w.TotalPhysicalMemory),
psutil.virtual_memory().total)
# @unittest.skipIf(wmi is None, "wmi module is not installed")
# def test__UPTIME(self):
# # _UPTIME constant is not public but it is used internally
# # as value to return for pid 0 creation time.
# # WMI behaves the same.
# w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
# p = psutil.Process(0)
# wmic_create = str(w.CreationDate.split('.')[0])
# psutil_create = time.strftime("%Y%m%d%H%M%S",
# time.localtime(p.create_time()))
#
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_pids(self):
# Note: this test might fail if the OS is starting/killing
# other processes in the meantime
w = wmi.WMI().Win32_Process()
wmi_pids = [x.ProcessId for x in w]
wmi_pids.sort()
psutil_pids = psutil.pids()
psutil_pids.sort()
if wmi_pids != psutil_pids:
difference = \
filter(lambda x: x not in wmi_pids, psutil_pids) + \
filter(lambda x: x not in psutil_pids, wmi_pids)
self.fail("difference: " + str(difference))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_disks(self):
ps_parts = psutil.disk_partitions(all=True)
wmi_parts = wmi.WMI().Win32_LogicalDisk()
for ps_part in ps_parts:
for wmi_part in wmi_parts:
if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
if not ps_part.mountpoint:
# this is usually a CD-ROM with no disk inserted
break
try:
usage = psutil.disk_usage(ps_part.mountpoint)
except OSError as err:
if err.errno == errno.ENOENT:
# usually this is the floppy
break
else:
raise
self.assertEqual(usage.total, int(wmi_part.Size))
wmi_free = int(wmi_part.FreeSpace)
self.assertEqual(usage.free, wmi_free)
                    # 10 MB tolerance
if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
self.fail("psutil=%s, wmi=%s" % (
usage.free, wmi_free))
break
else:
self.fail("can't find partition %s" % repr(ps_part))
@unittest.skipIf(win32api is None, "pywin32 module is not installed")
def test_num_handles(self):
p = psutil.Process(os.getpid())
before = p.num_handles()
handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
win32con.FALSE, os.getpid())
after = p.num_handles()
self.assertEqual(after, before + 1)
win32api.CloseHandle(handle)
self.assertEqual(p.num_handles(), before)
@unittest.skipIf(win32api is None, "pywin32 module is not installed")
def test_num_handles_2(self):
        # Note: this fails from time to time; it doesn't necessarily
        # mean something is broken.
def call(p, attr):
            attr = getattr(p, attr, None)
if attr is not None and callable(attr):
attr()
else:
attr
p = psutil.Process(self.pid)
failures = []
for name in dir(psutil.Process):
if name.startswith('_') \
or name in ('terminate', 'kill', 'suspend', 'resume',
'nice', 'send_signal', 'wait', 'children',
'as_dict'):
continue
else:
try:
call(p, name)
num1 = p.num_handles()
call(p, name)
num2 = p.num_handles()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
else:
if num2 > num1:
fail = \
"failure while processing Process.%s method " \
"(before=%s, after=%s)" % (name, num1, num2)
failures.append(fail)
if failures:
self.fail('\n' + '\n'.join(failures))
def test_name_always_available(self):
# On Windows name() is never supposed to raise AccessDenied,
# see https://github.com/giampaolo/psutil/issues/627
for p in psutil.process_iter():
try:
p.name()
            except psutil.NoSuchProcess:
pass
@unittest.skipUnless(WINDOWS, "not a Windows system")
class TestDualProcessImplementation(unittest.TestCase):
"""
    Certain APIs on Windows have 2 internal implementations: one
    based on documented Windows APIs, another one based on
    NtQuerySystemInformation(), which gets called as a fallback in
    case the first fails because of limited permissions.
Here we test that the two methods return the exact same value,
see:
https://github.com/giampaolo/psutil/issues/304
"""
fun_names = [
# function name, tolerance
('proc_cpu_times', 0.2),
('proc_create_time', 0.5),
('proc_num_handles', 1), # 1 because impl #1 opens a handle
('proc_memory_info', 1024), # KB
('proc_io_counters', 0),
]
def test_compare_values(self):
def assert_ge_0(obj):
if isinstance(obj, tuple):
for value in obj:
self.assertGreaterEqual(value, 0, msg=obj)
elif isinstance(obj, (int, long, float)):
self.assertGreaterEqual(obj, 0)
else:
assert 0 # case not handled which needs to be fixed
def compare_with_tolerance(ret1, ret2, tolerance):
if ret1 == ret2:
return
else:
if isinstance(ret2, (int, long, float)):
diff = abs(ret1 - ret2)
self.assertLessEqual(diff, tolerance)
elif isinstance(ret2, tuple):
for a, b in zip(ret1, ret2):
diff = abs(a - b)
self.assertLessEqual(diff, tolerance)
from psutil._pswindows import ntpinfo
failures = []
for p in psutil.process_iter():
try:
nt = ntpinfo(*cext.proc_info(p.pid))
except psutil.NoSuchProcess:
continue
assert_ge_0(nt)
for name, tolerance in self.fun_names:
if name == 'proc_memory_info' and p.pid == os.getpid():
continue
if name == 'proc_create_time' and p.pid in (0, 4):
continue
meth = wrap_exceptions(getattr(cext, name))
try:
ret = meth(p.pid)
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
# compare values
try:
if name == 'proc_cpu_times':
compare_with_tolerance(ret[0], nt.user_time, tolerance)
compare_with_tolerance(ret[1],
nt.kernel_time, tolerance)
elif name == 'proc_create_time':
compare_with_tolerance(ret, nt.create_time, tolerance)
elif name == 'proc_num_handles':
compare_with_tolerance(ret, nt.num_handles, tolerance)
elif name == 'proc_io_counters':
compare_with_tolerance(ret[0], nt.io_rcount, tolerance)
compare_with_tolerance(ret[1], nt.io_wcount, tolerance)
compare_with_tolerance(ret[2], nt.io_rbytes, tolerance)
compare_with_tolerance(ret[3], nt.io_wbytes, tolerance)
elif name == 'proc_memory_info':
try:
rawtupl = cext.proc_memory_info_2(p.pid)
except psutil.NoSuchProcess:
continue
compare_with_tolerance(ret, rawtupl, tolerance)
except AssertionError:
trace = traceback.format_exc()
msg = '%s\npid=%s, method=%r, ret_1=%r, ret_2=%r' % (
trace, p.pid, name, ret, nt)
failures.append(msg)
break
if failures:
self.fail('\n\n'.join(failures))
# ---
    # same tests as above, but mimic the first (fast) method failing
    # with AccessDenied so that the fallback implementation is exercised.
# TODO: currently does not take tolerance into account.
def test_name(self):
name = psutil.Process().name()
with mock.patch("psutil._psplatform.cext.proc_exe",
side_effect=psutil.AccessDenied(os.getpid())) as fun:
            self.assertEqual(psutil.Process().name(), name)
assert fun.called
def test_memory_info(self):
mem = psutil.Process().memory_info()
with mock.patch("psutil._psplatform.cext.proc_memory_info",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().memory_info(), mem)
assert fun.called
def test_create_time(self):
ctime = psutil.Process().create_time()
with mock.patch("psutil._psplatform.cext.proc_create_time",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().create_time(), ctime)
assert fun.called
def test_cpu_times(self):
cpu_times = psutil.Process().cpu_times()
with mock.patch("psutil._psplatform.cext.proc_cpu_times",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().cpu_times(), cpu_times)
assert fun.called
def test_io_counters(self):
io_counters = psutil.Process().io_counters()
with mock.patch("psutil._psplatform.cext.proc_io_counters",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().io_counters(), io_counters)
assert fun.called
def test_num_handles(self):
        num_handles = psutil.Process().num_handles()
        with mock.patch("psutil._psplatform.cext.proc_num_handles",
                        side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().num_handles(), num_handles)
            assert fun.called
# --- other tests
def test_compare_name_exe(self):
for p in psutil.process_iter():
try:
a = os.path.basename(p.exe())
b = p.name()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
else:
self.assertEqual(a, b)
def test_zombies(self):
# test that NPS is raised by the 2nd implementation in case a
# process no longer exists
ZOMBIE_PID = max(psutil.pids()) + 5000
for name, _ in self.fun_names:
meth = wrap_exceptions(getattr(cext, name))
self.assertRaises(psutil.NoSuchProcess, meth, ZOMBIE_PID)
def main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(WindowsSpecificTestCase))
test_suite.addTest(unittest.makeSuite(TestDualProcessImplementation))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not main():
sys.exit(1)
|
|
import random
import numpy as np
import pytest
from pandas import DatetimeIndex, IntervalIndex, MultiIndex, Series
import pandas._testing as tm
class TestSeriesSortIndex:
def test_sort_index_name(self, datetime_series):
result = datetime_series.sort_index(ascending=False)
assert result.name == datetime_series.name
def test_sort_index(self, datetime_series):
datetime_series.index = datetime_series.index._with_freq(None)
rindex = list(datetime_series.index)
random.shuffle(rindex)
random_order = datetime_series.reindex(rindex)
sorted_series = random_order.sort_index()
tm.assert_series_equal(sorted_series, datetime_series)
# descending
sorted_series = random_order.sort_index(ascending=False)
tm.assert_series_equal(
sorted_series, datetime_series.reindex(datetime_series.index[::-1])
)
# compat on level
sorted_series = random_order.sort_index(level=0)
tm.assert_series_equal(sorted_series, datetime_series)
# compat on axis
sorted_series = random_order.sort_index(axis=0)
tm.assert_series_equal(sorted_series, datetime_series)
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
random_order.sort_values(axis=1)
sorted_series = random_order.sort_index(level=0, axis=0)
tm.assert_series_equal(sorted_series, datetime_series)
with pytest.raises(ValueError, match=msg):
random_order.sort_index(level=0, axis=1)
def test_sort_index_inplace(self, datetime_series):
datetime_series.index = datetime_series.index._with_freq(None)
# For GH#11402
rindex = list(datetime_series.index)
random.shuffle(rindex)
# descending
random_order = datetime_series.reindex(rindex)
result = random_order.sort_index(ascending=False, inplace=True)
assert result is None
expected = datetime_series.reindex(datetime_series.index[::-1])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(random_order, expected)
# ascending
random_order = datetime_series.reindex(rindex)
result = random_order.sort_index(ascending=True, inplace=True)
assert result is None
expected = datetime_series.copy()
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(random_order, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level="A")
tm.assert_series_equal(backwards, res)
res = s.sort_index(level=["A", "B"])
tm.assert_series_equal(backwards, res)
res = s.sort_index(level="A", sort_remaining=False)
tm.assert_series_equal(s, res)
res = s.sort_index(level=["A", "B"], sort_remaining=False)
tm.assert_series_equal(s, res)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
# implicit sort_remaining=True
res = s.sort_index(level=level)
tm.assert_series_equal(backwards, res)
# GH#13496
# sort has no effect without remaining lvls
res = s.sort_index(level=level, sort_remaining=False)
tm.assert_series_equal(s, res)
def test_sort_index_kind(self):
# GH#14444 & GH#13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(kind="mergesort")
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="quicksort")
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="heapsort")
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position(self):
series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(na_position="first")
tm.assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
index_sorted_series = series.sort_index(na_position="last")
tm.assert_series_equal(expected_series_last, index_sorted_series)
def test_sort_index_intervals(self):
s = Series(
[np.nan, 1, 2, 3], IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4])
)
result = s.sort_index()
expected = s
tm.assert_series_equal(result, expected)
result = s.sort_index(ascending=False)
expected = Series(
[3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1])
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_list, sorted_list, ascending, ignore_index, output_index",
[
([2, 3, 6, 1], [2, 3, 6, 1], True, True, [0, 1, 2, 3]),
([2, 3, 6, 1], [2, 3, 6, 1], True, False, [0, 1, 2, 3]),
([2, 3, 6, 1], [1, 6, 3, 2], False, True, [0, 1, 2, 3]),
([2, 3, 6, 1], [1, 6, 3, 2], False, False, [3, 2, 1, 0]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_list, sorted_list, ascending, ignore_index, output_index
):
# GH 30114
ser = Series(original_list)
expected = Series(sorted_list, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_ser = ser.copy()
result_ser.sort_index(**kwargs)
else:
result_ser = ser.sort_index(**kwargs)
tm.assert_series_equal(result_ser, expected)
tm.assert_series_equal(ser, Series(original_list))
def test_sort_index_ascending_list(self):
# GH#16934
# Set up a Series with a three level MultiIndex
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
[4, 3, 2, 1, 4, 3, 2, 1],
]
tuples = zip(*arrays)
mi = MultiIndex.from_tuples(tuples, names=["first", "second", "third"])
ser = Series(range(8), index=mi)
# Sort with boolean ascending
result = ser.sort_index(level=["third", "first"], ascending=False)
expected = ser.iloc[[4, 0, 5, 1, 6, 2, 7, 3]]
tm.assert_series_equal(result, expected)
# Sort with list of boolean ascending
result = ser.sort_index(level=["third", "first"], ascending=[False, True])
expected = ser.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
class TestSeriesSortIndexKey:
def test_sort_index_multiindex_key(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
result = s.sort_index(level="C", key=lambda x: -x)
tm.assert_series_equal(s, result)
result = s.sort_index(level="C", key=lambda x: x) # nothing happens
tm.assert_series_equal(backwards, result)
def test_sort_index_multiindex_key_multi_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
result = s.sort_index(level=["A", "C"], key=lambda x: -x)
tm.assert_series_equal(s, result)
result = s.sort_index(level=["A", "C"], key=lambda x: x) # nothing happens
tm.assert_series_equal(backwards, result)
def test_sort_index_key(self):
series = Series(np.arange(6, dtype="int64"), index=list("aaBBca"))
result = series.sort_index()
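        # default (bytewise) string ordering sorts uppercase 'B' before
        # the lowercase letters, hence the expected positions below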
expected = series.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: x.str.lower())
expected = series.iloc[[0, 1, 5, 2, 3, 4]]
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: x.str.lower(), ascending=False)
expected = series.iloc[[4, 2, 3, 0, 1, 5]]
tm.assert_series_equal(result, expected)
def test_sort_index_key_int(self):
series = Series(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64"))
result = series.sort_index()
tm.assert_series_equal(result, series)
result = series.sort_index(key=lambda x: -x)
expected = series.sort_index(ascending=False)
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: 2 * x)
tm.assert_series_equal(result, series)
def test_sort_index_kind_key(self, sort_by_key):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(kind="mergesort", key=sort_by_key)
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="quicksort", key=sort_by_key)
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="heapsort", key=sort_by_key)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_kind_neg_key(self):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[4, 3, 3, 2, 1], dtype=object)
index_sorted_series = series.sort_index(kind="mergesort", key=lambda x: -x)
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="quicksort", key=lambda x: -x)
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="heapsort", key=lambda x: -x)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position_key(self, sort_by_key):
series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(na_position="first", key=sort_by_key)
tm.assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
index_sorted_series = series.sort_index(na_position="last", key=sort_by_key)
tm.assert_series_equal(expected_series_last, index_sorted_series)
def test_changes_length_raises(self):
s = Series([1, 2, 3])
with pytest.raises(ValueError, match="change the shape"):
s.sort_index(key=lambda x: x[:1])
def test_sort_values_key_type(self):
s = Series([1, 2, 3], DatetimeIndex(["2008-10-24", "2008-11-23", "2007-12-22"]))
result = s.sort_index(key=lambda x: x.month)
expected = s.iloc[[0, 1, 2]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.day)
expected = s.iloc[[2, 1, 0]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.year)
expected = s.iloc[[2, 0, 1]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.month_name())
expected = s.iloc[[2, 1, 0]]
tm.assert_series_equal(result, expected)
|
|
import os
from functools import partial, wraps
import docopt
import six
from six.moves import urllib
import dcoscli
from dcos import (cmds, config, emitting, errors,
http, mesos, packagemanager, subprocess, util)
from dcos.cosmos import get_cosmos_url
from dcos.errors import DCOSException, DefaultError
from dcoscli import log, metrics, tables
from dcoscli.subcommand import default_command_info, default_doc
from dcoscli.util import confirm, decorate_docopt_usage
logger = util.get_logger(__name__)
emitter = emitting.FlatEmitter()
DIAGNOSTICS_BASE_URL = '/system/health/v1/report/diagnostics/'
# if a bundle size is more than 100 MB then warn the user.
BUNDLE_WARN_SIZE = 100 * 1000 * 1000
def main(argv):
try:
return _main(argv)
except DCOSException as e:
emitter.publish(e)
return 1
@decorate_docopt_usage
def _main(argv):
args = docopt.docopt(
default_doc("node"),
argv=argv,
version="dcos-node version {}".format(dcoscli.version))
return cmds.execute(_cmds(), args)
def _cmds():
"""
:returns: All of the supported commands
:rtype: [Command]
"""
return [
cmds.Command(
hierarchy=['node', '--info'],
arg_keys=[],
function=_info),
cmds.Command(
hierarchy=['node', 'log'],
arg_keys=['--follow', '--lines', '--leader', '--mesos-id',
'--component', '--filter'],
function=_log),
cmds.Command(
hierarchy=['node', 'metrics', 'details'],
arg_keys=['<mesos-id>', '--json'],
function=partial(_metrics, False)),
cmds.Command(
hierarchy=['node', 'metrics', 'summary'],
arg_keys=['<mesos-id>', '--json'],
function=partial(_metrics, True)),
cmds.Command(
hierarchy=['node', 'list-components'],
arg_keys=['--leader', '--mesos-id', '--json'],
function=_list_components),
cmds.Command(
hierarchy=['node', 'ssh'],
arg_keys=['--leader', '--mesos-id', '--option', '--config-file',
'--user', '--master-proxy', '--proxy-ip', '--private-ip',
'<command>'],
function=_ssh),
cmds.Command(
hierarchy=['node', 'diagnostics', 'create'],
arg_keys=['<nodes>'],
function=_bundle_create),
cmds.Command(
hierarchy=['node', 'diagnostics', 'delete'],
arg_keys=['<bundle>'],
function=_bundle_delete),
cmds.Command(
hierarchy=['node', 'diagnostics', 'download'],
arg_keys=['<bundle>', '--location'],
function=_bundle_download),
cmds.Command(
hierarchy=['node', 'diagnostics'],
arg_keys=['--list', '--status', '--cancel', '--json'],
function=_bundle_manage),
cmds.Command(
hierarchy=['node'],
arg_keys=['--json', '--field'],
function=_list)
]
def diagnostics_error(fn):
@wraps(fn)
def check_for_diagnostics_error(*args, **kwargs):
response = fn(*args, **kwargs)
if response.status_code != 200:
err_msg = ('Error making {} request\nURL: '
'{}, status_code: {}.'.format(args[1], args[0],
response.status_code))
if not kwargs.get('stream'):
err_status = _read_http_response_body(response).get('status')
if err_status:
err_msg = err_status
raise DCOSException(err_msg)
return response
return check_for_diagnostics_error
def _check_3dt_version():
"""
    Check that the cluster has the diagnostics capability.
    :raises: DCOSException if the cluster does not have the diagnostics capability
"""
cosmos = packagemanager.PackageManager(get_cosmos_url())
if not cosmos.has_capability('SUPPORT_CLUSTER_REPORT'):
raise DCOSException(
'DC/OS backend does not support diagnostics capabilities in this '
'version. Must be DC/OS >= 1.8')
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size # noqa
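# Example: sizeof_fmt(BUNDLE_WARN_SIZE) returns '95.4MiB'.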
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def _get_bundles_json():
"""
Get a json with a list of diagnostics bundles.
:return: available diagnostics bundles on a cluster.
:rtype: dict
"""
return _do_diagnostics_request(
urllib.parse.urljoin(DIAGNOSTICS_BASE_URL, 'list/all'),
'GET')
def _get_bundle_list():
"""
    Get a list of (bundle_file_name, file_size) tuples.
:return: list of diagnostic bundles
:rtype: list of tuples
"""
available_bundles = []
for _, bundle_files in _get_bundles_json().items():
if bundle_files is None:
continue
for bundle_file_obj in bundle_files:
if ('file_name' not in bundle_file_obj
or 'file_size' not in bundle_file_obj):
raise DCOSException(
'Request to get a list of available diagnostic bundles '
'returned unexpected response {}'.format(bundle_file_obj))
available_bundles.append(
(os.path.basename(bundle_file_obj['file_name']),
bundle_file_obj['file_size']))
return available_bundles
def _bundle_manage(list_bundles, status, cancel, json):
"""
Manage diagnostic bundles
:param list_bundles: a list of available bundles
:type list_bundles: bool
:param status: show diagnostics job status
:type status: bool
:param cancel: cancel diagnostics job
:type cancel: bool
:return: process return code
:rtype: int
"""
_check_3dt_version()
if list_bundles:
if json:
emitter.publish(_get_bundles_json())
return 0
available_bundles = _get_bundle_list()
if not available_bundles:
emitter.publish("No available diagnostic bundles")
return 0
emitter.publish("Available diagnostic bundles:")
for available_bundle in sorted(available_bundles,
key=lambda t: t[0]):
emitter.publish('{} {}'.format(available_bundle[0],
sizeof_fmt(available_bundle[1])))
return 0
elif status:
url = urllib.parse.urljoin(DIAGNOSTICS_BASE_URL, 'status/all')
bundle_response = _do_diagnostics_request(url, 'GET')
if json:
emitter.publish(bundle_response)
return 0
for host, props in sorted(bundle_response.items()):
emitter.publish(host)
for key, value in sorted(props.items()):
emitter.publish(' {}: {}'.format(key, value))
emitter.publish('\n')
return 0
elif cancel:
url = urllib.parse.urljoin(DIAGNOSTICS_BASE_URL, 'cancel')
bundle_response = _do_diagnostics_request(url, 'POST')
if json:
emitter.publish(bundle_response)
return 0
if 'status' not in bundle_response:
raise DCOSException(
'Request to cancel a diagnostics job {} returned '
'an unexpected response {}'.format(url, bundle_response))
emitter.publish(bundle_response['status'])
return 0
else:
raise DCOSException(
'Must specify one of list_bundles, status, cancel')
@diagnostics_error
def _do_request(url, method, timeout=None, stream=False, **kwargs):
"""
make HTTP request
:param url: url
:type url: string
:param method: HTTP method, GET or POST
:type method: string
    :param timeout: HTTP request timeout in seconds; defaults to
        core.timeout, or 3 minutes if that is not set
:type timeout: integer
:param stream: stream parameter for requests lib
:type stream: bool
:return: http response
:rtype: requests.Response
"""
def _is_success(status_code):
# consider 400 and 503 to be successful status codes.
# API will return the error message.
if status_code in [200, 400, 503]:
return True
return False
# if timeout is not passed, try to read `core.timeout`
# if `core.timeout` is not set, default to 3 min.
if timeout is None:
timeout = config.get_config_val('core.timeout')
if not timeout:
timeout = 180
base_url = config.get_config_val("core.dcos_url")
if not base_url:
raise config.missing_config_exception(['core.dcos_url'])
url = urllib.parse.urljoin(base_url, url)
if method.lower() == 'get':
http_response = http.get(url, is_success=_is_success, timeout=timeout,
**kwargs)
elif method.lower() == 'post':
http_response = http.post(url, is_success=_is_success, timeout=timeout,
stream=stream, **kwargs)
else:
raise DCOSException('Unsupported HTTP method: ' + method)
return http_response
def _do_diagnostics_request(url, method, **kwargs):
"""
Make HTTP request and expect a JSON response.
:param url: url
:type url: string
:param method: HTTP method, GET or POST
:type method: string
:return: bundle JSON response
:rtype: dict
"""
http_response = _do_request(url, method, **kwargs)
return _read_http_response_body(http_response)
def _read_http_response_body(http_response):
"""
    Read a requests HTTP response and deserialize it to json.
    :param http_response: http response
    :type http_response: requests.Response object
:return: deserialized json
:rtype: dict
"""
data = b''
try:
for chunk in http_response.iter_content(1024):
data += chunk
bundle_response = util.load_jsons(data.decode('utf-8'))
return bundle_response
except DCOSException:
raise
def _bundle_download(bundle, location):
"""
Download diagnostics bundle.
:param bundle: bundle file name.
:type bundle: string
:param location: location on a local filesystem.
:type location: string
:return: status code
:rtype: int
"""
# make sure the requested bundle exists
bundle_size = 0
for available_bundle in _get_bundle_list():
# _get_bundle_list must return a list of tuples
# where first element is file name and second is its size.
if len(available_bundle) != 2:
raise DCOSException(
'Request to get a list of diagnostic bundles returned an '
'unexpected response: {}'.format(available_bundle))
# available_bundle[0] is a file name
# available_bundle[1] is a file size
if available_bundle[0] == bundle:
bundle_size = available_bundle[1]
url = urllib.parse.urljoin(DIAGNOSTICS_BASE_URL, 'serve/' + bundle)
bundle_location = os.path.join(os.getcwd(), bundle)
if location:
if os.path.isdir(location):
bundle_location = os.path.join(location, bundle)
else:
bundle_location = location
if bundle_size > BUNDLE_WARN_SIZE:
msg = ('Diagnostics bundle size is {}, '
'are you sure you want to download it?')
if not confirm(msg.format(sizeof_fmt(bundle_size)), False):
return 0
r = _do_request(url, 'GET', stream=True)
try:
with open(bundle_location, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
except Exception as e:
raise DCOSException(e)
emitter.publish('Diagnostics bundle downloaded to ' + bundle_location)
return 0
def _bundle_delete(bundle):
"""
Delete a bundle
:param bundle: file name
:type: str
:return: status code
:rtype: int
"""
_check_3dt_version()
url = urllib.parse.urljoin(
DIAGNOSTICS_BASE_URL, 'delete/' + bundle)
response = _do_diagnostics_request(url, 'POST')
if 'status' not in response:
raise DCOSException(
'Request to delete the diagnostics bundle {} returned an '
'unexpected response {}'.format(url, response))
emitter.publish(response['status'])
return 0
def _bundle_create(nodes):
"""
Create a diagnostics bundle.
:param nodes: a list of nodes to collect the logs from.
:type nodes: list
:returns: process return code
:rtype: int
"""
_check_3dt_version()
url = urllib.parse.urljoin(DIAGNOSTICS_BASE_URL, 'create')
response = _do_diagnostics_request(url,
'POST',
json={'nodes': nodes})
if ('status' not in response or 'extra' not in response
or 'bundle_name' not in response['extra']):
raise DCOSException(
'Request to create a diagnostics bundle {} returned an '
'unexpected response {}'.format(url, response))
emitter.publish('\n{}, available bundle: {}'.format(
response['status'],
response['extra']['bundle_name']))
return 0
def _info():
"""Print node cli information.
:returns: process return code
:rtype: int
"""
emitter.publish(default_command_info("node"))
return 0
def _list(json_, extra_field_names):
"""List DC/OS nodes
:param json_: If true, output json.
Otherwise, output a human readable table.
:type json_: bool
:param extra_field_names: List of additional field names to include in
table output
:type extra_field_names: [str]
:returns: process return code
:rtype: int
"""
client = mesos.DCOSClient()
masters = mesos.MesosDNSClient().hosts('master.mesos.')
master_state = client.get_master_state()
slaves = client.get_state_summary()['slaves']
for master in masters:
if master['ip'] == master_state['hostname']:
master['type'] = 'master (leader)'
for key in ('id', 'pid', 'version'):
master[key] = master_state.get(key)
else:
master['type'] = 'master'
for slave in slaves:
slave['type'] = 'agent'
nodes = masters + slaves
if json_:
emitter.publish(nodes)
else:
for extra_field_name in extra_field_names:
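            # an extra field may be given as "<header>:<dotted.path>"; only
            # the dotted path part is validated against the first agent below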
field_name = extra_field_name.split(':')[-1]
if len(slaves) > 0:
try:
tables._dotted_itemgetter(field_name)(slaves[0])
except KeyError:
emitter.publish(errors.DefaultError(
'Field "%s" is invalid.' % field_name))
return
table = tables.node_table(nodes, extra_field_names)
output = six.text_type(table)
if output:
emitter.publish(output)
else:
emitter.publish(errors.DefaultError('No agents found.'))
def _log(follow, lines, leader, slave, component, filters):
""" Prints the contents of leader and slave logs.
:param follow: same as unix tail's -f
:type follow: bool
:param lines: number of lines to print
:type lines: int
:param leader: whether to print the leading master's log
:type leader: bool
:param slave: the slave ID to print
:type slave: str | None
:param component: DC/OS component name
:type component: string
:param filters: a list of filters ["key:value", ...]
:type filters: list
:returns: process return code
:rtype: int
"""
if not (leader or slave):
raise DCOSException('You must choose one of --leader or --mesos-id.')
if lines is None:
lines = 10
lines = util.parse_int(lines)
if not log.has_journald_capability():
if component or filters:
raise DCOSException('--component or --filter is not '
'supported by files API')
# fall back to mesos files API.
mesos_files = _mesos_files(leader, slave)
log.log_files(mesos_files, follow, lines)
return 0
    # dcos-log does not support fetching logs from the leader and an
    # agent at the same time.
if leader and slave:
raise DCOSException(
'You must choose one of --leader or --mesos-id.')
# if journald logging enabled.
_dcos_log(follow, lines, leader, slave, component, filters)
return 0
def _metrics(summary, mesos_id, json_):
""" Get metrics from the specified agent.
:param summary: summarise output if true, output all if false
:type summary: bool
:param mesos_id: mesos node id
:type mesos_id: str
:param json_: print raw JSON
:type json_: bool
:returns: Process status
:rtype: int
"""
endpoint = '/system/v1/agent/{}/metrics/v0/node'.format(mesos_id)
    dcos_url = config.get_config_val('core.dcos_url')
    if not dcos_url:
        raise config.missing_config_exception(['core.dcos_url'])
    url = dcos_url.rstrip('/') + endpoint
return metrics.print_node_metrics(url, summary, json_)
def _get_slave_ip(slave):
""" Get an agent IP address based on mesos id.
    If the slave parameter is empty, the function returns None.
:param slave: mesos node id
:type slave: str
:return: node ip address
:rtype: str
"""
if not slave:
return
summary = mesos.DCOSClient().get_state_summary()
if 'slaves' not in summary:
raise DCOSException(
'Invalid summary report. '
'Missing field `slaves`. {}'.format(summary))
for s in summary['slaves']:
if 'hostname' not in s or 'id' not in s:
raise DCOSException(
'Invalid summary report. Missing field `id` '
'or `hostname`. {}'.format(summary))
if s['id'] == slave:
return s['hostname']
raise DCOSException('Agent `{}` not found'.format(slave))
def _list_components(leader, slave, use_json):
""" List components for a leader or slave_ip node
:param leader: use leader ip flag
:type leader: bool
:param slave_ip: agent ip address
:type slave_ip: str
:param use_json: print components in json format
:type use_json: bool
"""
if not (leader or slave):
raise DCOSException('--leader or --mesos-id must be provided')
if leader and slave:
raise DCOSException(
'Unable to use leader and mesos id at the same time')
slave_ip = _get_slave_ip(slave)
if slave_ip:
print_components(slave_ip, use_json)
return
leaders = mesos.MesosDNSClient().hosts('leader.mesos')
if len(leaders) != 1:
raise DCOSException('Expecting one leader. Got {}'.format(leaders))
if 'ip' not in leaders[0]:
raise DCOSException(
'Invalid leader response, missing field `ip`. '
'Got {}'.format(leaders[0]))
print_components(leaders[0]['ip'], use_json)
def print_components(ip, use_json):
""" Print components for a given node ip.
    The data is taken from the 3dt endpoint:
/system/health/v1/nodes/<ip>/units
:param ip: DC/OS node ip address
:type ip: str
:param use_json: print components in json format
:type use_json: bool
"""
dcos_url = config.get_config_val('core.dcos_url').rstrip("/")
if not dcos_url:
raise config.missing_config_exception(['core.dcos_url'])
url = dcos_url + '/system/health/v1/nodes/{}/units'.format(ip)
response = http.get(url).json()
if 'units' not in response:
raise DCOSException(
'Invalid response. Missing field `units`. {}'.format(response))
if use_json:
emitter.publish(response['units'])
else:
for component in response['units']:
emitter.publish(component['id'])
def _get_unit_type(unit_name):
""" Get the full unit name including the type postfix
or default to service.
:param unit_name: unit name with or without type
:type unit_name: str
:return: unit name with type
:rtype: str
"""
if not unit_name:
raise DCOSException('Empty systemd unit parameter')
# https://www.freedesktop.org/software/systemd/man/systemd.unit.html
unit_types = ['service', 'socket', 'device', 'mount', 'automount',
'swap', 'target', 'path', 'timer', 'slice', 'scope']
for unit_type in unit_types:
if unit_name.endswith('.{}'.format(unit_type)):
return unit_name
return '{}.service'.format(unit_name)
def _build_leader_url(component):
""" Return a leader URL based on passed component name.
:param component: DC/OS component name
:type component: str
:return: leader logs url
:rtype: str
"""
leaders_map = {
'dcos-marathon.service': 'marathon',
'dcos-mesos-master.service': 'mesos',
}
# set default leader to 'mesos' to be consistent with files API
leader_prefix = 'mesos'
if component:
component_name = _get_unit_type(component)
leader_prefix = leaders_map.get(component_name)
if not leader_prefix:
raise DCOSException('Component {} does not have a leader'.format(
component))
return '/leader/{}/logs/v1/'.format(leader_prefix)
def _dcos_log(follow, lines, leader, slave, component, filters):
""" Print logs from dcos-log backend.
:param follow: same as unix tail's -f
:type follow: bool
:param lines: number of lines to print
:type lines: int
:param leader: whether to print the leading master's log
:type leader: bool
:param slave: the slave ID to print
:type slave: str | None
:param component: DC/OS component name
:type component: string
:param filters: a list of filters ["key:value", ...]
:type filters: list
"""
filter_query = ''
if component:
filters.append('_SYSTEMD_UNIT:{}'.format(_get_unit_type(component)))
for f in filters:
key_value = f.split(':')
if len(key_value) != 2:
raise SystemExit('Invalid filter parameter {}. '
'Must be --filter=key:value'.format(f))
filter_query += '&filter={}'.format(f)
endpoint = '/system/v1'
if leader:
endpoint += _build_leader_url(component)
elif slave:
endpoint += '/agent/{}/logs/v1/'.format(slave)
endpoint_type = 'range'
if follow:
endpoint_type = 'stream'
dcos_url = config.get_config_val('core.dcos_url').rstrip("/")
if not dcos_url:
raise config.missing_config_exception(['core.dcos_url'])
url = (dcos_url + endpoint + endpoint_type +
'/?skip_prev={}'.format(lines) + filter_query)
if follow:
return log.follow_logs(url)
return log.print_logs_range(url)
def _mesos_files(leader, slave_id):
"""Returns the MesosFile objects to log
:param leader: whether to include the leading master's log file
:type leader: bool
:param slave_id: the ID of a slave. used to include a slave's log
file
:type slave_id: str | None
:returns: MesosFile objects
:rtype: [MesosFile]
"""
files = []
if leader:
files.append(mesos.MesosFile('/master/log'))
if slave_id:
slave = mesos.get_master().slave(slave_id)
files.append(mesos.MesosFile('/slave/log', slave=slave))
return files
def _ssh(leader, slave, option, config_file, user, master_proxy, proxy_ip,
private_ip, command):
"""SSH into a DC/OS node using the IP addresses found in master's
state.json
:param leader: True if the user has opted to SSH into the leading
master
:type leader: bool | None
:param slave: The slave ID if the user has opted to SSH into a slave
:type slave: str | None
:param option: SSH option
:type option: [str]
:param config_file: SSH config file
:type config_file: str | None
:param user: SSH user
:type user: str | None
:param master_proxy: If True, SSH-hop from a master
:type master_proxy: bool | None
:param proxy_ip: If set, SSH-hop from this IP address
:type proxy_ip: str | None
:param private_ip: The private IP address of the node we want to SSH to.
:type private_ip: str | None
:param command: Command to run on the node
:type command: str | None
:rtype: int
:returns: process return code
"""
ssh_options = util.get_ssh_options(config_file, option)
dcos_client = mesos.DCOSClient()
if leader:
host = mesos.MesosDNSClient().hosts('leader.mesos.')[0]['ip']
elif private_ip:
host = private_ip
else:
summary = dcos_client.get_state_summary()
slave_obj = next((slave_ for slave_ in summary['slaves']
if slave_['id'] == slave),
None)
if slave_obj:
host = mesos.parse_pid(slave_obj['pid'])[1]
else:
raise DCOSException('No slave found with ID [{}]'.format(slave))
if command is None:
command = ''
master_public_ip = dcos_client.metadata().get('PUBLIC_IPV4')
if master_proxy:
if not master_public_ip:
raise DCOSException(("Cannot use --master-proxy. Failed to find "
"'PUBLIC_IPV4' at {}").format(
dcos_client.get_dcos_url('metadata')))
proxy_ip = master_public_ip
if proxy_ip:
if not os.environ.get('SSH_AUTH_SOCK'):
raise DCOSException(
"There is no SSH_AUTH_SOCK env variable, which likely means "
"you aren't running `ssh-agent`. `dcos node ssh "
"--master-proxy/--proxy-ip` depends on `ssh-agent` to safely "
"use your private key to hop between nodes in your cluster. "
"Please run `ssh-agent`, then add your private key with "
"`ssh-add`.")
cmd = "ssh -A -t {0}{1}@{2} ssh -A -t {0}{1}@{3} {4}".format(
ssh_options,
user,
proxy_ip,
host,
command)
else:
cmd = "ssh -t {0}{1}@{2} {3}".format(
ssh_options,
user,
host,
command)
emitter.publish(DefaultError("Running `{}`".format(cmd)))
if (not master_proxy and not proxy_ip) and master_public_ip:
emitter.publish(
DefaultError("If you are running this command from a separate "
"network than DC/OS, consider using "
"`--master-proxy` or `--proxy-ip`"))
return subprocess.Subproc().call(cmd, shell=True)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC network functionality - Port Forwarding Rules.
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.integration.lib.base import (stopRouter,
startRouter,
Account,
VpcOffering,
VPC,
ServiceOffering,
NATRule,
NetworkACL,
PublicIPAddress,
NetworkOffering,
Network,
VirtualMachine,
LoadBalancerRule,
StaticNATRule)
from marvin.integration.lib.common import (get_domain,
get_zone,
get_template,
cleanup_resources,
list_routers)
import socket
import time
class Services:
"""Test VPC network services - Port Forwarding Rules Test Data Class.
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"host1": None,
"host2": None,
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"network_offering": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Vpn": 'VpcVirtualRouter',
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"network_offering_no_lb": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"vpc_offering": {
"name": 'VPC off',
"displaytext": 'VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.0.0.1/24'
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"lbrule": {
"name": "SSH",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"openfirewall": False,
"startport": 22,
"endport": 2222,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"lbrule_http": {
"name": "HTTP",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 80,
"publicport": 8888,
"openfirewall": False,
"startport": 80,
"endport": 8888,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"http_rule": {
"privateport": 80,
"publicport": 80,
"startport": 80,
"endport": 80,
"cidrlist": '0.0.0.0/0',
"protocol": "TCP"
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"timeout": 10,
}
class TestVPCNetworkPFRules(cloudstackTestCase):
@classmethod
def setUpClass(cls):
        # We want to fail quicker if it's a failure
socket.setdefaulttimeout(60)
cls.api_client = super(
TestVPCNetworkPFRules,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
self.debug("Creating a VPC offering..")
self.vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
self.debug("Enabling the VPC offering created")
self.vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating a VPC network in the account: %s" % self.account.name)
self.services["vpc"]["cidr"] = '10.1.1.1/16'
self.vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
return
def tearDown(self):
try:
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
return
def get_vpcrouter(self):
routers = list_routers(self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(len(routers),
0,
"Check list router response"
)
router = routers[0]
return router
def stop_vpcrouter(self):
router = self.get_vpcrouter()
self.debug("Stopping router ID: %s" % router.id)
cmd = stopRouter.stopRouterCmd()
cmd.id = router.id
self.apiclient.stopRouter(cmd)
routers = list_routers(self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
router = routers[0]
self.assertEqual(router.state,
'Stopped',
"Check list router response for router state"
)
return router
def start_vpcrouter(self, router):
# Start the VPC Router
self.debug("Starting router ID: %s" % router.id)
cmd = startRouter.startRouterCmd()
cmd.id = router.id
self.apiclient.startRouter(cmd)
routers = list_routers(self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
zoneid=self.zone.id
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
router = routers[0]
self.assertEqual(router.state,
'Running',
"Check list router response for router state"
)
def check_ssh_into_vm(self, vm, public_ip, testnegative=False):
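        # testnegative=False: SSH is expected to succeed;
        # testnegative=True: SSH is expected to fail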
self.debug("Checking if we can SSH into VM=%s on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
try:
vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
            if not testnegative:
                self.debug("SSH into VM=%s on public_ip=%s succeeded" % (vm.name, public_ip.ipaddress.ipaddress))
            else:
                self.fail("SSH into VM=%s on public_ip=%s succeeded, but was expected to fail" % (vm.name, public_ip.ipaddress.ipaddress))
except:
if not testnegative:
self.fail("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
else:
self.debug("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
def check_wget_from_vm(self, vm, public_ip, testnegative=False):
import urllib
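        # the guest VM is expected to serve /test.html over HTTP
        # (see the test step descriptions)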
self.debug("Checking if we can wget from a VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
try:
urllib.urlretrieve("http://%s/test.html" % public_ip.ipaddress.ipaddress, filename="test.html")
            if not testnegative:
                self.debug("Successfully fetched test.html from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
            else:
                self.fail("Fetched test.html from VM=%s http server on public_ip=%s, but expected the request to fail" % (vm.name, public_ip.ipaddress.ipaddress))
except:
if not testnegative:
self.fail("Failed to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.debug("Failed to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
def create_natrule(self, vm, public_ip, network, services=None):
self.debug("Creating NAT rule in network for vm with public IP")
if not services:
services = self.services["natrule"]
nat_rule = NATRule.create(self.apiclient,
vm,
services,
ipaddressid=public_ip.ipaddress.id,
openfirewall=False,
networkid=network.id,
vpcid=self.vpc.id
)
self.debug("Adding NetworkACL rules to make NAT rule accessible")
nwacl_nat = NetworkACL.create(self.apiclient,
networkid=network.id,
services=services,
traffictype='Ingress'
)
self.debug('nwacl_nat=%s' % nwacl_nat.__dict__)
return nat_rule
def acquire_publicip(self, network):
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id,
vpcid=self.vpc.id
)
self.debug("Associated %s with network %s" % (public_ip.ipaddress.ipaddress,
network.id
))
return public_ip
def create_vpc(self, cidr='10.1.2.1/16'):
self.debug("Creating a VPC offering..")
self.services["vpc_offering"]["name"] = self.services["vpc_offering"]["name"] + str(cidr)
vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
self._cleanup.append(vpc_off)
self.debug("Enabling the VPC offering created")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating a VPC network in the account: %s" % self.account.name)
self.services["vpc"]["cidr"] = cidr
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
return vpc
    def create_network(self, net_offering, gateway='10.1.1.1', vpc=None):
try:
self.debug('Create NetworkOffering')
net_offerring["name"] = "NET_OFF-" + str(gateway)
nw_off = NetworkOffering.create(self.apiclient,
                                            net_offering,
conservemode=False
)
# Enable Network offering
nw_off.update(self.apiclient, state='Enabled')
self._cleanup.append(nw_off)
self.debug('Created and Enabled NetworkOffering')
self.services["network"]["name"] = "NETWORK-" + str(gateway)
self.debug('Adding Network=%s' % self.services["network"])
obj_network = Network.create(self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=nw_off.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id if vpc else self.vpc.id
)
self.debug("Created network with ID: %s" % obj_network.id)
return obj_network
        except Exception as e:
            self.fail('Unable to create a Network with offering=%s because of %s ' % (net_offering, e))
def deployvm_in_network(self, network, host_id=None):
try:
self.debug('Creating VM in network=%s' % network.name)
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)],
hostid=host_id
)
self.debug('Created VM=%s in network=%s' % (vm.id, network.name))
return vm
except:
self.fail('Unable to create VM in a Network=%s' % network.name)
def create_lbrule(self, public_ip, network, vmarray, services=None):
self.debug("Creating LB rule for IP address: %s" %
public_ip.ipaddress.ipaddress)
objservices = None
if services:
objservices = services
else:
objservices = self.services["lbrule"]
lb_rule = LoadBalancerRule.create(
self.apiclient,
objservices,
ipaddressid=public_ip.ipaddress.id,
accountid=self.account.name,
networkid=network.id,
vpcid=self.vpc.id,
domainid=self.account.domainid
)
self.debug("Adding virtual machines %s and %s to LB rule" % (vmarray))
lb_rule.assign(self.apiclient, vmarray)
return lb_rule
def open_egress_to_world(self, network):
self.debug("Adding Egress rules to network %s and %s to allow access to internet" % (network.name,self.services["http_rule"]))
nwacl_internet_1 = NetworkACL.create(
self.apiclient,
networkid=network.id,
services=self.services["http_rule"],
                                traffictype='Egress'
)
return nwacl_internet_1
@attr(tags=["advanced", "intervlan"])
def test_01_network_services_VPC_StopCreatePF(self):
""" Test : Create VPC PF rules on acquired public ip when VpcVirtualRouter is stopped
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Stop the VPC Virtual Router.
# 6. Use the Create PF rule for vm in network1.
# 7. Start VPC Virtual Router.
# 8. Successfully ssh into the Guest VM using the PF rule
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
public_ip_1 = self.acquire_publicip(network_1)
#ensure vm is accessible over public ip
nat_rule = self.create_natrule(vm_1, public_ip_1, network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
#remove the nat rule
nat_rule.delete(self.apiclient)
router = self.stop_vpcrouter()
#recreate nat rule
self.create_natrule(vm_1, public_ip_1, network_1)
self.start_vpcrouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
def test_02_network_services_VPC_CreatePF(self):
""" Test Create VPC PF rules on acquired public ip when VpcVirtualRouter is Running
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
public_ip_1 = self.acquire_publicip(network_1)
self.create_natrule( vm_1, public_ip_1, network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
def test_03_network_services_VPC_StopCreateMultiplePF(self):
""" Test Create multiple VPC PF rules on acquired public ip in diff't networks when VpcVirtualRouter is stopped
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 in network1.
# 6. Deploy vm2 in network2.
# 7. Stop the VPC Virtual Router.
# 8. Use the Create PF rule for vm1 in network1.
# 9. Use the Create PF rule for vm2 in network2.
# 10. Start VPC Virtual Router.
# 11. Successfully ssh into the Guest VM1 and VM2 using the PF rule
network_1 = self.create_network(self.services["network_offering_no_lb"])
network_2 = self.create_network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.deployvm_in_network(network_1)
vm_2 = self.deployvm_in_network(network_2)
        # wait until the VMs are up before stopping the VR
time.sleep(120)
public_ip_1 = self.acquire_publicip(network_1)
public_ip_2 = self.acquire_publicip(network_2)
router = self.stop_vpcrouter()
self.create_natrule(vm_1, public_ip_1, network_1)
self.create_natrule(vm_2, public_ip_2, network_2)
self.start_vpcrouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
def test_04_network_services_VPC_CreateMultiplePF(self):
""" Test Create multiple VPC PF rules on acquired public ip in diff't networks when VpcVirtualRouter is running
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 in network1.
# 6. Deploy vm2 in network2.
# 7. Use the Create PF rule for vm1 in network1.
# 8. Use the Create PF rule for vm2 in network2.
# 9. Successfully ssh into the Guest VM1 and VM2 using the PF rule
network_1 = self.create_network(self.services["network_offering"])
network_2 = self.create_network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.deployvm_in_network(network_1)
vm_2 = self.deployvm_in_network(network_2)
public_ip_1 = self.acquire_publicip(network_1)
public_ip_2 = self.acquire_publicip(network_2)
self.create_natrule(vm_1, public_ip_1, network_1)
self.create_natrule(vm_2, public_ip_2, network_2)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
def test_05_network_services_VPC_StopDeletePF(self):
""" Test delete a PF rule in VPC when VpcVirtualRouter is Stopped
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Stop the VPC Virtual Router.
# 9. Delete internet PF rule
# 10. Start VPC Virtual Router.
# 11. wget a file present on http server of VM1 should fail
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
public_ip_1 = self.acquire_publicip(network_1)
self.create_natrule(vm_1, public_ip_1, network_1)
http_rule = self.create_natrule(vm_1, public_ip_1, network_1, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
router = self.stop_vpcrouter()
http_rule.delete(self.apiclient)
self.start_vpcrouter(router)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
def test_06_network_services_VPC_DeletePF(self):
""" Test delete a PF rule in VPC when VpcVirtualRouter is Running
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
        # 8. Delete internet PF rule
        # 9. wget a file present on http server of VM1 should fail
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
public_ip_1 = self.acquire_publicip(network_1)
self.create_natrule(vm_1, public_ip_1, network_1)
http_rule=self.create_natrule(vm_1, public_ip_1, network_1, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
http_rule.delete(self.apiclient)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
def test_07_network_services_VPC_StopDeleteAllPF(self):
""" Test delete all PF rules in VPC when VpcVirtualRouter is Stopped
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Stop the VPC Virtual Router.
# 9. Delete all PF rule
# 10. Start VPC Virtual Router.
# 11. wget a file present on http server of VM1 should fail
# 12. ssh into Guest VM using the PF rule should fail
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
public_ip_1 = self.acquire_publicip(network_1)
nat_rule = self.create_natrule(vm_1, public_ip_1, network_1)
http_rule = self.create_natrule(vm_1, public_ip_1, network_1, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
router = self.stop_vpcrouter()
http_rule.delete(self.apiclient)
nat_rule.delete(self.apiclient)
self.start_vpcrouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
def test_08_network_services_VPC_DeleteAllPF(self):
""" Test delete all PF rules in VPC when VpcVirtualRouter is Running
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Delete all PF rule
# 9. wget a file present on http server of VM1 should fail
# 10. ssh into Guest VM using the PF rule should fail
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
public_ip_1 = self.acquire_publicip(network_1)
nat_rule = self.create_natrule(vm_1, public_ip_1, network_1)
http_rule = self.create_natrule(vm_1, public_ip_1, network_1, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
http_rule.delete(self.apiclient)
nat_rule.delete(self.apiclient)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
def test_09_network_services_VPC_StopDeleteAllMultiplePF(self):
""" Test delete all PF rules in VPC across multiple networks when VpcVirtualRouter is Stopped
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16.
# 2. Create a Network offering - NO1 with all supported services.
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 and vm2 in network1.
# 6. Deploy vm3 and vm4 in network2.
# 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
# 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
# 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
# 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
# 11. Stop VPC Virtual Router.
# 12. Delete all PF rules for vm1, vm2, vm3 and vm4.
# 13. Start VPC Virtual Router.
# 14. Fail to ssh and http to vm1, vm2, vm3 and vm4.
network_1 = self.create_network(self.services["network_offering"])
network_2 = self.create_network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.deployvm_in_network(network_1)
vm_2 = self.deployvm_in_network(network_1)
vm_3 = self.deployvm_in_network(network_2)
vm_4 = self.deployvm_in_network(network_2)
public_ip_1 = self.acquire_publicip(network_1)
public_ip_2 = self.acquire_publicip(network_1)
nat_rule1 = self.create_natrule(vm_1, public_ip_1, network_1)
nat_rule2 = self.create_natrule(vm_2, public_ip_2, network_1)
http_rule1 = self.create_natrule(vm_1, public_ip_1, network_1, self.services["http_rule"])
http_rule2 = self.create_natrule(vm_2, public_ip_2, network_1, self.services["http_rule"])
public_ip_3 = self.acquire_publicip(network_2)
public_ip_4 = self.acquire_publicip(network_2)
nat_rule3 = self.create_natrule(vm_3, public_ip_3, network_2)
nat_rule4 = self.create_natrule(vm_4, public_ip_4, network_2)
http_rule3 = self.create_natrule(vm_3, public_ip_3, network_2, self.services["http_rule"])
http_rule4 = self.create_natrule(vm_4, public_ip_4, network_2, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
self.check_ssh_into_vm(vm_3, public_ip_3, testnegative=False)
self.check_ssh_into_vm(vm_4, public_ip_4, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_3, public_ip_3, testnegative=False)
self.check_wget_from_vm(vm_4, public_ip_4, testnegative=False)
router = self.stop_vpcrouter()
nat_rule1.delete(self.apiclient)
nat_rule2.delete(self.apiclient)
nat_rule3.delete(self.apiclient)
nat_rule4.delete(self.apiclient)
http_rule1.delete(self.apiclient)
http_rule2.delete(self.apiclient)
http_rule3.delete(self.apiclient)
http_rule4.delete(self.apiclient)
self.start_vpcrouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=True)
self.check_ssh_into_vm(vm_3, public_ip_3, testnegative=True)
self.check_ssh_into_vm(vm_4, public_ip_4, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_3, public_ip_3, testnegative=True)
self.check_wget_from_vm(vm_4, public_ip_4, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
def test_10_network_services_VPC_DeleteAllMultiplePF(self):
""" Test delete all PF rules in VPC across multiple networks when VpcVirtualRouter is Running
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16.
# 2. Create a Network offering - NO1 with all supported services.
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 and vm2 in network1.
# 6. Deploy vm3 and vm4 in network2.
# 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
# 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
# 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
# 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
# 11. Delete all PF rules for vm1, vm2, vm3 and vm4.
# 12. Fail to ssh and http to vm1, vm2, vm3 and vm4.
network_1 = self.create_network(self.services["network_offering"])
network_2 = self.create_network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.deployvm_in_network(network_1)
vm_2 = self.deployvm_in_network(network_1)
vm_3 = self.deployvm_in_network(network_2)
vm_4 = self.deployvm_in_network(network_2)
public_ip_1 = self.acquire_publicip(network_1)
public_ip_2 = self.acquire_publicip(network_1)
nat_rule1 = self.create_natrule(vm_1, public_ip_1, network_1)
nat_rule2 = self.create_natrule(vm_2, public_ip_2, network_1)
http_rule1 = self.create_natrule(vm_1, public_ip_1, network_1, self.services["http_rule"])
http_rule2 = self.create_natrule(vm_2, public_ip_2, network_1, self.services["http_rule"])
public_ip_3 = self.acquire_publicip(network_2)
public_ip_4 = self.acquire_publicip(network_2)
nat_rule3 = self.create_natrule(vm_3, public_ip_3, network_2)
nat_rule4 = self.create_natrule(vm_4, public_ip_4, network_2)
http_rule3 = self.create_natrule(vm_3, public_ip_3, network_2, self.services["http_rule"])
http_rule4 = self.create_natrule(vm_4, public_ip_4, network_2, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
self.check_ssh_into_vm(vm_3, public_ip_3, testnegative=False)
self.check_ssh_into_vm(vm_4, public_ip_4, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_3, public_ip_3, testnegative=False)
self.check_wget_from_vm(vm_4, public_ip_4, testnegative=False)
nat_rule1.delete(self.apiclient)
nat_rule2.delete(self.apiclient)
nat_rule3.delete(self.apiclient)
nat_rule4.delete(self.apiclient)
http_rule1.delete(self.apiclient)
http_rule2.delete(self.apiclient)
http_rule3.delete(self.apiclient)
http_rule4.delete(self.apiclient)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=True)
self.check_ssh_into_vm(vm_3, public_ip_3, testnegative=True)
self.check_ssh_into_vm(vm_4, public_ip_4, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_3, public_ip_3, testnegative=True)
self.check_wget_from_vm(vm_4, public_ip_4, testnegative=True)
return
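# --- Illustrative sketch only (not part of the test suite) ---
# All tests above follow the same pattern: create a PF/NAT rule, verify that
# ssh/wget through the rule works, delete the rule (with the VPC router running
# or stopped), then verify that access fails. A minimal version of that pattern,
# assuming marvin's NATRule helper and a "natrule" entry in the services dict
# (exact signature and keys may differ between CloudStack versions):
#
#   nat_rule = NATRule.create(self.apiclient, vm_1,
#                             self.services["natrule"],
#                             ipaddressid=public_ip_1.ipaddress.id,
#                             networkid=network_1.id)
#   ...
#   nat_rule.delete(self.apiclient)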
|
|
import string
import random
from django import forms
from django.conf import settings
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ValidationError
from django.utils.http import is_safe_url
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from oscar.core.loading import get_profile_class, get_class, get_model
from oscar.core.compat import get_user_model, existing_user_fields
from oscar.apps.customer.utils import get_password_reset_url, normalise_email
from oscar.core.validators import password_validators
Dispatcher = get_class('customer.utils', 'Dispatcher')
CommunicationEventType = get_model('customer', 'communicationeventtype')
ProductAlert = get_model('customer', 'ProductAlert')
User = get_user_model()
def generate_username():
# Prefer string.ascii_letters (present on Python 2 and 3); fall back to the
# Python 2-only string.letters if ascii_letters is unavailable.
try:
letters = string.ascii_letters
except AttributeError:
letters = string.letters
uname = ''.join([random.choice(letters + string.digits + '_')
for i in range(30)])
try:
User.objects.get(username=uname)
return generate_username()
except User.DoesNotExist:
return uname
class PasswordResetForm(auth_forms.PasswordResetForm):
"""
This form takes the same structure as its parent from django.contrib.auth
"""
communication_type_code = "PASSWORD_RESET"
def save(self, domain_override=None, use_https=False, request=None,
**kwargs):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
site = get_current_site(request)
if domain_override is not None:
site.domain = site.name = domain_override
email = self.cleaned_data['email']
active_users = User._default_manager.filter(
email__iexact=email, is_active=True)
for user in active_users:
reset_url = self.get_reset_url(site, request, user, use_https)
ctx = {
'user': user,
'site': site,
'reset_url': reset_url}
messages = CommunicationEventType.objects.get_and_render(
code=self.communication_type_code, context=ctx)
Dispatcher().dispatch_user_messages(user, messages)
def get_reset_url(self, site, request, user, use_https):
# the request argument isn't used currently, but implementors might
# need it to determine the correct subdomain
reset_url = "%s://%s%s" % (
'https' if use_https else 'http',
site.domain,
get_password_reset_url(user))
return reset_url
class SetPasswordForm(auth_forms.SetPasswordForm):
def __init__(self, *args, **kwargs):
super(SetPasswordForm, self).__init__(*args, **kwargs)
# Enforce password validations for the new password
self.fields['new_password1'].validators += password_validators
class PasswordChangeForm(auth_forms.PasswordChangeForm):
def __init__(self, *args, **kwargs):
super(PasswordChangeForm, self).__init__(*args, **kwargs)
# Enforce password validations for the new password
self.fields['new_password1'].validators += password_validators
class EmailAuthenticationForm(AuthenticationForm):
"""
Extends the standard django AuthenticationForm, to support 75 character
usernames. 75 character usernames are needed to support the EmailOrUsername
auth backend.
"""
username = forms.EmailField(label=_('Email address'))
redirect_url = forms.CharField(
widget=forms.HiddenInput, required=False)
def __init__(self, host, *args, **kwargs):
self.host = host
super(EmailAuthenticationForm, self).__init__(*args, **kwargs)
def clean_redirect_url(self):
url = self.cleaned_data['redirect_url'].strip()
if url and is_safe_url(url):
return url
class ConfirmPasswordForm(forms.Form):
"""
Asks the user to re-enter their password. The supplied value is validated
against the ``user`` instance passed to the constructor, so sensitive
actions can require the current password.
"""
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
self.user = user
def clean_password(self):
password = self.cleaned_data['password']
if not self.user.check_password(password):
raise forms.ValidationError(
_("The entered password is not valid!"))
return password
class EmailUserCreationForm(forms.ModelForm):
email = forms.EmailField(label=_('Email address'))
password1 = forms.CharField(
label=_('Password'), widget=forms.PasswordInput,
validators=password_validators)
password2 = forms.CharField(
label=_('Confirm password'), widget=forms.PasswordInput)
redirect_url = forms.CharField(
widget=forms.HiddenInput, required=False)
class Meta:
model = User
fields = ('email',)
def __init__(self, host=None, *args, **kwargs):
self.host = host
super(EmailUserCreationForm, self).__init__(*args, **kwargs)
def clean_email(self):
"""
Checks for existing users with the supplied email address.
"""
email = normalise_email(self.cleaned_data['email'])
if User._default_manager.filter(email__iexact=email).exists():
raise forms.ValidationError(
_("A user with that email address already exists"))
return email
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data.get('password2', '')
if password1 != password2:
raise forms.ValidationError(
_("The two password fields didn't match."))
return password2
def clean_redirect_url(self):
url = self.cleaned_data['redirect_url'].strip()
if url and is_safe_url(url):
return url
return settings.LOGIN_REDIRECT_URL
def save(self, commit=True):
user = super(EmailUserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data['password1'])
if 'username' in [f.name for f in User._meta.fields]:
user.username = generate_username()
if commit:
user.save()
return user
class OrderSearchForm(forms.Form):
date_from = forms.DateField(
required=False, label=pgettext_lazy("start date", "From"))
date_to = forms.DateField(
required=False, label=pgettext_lazy("end date", "To"))
order_number = forms.CharField(required=False, label=_("Order number"))
def clean(self):
if self.is_valid() and not any([self.cleaned_data['date_from'],
self.cleaned_data['date_to'],
self.cleaned_data['order_number']]):
raise forms.ValidationError(_("At least one field is required."))
return super(OrderSearchForm, self).clean()
def description(self):
"""
Uses the form's data to build a useful description of what orders
are listed.
"""
if not self.is_bound or not self.is_valid():
return _('All orders')
else:
date_from = self.cleaned_data['date_from']
date_to = self.cleaned_data['date_to']
order_number = self.cleaned_data['order_number']
return self._orders_description(date_from, date_to, order_number)
def _orders_description(self, date_from, date_to, order_number):
if date_from and date_to:
if order_number:
desc = _('Orders placed between %(date_from)s and '
'%(date_to)s and order number containing '
'%(order_number)s')
else:
desc = _('Orders placed between %(date_from)s and '
'%(date_to)s')
elif date_from:
if order_number:
desc = _('Orders placed since %(date_from)s and '
'order number containing %(order_number)s')
else:
desc = _('Orders placed since %(date_from)s')
elif date_to:
if order_number:
desc = _('Orders placed until %(date_to)s and '
'order number containing %(order_number)s')
else:
desc = _('Orders placed until %(date_to)s')
elif order_number:
desc = _('Orders with order number containing %(order_number)s')
else:
return None
params = {
'date_from': date_from,
'date_to': date_to,
'order_number': order_number,
}
return desc % params
def get_filters(self):
date_from = self.cleaned_data['date_from']
date_to = self.cleaned_data['date_to']
order_number = self.cleaned_data['order_number']
kwargs = {}
if date_from and date_to:
kwargs['date_placed__range'] = [date_from, date_to]
elif date_from and not date_to:
kwargs['date_placed__gt'] = date_from
elif not date_from and date_to:
kwargs['date_placed__lt'] = date_to
if order_number:
kwargs['number__contains'] = order_number
return kwargs
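# Illustrative example (assumption, not part of the original module): for bound
# data {'order_number': '12345'} with both dates empty, get_filters() returns
# {'number__contains': '12345'}, which can be passed straight to an order
# queryset, e.g. Order.objects.filter(**form.get_filters()).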
class UserForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
self.user = user
kwargs['instance'] = user
super(UserForm, self).__init__(*args, **kwargs)
if 'email' in self.fields:
self.fields['email'].required = True
def clean_email(self):
"""
Make sure that the email address is always unique as it is
used instead of the username. This is necessary because the
unique-ness of email addresses is *not* enforced on the model
level in ``django.contrib.auth.models.User``.
"""
email = normalise_email(self.cleaned_data['email'])
if User._default_manager.filter(
email__iexact=email).exclude(id=self.user.id).exists():
raise ValidationError(
_("A user with this email address already exists"))
# Save the email unaltered
return email
class Meta:
model = User
fields = existing_user_fields(['first_name', 'last_name', 'email'])
Profile = get_profile_class()
if Profile:
class UserAndProfileForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
try:
instance = Profile.objects.get(user=user)
except Profile.DoesNotExist:
# User has no profile, try a blank one
instance = Profile(user=user)
kwargs['instance'] = instance
super(UserAndProfileForm, self).__init__(*args, **kwargs)
# Get profile field names to help with ordering later
profile_field_names = list(self.fields.keys())
# Get user field names (we look for core user fields first)
core_field_names = set([f.name for f in User._meta.fields])
user_field_names = ['email']
for field_name in ('first_name', 'last_name'):
if field_name in core_field_names:
user_field_names.append(field_name)
user_field_names.extend(User._meta.additional_fields)
# Store user fields so we know what to save later
self.user_field_names = user_field_names
# Add additional user form fields
additional_fields = forms.fields_for_model(
User, fields=user_field_names)
self.fields.update(additional_fields)
# Ensure email is required and initialised correctly
self.fields['email'].required = True
# Set initial values
for field_name in user_field_names:
self.fields[field_name].initial = getattr(user, field_name)
# Ensure order of fields is email, user fields then profile fields
self.fields.keyOrder = user_field_names + profile_field_names
class Meta:
model = Profile
exclude = ('user',)
def clean_email(self):
email = normalise_email(self.cleaned_data['email'])
users_with_email = User._default_manager.filter(
email__iexact=email).exclude(id=self.instance.user.id)
if users_with_email.exists():
raise ValidationError(
_("A user with this email address already exists"))
return email
def save(self, *args, **kwargs):
user = self.instance.user
# Save user also
for field_name in self.user_field_names:
setattr(user, field_name, self.cleaned_data[field_name])
user.save()
return super(UserAndProfileForm, self).save(*args, **kwargs)
ProfileForm = UserAndProfileForm
else:
ProfileForm = UserForm
class ProductAlertForm(forms.ModelForm):
email = forms.EmailField(required=True, label=_(u'Send notification to'),
widget=forms.TextInput(attrs={
'placeholder': _('Enter your email')
}))
def __init__(self, user, product, *args, **kwargs):
self.user = user
self.product = product
super(ProductAlertForm, self).__init__(*args, **kwargs)
# Only show email field to unauthenticated users
if user and user.is_authenticated():
self.fields['email'].widget = forms.HiddenInput()
self.fields['email'].required = False
def save(self, commit=True):
alert = super(ProductAlertForm, self).save(commit=False)
if self.user.is_authenticated():
alert.user = self.user
alert.product = self.product
if commit:
alert.save()
return alert
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get('email')
if email:
try:
ProductAlert.objects.get(
product=self.product, email__iexact=email,
status=ProductAlert.ACTIVE)
except ProductAlert.DoesNotExist:
pass
else:
raise forms.ValidationError(_(
"There is already an active stock alert for %s") % email)
elif self.user.is_authenticated():
try:
ProductAlert.objects.get(product=self.product,
user=self.user,
status=ProductAlert.ACTIVE)
except ProductAlert.DoesNotExist:
pass
else:
raise forms.ValidationError(_(
"You already have an active alert for this product"))
return cleaned_data
class Meta:
model = ProductAlert
exclude = ('user', 'key',
'status', 'date_confirmed', 'date_cancelled', 'date_closed',
'product')
|
|
from collections.abc import Iterable
from io import StringIO
from numbers import Real
from warnings import warn
import numpy as np
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from openmc.stats import Univariate, Tabular, Uniform, Legendre
from .function import INTERPOLATION_SCHEME
from .data import EV_PER_MEV
from .endf import get_head_record, get_cont_record, get_tab1_record, \
get_list_record, get_tab2_record
class AngleDistribution(EqualityMixin):
"""Angle distribution as a function of incoming energy
Parameters
----------
energy : Iterable of float
Incoming energies in eV at which distributions exist
mu : Iterable of openmc.stats.Univariate
Distribution of scattering cosines corresponding to each incoming energy
Attributes
----------
energy : Iterable of float
Incoming energies in eV at which distributions exist
mu : Iterable of openmc.stats.Univariate
Distribution of scattering cosines corresponding to each incoming energy
"""
def __init__(self, energy, mu):
super().__init__()
self.energy = energy
self.mu = mu
@property
def energy(self):
return self._energy
@property
def mu(self):
return self._mu
@energy.setter
def energy(self, energy):
cv.check_type('angle distribution incoming energy', energy,
Iterable, Real)
self._energy = energy
@mu.setter
def mu(self, mu):
cv.check_type('angle distribution scattering cosines', mu,
Iterable, Univariate)
self._mu = mu
def to_hdf5(self, group):
"""Write angle distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
dset = group.create_dataset('energy', data=self.energy)
# Make sure all data is tabular
mu_tabular = [mu_i if isinstance(mu_i, Tabular) else
mu_i.to_tabular() for mu_i in self.mu]
# Determine total number of (mu,p) pairs and create array
n_pairs = sum([len(mu_i.x) for mu_i in mu_tabular])
pairs = np.empty((3, n_pairs))
# Create array for offsets
offsets = np.empty(len(mu_tabular), dtype=int)
interpolation = np.empty(len(mu_tabular), dtype=int)
j = 0
# Populate offsets and pairs array
for i, mu_i in enumerate(mu_tabular):
n = len(mu_i.x)
offsets[i] = j
interpolation[i] = 1 if mu_i.interpolation == 'histogram' else 2
pairs[0, j:j+n] = mu_i.x
pairs[1, j:j+n] = mu_i.p
pairs[2, j:j+n] = mu_i.c
j += n
# Create dataset for distributions
dset = group.create_dataset('mu', data=pairs)
# Write interpolation as attribute
dset.attrs['offsets'] = offsets
dset.attrs['interpolation'] = interpolation
@classmethod
def from_hdf5(cls, group):
"""Generate angular distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""
energy = group['energy'].value
data = group['mu']
offsets = data.attrs['offsets']
interpolation = data.attrs['interpolation']
mu = []
n_energy = len(energy)
for i in range(n_energy):
# Determine length of outgoing energy distribution and number of
# discrete lines
j = offsets[i]
if i < n_energy - 1:
n = offsets[i+1] - j
else:
n = data.shape[1] - j
interp = INTERPOLATION_SCHEME[interpolation[i]]
mu_i = Tabular(data[0, j:j+n], data[1, j:j+n], interp)
mu_i.c = data[2, j:j+n]
mu.append(mu_i)
return cls(energy, mu)
@classmethod
def from_ace(cls, ace, location_dist, location_start):
"""Generate an angular distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
location_dist : int
Index in the XSS array corresponding to the start of a block,
e.g. JXS(9).
location_start : int
Index in the XSS array corresponding to the start of an angle
distribution array
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""
# Set starting index for angle distribution
idx = location_dist + location_start - 1
# Number of energies at which angular distributions are tabulated
n_energies = int(ace.xss[idx])
idx += 1
# Incoming energy grid
energy = ace.xss[idx:idx + n_energies]*EV_PER_MEV
idx += n_energies
# Read locations for angular distributions
lc = ace.xss[idx:idx + n_energies].astype(int)
idx += n_energies
mu = []
for i in range(n_energies):
if lc[i] > 0:
# Equiprobable 32 bin distribution
idx = location_dist + abs(lc[i]) - 1
cos = ace.xss[idx:idx + 33]
pdf = np.zeros(33)
pdf[:32] = 1.0/(32.0*np.diff(cos))
cdf = np.linspace(0.0, 1.0, 33)
mu_i = Tabular(cos, pdf, 'histogram', ignore_negative=True)
mu_i.c = cdf
elif lc[i] < 0:
# Tabular angular distribution
idx = location_dist + abs(lc[i]) - 1
intt = int(ace.xss[idx])
n_points = int(ace.xss[idx + 1])
data = ace.xss[idx + 2:idx + 2 + 3*n_points]
data.shape = (3, n_points)
mu_i = Tabular(data[0], data[1], INTERPOLATION_SCHEME[intt])
mu_i.c = data[2]
else:
# Isotropic angular distribution
mu_i = Uniform(-1., 1.)
mu.append(mu_i)
return cls(energy, mu)
@classmethod
def from_endf(cls, ev, mt):
"""Generate an angular distribution from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
mt : int
The MT value of the reaction to get angular distributions for
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""
file_obj = StringIO(ev.section[4, mt])
# Read HEAD record
items = get_head_record(file_obj)
lvt = items[2]
ltt = items[3]
# Read CONT record
items = get_cont_record(file_obj)
li = items[2]
nk = items[4]
center_of_mass = (items[3] == 2)
# Check for obsolete energy transformation matrix. If present, just skip
# it and keep reading
if lvt > 0:
warn('Obsolete energy transformation matrix in MF=4 angular '
'distribution.')
for _ in range((nk + 5)//6):
file_obj.readline()
if ltt == 0 and li == 1:
# Purely isotropic
energy = np.array([0., ev.info['energy_max']])
mu = [Uniform(-1., 1.), Uniform(-1., 1.)]
elif ltt == 1 and li == 0:
# Legendre polynomial coefficients
params, tab2 = get_tab2_record(file_obj)
n_energy = params[5]
energy = np.zeros(n_energy)
mu = []
for i in range(n_energy):
items, al = get_list_record(file_obj)
temperature = items[0]
energy[i] = items[1]
coefficients = np.asarray([1.0] + al)
mu.append(Legendre(coefficients))
elif ltt == 2 and li == 0:
# Tabulated probability distribution
params, tab2 = get_tab2_record(file_obj)
n_energy = params[5]
energy = np.zeros(n_energy)
mu = []
for i in range(n_energy):
params, f = get_tab1_record(file_obj)
temperature = params[0]
energy[i] = params[1]
if f.n_regions > 1:
raise NotImplementedError('Angular distribution with multiple '
'interpolation regions not supported.')
mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))
elif ltt == 3 and li == 0:
# Legendre for low energies / tabulated for high energies
params, tab2 = get_tab2_record(file_obj)
n_energy_legendre = params[5]
energy_legendre = np.zeros(n_energy_legendre)
mu = []
for i in range(n_energy_legendre):
items, al = get_list_record(file_obj)
temperature = items[0]
energy_legendre[i] = items[1]
coefficients = np.asarray([1.0] + al)
mu.append(Legendre(coefficients))
params, tab2 = get_tab2_record(file_obj)
n_energy_tabulated = params[5]
energy_tabulated = np.zeros(n_energy_tabulated)
for i in range(n_energy_tabulated):
params, f = get_tab1_record(file_obj)
temperature = params[0]
energy_tabulated[i] = params[1]
if f.n_regions > 1:
raise NotImplementedError('Angular distribution with multiple '
'interpolation regions not supported.')
mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))
energy = np.concatenate((energy_legendre, energy_tabulated))
return AngleDistribution(energy, mu)
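# --- Illustrative usage sketch (not part of this module) ---
# Round trip through to_hdf5/from_hdf5 for a purely isotropic distribution at
# two incoming energies (assumes h5py is available):
#
#   import h5py
#   from openmc.stats import Uniform
#
#   dist = AngleDistribution([1.0e-5, 2.0e7],
#                            [Uniform(-1., 1.), Uniform(-1., 1.)])
#   with h5py.File('angle.h5', 'w') as f:
#       dist.to_hdf5(f.create_group('elastic'))
#   with h5py.File('angle.h5', 'r') as f:
#       dist2 = AngleDistribution.from_hdf5(f['elastic'])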
|
|
#!/usr/bin/env python
# coding:utf-8
# public DNS servers https://en.wikipedia.org/wiki/Public_recursive_name_server
import json
import os
import sys
import threading
import socket
import time
import re
import ssl
import random
import struct
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
top_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir))
data_path = os.path.join(top_path, "data", 'smart_router')
python_path = root_path
noarch_lib = os.path.join(python_path, 'lib', 'noarch')
sys.path.append(noarch_lib)
import simple_queue
import lru_cache
import utils
import simple_http_client
from dnslib import DNSRecord, DNSHeader, A, AAAA, RR, DNSQuestion, QTYPE
from . import global_var as g
from xlog import getLogger
xlog = getLogger("smart_router")
def query_dns_from_xxnet(domain, dns_type=None):
if not g.x_tunnel:
return []
t0 = time.time()
content, status, response = g.x_tunnel.front_dispatcher.request(
"GET", "dns.xx-net.net", path="/query?domain=%s" % (utils.to_str(domain)), timeout=5)
t1 = time.time()
if status != 200:
xlog.warn("query_dns_from_xxnet fail status:%d, cost=%f", status, t1 - t0)
return []
if isinstance(content, memoryview):
content = content.tobytes()
content = utils.to_str(content)
try:
rs = json.loads(content)
ips = rs["ip"]
xlog.debug("query_dns_from_xxnet %s cost:%f return:%s", domain, t1 - t0, ips)
#if dns_type == 1:
# ips = [ip for ip in ips if "." in ip]
ips_out = []
for ip_cn in ips:
ip, cn = ip_cn.split("|")
ips_out.append(ip)
return ips_out
except Exception as e:
xlog.warn("query_dns_from_xxnet %s json:%s parse fail:%s", domain, content, e)
return []
class LocalDnsQuery():
def __init__(self, timeout=3):
self.timeout = timeout
self.waiters = lru_cache.LruCache(100)
self.dns_server = self.get_local_dns_server()
self.start()
def get_local_dns_server(self):
iplist = []
if os.name == 'nt':
import ctypes, ctypes.wintypes, struct, socket
DNS_CONFIG_DNS_SERVER_LIST = 6
buf = ctypes.create_string_buffer(2048)
ctypes.windll.dnsapi.DnsQueryConfig(DNS_CONFIG_DNS_SERVER_LIST, 0, None, None, ctypes.byref(buf),
ctypes.byref(ctypes.wintypes.DWORD(len(buf))))
ipcount = struct.unpack('I', buf[0:4])[0]
# iplist = [socket.inet_ntoa(buf[i:i + 4]) for i in range(4, ipcount * 4 + 4, 4)]
iplist = []
for i in range(4, ipcount * 4 + 4, 4):
ip = socket.inet_ntoa(buf[i:i + 4])
iplist.append(ip)
elif os.path.isfile('/etc/resolv.conf'):
with open('/etc/resolv.conf', 'rb') as fp:
iplist = re.findall(br'(?m)^nameserver\s+(\S+)', fp.read())
out_list = []
for ip in iplist:
if ip == b"127.0.0.1":
continue
out_list.append(ip)
xlog.info("Local DNS server:%s", ip)
return out_list
def start(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.settimeout(1)
self.running = True
self.th = threading.Thread(target=self.recv_worker)
self.th.start()
def stop(self):
self.running = False
self.sock.close()
def recv_worker(self):
while self.running:
try:
try:
response, server = self.sock.recvfrom(8192)
server, port = server
except Exception as e:
# xlog.exception("sock.recvfrom except:%r", e)
continue
if not response:
continue
try:
p = DNSRecord.parse(response)
except Exception as e:
xlog.exception("dns client parse response fail:%r", e)
continue
if len(p.questions) == 0:
xlog.warn("received response without question")
continue
id = p.header.id
if id not in self.waiters:
continue
que = self.waiters[id]
org_domain = que.domain
domain = str(p.questions[0].qname)
xlog.debug("DNS local query received %s from:%s domain:%s org:%s", len(p.rr), server, domain, org_domain)
ips = []
for r in p.rr:
ip = utils.to_bytes(str(r.rdata))
ips.append(ip)
if ips:
que.put(ips)
except Exception as e:
xlog.exception("dns recv_worker except:%r", e)
xlog.info("DNS Client recv worker exit.")
self.sock.close()
def send_request(self, id, server_ip, domain, dns_type):
try:
d = DNSRecord(DNSHeader(id))
d.add_question(DNSQuestion(domain, dns_type))
req4_pack = d.pack()
self.sock.sendto(req4_pack, (server_ip, 53))
except Exception as e:
xlog.warn("send_request except:%r", e)
def query(self, domain, dns_type=1, timeout=3):
t0 = time.time()
end_time = t0 + timeout
while True:
id = random.randint(0, 65535)
if id not in self.waiters:
break
que = simple_queue.Queue()
que.domain = domain
ips = []
for server_ip in self.dns_server:
new_time = time.time()
if new_time > end_time:
break
self.waiters[id] = que
self.send_request(id, server_ip, domain, dns_type)
ips += que.get(self.timeout) or []
if ips:
ips = list(set(ips))
break
if id in self.waiters:
del self.waiters[id]
t1 = time.time()
xlog.debug("query by udp, %s cost:%f, return:%s", domain, t1-t0, ips)
return ips
class DnsOverTcpQuery():
def __init__(self, server_list=[b"114.114.114.114"], port=53):
self.protocol = "Tcp"
self.timeout = 3
self.connection_timeout = 60
self.public_list = server_list
self.port = port
self.connections = []
def get_server(self):
return self.public_list[0]
def direct_connect(self, host, port):
connect_timeout = 30
if b':' in host:
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0))]
elif utils.check_ip_valid4(host):
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
else:
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
except socket.gaierror:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
for res in info:
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32 * 1024)
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
s.settimeout(connect_timeout)
s.connect((host, port))
return s
except socket.error:
if s:
s.close()
except Exception as e:
xlog.warn("Connect to Dns server %s:%d fail:%r", host, port)
return None
def get_connection(self):
while len(self.connections):
try:
[sock, last_query_time] = self.connections.pop()
if time.time() - last_query_time < self.connection_timeout:
return sock
except:
pass
server_ip = self.get_server()
if not server_ip:
return None
if not g.config.PROXY_ENABLE:
sock = self.direct_connect(server_ip, self.port)
else:
connect_timeout = 5
import socks
sock = socks.socksocket(socket.AF_INET)
sock.set_proxy(proxy_type=g.config.PROXY_TYPE,
addr=g.config.PROXY_HOST,
port=g.config.PROXY_PORT, rdns=True,
username=g.config.PROXY_USER,
password=g.config.PROXY_PASSWD)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024)
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
sock.settimeout(connect_timeout)
sock.connect((server_ip, self.port))
return sock
def query(self, domain, dns_type=1):
t0 = time.time()
try:
sock = self.get_connection()
if not sock:
xlog.warn("query_over_tcp %s type:%s connect fail.", domain, dns_type)
return []
d = DNSRecord(DNSHeader())
d.add_question(DNSQuestion(domain, dns_type))
data = d.pack()
data = struct.pack("!H", len(data)) + data
sock.sendall(data)
response = sock.recv(8192)
if not response:
return []
length = struct.unpack("!H", bytes(response[:2]))[0]
while len(response) - 2 < length:
response += sock.recv(8192)
t2 = time.time()
p = DNSRecord.parse(response[2:])
if len(p.rr) == 0:
xlog.warn("query_over_tcp for %s type:%d return none, cost:%f+%f",
domain, dns_type, t2-t0)
ips = []
for r in p.rr:
ip = utils.to_bytes(str(r.rdata))
ips.append(ip)
xlog.debug("Dns %s %s return %s t:%f", self.protocol, domain, ips, t2-t0)
self.connections.append([sock, time.time()])
return ips
except Exception as e:
xlog.exception("query_over_tcp %s type:%s except:%r", domain, dns_type, e)
return []
class DnsOverTlsQuery(DnsOverTcpQuery):
def __init__(self, server_list=[b"1.1.1.1", b"9.9.9.9"]):
super(DnsOverTlsQuery, self).__init__(server_list=server_list, port=853)
self.protocol = "DoT"
def get_connection(self):
try:
s = super(DnsOverTlsQuery, self).get_connection()
if isinstance(s, ssl.SSLSocket):
return s
sock = ssl.wrap_socket(s, ca_certs=os.path.join(current_path, "cloudflare_cert.pem"))
except Exception as e:
xlog.warn("DnsOverTlsQuery wrap_socket fail %r", e)
return None
sock.settimeout(self.timeout)
return sock
class DnsOverHttpsQuery(object):
def __init__(self, timeout=3):
self.protocol = "DoH"
self.timeout = timeout
self.server = "https://1.1.1.1/dns-query"
self.connection_timeout = 60
self.connections = []
def get_connection(self):
while len(self.connections):
try:
[client, last_query_time] = self.connections.pop()
if time.time() - last_query_time < self.connection_timeout:
return client
except:
pass
if g.config.PROXY_ENABLE == 1:
return simple_http_client.Client(proxy={
"type": g.config.PROXY_TYPE,
"host": g.config.PROXY_HOST,
"port": g.config.PROXY_PORT,
"user": g.config.PROXY_USER,
"pass": g.config.PROXY_PASSWD,
}, timeout=self.timeout)
else:
return simple_http_client.Client(timeout=self.timeout)
def query_json(self, domain, dns_type=1):
try:
t0 = time.time()
client = self.get_connection()
url = self.server + "?name=" + domain + "&type=A"  # dns_type would need mapping to its text form (e.g. 1 -> "A"); only A is requested here.
r = client.request("GET", url, headers={"accept": "application/dns-json"})
t = utils.to_str(r.text)
t2 = time.time()
ips = []
data = json.loads(t)
for answer in data["Answer"]:
ips.append(answer["data"])
self.connections.append([client, time.time()])
xlog.debug("Dns s %s return %s t:%f", self.server, domain, ips, t2 - t0)
return ips
except Exception as e:
xlog.warn("DnsOverHttpsQuery query fail:%r", e)
return []
def query(self, domain, dns_type=1):
try:
t0 = time.time()
client = self.get_connection()
url = self.server
d = DNSRecord(DNSHeader())
d.add_question(DNSQuestion(domain, dns_type))
data = d.pack()
r = client.request("POST", url, headers={"accept": "application/dns-message",
"content-type": "application/dns-message"}, body=data)
t2 = time.time()
p = DNSRecord.parse(r.text)
ips = []
for r in p.rr:
ip = utils.to_bytes(str(r.rdata))
ips.append(ip)
self.connections.append([client, time.time()])
xlog.debug("Dns %s %s return %s t:%f", self.protocol, domain, ips, t2 - t0)
return ips
except Exception as e:
xlog.exception("DnsOverHttpsQuery query fail:%r", e)
return []
class ParallelQuery():
def query_worker(self, task, function):
ips = function(task.domain, task.dns_type)
if len(ips):
g.domain_cache.set_ips(task.domain, ips, task.dns_type)
task.put(ips)
def query(self, domain, dns_type, funcs):
task = simple_queue.Queue()
task.domain = domain
task.dns_type = dns_type
for func in funcs:
threading.Thread(target=self.query_worker, args=(task, func)).start()
ips = task.get(3) or []
return ips
class CombineDnsQuery():
def __init__(self):
self.domain_allowed_pattern = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$")
self.local_dns_resolve = LocalDnsQuery()
self.tcp_query = DnsOverTcpQuery()
self.tls_query = DnsOverTlsQuery()
self.https_query = DnsOverHttpsQuery()
self.parallel_query = ParallelQuery()
def is_valid_hostname(self, hostname):
hostname = hostname.upper()
if len(hostname) > 255:
return False
if hostname.endswith(b"."):
hostname = hostname[:-1]
return all(self.domain_allowed_pattern.match(x) for x in hostname.split(b"."))
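# Example (hypothetical inputs): is_valid_hostname(b"example.com") is True,
# while is_valid_hostname(b"-bad-.com") is False, because a label may not
# start or end with a hyphen.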
def query_blocked_domain(self, domain, dns_type):
return self.parallel_query.query(domain, dns_type, [self.https_query.query, self.tls_query.query, query_dns_from_xxnet])
def query_unknown_domain(self, domain, dns_type):
return self.parallel_query.query(domain, dns_type, [self.https_query.query, self.tls_query.query, self.tcp_query.query, query_dns_from_xxnet])
def query(self, domain, dns_type=1):
domain = utils.to_bytes(domain)
if utils.check_ip_valid(domain):
return [domain]
if not self.is_valid_hostname(domain):
xlog.warn("DNS query:%s not valid, type:%d", domain, dns_type)
return []
ips = g.domain_cache.get_ips(domain, dns_type)
if ips:
return ips
rule = g.user_rules.check_host(domain, 0)
if rule == "black":
# user-defined black list, e.g. advertisement or malware servers.
ips = ["127.0.0.1"]
xlog.debug("DNS query:%s in black", domain)
return ips
elif b"." not in domain or g.gfwlist.in_white_list(domain):
ips = self.local_dns_resolve.query(domain, timeout=1)
g.domain_cache.set_ips(domain, ips, dns_type)
return ips
elif g.gfwlist.in_block_list(domain) or rule in ["gae", "socks"]:
ips = self.query_blocked_domain(domain, dns_type)
else:
ips = self.query_unknown_domain(domain, dns_type)
if not ips:
ips = self.local_dns_resolve.query(domain, timeout=1)
return ips
def query_recursively(self, domain, dns_type=None):
if not dns_type:
dns_types = [1, 28]
else:
dns_types = [dns_type]
ips_out = []
for dns_type in dns_types:
ips = self.query(domain, dns_type)
for ip in ips:
if utils.check_ip_valid(ip):
ips_out.append(ip)
else:
ips_s = self.query_recursively(ip, dns_type)
ips_out += ips_s
return ips_out
def stop(self):
self.local_dns_resolve.stop()
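# --- Illustrative usage sketch (not part of this module) ---
# CombineDnsQuery wires the resolvers above together; assuming the global `g`
# objects (config, domain_cache, gfwlist, user_rules, x_tunnel) have been
# initialised elsewhere:
#
#   resolver = CombineDnsQuery()
#   ips = resolver.query_recursively(b"example.com")
#   resolver.stop()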
|
|
"""
This is a collection of functions for loading files generated by the test UI
(TAC test data) and calculating TAC test metrics, including:
- completion time
- path efficiency
- reaction time
"""
import json
import numpy as np
# Success margin in seconds (dwell time plus some slack): it is added to the
# last target-entered timestamp, and if the trial's final timestamp is greater
# than that, the target was left again and the trial is treated as unsuccessful.
success_check = 2.5
def completion_time(data):
"""
Gets the movement time for the given trial, defined as the amount of time
from movement initiation to the last time the target was entered. On
unsuccessful trials, the last timestamp of the trial is returned.
"""
ts = movement_initiation(data)
te = last_target_enter(data)
if te is None:
return end_time(data)
else:
return te - ts
def cumulative_completion_rate(completion_times, inc, top):
"""
Gets the cumulative completion rate data from an array of completion times.
Starting from zero, time is incremented by `inc` until `top` is reached
(inclusive) and the number of timestamps in `completion_times` under the
current time is added to `counts`. The timestamps can be obtained from
`completions()`.
Parameters
----------
completion_times : array
List of completion times (one per trial).
inc : float
Amount of time increment per check.
top : float
Largest time at which to check the completion rate.
Returns
-------
cutoffs : ndarray
Array of cutoff times at which the completion rate is checked.
counts : ndarray
The number of trials completed faster than the corresponding cutoff.
"""
ts = np.array(completion_times)
cutoffs = np.arange(0, top+inc, inc)
counts = np.zeros(cutoffs.shape)
for i, t in enumerate(cutoffs):
counts[i] = np.sum(ts < t)
return cutoffs, counts
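# Worked example: for completion_times=[1.2, 3.4, 3.9], inc=1.0, top=4.0 the
# cutoffs are [0, 1, 2, 3, 4] and the counts are [0, 0, 1, 1, 3] (the
# comparison is strictly less-than).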
def path_efficiency(data):
"""
Calculates path efficiency for a TAC test trial. The path efficiency is the
optimal distance to the final point of a successful trial divided by the
distance travelled in the trial. The distances are calculated as Euclidean
distances in joint space.
Parameters
----------
data : dict
Trial data for a single trial, as returned by `loadfile`; the pose
trajectory is extracted with `pose_array`.
Returns
-------
perc : float
The path efficiency for the given trial (optimal distance divided by the
distance actually travelled).
"""
pose = pose_array(data)
optdist = _trajectory_dist(np.vstack([pose[0], pose[-1]]))
dist = _trajectory_dist(pose)
return optdist / dist
def _trajectory_dist(pose):
"""
Calculates the cumulative Euclidean distance travelled over a trajectory.
Parameters
----------
pose : 2D ndarray
Array of poses in time where each row is a pose at a specific time
and each column is a component (coordinate) of the pose, i.e. the shape
is (n_timestamps, n_joints).
Returns
-------
dist : float
The cumulative distance travelled over the array of poses.
"""
distances = np.sqrt(np.sum(np.square(np.diff(pose, axis=0)), axis=1))
total = np.sum(distances)
return total
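# Worked example: a straight two-point trajectory [[0, 0], [3, 4]] gives a
# single step of sqrt(3**2 + 4**2) = 5.0, so _trajectory_dist returns 5.0.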
def loadfile(filename):
"""
Gets the data from a single log file.
"""
data = None
with open(filename, 'r') as fid:
data = json.load(fid)
return data
def movement_initiation(data):
"""
Gets the timestamp corresponding to movement initiation, i.e. the first
time a command other than 'no-contraction' was output.
"""
commands = data['trial_data']['command']
first = [i for i, c in enumerate(commands) if c != 'no-contraction'][0]
ts = data['trial_data']['timestamp'][first]
return ts
def last_target_enter(data):
"""
Gets the timestamp corresponding to the last time the target was entered.
Returns None if the target wasn't hit.
"""
entered = data['trial_data']['target_entered']
et = end_time(data)
if entered:
te = entered[-1]
if et > te + success_check:
te = None
return te
else:
return None
def end_time(data):
"""
Gets the final timestamp of the trial.
"""
try:
et = data['trial_data']['timestamp'][-1]
except IndexError:
print(data)
et = 0
return et
def check_success(data):
"""
Returns whether or not the given trial was successful.
"""
te = last_target_enter(data)
if te is not None:
return True
else:
return False
def get_target(data):
"""
Gets the target pose.
"""
return data['target']['pose']
def get_complexity(data):
"""
Gets the complexity of the given trial data -- the number of degrees of
freedom that are active, or under control of the subject.
"""
return int(len(data['active_classes'])/2)
def get_dofs(data):
"""
Gets the number of target DOFs (i.e. the number of different motion
classes required to complete a trial).
"""
return len(data['target']['pose'])
def pose_array(data):
"""
Gets a 2D array of poses over time. The shape is (n_timestamps,
n_joints).
"""
pose = data['trial_data']['pose']
timestamps = data['trial_data']['timestamp']
n_timestamps = len(timestamps)
n_joints = len(pose)
pose_array = np.zeros((n_timestamps, n_joints))
for i, (joint, angles) in enumerate(sorted(pose.items())):
pose_array[:, i] = angles
pose_array -= pose_array[0, :]
return pose_array
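# --- Illustrative usage sketch (not part of the original module) ---
# The filename below is hypothetical; any log file written by the test UI works.
if __name__ == '__main__':
    data = loadfile('example_trial.json')  # hypothetical path
    print('successful:', check_success(data))
    print('completion time:', completion_time(data))
    print('path efficiency:', path_efficiency(data))
    print('movement initiation:', movement_initiation(data))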
|
|
# event/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base implementation classes.
The public-facing ``Events`` serves as the base class for an event interface;
its public attributes represent different kinds of events. These attributes
are mirrored onto a ``_Dispatch`` class, which serves as a container for
collections of listener functions. These collections are represented both
at the class level of a particular ``_Dispatch`` class as well as within
instances of ``_Dispatch``.
"""
from __future__ import absolute_import
from .. import util
from .attr import _JoinedDispatchDescriptor, _EmptyListener, _DispatchDescriptor
_registrars = util.defaultdict(list)
def _is_event_name(name):
return not name.startswith('_') and name != 'dispatch'
class _UnpickleDispatch(object):
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _parent_cls):
for cls in _parent_cls.__mro__:
if 'dispatch' in cls.__dict__:
return cls.__dict__['dispatch'].dispatch_cls(_parent_cls)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
"""Mirror the event listening definitions of an Events class with
listener collections.
Classes which define a "dispatch" member will return a
non-instantiated :class:`._Dispatch` subclass when the member
is accessed at the class level. When the "dispatch" member is
accessed at the instance level of its owner, an instance
of the :class:`._Dispatch` class is returned.
A :class:`._Dispatch` class is generated for each :class:`.Events`
class defined, by the :func:`._create_dispatcher_class` function.
The original :class:`.Events` classes remain untouched.
This decouples the construction of :class:`.Events` subclasses from
the implementation used by the event internals, and allows
inspecting tools like Sphinx to work in an unsurprising
way against the public API.
"""
_events = None
"""reference the :class:`.Events` class which this
:class:`._Dispatch` is created for."""
def __init__(self, _parent_cls):
self._parent_cls = _parent_cls
@util.classproperty
def _listen(cls):
return cls._events._listen
def _join(self, other):
"""Create a 'join' of this :class:`._Dispatch` and another.
This new dispatcher will dispatch events to both
:class:`._Dispatch` objects.
"""
if '_joined_dispatch_cls' not in self.__class__.__dict__:
cls = type(
"Joined%s" % self.__class__.__name__,
(_JoinedDispatcher, self.__class__), {}
)
for ls in _event_descriptors(self):
setattr(cls, ls.name, _JoinedDispatchDescriptor(ls.name))
self.__class__._joined_dispatch_cls = cls
return self._joined_dispatch_cls(self, other)
def __reduce__(self):
return _UnpickleDispatch(), (self._parent_cls, )
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
for ls in _event_descriptors(other):
if isinstance(ls, _EmptyListener):
continue
getattr(self, ls.name).\
for_modify(self)._update(ls, only_propagate=only_propagate)
@util.hybridmethod
def _clear(self):
for attr in dir(self):
if _is_event_name(attr):
getattr(self, attr).for_modify(self).clear()
def _event_descriptors(target):
return [getattr(target, k) for k in dir(target) if _is_event_name(k)]
class _EventMeta(type):
"""Intercept new Event subclasses and create
associated _Dispatch classes."""
def __init__(cls, classname, bases, dict_):
_create_dispatcher_class(cls, classname, bases, dict_)
return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
"""Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# There are all kinds of ways to do this,
# e.g. make a Dispatch class that shares the '_listen' method
# of the Event class; this is the straight monkeypatch.
dispatch_base = getattr(cls, 'dispatch', _Dispatch)
dispatch_cls = type("%sDispatch" % classname,
(dispatch_base, ), {})
cls._set_dispatch(cls, dispatch_cls)
for k in dict_:
if _is_event_name(k):
setattr(dispatch_cls, k, _DispatchDescriptor(cls, dict_[k]))
_registrars[k].append(cls)
if getattr(cls, '_dispatch_target', None):
cls._dispatch_target.dispatch = dispatcher(cls)
def _remove_dispatcher(cls):
for k in dir(cls):
if _is_event_name(k):
_registrars[k].remove(cls)
if not _registrars[k]:
del _registrars[k]
class Events(util.with_metaclass(_EventMeta, object)):
"""Define event listening functions for a particular target type."""
@staticmethod
def _set_dispatch(cls, dispatch_cls):
# this allows an Events subclass to define additional utility
# methods made available to the target via
# "self.dispatch._events.<utilitymethod>"
# @staticmethod to allow easy "super" calls while in a metaclass
# constructor.
cls.dispatch = dispatch_cls
dispatch_cls._events = cls
@classmethod
def _accept_with(cls, target):
# Mapper, ClassManager, Session override this to
# also accept classes, scoped_sessions, sessionmakers, etc.
if hasattr(target, 'dispatch') and (
isinstance(target.dispatch, cls.dispatch) or \
isinstance(target.dispatch, type) and \
issubclass(target.dispatch, cls.dispatch)
):
return target
else:
return None
@classmethod
def _listen(cls, event_key, propagate=False, insert=False, named=False):
event_key.base_listen(propagate=propagate, insert=insert, named=named)
@classmethod
def _remove(cls, event_key):
event_key.remove()
@classmethod
def _clear(cls):
cls.dispatch._clear()
class _JoinedDispatcher(object):
"""Represent a connection between two _Dispatch objects."""
def __init__(self, local, parent):
self.local = local
self.parent = parent
self._parent_cls = local._parent_cls
class dispatcher(object):
"""Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.
"""
def __init__(self, events):
self.dispatch_cls = events.dispatch
self.events = events
def __get__(self, obj, cls):
if obj is None:
return self.dispatch_cls
obj.__dict__['dispatch'] = disp = self.dispatch_cls(cls)
return disp
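# --- Illustrative usage sketch (not part of this module) ---
# An Events subclass names its events as methods; _EventMeta then generates the
# matching _Dispatch class and, because _dispatch_target is set, attaches a
# `dispatcher` descriptor to the target type (listener registration itself goes
# through the event registry, not shown here):
#
#   class Widget(object):
#       pass
#
#   class WidgetEvents(Events):
#       _dispatch_target = Widget
#
#       def before_frob(self, target, value):
#           """Called before a widget is frobbed."""
#
#   # Widget.dispatch is now the generated WidgetEventsDispatch class, and each
#   # Widget() instance receives its own _Dispatch instance on first access.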
|
|
"""Offer reusable conditions."""
from datetime import timedelta
import logging
import sys
from blumate.components import (
zone as zone_cmp, sun as sun_cmp)
from blumate.const import (
ATTR_GPS_ACCURACY, ATTR_LATITUDE, ATTR_LONGITUDE,
CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, CONF_CONDITION,
WEEKDAYS, CONF_STATE, CONF_ZONE, CONF_BEFORE,
CONF_AFTER, CONF_WEEKDAY, SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET,
CONF_BELOW, CONF_ABOVE)
from blumate.exceptions import TemplateError, BluMateError
import blumate.helpers.config_validation as cv
from blumate.helpers.template import render
import blumate.util.dt as dt_util
FROM_CONFIG_FORMAT = '{}_from_config'
_LOGGER = logging.getLogger(__name__)
def from_config(config, config_validation=True):
"""Turn a condition configuration into a method."""
factory = getattr(
sys.modules[__name__],
FROM_CONFIG_FORMAT.format(config.get(CONF_CONDITION)), None)
if factory is None:
raise BluMateError('Invalid condition "{}" specified {}'.format(
config.get(CONF_CONDITION), config))
return factory(config, config_validation)
def and_from_config(config, config_validation=True):
"""Create multi condition matcher using 'AND'."""
if config_validation:
config = cv.AND_CONDITION_SCHEMA(config)
checks = [from_config(entry) for entry in config['conditions']]
def if_and_condition(hass, variables=None):
"""Test and condition."""
for check in checks:
try:
if not check(hass, variables):
return False
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning('Error during and-condition: %s', ex)
return False
return True
return if_and_condition
def or_from_config(config, config_validation=True):
"""Create multi condition matcher using 'OR'."""
if config_validation:
config = cv.OR_CONDITION_SCHEMA(config)
checks = [from_config(entry) for entry in config['conditions']]
def if_or_condition(hass, variables=None):
"""Test and condition."""
for check in checks:
try:
if check(hass, variables):
return True
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning('Error during or-condition: %s', ex)
return False
return if_or_condition
# pylint: disable=too-many-arguments
def numeric_state(hass, entity, below=None, above=None, value_template=None,
variables=None):
"""Test a numeric state condition."""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
if value_template is None:
value = entity.state
else:
variables = dict(variables or {})
variables['state'] = entity
try:
value = render(hass, value_template, variables)
except TemplateError as ex:
_LOGGER.error(ex)
return False
try:
value = float(value)
except ValueError:
_LOGGER.warning("Value cannot be processed as a number: %s", value)
return False
if below is not None and value > below:
return False
if above is not None and value < above:
return False
return True
def numeric_state_from_config(config, config_validation=True):
"""Wrap action method with state based condition."""
if config_validation:
config = cv.NUMERIC_STATE_CONDITION_SCHEMA(config)
entity_id = config.get(CONF_ENTITY_ID)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
value_template = config.get(CONF_VALUE_TEMPLATE)
def if_numeric_state(hass, variables=None):
"""Test numeric state condition."""
return numeric_state(hass, entity_id, below, above, value_template,
variables)
return if_numeric_state
def state(hass, entity, req_state, for_period=None):
"""Test if state matches requirements."""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
is_state = entity.state == req_state
if for_period is None or not is_state:
return is_state
return dt_util.utcnow() - for_period > entity.last_changed
def state_from_config(config, config_validation=True):
"""Wrap action method with state based condition."""
if config_validation:
config = cv.STATE_CONDITION_SCHEMA(config)
entity_id = config.get(CONF_ENTITY_ID)
req_state = config.get(CONF_STATE)
for_period = config.get('for')
def if_state(hass, variables=None):
"""Test if condition."""
return state(hass, entity_id, req_state, for_period)
return if_state
def sun(hass, before=None, after=None, before_offset=None, after_offset=None):
"""Test if current time matches sun requirements."""
now = dt_util.now().time()
before_offset = before_offset or timedelta(0)
after_offset = after_offset or timedelta(0)
if before == SUN_EVENT_SUNRISE and now > (sun_cmp.next_rising(hass) +
before_offset).time():
return False
elif before == SUN_EVENT_SUNSET and now > (sun_cmp.next_setting(hass) +
before_offset).time():
return False
if after == SUN_EVENT_SUNRISE and now < (sun_cmp.next_rising(hass) +
after_offset).time():
return False
elif after == SUN_EVENT_SUNSET and now < (sun_cmp.next_setting(hass) +
after_offset).time():
return False
return True
def sun_from_config(config, config_validation=True):
"""Wrap action method with sun based condition."""
if config_validation:
config = cv.SUN_CONDITION_SCHEMA(config)
before = config.get('before')
after = config.get('after')
before_offset = config.get('before_offset')
after_offset = config.get('after_offset')
    def sun_if(hass, variables=None):
        """Validate sun based if-condition."""
        return sun(hass, before, after, before_offset, after_offset)
    return sun_if
def template(hass, value_template, variables=None):
"""Test if template condition matches."""
try:
value = render(hass, value_template, variables)
except TemplateError as ex:
        _LOGGER.error('Error during template condition: %s', ex)
return False
return value.lower() == 'true'
def template_from_config(config, config_validation=True):
"""Wrap action method with state based condition."""
if config_validation:
config = cv.TEMPLATE_CONDITION_SCHEMA(config)
value_template = config.get(CONF_VALUE_TEMPLATE)
def template_if(hass, variables=None):
"""Validate template based if-condition."""
return template(hass, value_template, variables)
return template_if
def time(before=None, after=None, weekday=None):
"""Test if local time condition matches.
Handle the fact that time is continuous and we may be testing for
a period that crosses midnight. In that case it is easier to test
for the opposite. "(23:59 <= now < 00:01)" would be the same as
"not (00:01 <= now < 23:59)".
"""
now = dt_util.now()
now_time = now.time()
if after is None:
after = dt_util.dt.time(0)
if before is None:
before = dt_util.dt.time(23, 59, 59, 999999)
if after < before:
if not after <= now_time < before:
return False
else:
if before <= now_time < after:
return False
if weekday is not None:
now_weekday = WEEKDAYS[now.weekday()]
        if (isinstance(weekday, str) and weekday != now_weekday) or \
                now_weekday not in weekday:
            return False
return True
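# Worked example for the midnight-crossing branch above: with after=23:00 and
# before=06:00 we have after > before, so the check becomes
# "not (06:00 <= now < 23:00)".
#
#     time(before=dt_util.dt.time(6), after=dt_util.dt.time(23))
#     # 23:30 -> True, 03:00 -> True, 12:00 -> False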
def time_from_config(config, config_validation=True):
"""Wrap action method with time based condition."""
if config_validation:
config = cv.TIME_CONDITION_SCHEMA(config)
before = config.get(CONF_BEFORE)
after = config.get(CONF_AFTER)
weekday = config.get(CONF_WEEKDAY)
def time_if(hass, variables=None):
"""Validate time based if-condition."""
return time(before, after, weekday)
return time_if
def zone(hass, zone_ent, entity):
"""Test if zone-condition matches."""
if isinstance(zone_ent, str):
zone_ent = hass.states.get(zone_ent)
if zone_ent is None:
return False
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
latitude = entity.attributes.get(ATTR_LATITUDE)
longitude = entity.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
return False
return zone_cmp.in_zone(zone_ent, latitude, longitude,
entity.attributes.get(ATTR_GPS_ACCURACY, 0))
def zone_from_config(config, config_validation=True):
"""Wrap action method with zone based condition."""
if config_validation:
config = cv.ZONE_CONDITION_SCHEMA(config)
entity_id = config.get(CONF_ENTITY_ID)
zone_entity_id = config.get(CONF_ZONE)
def if_in_zone(hass, variables=None):
"""Test if condition."""
return zone(hass, zone_entity_id, entity_id)
return if_in_zone
|
|
from __future__ import unicode_literals
import django
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from model_utils.managers import InheritanceManager
from bazaar.listings.managers import PublishingsManager
from bazaar.listings.querysets import PublishingsQuerySet
from bazaar.warehouse import api
from ..fields import MoneyField, SKUField, create_sku
from ..goods.models import Product
from ..settings import bazaar_settings
from .managers import ListingManager
@python_2_unicode_compatible
class Listing(models.Model):
# DELETE in future
# products = models.ManyToManyField(Product, related_name="listings_old", through="ListingSet")
# title = models.CharField(max_length=100)
# picture_url = models.URLField(blank=True)
# description = models.TextField(blank=True)
# END of delete section
product = models.ForeignKey(Product, related_name="listings", null=True)
sku = SKUField(default=create_sku)
objects = ListingManager()
class Meta:
ordering = ["product__name"]
@property
def _title(self):
"""
Returns product's name
"""
if self.product:
return self.product.name
return 'No product'
@property
def _description(self):
"""
Returns product's description
"""
if self.product:
return self.product.description
return 'No product'
@property
def _picture_url(self):
"""
Returns product picture's url
"""
if self.product:
if hasattr(self.product, 'photo') and hasattr(self.product.photo, 'url'):
return self.product.photo.url
return ''
@property
def available_units(self):
"""
Returns available units of the whole listing in the storage
"""
return api.get_storage_quantity(self.product)
@property
def cost(self):
"""
Returns global cost for the listing
"""
return api.get_storage_price(self.product)
def is_unavailable(self):
"""
        Returns True when the product's stock cannot satisfy the published listings
"""
try:
product_quantity = api.get_storage_quantity(self.product)
except models.ObjectDoesNotExist:
product_quantity = 0
for publishing in self.publishings.all():
if publishing.is_active():
if product_quantity < publishing.available_units:
return True
return False
def is_highly_available(self):
"""
        Returns True when the product's stock exceeds an active publishing's available units by more than 2
"""
try:
product_quantity = api.get_storage_quantity(self.product)
except models.ObjectDoesNotExist:
product_quantity = 0
for publishing in self.publishings.all():
if publishing.is_active():
if product_quantity - publishing.available_units > 2:
return True
return False
def is_low_cost(self):
listing_cost = self.cost
for publishing in self.publishings.filter(status=Publishing.ACTIVE_PUBLISHING):
if listing_cost > publishing.price:
return True
return False
def __str__(self):
name = ''
has_product_name = hasattr(self.product, 'name')
if has_product_name:
name = self.product.name
return ' '.join(filter(None, (name, '({})'.format(self.sku))))
@receiver(post_save, sender=Product)
def create_listing_for_product(sender, instance, **kwargs):
if not bazaar_settings.AUTOMATIC_LISTING_CREATION_ON_PRODUCT_CREATION:
return
if not kwargs['created']:
return
if hasattr(instance, 'compositeproduct'):
return
    # TODO: Is this import needed here?
from bazaar.goods.api import listing_bulk_creation
listing_bulk_creation(Product.objects.filter(pk=instance.id).all())
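# Illustrative sketch (added for exposition): with
# AUTOMATIC_LISTING_CREATION_ON_PRODUCT_CREATION enabled, saving a brand new
# Product fires the post_save receiver above and a Listing is created for it.
# The Product field values below are placeholders, not part of this module.
#
#     product = Product.objects.create(name="Example product")
#     product.listings.count()  # -> 1 once the receiver has run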
class ListingSet(models.Model):
quantity = models.DecimalField(max_digits=30, decimal_places=4, default=1)
product = models.ForeignKey(Product, related_name="listing_sets")
listing = models.ForeignKey(Listing, related_name="listing_sets")
@python_2_unicode_compatible
class Store(models.Model):
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(unique=True)
url = models.URLField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Publishing(models.Model):
ACTIVE_PUBLISHING = 0
COMPLETED_PUBLISHING = 1
PUBLISHING_STATUS_CHOICES = (
(ACTIVE_PUBLISHING, _("Active")),
(COMPLETED_PUBLISHING, _("Completed")),
)
external_id = models.CharField(max_length=128, db_index=True)
title = models.CharField(max_length=100, blank=True, null=True)
# TODO: Here we keep both picture_url and photo for compatibility, but picture_url will have to be removed
picture_url = models.URLField(null=True, blank=True)
photo = models.ImageField(upload_to='publishings', null=True, blank=True)
description = models.TextField(blank=True, null=True)
# Effective purchase cost, with purchase currency
original_price = MoneyField()
# Current selling price, with selling currency
price = MoneyField()
available_units = models.IntegerField(default=0)
pub_date = models.DateTimeField(null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
status = models.IntegerField(choices=PUBLISHING_STATUS_CHOICES, default=ACTIVE_PUBLISHING)
listing = models.ForeignKey(Listing, related_name="publishings", null=True, blank=False)
store = models.ForeignKey(Store, related_name="publishings")
objects = PublishingsManager.from_queryset(PublishingsQuerySet)()
def get_template_name(self):
        raise NotImplementedError("No default publishing template is provided.")
def is_active(self):
return self.status == self.ACTIVE_PUBLISHING
def __str__(self):
return "Publishing %s on %s" % (self.external_id, self.store)
class Meta:
unique_together = ("external_id", "store")
@python_2_unicode_compatible
class Order(models.Model):
ORDER_PENDING = 0
ORDER_COMPLETED = 1
ORDER_STATUS_CHOICES = (
(ORDER_PENDING, _("Pending")),
(ORDER_COMPLETED, _("Completed")),
)
external_id = models.CharField(max_length=256)
store = models.ForeignKey(Store)
publishing = models.ForeignKey(Publishing, null=True, blank=True)
processed = models.BooleanField(default=False)
bypass = models.BooleanField(default=False)
quantity = models.IntegerField(default=1)
    status = models.IntegerField(choices=ORDER_STATUS_CHOICES, default=ORDER_PENDING)
objects = InheritanceManager()
def __str__(self):
return "Order %s from %s" % (self.external_id, self.store)
class Meta:
unique_together = ("external_id", "store")
if django.VERSION < (1, 7):
from . import signals # noqa
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage C-mode library
"""
import ddt
import mock
from cinder import exception
from cinder.objects import fields
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as\
fake_utils
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
@ddt.ddt
class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
"""Test case for NetApp's C-Mode iSCSI library."""
def setUp(self):
super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp()
kwargs = {
'configuration': self.get_config_cmode(),
'host': 'openstack@cdotblock',
}
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.library.perf_library = mock.Mock()
self.library.ssc_library = mock.Mock()
self.library.vserver = mock.Mock()
self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME,
fake.SIZE, None)
self.fake_snapshot_lun = block_base.NetAppLun(
fake.SNAPSHOT_LUN_HANDLE, fake.SNAPSHOT_NAME, fake.SIZE, None)
self.mock_object(self.library, 'lun_table')
self.library.lun_table = {
fake.LUN_NAME: self.fake_lun,
fake.SNAPSHOT_NAME: self.fake_snapshot_lun,
}
self.mock_object(block_base.NetAppBlockStorageLibrary, 'delete_volume')
def get_config_cmode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_storage_protocol = 'iscsi'
config.netapp_login = 'admin'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'https'
config.netapp_server_port = '443'
config.netapp_vserver = 'openstack'
return config
@mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(capabilities.CapabilitiesLibrary,
'cluster_user_supported')
@mock.patch.object(capabilities.CapabilitiesLibrary,
'check_api_permissions')
@mock.patch.object(na_utils, 'check_flags')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_check_flags,
mock_check_api_permissions, mock_cluster_user_supported):
self.mock_object(client_base.Client, '_init_ssh_client')
self.mock_object(
dot_utils, 'get_backend_configuration',
return_value=self.get_config_cmode())
context = mock.Mock()
self.library.do_setup(context)
super_do_setup.assert_called_once_with(context)
self.assertEqual(1, mock_check_flags.call_count)
mock_check_api_permissions.assert_called_once_with()
mock_cluster_user_supported.assert_called_once_with()
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
mock_get_pool_map = self.mock_object(
self.library, '_get_flexvol_to_pool_map',
return_value={'fake_map': None})
mock_add_looping_tasks = self.mock_object(
self.library, '_add_looping_tasks')
self.library.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
self.assertEqual(1, mock_add_looping_tasks.call_count)
mock_get_pool_map.assert_called_once_with()
mock_add_looping_tasks.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
self.mock_object(block_base.NetAppBlockStorageLibrary,
'check_for_setup_error')
self.mock_object(self.library, '_add_looping_tasks')
self.mock_object(
self.library, '_get_flexvol_to_pool_map', return_value={})
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
@ddt.data({'replication_enabled': True, 'failed_over': False,
'cluster_credentials': True},
{'replication_enabled': True, 'failed_over': True,
'cluster_credentials': True},
{'replication_enabled': False, 'failed_over': False,
'cluster_credentials': False})
@ddt.unpack
def test_handle_housekeeping_tasks(
self, replication_enabled, failed_over, cluster_credentials):
self.library.using_cluster_credentials = cluster_credentials
ensure_mirrors = self.mock_object(data_motion.DataMotionMixin,
'ensure_snapmirrors')
self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
return_value=fake_utils.SSC.keys())
mock_remove_unused_qos_policy_groups = self.mock_object(
self.zapi_client, 'remove_unused_qos_policy_groups')
self.library.replication_enabled = replication_enabled
self.library.failed_over = failed_over
self.library._handle_housekeeping_tasks()
if self.library.using_cluster_credentials:
mock_remove_unused_qos_policy_groups.assert_called_once_with()
else:
mock_remove_unused_qos_policy_groups.assert_not_called()
if replication_enabled and not failed_over:
ensure_mirrors.assert_called_once_with(
self.library.configuration, self.library.backend_name,
fake_utils.SSC.keys())
else:
self.assertFalse(ensure_mirrors.called)
def test_handle_ems_logging(self):
volume_list = ['vol0', 'vol1', 'vol2']
self.mock_object(
self.library.ssc_library, 'get_ssc_flexvol_names',
return_value=volume_list)
self.mock_object(
dot_utils, 'build_ems_log_message_0',
return_value='fake_base_ems_log_message')
self.mock_object(
dot_utils, 'build_ems_log_message_1',
return_value='fake_pool_ems_log_message')
mock_send_ems_log_message = self.mock_object(
self.zapi_client, 'send_ems_log_message')
self.library._handle_ems_logging()
mock_send_ems_log_message.assert_has_calls([
mock.call('fake_base_ems_log_message'),
mock.call('fake_pool_ems_log_message'),
])
dot_utils.build_ems_log_message_0.assert_called_once_with(
self.library.driver_name, self.library.app_version)
dot_utils.build_ems_log_message_1.assert_called_once_with(
self.library.driver_name, self.library.app_version,
self.library.vserver, volume_list, [])
def test_find_mapped_lun_igroup(self):
igroups = [fake.IGROUP1]
self.zapi_client.get_igroup_by_initiators.return_value = igroups
lun_maps = [{'initiator-group': fake.IGROUP1_NAME,
'lun-id': '1',
'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertEqual(fake.IGROUP1_NAME, igroup)
self.assertEqual('1', lun_id)
def test_find_mapped_lun_igroup_initiator_mismatch(self):
self.zapi_client.get_igroup_by_initiators.return_value = []
lun_maps = [{'initiator-group': fake.IGROUP1_NAME,
'lun-id': '1',
'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_name_mismatch(self):
igroups = [{'initiator-group-os-type': 'linux',
'initiator-group-type': 'fcp',
'initiator-group-name': 'igroup2'}]
self.zapi_client.get_igroup_by_initiators.return_value = igroups
lun_maps = [{'initiator-group': fake.IGROUP1_NAME,
'lun-id': '1',
'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_no_igroup_prefix(self):
igroups = [{'initiator-group-os-type': 'linux',
'initiator-group-type': 'fcp',
'initiator-group-name': 'igroup2'}]
self.zapi_client.get_igroup_by_initiators.return_value = igroups
lun_maps = [{'initiator-group': 'igroup2',
'lun-id': '1',
'vserver': fake.VSERVER_NAME}]
self.zapi_client.get_lun_map.return_value = lun_maps
(igroup, lun_id) = self.library._find_mapped_lun_igroup(
fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_clone_lun_zero_block_count(self):
"""Test for when clone lun is not passed a block count."""
self.library._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [
mock.Mock(spec=netapp_api.NaElement)]
lun = fake.FAKE_LUN
self.library._get_lun_by_args = mock.Mock(return_value=[lun])
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false')
self.library.zapi_client.clone_lun.assert_called_once_with(
'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0,
dest_block=0, src_block=0, qos_policy_group_name=None,
source_snapshot=None, is_snapshot=False)
def test_clone_lun_blocks(self):
"""Test for when clone lun is passed block information."""
block_count = 10
src_block = 10
dest_block = 30
self.library._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [
mock.Mock(spec=netapp_api.NaElement)]
lun = fake.FAKE_LUN
self.library._get_lun_by_args = mock.Mock(return_value=[lun])
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false',
block_count=block_count, src_block=src_block,
dest_block=dest_block)
self.library.zapi_client.clone_lun.assert_called_once_with(
'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false',
block_count=block_count, dest_block=dest_block,
src_block=src_block, qos_policy_group_name=None,
source_snapshot=None, is_snapshot=False)
def test_clone_lun_no_space_reservation(self):
"""Test for when space_reservation is not passed."""
self.library._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.lun_space_reservation = 'false'
self.library.zapi_client.get_lun_by_args.return_value = [
mock.Mock(spec=netapp_api.NaElement)]
lun = fake.FAKE_LUN
self.library._get_lun_by_args = mock.Mock(return_value=[lun])
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True)
self.library.zapi_client.clone_lun.assert_called_once_with(
'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0,
dest_block=0, src_block=0, qos_policy_group_name=None,
source_snapshot=None, is_snapshot=True)
def test_get_fc_target_wwpns(self):
ports = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
self.zapi_client.get_fc_target_wwpns.return_value = ports
result = self.library._get_fc_target_wwpns()
self.assertSetEqual(set(ports), set(result))
def test_create_lun(self):
self.library._create_lun(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
None)
def test_get_preferred_target_from_list(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
operational_addresses = [
target['address']
for target in target_details_list[2:]]
self.zapi_client.get_operational_lif_addresses = (
mock.Mock(return_value=operational_addresses))
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[2], result)
@ddt.data({'replication_backends': [], 'cluster_credentials': False},
{'replication_backends': ['target_1', 'target_2'],
'cluster_credentials': True})
@ddt.unpack
def test_get_pool_stats(self, replication_backends, cluster_credentials):
self.library.using_cluster_credentials = cluster_credentials
ssc = {
'vola': {
'pool_name': 'vola',
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'netapp_aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
},
}
mock_get_ssc = self.mock_object(self.library.ssc_library,
'get_ssc',
return_value=ssc)
mock_get_aggrs = self.mock_object(self.library.ssc_library,
'get_ssc_aggregates',
return_value=['aggr1'])
self.mock_object(self.library, 'get_replication_backend_names',
return_value=replication_backends)
self.library.reserved_percentage = 5
self.library.max_over_subscription_ratio = 10
self.library.perf_library.get_node_utilization_for_pool = (
mock.Mock(return_value=30.0))
mock_capacities = {
'size-total': 10737418240.0,
'size-available': 2147483648.0,
}
self.mock_object(self.zapi_client,
'get_flexvol_capacity',
return_value=mock_capacities)
self.mock_object(self.zapi_client,
'get_flexvol_dedupe_used_percent',
return_value=55.0)
aggr_capacities = {
'aggr1': {
'percent-used': 45,
'size-available': 59055800320.0,
'size-total': 107374182400.0,
},
}
mock_get_aggr_capacities = self.mock_object(
self.zapi_client, 'get_aggregate_capacities',
return_value=aggr_capacities)
result = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{
'pool_name': 'vola',
'QoS_support': True,
'consistencygroup_support': True,
'consistent_group_snapshot_enabled': True,
'reserved_percentage': 5,
'max_over_subscription_ratio': 10.0,
'multiattach': False,
'total_capacity_gb': 10.0,
'free_capacity_gb': 2.0,
'netapp_dedupe_used_percent': 55.0,
'netapp_aggregate_used_percent': 45,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'netapp_aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'replication_enabled': False,
}]
expected[0].update({'QoS_support': cluster_credentials})
if not cluster_credentials:
expected[0].update({
'netapp_aggregate_used_percent': 0,
'netapp_dedupe_used_percent': 0
})
if replication_backends:
expected[0].update({
'replication_enabled': True,
'replication_count': len(replication_backends),
'replication_targets': replication_backends,
'replication_type': 'async',
})
self.assertEqual(expected, result)
mock_get_ssc.assert_called_once_with()
if cluster_credentials:
mock_get_aggrs.assert_called_once_with()
mock_get_aggr_capacities.assert_called_once_with(['aggr1'])
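    # Note on the expected capacities above: the mocked get_flexvol_capacity
    # returns sizes in bytes, and the expected pool stats are consistent with a
    # bytes-to-GiB conversion, e.g. 10737418240 / 1024 ** 3 == 10.0
    # ('total_capacity_gb') and 2147483648 / 1024 ** 3 == 2.0
    # ('free_capacity_gb').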
@ddt.data({}, None)
def test_get_pool_stats_no_ssc_vols(self, ssc):
mock_get_ssc = self.mock_object(self.library.ssc_library,
'get_ssc',
return_value=ssc)
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
mock_get_ssc.assert_called_once_with()
@ddt.data(r'open+|demix+', 'open.+', r'.+\d', '^((?!mix+).)*$',
'open123, open321')
def test_get_pool_map_match_selected_pools(self, patterns):
self.library.configuration.netapp_pool_name_search_pattern = patterns
mock_list_flexvols = self.mock_object(
self.zapi_client, 'list_flexvols',
return_value=fake.FAKE_CMODE_VOLUMES)
result = self.library._get_flexvol_to_pool_map()
expected = {
'open123': {
'pool_name': 'open123',
},
'open321': {
'pool_name': 'open321',
},
}
self.assertEqual(expected, result)
mock_list_flexvols.assert_called_once_with()
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321',
'.*?')
def test_get_pool_map_match_all_pools(self, patterns):
self.library.configuration.netapp_pool_name_search_pattern = patterns
mock_list_flexvols = self.mock_object(
self.zapi_client, 'list_flexvols',
return_value=fake.FAKE_CMODE_VOLUMES)
result = self.library._get_flexvol_to_pool_map()
self.assertEqual(fake.FAKE_CMODE_POOL_MAP, result)
mock_list_flexvols.assert_called_once_with()
def test_get_pool_map_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid"""
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_flexvol_to_pool_map)
@ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack',
'abc*', '^$')
def test_get_pool_map_non_matching_patterns(self, patterns):
self.library.configuration.netapp_pool_name_search_pattern = patterns
mock_list_flexvols = self.mock_object(
self.zapi_client, 'list_flexvols',
return_value=fake.FAKE_CMODE_VOLUMES)
result = self.library._get_flexvol_to_pool_map()
self.assertEqual({}, result)
mock_list_flexvols.assert_called_once_with()
def test_update_ssc(self):
mock_get_pool_map = self.mock_object(
self.library, '_get_flexvol_to_pool_map',
return_value=fake.FAKE_CMODE_VOLUMES)
result = self.library._update_ssc()
self.assertIsNone(result)
mock_get_pool_map.assert_called_once_with()
self.library.ssc_library.update_ssc.assert_called_once_with(
fake.FAKE_CMODE_VOLUMES)
def test_delete_volume(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
return_value=fake.QOS_POLICY_GROUP_INFO)
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.delete_volume(fake.VOLUME)
(block_base.NetAppBlockStorageLibrary.delete_volume.
assert_called_once_with(fake.VOLUME))
na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
fake.VOLUME)
(self.library._mark_qos_policy_group_for_deletion.
assert_called_once_with(fake.QOS_POLICY_GROUP_INFO))
def test_delete_volume_get_valid_qos_policy_group_info_exception(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
side_effect=exception.Invalid)
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.delete_volume(fake.VOLUME)
(block_base.NetAppBlockStorageLibrary.delete_volume.
assert_called_once_with(fake.VOLUME))
(self.library._mark_qos_policy_group_for_deletion.
assert_called_once_with(None))
def test_setup_qos_for_volume(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
return_value=fake.QOS_POLICY_GROUP_INFO)
self.mock_object(self.zapi_client, 'provision_qos_policy_group')
result = self.library._setup_qos_for_volume(fake.VOLUME,
fake.EXTRA_SPECS)
self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result)
self.zapi_client.provision_qos_policy_group.\
assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
def test_setup_qos_for_volume_exception_path(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
side_effect=exception.Invalid)
self.mock_object(self.zapi_client, 'provision_qos_policy_group')
self.assertRaises(exception.VolumeBackendAPIException,
self.library._setup_qos_for_volume, fake.VOLUME,
fake.EXTRA_SPECS)
self.assertEqual(0,
self.zapi_client.
provision_qos_policy_group.call_count)
def test_mark_qos_policy_group_for_deletion(self):
self.mock_object(self.zapi_client,
'mark_qos_policy_group_for_deletion')
self.library._mark_qos_policy_group_for_deletion(
fake.QOS_POLICY_GROUP_INFO)
self.zapi_client.mark_qos_policy_group_for_deletion\
.assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
def test_unmanage(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
return_value=fake.QOS_POLICY_GROUP_INFO)
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage')
self.library.unmanage(fake.VOLUME)
na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
fake.VOLUME)
self.library._mark_qos_policy_group_for_deletion\
.assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with(
fake.VOLUME)
def test_unmanage_w_invalid_qos_policy(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
side_effect=exception.Invalid)
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage')
self.library.unmanage(fake.VOLUME)
na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
fake.VOLUME)
self.library._mark_qos_policy_group_for_deletion\
.assert_called_once_with(None)
block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with(
fake.VOLUME)
def test_manage_existing_lun_same_name(self):
mock_lun = block_base.NetAppLun('handle', 'name', '1',
{'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._setup_qos_for_volume = mock.Mock()
self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
return_value=fake.QOS_POLICY_GROUP_NAME)
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
mock_set_lun_qos_policy_group = self.mock_object(
self.zapi_client, 'set_lun_qos_policy_group')
self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.assertEqual(0, self.zapi_client.move_lun.call_count)
self.assertEqual(1, mock_set_lun_qos_policy_group.call_count)
def test_manage_existing_lun_new_path(self):
mock_lun = block_base.NetAppLun(
'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
self.assertEqual(
2, self.library._get_existing_vol_with_manage_ref.call_count)
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.zapi_client.move_lun.assert_called_once_with(
'/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume')
@ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']},
{'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']},
{'secondary_id': 'dev1', 'configured_targets': []},
{'secondary_id': None, 'configured_targets': []})
@ddt.unpack
def test_failover_host_invalid_replication_target(self, secondary_id,
configured_targets):
"""This tests executes a method in the DataMotionMixin."""
self.library.backend_name = 'dev0'
self.mock_object(data_motion.DataMotionMixin,
'get_replication_backend_names',
return_value=configured_targets)
complete_failover_call = self.mock_object(
data_motion.DataMotionMixin, '_complete_failover')
self.assertRaises(exception.InvalidReplicationTarget,
self.library.failover_host, 'fake_context', [],
secondary_id=secondary_id)
self.assertFalse(complete_failover_call.called)
def test_failover_host_unable_to_failover(self):
"""This tests executes a method in the DataMotionMixin."""
self.library.backend_name = 'dev0'
self.mock_object(
data_motion.DataMotionMixin, '_complete_failover',
side_effect=exception.NetAppDriverException)
self.mock_object(data_motion.DataMotionMixin,
'get_replication_backend_names',
return_value=['dev1', 'dev2'])
self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
return_value=fake_utils.SSC.keys())
self.mock_object(self.library, '_update_zapi_client')
self.assertRaises(exception.UnableToFailOver,
self.library.failover_host, 'fake_context', [],
secondary_id='dev1')
data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
failover_target='dev1')
self.assertFalse(self.library._update_zapi_client.called)
def test_failover_host(self):
"""This tests executes a method in the DataMotionMixin."""
self.library.backend_name = 'dev0'
self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
return_value=('dev1', []))
self.mock_object(data_motion.DataMotionMixin,
'get_replication_backend_names',
return_value=['dev1', 'dev2'])
self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
return_value=fake_utils.SSC.keys())
self.mock_object(self.library, '_update_zapi_client')
actual_active, vol_updates, __ = self.library.failover_host(
'fake_context', [], secondary_id='dev1', groups=[])
data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
failover_target='dev1')
self.library._update_zapi_client.assert_called_once_with('dev1')
self.assertTrue(self.library.failed_over)
self.assertEqual('dev1', self.library.failed_over_backend_name)
self.assertEqual('dev1', actual_active)
self.assertEqual([], vol_updates)
def test_add_looping_tasks(self):
mock_update_ssc = self.mock_object(self.library, '_update_ssc')
mock_handle_housekeeping = self.mock_object(
self.library, '_handle_housekeeping_tasks')
mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task')
mock_super_add_looping_tasks = self.mock_object(
block_base.NetAppBlockStorageLibrary, '_add_looping_tasks')
self.library._add_looping_tasks()
mock_update_ssc.assert_called_once_with()
mock_add_task.assert_has_calls([
mock.call(mock_update_ssc,
loopingcalls.ONE_HOUR,
loopingcalls.ONE_HOUR),
mock.call(mock_handle_housekeeping,
loopingcalls.TEN_MINUTES,
0)])
mock_super_add_looping_tasks.assert_called_once_with()
def test_get_backing_flexvol_names(self):
mock_ssc_library = self.mock_object(
self.library.ssc_library, 'get_ssc')
self.library._get_backing_flexvol_names()
mock_ssc_library.assert_called_once_with()
def test_create_group(self):
model_update = self.library.create_group(
fake.VOLUME_GROUP)
self.assertEqual('available', model_update['status'])
def test_delete_group_volume_delete_failure(self):
self.mock_object(block_cmode, 'LOG')
self.mock_object(self.library, '_delete_lun', side_effect=Exception)
model_update, volumes = self.library.delete_group(
fake.VOLUME_GROUP, [fake.VG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('error_deleting', volumes[0]['status'])
self.assertEqual(1, block_cmode.LOG.exception.call_count)
def test_update_group(self):
model_update, add_volumes_update, remove_volumes_update = (
self.library.update_group(fake.VOLUME_GROUP))
self.assertIsNone(model_update)
self.assertIsNone(add_volumes_update)
self.assertIsNone(remove_volumes_update)
def test_delete_group_not_found(self):
self.mock_object(block_cmode, 'LOG')
self.mock_object(self.library, '_get_lun_attr', return_value=None)
model_update, volumes = self.library.delete_group(
fake.VOLUME_GROUP, [fake.VG_VOLUME])
self.assertEqual(0, block_cmode.LOG.error.call_count)
self.assertEqual(0, block_cmode.LOG.info.call_count)
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
def test_create_group_snapshot_raise_exception(self):
self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
mock_extract_host = self.mock_object(
volume_utils, 'extract_host', return_value=fake.POOL_NAME)
self.mock_object(self.zapi_client, 'create_cg_snapshot',
side_effect=netapp_api.NaApiError)
self.assertRaises(exception.NetAppDriverException,
self.library.create_group_snapshot,
fake.VOLUME_GROUP,
[fake.VG_SNAPSHOT])
mock_extract_host.assert_called_once_with(
fake.VG_SNAPSHOT['volume']['host'], level='pool')
def test_create_group_snapshot(self):
self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=False)
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.mock_object(self.library, '_get_lun_from_table',
return_value=fake_lun)
mock__clone_lun = self.mock_object(self.library, '_clone_lun')
model_update, snapshots_model_update = (
self.library.create_group_snapshot(fake.VOLUME_GROUP,
[fake.SNAPSHOT]))
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
mock__clone_lun.assert_called_once_with(fake_lun.name,
fake.SNAPSHOT['name'],
space_reserved='false',
is_snapshot=True)
def test_create_consistent_group_snapshot(self):
self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
self.mock_object(volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
mock_create_cg_snapshot = self.mock_object(
self.zapi_client, 'create_cg_snapshot')
mock__clone_lun = self.mock_object(self.library, '_clone_lun')
mock_wait_for_busy_snapshot = self.mock_object(
self.zapi_client, 'wait_for_busy_snapshot')
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
model_update, snapshots_model_update = (
self.library.create_group_snapshot(fake.VOLUME_GROUP,
[fake.VG_SNAPSHOT]))
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
mock_create_cg_snapshot.assert_called_once_with(
set([fake.POOL_NAME]), fake.VOLUME_GROUP['id'])
mock__clone_lun.assert_called_once_with(
fake.VG_SNAPSHOT['volume']['name'],
fake.VG_SNAPSHOT['name'],
source_snapshot=fake.VOLUME_GROUP['id'])
mock_wait_for_busy_snapshot.assert_called_once_with(
fake.POOL_NAME, fake.VOLUME_GROUP['id'])
mock_delete_snapshot.assert_called_once_with(
fake.POOL_NAME, fake.VOLUME_GROUP['id'])
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_group_from_src_snapshot(self, volume_model_update):
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination',
return_value=volume_model_update)
actual_return_value = self.library.create_group_from_src(
fake.VOLUME_GROUP, [fake.VOLUME], group_snapshot=fake.VG_SNAPSHOT,
snapshots=[fake.VG_VOLUME_SNAPSHOT])
clone_source_to_destination_args = {
'name': fake.VG_SNAPSHOT['name'],
'size': fake.VG_SNAPSHOT['volume_size'],
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
if volume_model_update:
volume_model_update['id'] = fake.VOLUME['id']
expected_return_value = ((None, [volume_model_update])
if volume_model_update else (None, []))
self.assertEqual(expected_return_value, actual_return_value)
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_group_from_src_group(self, volume_model_update):
lun_name = fake.SOURCE_VG_VOLUME['name']
mock_lun = block_base.NetAppLun(
lun_name, lun_name, '3', {'UUID': 'fake_uuid'})
self.mock_object(self.library, '_get_lun_from_table',
return_value=mock_lun)
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination',
return_value=volume_model_update)
actual_return_value = self.library.create_group_from_src(
fake.VOLUME_GROUP, [fake.VOLUME],
source_group=fake.SOURCE_VOLUME_GROUP,
source_vols=[fake.SOURCE_VG_VOLUME])
clone_source_to_destination_args = {
'name': fake.SOURCE_VG_VOLUME['name'],
'size': fake.SOURCE_VG_VOLUME['size'],
}
if volume_model_update:
volume_model_update['id'] = fake.VOLUME['id']
expected_return_value = ((None, [volume_model_update])
if volume_model_update else (None, []))
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
self.assertEqual(expected_return_value, actual_return_value)
def test_delete_group_snapshot(self):
mock__delete_lun = self.mock_object(self.library, '_delete_lun')
model_update, snapshots_model_update = (
self.library.delete_group_snapshot(fake.VOLUME_GROUP,
[fake.VG_SNAPSHOT]))
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
mock__delete_lun.assert_called_once_with(fake.VG_SNAPSHOT['name'])
|
|
#!/usr/bin/env python3
# VagrIRC Virc library
# Written in 2015 by Daniel Oaks <daniel@danieloaks.net>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
import os
import re
from ..base import BaseServer
# Removal Regexes
config_initial_replacements = [
re.compile(r'/\*(.|[\r\n])*?\*/'), # c style comments
re.compile(r'\n\s*//.*'), # c++ style comments
re.compile(r'\n\s*#.*'), # shell style comments
(re.compile(r'\n(?:\s*\n)+'), r'\n'), # remove blank lines
(re.compile(r'^[\s\n]*([\S\s]*?)[\s\n]*$'), r'\1\n'), # remove start/end blank space, make sure newline at end
re.compile(r'\nmotd \{([^\}]*?)\};'),
re.compile(r'\nlisten \{([^\}]*?)\};'),
re.compile(r'\noperator \{([^\}]*?)\};'),
re.compile(r'\nconnect \{([^\}]*?)\};'),
re.compile(r'\ncluster \{([^\}]*?)\};'),
re.compile(r'\nshared \{([^\}]*?)\};'),
re.compile(r'\nkill \{([^\}]*?)\};'),
re.compile(r'\ndeny \{([^\}]*?)\};'),
re.compile(r'\nexempt \{([^\}]*?)\};'),
re.compile(r'\ngecos \{([^\}]*?)\};'),
re.compile(r'\nauth \{([^\}]*?)letmein([^\}]*?)\};'),
re.compile(r'\nauth \{([^\}]*?)tld([^\}]*?)\};'),
re.compile(r'\nresv \{([^\}]*?)helsinki([^\}]*?)\};'),
re.compile(r'(\nauth \{[^\}]+redirserv[^\}]+\};)'),
# basic config options
re.compile(r'\n\s*havent_read_conf.+'),
re.compile(r'\n\s*flags = need_ident;'),
('hub = no;', 'hub = yes;'),
('throttle_time = 1 second;', 'throttle_time = 0;'), # else we get locked out during config
('services.rizon.net', 'services--network-suffix--'),
('hidden_name = "*.rizon.net";', 'hidden_name = "*--network-suffix--";'),
]
config_replacements = {
'name': (re.compile(r'(serverinfo \{\n\s*name = )[^\;]+(;)'), r'\1"{value}"\2'),
'sid': (re.compile(r'(\n\s*sid = )"[^"]+?"(;)'), r'\1"{value}"\2'),
'network_name': (r'Rizon', r'{value}'),
}
CONN_BLOCK = r"""connect {{
name = "{remote_name}";
host = "127.0.0.1";
send_password = "{password}";
accept_password = "{password}";
encrypted = no;
port = {port};
}};"""
LISTEN_BLOCK = r"""\1\nlisten {{
port = {client_port};
flags = hidden, server;
port = {link_ports};
}};
{connect_blocks}"""
OPERATOR_BLOCK = '''operator {{
name = "{name}";
user = "*@127.0.0.1";
user = "*@localhost";
user = "*@10.*";
password = "{password}";
encrypted = no;
whois = "is an IRC Operator";
ssl_connection_required = no;
class = "opers";
umodes = locops, servnotice, wallop, external, cconn, debug, farconnect,
skill, unauth;
snomasks = full, rej, skill, link, link:remote, unauth, spy;
flags = kill, kill:remote, connect, connect:remote, kline, unkline,
xline, globops, restart, die, rehash, admin, operwall, module;
}};
'''
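# Illustrative sketch (added for exposition): how the templates above are
# expanded by write_config() below. The link dict keys ('remote_name',
# 'password', 'port') match what the write_config() loop reads; the concrete
# values are placeholders.
#
#     CONN_BLOCK.format(remote_name='hub.example--network-suffix--',
#                       password='linkpass', port=7000)
#     # -> a connect { ... } block that points at 127.0.0.1:7000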
class Plexus4Server(BaseServer):
"""A fork of ircd-hybrid for Rizon Chat Network."""
name = 'plexus4'
vcs = 'git'
url = 'https://gitlab.com/rizon/plexus4.git'
def write_config(self, folder, info):
"""Write config file to the given folder."""
# load original config file
original_config_file = os.path.join(self.source_folder, 'doc', 'reference.conf')
with open(original_config_file, 'r') as config_file:
config_data = config_file.read()
# removing useless junk
for rep in config_initial_replacements:
# replacement
if isinstance(rep, (list, tuple)):
rep, sub = rep
# lazy variables
sub = sub.replace('--network-suffix--', self.info['network_suffix'])
# removal
else:
sub = ''
if isinstance(rep, str):
config_data = config_data.replace(rep, sub)
else:
config_data = rep.sub(sub, config_data)
# inserting actual values
for key, value in self.info.items():
if key in config_replacements:
rep, sub = config_replacements[key]
sub = sub.format(value=value)
if isinstance(rep, str):
config_data = config_data.replace(rep, sub)
else:
config_data = rep.sub(sub, config_data)
# listening ports
lregex = re.compile(r'(\nauth \{[^\}]+\};)')
connect_blocks = []
ports = []
for link in self.info['links']:
ports.append(str(link['port']))
connect_blocks.append(CONN_BLOCK.format(remote_name=link['remote_name'],
password=link['password'],
port=link['port']))
link_ports = ', '.join(ports)
sub = LISTEN_BLOCK.format(client_port=self.info['client_port'],
link_ports=link_ports,
connect_blocks='\n'.join(connect_blocks))
config_data = lregex.sub(sub, config_data)
# users
        for name, user_info in info['users'].items():
            if 'ircd' not in user_info:
                continue
            if 'oper' in user_info['ircd'] and user_info['ircd']['oper']:
                oper_name = user_info['ircd']['oper_name'] if 'oper_name' in user_info['ircd'] else name
                oper_pass = user_info['ircd']['oper_pass']
                config_data += OPERATOR_BLOCK.format(name=oper_name, password=oper_pass)
# writing out config file
if not os.path.exists(folder):
os.makedirs(folder)
output_config_file = os.path.join(folder, 'reference.conf')
with open(output_config_file, 'w') as config_file:
config_file.write(config_data)
def write_build_files(self, folder, src_folder, bin_folder, build_folder, config_folder):
"""Write build files to the given folder."""
build_file = """#!/usr/bin/env sh
cd {src_folder}
sh autogen.sh
chmod +x ./configure
./configure --prefix={bin_folder} --enable-lua --enable-libjansson --enable-halfops --enable-chanaq
make
make install
cp {config_folder}/reference.conf {bin_folder}/etc/ircd.conf
""".format(src_folder=src_folder, bin_folder=bin_folder, config_folder=config_folder)
build_filename = os.path.join(folder, 'build')
with open(build_filename, 'w') as b_file:
b_file.write(build_file)
return True
def write_launch_files(self, folder, src_folder, bin_folder, build_folder, config_folder):
"""Write launch files to the given folder."""
launch_file = """#!/usr/bin/env sh
{bin_folder}/bin/ircd
""".format(src_folder=src_folder, bin_folder=bin_folder, config_folder=config_folder)
launch_filename = os.path.join(folder, 'launch')
with open(launch_filename, 'w') as l_file:
l_file.write(launch_file)
return True
|
|
"""Tests for gt.GraphTensor extension type (go/tf-gnn-api)."""
import functools
from typing import Mapping, Union
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_gnn.graph import adjacency as adj
from tensorflow_gnn.graph import graph_constants as const
from tensorflow_gnn.graph import graph_tensor as gt
from tensorflow_gnn.graph import graph_tensor_ops as ops
partial = functools.partial
as_tensor = tf.convert_to_tensor
as_ragged = tf.ragged.constant
GraphPiece = Union[gt.Context, gt.NodeSet, gt.EdgeSet]
class PoolingTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for pooling operations."""
@parameterized.parameters([
dict(
          description='max pooling of edge features to source and target nodes',
pooling='max',
node_set=gt.NodeSet.from_fields(sizes=as_tensor([1, 2]), features={}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 2]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 1, 2])),
const.TARGET: ('node', as_tensor([0, 0, 2, 1]))
}),
features={
'scalar':
as_tensor([1., 2., 3., 4.]),
'ragged':
as_ragged(
[[[1, 8], [2, 7]], [[3, 6]], [[4, 5]], [[5, 4]]],
ragged_rank=1)
}),
expected_source_fields={
'scalar':
as_tensor([2., 3., 4.]),
'ragged':
as_ragged([[[3, 8], [2, 7]], [[4, 5]], [[5, 4]]],
ragged_rank=1),
},
expected_target_fields={
'scalar':
as_tensor([2., 4., 3.]),
'ragged':
as_ragged([[[3, 8], [2, 7]], [[5, 4]], [[4, 5]]],
ragged_rank=1),
}),
dict(
          description='sum pooling of edge features to source and target nodes',
pooling='sum',
node_set=gt.NodeSet.from_fields(sizes=as_tensor([1, 2]), features={}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 3]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={
'scalar':
as_tensor([1., 2., 3., 4., 5.]),
'vector':
as_tensor([[1., 5.], [2., 4.], [3., 3.], [4., 2.],
[5., 1.]]),
'matrix':
as_tensor([[[1.]], [[2.]], [[3.]], [[4.]], [[5.]]]),
'ragged.1':
as_ragged([[1, 2], [], [3, 4], [], [5]]),
'ragged.2':
as_ragged([[[1], [2]], [], [[3]], [], []])
}),
expected_source_fields={
'scalar':
as_tensor([1. + 2. + 3., 0., 4. + 5.]),
'vector':
as_tensor([[1. + 2. + 3., 5. + 4. + 3.], [0., 0.],
[4. + 5., 2. + 1.]]),
'matrix':
as_tensor([[[1. + 2. + 3.]], [[0.]], [[4. + 5.]]]),
'ragged.1':
as_ragged([[1 + 3, 2 + 4], [], [5]]),
'ragged.2':
as_ragged([[[1 + 3], [2]], [], []]),
},
expected_target_fields={
'scalar':
as_tensor([3. + 4. + 5., 2., 1.]),
'vector':
as_tensor([[3. + 4. + 5., 3. + 2. + 1.], [2., 4.], [1., 5.]]),
'matrix':
as_tensor([[[3. + 4. + 5.]], [[2.]], [[1.]]]),
'ragged.1':
as_ragged([[3 + 5, 4], [], [1, 2]]),
'ragged.2':
as_ragged([[[3]], [], [[1], [2]]]),
})
])
def testEdgeFieldToNode(self, description: str, pooling: str,
node_set: gt.NodeSet, edge_set: gt.EdgeSet,
expected_source_fields: Mapping[str, const.Field],
expected_target_fields: Mapping[str, const.Field]):
graph = gt.GraphTensor.from_pieces(
node_sets={'node': node_set}, edge_sets={'edge': edge_set})
for fname, expected in expected_source_fields.items():
self.assertAllEqual(
expected,
ops.pool_edges_to_node(
graph, 'edge', const.SOURCE, pooling, feature_name=fname))
self.assertAllEqual(
expected,
ops.pool(graph, const.SOURCE, edge_set_name='edge',
reduce_type=pooling, feature_name=fname))
for fname, expected in expected_target_fields.items():
self.assertAllEqual(
expected,
ops.pool_edges_to_node(
graph, 'edge', const.TARGET, pooling, feature_name=fname))
self.assertAllEqual(
expected,
ops.pool(graph, const.TARGET, edge_set_name='edge',
reduce_type=pooling, feature_name=fname))
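  # Worked example for the 'max' case above: the SOURCE indices are
  # [0, 0, 1, 2] and the per-edge 'scalar' feature is [1., 2., 3., 4.], so
  # pooling takes the maximum over the edges incident to each node:
  # node 0 -> max(1., 2.) = 2., node 1 -> 3., node 2 -> 4., matching the
  # expected [2., 3., 4.]. (Comment added for exposition; inferred from the
  # test data, not from the ops implementation.)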
@parameterized.parameters([
dict(
description='sum pooling of node features to context, 1 component',
pooling='sum',
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([3]),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 0.], [2., 0.], [3., 0.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged.1': as_ragged([[1, 2], [3], []])
}),
expected_context_fields={
'scalar': as_tensor([1. + 2. + 3.]),
'vector': as_tensor([[1. + 2. + 3., 0. + 0. + 0.]]),
'matrix': as_tensor([[[1. + 2. + 3.]]]),
'ragged.1': as_ragged([[1 + 3, 2]])
}),
dict(
description='sum pooling of node features to context, 2 components',
pooling='sum',
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 0.], [2., 0.], [3., 0.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged.1': as_ragged([[1, 2], [3], []]),
'ragged.2': as_ragged([[[1, 2], []], [[3]], []])
}),
expected_context_fields={
'scalar': as_tensor([1. + 2., 3.]),
'vector': as_tensor([[1. + 2., 0. + 0.], [3., 0.]]),
'matrix': as_tensor([[[1. + 2.]], [[3.]]]),
'ragged.1': as_ragged([[1 + 3, 2], []]),
'ragged.2': as_ragged([[[1 + 3, 2], []], []])
})
])
def testNodeFieldToContext(self, description: str, pooling: str,
node_set: gt.NodeSet,
expected_context_fields: Mapping[str,
const.Field]):
graph = gt.GraphTensor.from_pieces(node_sets={'node': node_set})
for fname, expected in expected_context_fields.items():
self.assertAllEqual(
expected,
ops.pool_nodes_to_context(graph, 'node', pooling, feature_name=fname))
self.assertAllEqual(
expected,
ops.pool(graph, const.CONTEXT, node_set_name='node',
reduce_type=pooling, feature_name=fname))
@parameterized.parameters([
dict(
description='max pooling of edge features to graph context',
pooling='max',
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([3]),
adjacency=adj.HyperAdjacency.from_indices({
0: ('node', as_tensor([0, 0, 0])),
}),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 0.], [2., 0.], [3., 0.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged.1': as_ragged([[1, 2], [3], []])
}),
expected_context_fields={
'scalar': as_tensor([3.]),
'vector': as_tensor([[3., 0.]]),
'matrix': as_tensor([[[3.]]]),
'ragged.1': as_ragged([[3, 2]])
}),
dict(
          description='min pooling of edge features to graph context',
pooling='min',
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 1]),
adjacency=adj.HyperAdjacency.from_indices({
0: ('node', as_tensor([0, 0, 0])),
}),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 0.], [2., 0.], [3., 0.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged.1': as_ragged([[1, 2], [3], []]),
'ragged.2': as_ragged([[[1, 2], []], [[3]], []])
}),
expected_context_fields={
'scalar': as_tensor([1., 3.]),
'vector': as_tensor([[1., 0.], [3., 0.]]),
'matrix': as_tensor([[[1.]], [[3.]]]),
'ragged.1': as_ragged([[1, 2], []]),
'ragged.2': as_ragged([[[1, 2], []], []])
})
])
def testEdgeFieldToContext(self, description: str, pooling: str,
edge_set: gt.EdgeSet,
expected_context_fields: Mapping[str,
const.Field]):
graph = gt.GraphTensor.from_pieces(edge_sets={'edge': edge_set})
for fname, expected in expected_context_fields.items():
self.assertAllEqual(
expected,
ops.pool_edges_to_context(graph, 'edge', pooling, feature_name=fname))
self.assertAllEqual(
expected,
ops.pool(graph, const.CONTEXT, edge_set_name='edge',
reduce_type=pooling, feature_name=fname))
class BroadcastingTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for broadcasting operations."""
@parameterized.parameters([
dict(
description='source and target node features to edges broadcasting',
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([3]),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 3.], [2., 2.], [3., 1.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged': as_ragged([[1, 2], [3], []])
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 2]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={}),
expected_source_fields={
'scalar':
as_tensor([1., 1., 1., 3., 3.]),
'vector':
as_tensor([[1., 3.], [1., 3.], [1., 3.], [3., 1.], [3., 1.]]),
'matrix':
as_tensor([[[1.]], [[1.]], [[1.]], [[3.]], [[3.]]]),
'ragged':
as_ragged([[1, 2], [1, 2], [1, 2], [], []])
},
expected_target_fields={
'scalar':
as_tensor([3., 2., 1., 1., 1.]),
'vector':
as_tensor([[3., 1.], [2., 2.], [1., 3.], [1., 3.], [1., 3.]]),
'matrix':
as_tensor([[[3.]], [[2.]], [[1.]], [[1.]], [[1.]]]),
'ragged':
as_ragged([[], [3], [1, 2], [1, 2], [1, 2]])
})
])
def testEdgeFieldFromNode(self, description: str, node_set: gt.NodeSet,
edge_set: gt.EdgeSet,
expected_source_fields: Mapping[str, const.Field],
expected_target_fields: Mapping[str, const.Field]):
graph = gt.GraphTensor.from_pieces(
node_sets={'node': node_set}, edge_sets={'edge': edge_set})
for fname, expected in expected_source_fields.items():
self.assertAllEqual(
expected,
ops.broadcast_node_to_edges(
graph, 'edge', const.SOURCE, feature_name=fname))
self.assertAllEqual(
expected,
ops.broadcast(
graph, const.SOURCE, edge_set_name='edge', feature_name=fname))
for fname, expected in expected_target_fields.items():
self.assertAllEqual(
expected,
ops.broadcast_node_to_edges(
graph, 'edge', const.TARGET, feature_name=fname))
self.assertAllEqual(
expected,
ops.broadcast(
graph, const.TARGET, edge_set_name='edge', feature_name=fname))
@parameterized.parameters([
dict(
description='context features to nodes broadcasting, 1 component',
context=gt.Context.from_fields(features={
'scalar': as_tensor([1]),
'vector': as_tensor([[1., 2.]]),
'matrix': as_tensor([[[1., 2., 3.], [4., 5., 6.]]]),
'ragged': as_ragged([[[], [1], [], [2, 3]]]),
}),
node_set=gt.NodeSet.from_fields(sizes=as_tensor([3]), features={}),
expected_node_fields={
'scalar':
as_tensor([1] * 3),
'vector':
as_tensor([[1., 2.]] * 3),
'matrix':
as_tensor([[[1., 2., 3.], [4., 5., 6.]]] * 3),
'ragged':
as_ragged([[[], [1], [], [2, 3]], [[], [1], [], [2, 3]],
[[], [1], [], [2, 3]]]),
}),
dict(
description='context features to nodes broadcasting, 2 components',
context=gt.Context.from_fields(features={
'scalar': as_tensor([1, 2]),
'vector': as_tensor([[1.], [2.]]),
'ragged': as_ragged([[[], [1], []], [[1], [], [2]]]),
}),
node_set=gt.NodeSet.from_fields(sizes=as_tensor([3, 2]), features={}),
expected_node_fields={
'scalar':
as_tensor([1, 1, 1, 2, 2]),
'vector':
as_tensor([[1.], [1.], [1.], [2.], [2.]]),
'ragged':
as_ragged([[[], [1], []], [[], [1], []], [[], [1], []],
[[1], [], [2]], [[1], [], [2]]]),
})
])
def testNodeFieldFromContext(self, description: str, context: gt.Context,
node_set: gt.NodeSet,
expected_node_fields: Mapping[str, const.Field]):
graph = gt.GraphTensor.from_pieces(
context=context, node_sets={'node': node_set})
for fname, expected in expected_node_fields.items():
self.assertAllEqual(
expected,
ops.broadcast_context_to_nodes(graph, 'node', feature_name=fname))
self.assertAllEqual(
expected,
ops.broadcast(
graph, const.CONTEXT, node_set_name='node', feature_name=fname))
@parameterized.parameters([
dict(
description='context features to edges broadcasting, 1 component',
context=gt.Context.from_fields(features={
'scalar': as_tensor([1]),
'vector': as_tensor([[1., 2.]]),
'matrix': as_tensor([[[1., 2., 3.], [4., 5., 6.]]]),
'ragged': as_ragged([[[], [1], [], [2, 3]]]),
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([3]),
adjacency=adj.HyperAdjacency.from_indices({
0: ('node', as_tensor([0, 0, 0])),
}),
features={}),
expected_edge_fields={
'scalar':
as_tensor([1] * 3),
'vector':
as_tensor([[1., 2.]] * 3),
'matrix':
as_tensor([[[1., 2., 3.], [4., 5., 6.]]] * 3),
'ragged':
as_ragged([[[], [1], [], [2, 3]], [[], [1], [], [2, 3]],
[[], [1], [], [2, 3]]]),
}),
dict(
description='context features to nodes broadcasting, 2 components',
context=gt.Context.from_fields(features={
'scalar': as_tensor([1, 2]),
'vector': as_tensor([[1.], [2.]]),
'ragged': as_ragged([[[], [1], []], [[1], [], [2]]]),
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([3, 2]),
adjacency=adj.HyperAdjacency.from_indices({
0: ('node', as_tensor([0, 0, 0, 0, 0])),
}),
features={}),
expected_edge_fields={
'scalar':
as_tensor([1, 1, 1, 2, 2]),
'vector':
as_tensor([[1.], [1.], [1.], [2.], [2.]]),
'ragged':
as_ragged([[[], [1], []], [[], [1], []], [[], [1], []],
[[1], [], [2]], [[1], [], [2]]]),
})
])
def testEdgeFieldFromContext(self, description: str, context: gt.Context,
edge_set: gt.EdgeSet,
expected_edge_fields: Mapping[str, const.Field]):
graph = gt.GraphTensor.from_pieces(
context=context, edge_sets={'edge': edge_set})
for fname, expected in expected_edge_fields.items():
self.assertAllEqual(
expected,
ops.broadcast_context_to_edges(graph, 'edge', feature_name=fname))
self.assertAllEqual(
expected,
ops.broadcast(
graph, const.CONTEXT, edge_set_name='edge', feature_name=fname))
class FirstNodeOpsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for operations on first nodes per component (e.g., root nodes)."""
@parameterized.parameters([
dict(
description='1 component',
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([3]),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 3.], [2., 2.], [3., 1.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged': as_ragged([[1, 2], [3], []])
}),
expected_fields={
'scalar': as_tensor([1.]),
'vector': as_tensor([[1., 3.]]),
'matrix': as_tensor([[[1.]]]),
'ragged': as_ragged([[1, 2]])
}),
dict(
description='2 components',
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={
'scalar': as_tensor([1., 2., 3]),
'vector': as_tensor([[1., 3.], [2., 2.], [3., 1.]]),
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
'ragged': as_ragged([[1, 2], [3], []])
}),
expected_fields={
'scalar': as_tensor([1., 3.]),
'vector': as_tensor([[1., 3.], [3., 1.]]),
'matrix': as_tensor([[[1.]], [[3.]]]),
'ragged': as_ragged([[1, 2], []])
})
])
def testGatherFirstNode(self, description: str, node_set: gt.NodeSet,
expected_fields: Mapping[str, const.Field]):
graph = gt.GraphTensor.from_pieces(node_sets={'node': node_set})
for fname, expected in expected_fields.items():
self.assertAllEqual(
expected, ops.gather_first_node(graph, 'node', feature_name=fname))
def testGatherFirstNodeFails(self):
graph = gt.GraphTensor.from_pieces(node_sets={
'node': gt.NodeSet.from_fields(
sizes=as_tensor([2, 0, 1]),
features={'scalar': as_tensor([1., 2., 3])})})
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
r'gather_first_node.* no nodes'):
_ = ops.gather_first_node(graph, 'node', feature_name='scalar')
class ShuffleOpsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([
dict(
description='scalar',
context=gt.Context.from_fields(features={
'scalar': as_tensor([1, 2, 3]),
}),
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={
'scalar': as_tensor([1., 2., 3]),
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 3]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={
'scalar': as_tensor([1., 2., 3., 4., 5.]),
}),
expected_fields={
gt.Context: {
'scalar': [2, 1, 3]
},
gt.NodeSet: {
'scalar': [2., 1., 3.]
},
gt.EdgeSet: {
'scalar': [5., 2., 3., 1., 4.]
},
}),
dict(
description='vector',
context=gt.Context.from_fields(features={
'vector': as_tensor([[1], [2], [3]]),
}),
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={
'vector': as_tensor([[1., 3.], [2., 2.], [3., 1.]]),
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 3]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={
'vector':
as_tensor([[1., 5.], [2., 4.], [3., 3.], [4., 2.],
[5., 1.]]),
}),
expected_fields={
gt.Context: {
'vector': [[2], [1], [3]]
},
gt.NodeSet: {
'vector': [[2., 2.], [1., 3.], [3., 1.]]
},
gt.EdgeSet: {
'vector': [[5., 1.], [2., 4.], [3., 3.], [1., 5.], [4., 2.]]
},
}),
dict(
description='matrix',
context=gt.Context.from_fields(),
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={
'matrix': as_tensor([[[1.]], [[2.]], [[3.]]]),
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 3]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={
'matrix': as_tensor([[[1.]], [[2.]], [[3.]], [[4.]], [[5.]]]),
}),
expected_fields={
gt.NodeSet: {
'matrix': [[[2.]], [[1.]], [[3.]]]
},
gt.EdgeSet: {
'matrix': [[[5.]], [[2.]], [[3.]], [[1.]], [[4.]]]
},
}),
dict(
description='ragged.1',
context=gt.Context.from_fields(features={
'ragged.1': as_ragged([[[], [1], []], [[1], [3], [4], [2]]]),
}),
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={
'ragged.1':
as_ragged([[[1, 2], [4, 4], [5, 5]], [[3, 3]], []]),
}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 3]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={
'ragged.1': as_ragged([[[1, 2]], [], [[3, 4]], [], [[5, 5]]]),
}),
expected_fields={
gt.Context: {
'ragged.1': [[[], [4], [2]], [[1], [], [1], [3]]]
},
gt.NodeSet: {
'ragged.1': [[[4, 4], [5, 5], [3, 3]], [[1, 2]], []]
},
gt.EdgeSet: {
'ragged.1': [[[5, 5]], [], [[1, 2]], [], [[3, 4]]]
},
}),
dict(
description='ragged.2',
context=gt.Context.from_fields(),
node_set=gt.NodeSet.from_fields(
sizes=as_tensor([2, 1]),
features={'ragged.2': as_ragged([[[1, 2, 4], []], [[3]],
[[5]]])}),
edge_set=gt.EdgeSet.from_fields(
sizes=as_tensor([2, 3]),
adjacency=adj.HyperAdjacency.from_indices({
const.SOURCE: ('node', as_tensor([0, 0, 0, 2, 2])),
const.TARGET: ('node', as_tensor([2, 1, 0, 0, 0]))
}),
features={
'ragged.2':
as_ragged([[[1], [2], [4]], [], [[3], [5]], [], []])
}),
expected_fields={
gt.NodeSet: {
'ragged.2': [[[1, 2, 4], [5]], [[3]], [[]]]
},
gt.EdgeSet: {
'ragged.2': [[[2], [4], [3]], [], [[1], [5]], [], []]
},
}),
])
def testShuffleScalarComponents(
self,
description: str,
context: gt.Context,
node_set: gt.NodeSet,
edge_set: gt.EdgeSet,
expected_fields: Mapping[GraphPiece, Mapping[str, const.Field]]):
graph = gt.GraphTensor.from_pieces(
context,
{'node': node_set},
{'edge': edge_set})
shuffled = ops.shuffle_scalar_components(graph, seed=8191)
for fname, expected in expected_fields.get(gt.Context, {}).items():
self.assertAllEqual(expected, shuffled.context.features[fname])
for fname, expected in expected_fields.get(gt.NodeSet, {}).items():
self.assertAllEqual(expected, shuffled.node_sets['node'].features[fname])
for fname, expected in expected_fields.get(gt.EdgeSet, {}).items():
self.assertAllEqual(expected, shuffled.edge_sets['edge'].features[fname])
if __name__ == '__main__':
tf.test.main()
|
|
# pylint: disable-msg=C0111,C0103
import math
import unittest
from openmdao.main.api import Assembly, Component, Driver, set_as_top
from openmdao.main.datatypes.api import Float, Array
from openmdao.main.hasconstraints import HasConstraints
from openmdao.main.hasobjective import HasObjectives
from openmdao.main.hasparameters import HasParameters
from openmdao.util.decorators import add_delegate
from openmdao.util.testutil import assert_rel_error
exec_order = []
@add_delegate(HasObjectives, HasParameters, HasConstraints)
class DumbDriver(Driver):
def __init__(self):
self.oldval = 11
super(DumbDriver, self).__init__()
def execute(self):
global exec_order
exec_order.append(self.name)
self.oldval += 1
self.set_parameters([self.oldval] * len(self.get_parameters()))
super(DumbDriver, self).execute()
class Simple(Component):
a = Float(iotype='in', units='ft')
b = Float(iotype='in', units='ft')
c = Float(iotype='out', units='ft')
d = Float(iotype='out', units='ft')
def __init__(self):
super(Simple, self).__init__()
self.a = 1
self.b = 2
self.c = 3
self.d = -1
def execute(self):
global exec_order
exec_order.append(self.name)
self.c = self.a + self.b
self.d = self.a - self.b
allcomps = ['sub.comp1', 'sub.comp2', 'sub.comp3', 'sub.comp4', 'sub.comp5', 'sub.comp6',
'comp7', 'comp8']
topouts = ['sub.c2', 'sub.c4', 'sub.d1', 'sub.d3', 'sub.d5',
           'comp7.c', 'comp7.d', 'comp8.c', 'comp8.d']
topins = ['sub.a1', 'sub.a3', 'sub.b2', 'sub.b4', 'sub.b6',
          'comp7.a', 'comp7.b', 'comp8.a', 'comp8.b']
subins = ['comp1.a', 'comp1.b',
'comp2.a', 'comp2.b',
'comp3.a', 'comp3.b',
'comp4.a', 'comp4.b',
'comp5.a', 'comp5.b',
'comp6.a', 'comp6.b', ]
subouts = ['comp1.c', 'comp1.d',
'comp2.c', 'comp2.d',
'comp3.c', 'comp3.d',
'comp4.c', 'comp4.d',
'comp5.c', 'comp5.d',
'comp6.c', 'comp6.d', ]
subvars = subins + subouts
def fullvnames(cname, vnames):
return ['.'.join([cname, n]) for n in vnames]
def _nested_model():
global exec_order
exec_order = []
top = set_as_top(Assembly())
top.add('sub', Assembly())
top.add('comp7', Simple())
top.add('comp8', Simple())
sub = top.sub
sub.add('comp1', Simple())
sub.add('comp2', Simple())
sub.add('comp3', Simple())
sub.add('comp4', Simple())
sub.add('comp5', Simple())
sub.add('comp6', Simple())
top.driver.workflow.add(['comp7', 'sub', 'comp8'])
sub.driver.workflow.add(['comp1', 'comp2', 'comp3',
'comp4', 'comp5', 'comp6'])
sub.create_passthrough('comp1.a', 'a1')
sub.create_passthrough('comp2.b', 'b2')
sub.create_passthrough('comp3.a', 'a3')
sub.create_passthrough('comp3.d', 'd3')
sub.create_passthrough('comp4.b', 'b4')
sub.create_passthrough('comp4.c', 'c4')
sub.create_passthrough('comp6.b', 'b6')
sub.create_passthrough('comp2.c', 'c2')
sub.create_passthrough('comp1.d', 'd1')
sub.create_passthrough('comp5.d', 'd5')
return top
class DependsTestCase(unittest.TestCase):
def setUp(self):
top = self.top = _nested_model()
sub = top.sub
sub.connect('comp1.c', 'comp4.a')
sub.connect('comp5.c', 'comp1.b')
sub.connect('comp2.d', 'comp5.b')
sub.connect('comp3.c', 'comp5.a')
sub.connect('comp4.d', 'comp6.a')
top.connect('sub.c4', 'comp8.a')
## 'auto' passthroughs
# top.connect('comp7.c', 'sub.comp3.a')
# top.connect('sub.comp3.d', 'comp8.b')
top.connect('comp7.c', 'sub.a3')
top.connect('sub.d3', 'comp8.b')
def test_simple(self):
top = set_as_top(Assembly())
top.add('comp1', Simple())
top.driver.workflow.add('comp1')
vars = ['comp1.a', 'comp1.b', 'comp1.c', 'comp1.d']
top.run()
self.assertEqual(top.comp1.c, 3)
self.assertEqual(top.comp1.d, -1)
top.set('comp1.a', 5)
top.run()
self.assertEqual(top.comp1.c, 7)
self.assertEqual(top.comp1.d, 3)
top.run()
# now add another comp and connect them
top.add('comp2', Simple())
top.driver.workflow.add('comp2')
top.connect('comp1.c', 'comp2.a')
self.assertEqual(top.comp2.c, 3)
self.assertEqual(top.comp2.d, -1)
top.run()
self.assertEqual(top.comp2.c, 9)
self.assertEqual(top.comp2.d, 5)
def test_disconnect(self):
self.top.disconnect('comp7.c', 'sub.comp3.a')
self.top.sub.disconnect('c4')
self.top.disconnect('comp8')
def test_disconnect2(self):
self.assertEqual(set(self.top._depgraph.list_outputs('sub', connected=True)),
set(['sub.d3', 'sub.c4']))
self.top.disconnect('comp8')
self.assertEqual(self.top._depgraph.list_outputs('sub', connected=True),
[])
self.assertEqual(self.top.sub._exprmapper.get_source('c4'), 'comp4.c')
def test_lazy1(self):
self.top.run()
exec_counts = [self.top.get(x).exec_count for x in allcomps]
self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1], exec_counts)
outs = [(5, -3), (3, -1), (5, 1), (7, 3), (4, 6), (5, 1), (3, -1), (8, 6)]
newouts = []
for comp in allcomps:
newouts.append((self.top.get(comp + '.c'), self.top.get(comp + '.d')))
self.assertEqual(outs, newouts)
self.top.run()
def test_lazy2(self):
self.top.run()
self.top.sub.b6 = 3
self.top.run()
outs = [(5, -3), (3, -1), (5, 1), (7, 3), (4, 6), (6, 0), (3, -1), (8, 6)]
for comp, vals in zip(allcomps, outs):
self.assertEqual((comp, vals[0], vals[1]),
(comp, self.top.get(comp + '.c'), self.top.get(comp + '.d')))
def test_lazy3(self):
self.top.run()
self.top.comp7.a = 3
self.top.run()
outs = [(7, -5), (3, -1), (7, 3), (9, 5), (6, 8), (7, 3), (5, 1), (12, 6)]
for comp, vals in zip(allcomps, outs):
self.assertEqual((comp, vals[0], vals[1]),
(comp, self.top.get(comp + '.c'), self.top.get(comp + '.d')))
def test_lazy4(self):
self.top.run()
self.top.sub.set('b2', 5)
self.top.run()
outs = [(2, 0), (6, -4), (5, 1), (4, 0), (1, 9), (2, -2), (3, -1), (5, 3)]
for comp, vals in zip(allcomps, outs):
self.assertEqual((comp, vals[0], vals[1]),
(comp, self.top.get(comp + '.c'), self.top.get(comp + '.d')))
# def test_lazy_inside_out(self):
# self.top.run()
# self.top.comp7.b = 4
## now run sub.comp1 directly to make sure it will force
## running of all components that supply its inputs
# self.top.sub.comp1.run()
# outs = [(7,-5),(3,-1),(7,3),(7,3),(6,8),(5,1),(5,-3),(8,6)]
# for comp,vals in zip(allcomps,outs):
# self.assertEqual((comp,vals[0],vals[1]),
# (comp,self.top.get(comp+'.c'),self.top.get(comp+'.d')))
## now run comp8 directly, which should force sub.comp4 to run
# self.top.comp8.run()
# outs = [(7,-5),(3,-1),(7,3),(9,5),(6,8),(5,1),(5,-3),(12,6)]
# for comp,vals in zip(allcomps,outs):
# self.assertEqual((comp,vals[0],vals[1]),
# (comp,self.top.get(comp+'.c'),self.top.get(comp+'.d')))
def test_sequential(self):
        # verify that unconnected components execute in the order they were
        # added to the workflow rather than in hash order
global exec_order
top = set_as_top(Assembly())
top.add('c2', Simple())
top.add('c1', Simple())
top.add('c3', Simple())
top.add('c4', Simple())
top.driver.workflow.add(['c1', 'c2', 'c3', 'c4'])
top.run()
self.assertEqual(exec_order, ['c1', 'c2', 'c3', 'c4'])
top.connect('c4.c', 'c3.a') # now make c3 depend on c4
exec_order = []
# top.c4.a = 2 # makes c4 run again
top.run()
self.assertEqual(exec_order, ['c1', 'c2', 'c4', 'c3'])
def test_expr_deps(self):
top = set_as_top(Assembly())
top.add('driver1', DumbDriver())
top.add('driver2', DumbDriver())
top.add('c1', Simple())
top.add('c2', Simple())
top.add('c3', Simple())
top.driver.workflow.add(['driver1', 'driver2', 'c3'])
top.driver1.workflow.add('c2')
top.driver2.workflow.add('c1')
top.connect('c1.c', 'c2.a')
top.driver1.add_objective("c2.c*c2.d")
top.driver2.add_objective("c1.c")
top.run()
# FIXME: without lazy evaluation, c1 runs in the wrong order
self.assertEqual(exec_order, ['driver1', 'c2', 'driver2', 'c1', 'c3'])
def test_force_with_input_updates(self):
top = set_as_top(Assembly())
top.add('c2', Simple())
top.add('c1', Simple())
top.connect('c1.c', 'c2.a')
top.driver.workflow.add(['c1', 'c2'])
top.run()
self.assertEqual(top.c2.a, 3)
top.c1.a = 2
top.run()
self.assertEqual(top.c2.a, 4)
def test_get_required_compnames(self):
sub = self.top.sub
sub.add('driver', DumbDriver())
sub.driver.add_objective('comp6.c')
sub.driver.add_objective('comp5.d')
self.assertEqual(sub.driver._get_required_compnames(),
set(['comp5', 'comp6', '_pseudo_0', '_pseudo_1']))
sub.driver.add_parameter('comp2.a', low=0.0, high=10.0)
self.assertEqual(sub.driver._get_required_compnames(),
set(['comp2', 'comp5', 'comp1', 'comp4', 'comp6', '_pseudo_0', '_pseudo_1']))
sub.driver.add_parameter('comp3.b', low=0.0, high=10.0)
self.assertEqual(sub.driver._get_required_compnames(),
set(['comp6', 'comp5', 'comp1', 'comp4', 'comp3', 'comp2', '_pseudo_0', '_pseudo_1']))
def test_auto_workflow(self):
top = set_as_top(Assembly())
top.add('comp1', Simple())
top.add('comp2', Simple())
top.add('comp3', Simple())
top.add('driver', DumbDriver())
top.driver.add_parameter('comp2.a', low=-99, high=99)
top.driver.add_objective('comp3.c')
top.connect('comp1.c', 'comp2.b')
top.connect('comp2.c', 'comp3.a')
self.assertEqual(top.comp1.exec_count, 0)
self.assertEqual(top.comp2.exec_count, 0)
self.assertEqual(top.comp3.exec_count, 0)
top.run()
self.assertEqual(top.comp1.exec_count, 1)
self.assertEqual(top.comp2.exec_count, 1)
self.assertEqual(top.comp3.exec_count, 1)
top.run()
self.assertEqual(top.comp1.exec_count, 2)
self.assertEqual(top.comp2.exec_count, 2)
self.assertEqual(top.comp3.exec_count, 2)
class ArrSimple(Component):
ain = Array([0., 1., 2., 3.], iotype='in')
aout = Array([0., 1., 2., 3.], iotype='out')
def __init__(self):
super(ArrSimple, self).__init__()
def execute(self):
global exec_order
exec_order.append(self.name)
self.aout = self.ain * 2.0
class SimplePTAsm(Assembly):
def configure(self):
self.add('c2', Simple())
self.add('c1', Simple())
self.driver.workflow.add(['c1', 'c2'])
self.connect('c1.c', 'c2.a')
self.connect('c1.d', 'c2.b')
self.create_passthrough('c1.a', 'a1')
self.create_passthrough('c2.d', 'd2')
class DependsTestCase2(unittest.TestCase):
def setUp(self):
global exec_order
self.top = set_as_top(Assembly())
self.top.add('c2', Simple())
self.top.add('c1', Simple())
self.top.driver.workflow.add(['c1', 'c2'])
def test_connected_vars(self):
self.assertEqual(self.top._depgraph.list_outputs('c1', connected=True), [])
self.assertEqual(self.top._depgraph.list_outputs('c2', connected=True), [])
self.top.connect('c1.c', 'c2.a')
self.assertEqual(self.top._depgraph.list_outputs('c1', connected=True), ['c1.c'])
self.assertEqual(self.top._depgraph.list_inputs('c2', connected=True), ['c2.a'])
self.top.connect('c1.d', 'c2.b')
self.assertEqual(set(self.top._depgraph.list_outputs('c1', connected=True)), set(['c1.c', 'c1.d']))
self.assertEqual(set(self.top._depgraph.list_inputs('c2', connected=True)), set(['c2.a', 'c2.b']))
self.top.disconnect('c1.d', 'c2.b')
self.assertEqual(self.top._depgraph.list_outputs('c1', connected=True), ['c1.c'])
self.assertEqual(self.top._depgraph.list_inputs('c2', connected=True), ['c2.a'])
def test_unconnected_vars(self):
c1extras = set(['.'.join(('c1', n)) for n in self.top.c1.list_vars()]) - set(['c1.a', 'c1.b', 'c1.c', 'c1.d'])
c2extras = set(['.'.join(('c2', n)) for n in self.top.c2.list_vars()]) - set(['c2.a', 'c2.b', 'c2.c', 'c2.d'])
self.assertEqual(set(self.top._depgraph.list_outputs('c1', connected=False)) - c1extras, set(['c1.c', 'c1.d']))
self.assertEqual(set(self.top._depgraph.list_inputs('c2', connected=False)) - c2extras, set(['c2.a', 'c2.b']))
self.top.connect('c1.c', 'c2.a')
self.assertEqual(set(self.top._depgraph.list_outputs('c1', connected=False)) - c1extras, set(['c1.d']))
self.assertEqual(set(self.top._depgraph.list_inputs('c2', connected=False)) - c2extras, set(['c2.b']))
self.top.connect('c1.d', 'c2.b')
self.assertEqual(set(self.top._depgraph.list_outputs('c1', connected=False)) - c1extras, set())
self.assertEqual(set(self.top._depgraph.list_inputs('c2', connected=False)) - c2extras, set())
self.top.disconnect('c1.d', 'c2.b')
self.assertEqual(set(self.top._depgraph.list_outputs('c1', connected=False)) - c1extras, set(['c1.d']))
self.assertEqual(set(self.top._depgraph.list_inputs('c2', connected=False)) - c2extras, set(['c2.b']))
def test_simple_run(self):
self.top.connect('c1.c', 'c2.a')
self.top.connect('c1.d', 'c2.b')
self.top.run()
self.assertEqual(self.top.c1.a, 1)
self.assertEqual(self.top.c1.b, 2)
self.assertEqual(self.top.c1.c, 3)
self.assertEqual(self.top.c1.d, -1)
self.assertEqual(self.top.c2.a, 3)
self.assertEqual(self.top.c2.b, -1)
self.assertEqual(self.top.c2.c, 2)
self.assertEqual(self.top.c2.d, 4)
self.top.c1.a = 2
self.top.run()
self.assertEqual(self.top.c1.a, 2)
self.assertEqual(self.top.c1.b, 2)
self.assertEqual(self.top.c1.c, 4)
self.assertEqual(self.top.c1.d, 0)
self.assertEqual(self.top.c2.a, 4)
self.assertEqual(self.top.c2.b, 0)
self.assertEqual(self.top.c2.c, 4)
self.assertEqual(self.top.c2.d, 4)
def test_simple_passthrough(self):
cnames = ['a', 'b', 'c', 'd']
c1names = ['.'.join(['c1', n]) for n in cnames]
c2names = ['.'.join(['c2', n]) for n in cnames]
modnames = ['model.a1', 'model.d2']
self.top.add('model', SimplePTAsm())
self.top.driver.workflow.add(['model'])
self.top.connect('c1.c', 'model.a1')
self.top.connect('model.d2', 'c2.a')
self.top.run()
def test_array_expr(self):
class Dummy(Component):
x = Array([[-1, 1], [-2, 2]], iotype="in", shape=(2, 2))
y = Array([[-1, 1], [-2, 2]], iotype="out", shape=(2, 2))
def execute(self):
self.y = self.x
class Stuff(Assembly):
def configure(self):
self.add('d1', Dummy())
self.add('d2', Dummy())
self.connect('d1.y[0][0]', 'd2.x[1][0]')
self.connect('d1.y[1][0]', 'd2.x[0][0]')
self.driver.workflow.add(['d1', 'd2'])
s = set_as_top(Stuff())
s.d1.x = [[-5, -6], [-7, -8]]
s.run()
self.assertEqual(s.d2.x[0, 0], -7)
self.assertEqual(s.d2.x[1, 0], -5)
self.assertEqual(s.d2.x[0, 1], 1)
self.assertEqual(s.d2.x[1, 1], 2)
def test_array2(self):
top = set_as_top(Assembly())
top.add('c1', ArrSimple())
top.add('c3', ArrSimple())
top.driver.workflow.add(['c1', 'c3'])
top.connect('c1.aout[1]', 'c3.ain[2]')
top.run()
top.c1.ain = [55., 44., 33.]
top.run()
self.assertEqual(top.c3.ain[2], 88.)
def test_array3(self):
top = set_as_top(Assembly())
top.add('c1', ArrSimple())
top.add('sub', Assembly())
top.sub.add('c2', ArrSimple())
top.sub.create_passthrough('c2.ain')
top.sub.create_passthrough('c2.aout')
top.add('c3', ArrSimple())
top.driver.workflow.add(['c1', 'sub', 'c3'])
top.sub.driver.workflow.add('c2')
top.connect('c1.aout[1]', 'sub.ain[1]')
top.connect('sub.aout[1]', 'c3.ain[1]')
top.run()
top.c1.ain = [55., 44., 33.]
top.run()
self.assertEqual(top.sub.ain[1], 88.)
self.assertEqual(top.sub.aout[1], 176.)
self.assertEqual(top.c3.ain[1], 176.)
def test_units(self):
top = self.top
top.c2.add("velocity", Float(3.0, iotype='in', units='inch/s'))
top.c1.add("length", Float(9.0, iotype='out', units='inch'))
try:
top.connect('c1.c', 'c2.velocity')
except Exception as err:
self.assertEqual(str(err),
": Can't connect 'c1.c' to 'c2.velocity': Incompatible units for 'c1.c' and 'c2.velocity': units 'ft' are incompatible with assigning units of 'inch/s'")
else:
self.fail("Exception expected")
top.c1.a = 1.
top.c1.b = 2.
top.c1.length = 24.
top.connect('c1.length', 'c2.a')
top.run()
assert_rel_error(self, top.c2.a, 2., 0.0001)
class DependsTestCase3(unittest.TestCase):
def test_input_pseudocomp(self):
top = set_as_top(Assembly())
top.add('comp', ArrayComp())
top.add('driver', DumbDriver())
top.driver.workflow.add('comp')
top.driver.add_parameter('comp.a[0]', low=-100, high=100)
top.driver.add_constraint('comp.a[0] < 100')
# The first time it runs, the pcomp inputs update
top.run()
self.assertEqual(top.comp.a[0], top._pseudo_0.in0)
# The second time it runs, the pcomp inputs no longer update
top.run()
self.assertEqual(top.comp.a[0], top._pseudo_0.in0)
class ArrayComp(Component):
a = Array([1, 2, 3, 4, 5], iotype="in")
b = Array([1, 2, 3, 4, 5], iotype='in')
c = Array([2, 4, 6, 8, 10], iotype='out')
d = Array([0, 0, 0, 0, 0], iotype='out')
def execute(self):
global exec_order
exec_order.append(self.name)
self.c = self.a + self.b
self.d = self.a - self.b
class ExprDependsTestCase(unittest.TestCase):
def setUp(self):
global exec_order
exec_order = []
self.top = set_as_top(Assembly())
self.top.add('c2', ArrayComp())
self.top.add('c1', ArrayComp())
self.top.driver.workflow.add(['c1', 'c2'])
def test_basic(self):
self.top.connect('c1.c', 'c2.a')
self.top.connect('c1.d', 'c2.b')
self.top.run()
self.assertEqual(list(self.top.c1.c), [2, 4, 6, 8, 10])
self.assertEqual(list(self.top.c1.d), [0, 0, 0, 0, 0])
self.assertEqual(list(self.top.c2.a), [2, 4, 6, 8, 10])
self.assertEqual(list(self.top.c2.b), [0, 0, 0, 0, 0])
self.assertEqual(list(self.top.c2.c), [2, 4, 6, 8, 10])
self.assertEqual(list(self.top.c2.d), [2, 4, 6, 8, 10])
def test_entry_connect(self):
self.top.connect('c1.c[2]', 'c2.a[3]')
self.top.run()
self.assertEqual(list(self.top.c1.c), [2, 4, 6, 8, 10])
self.assertEqual(list(self.top.c1.d), [0, 0, 0, 0, 0])
self.assertEqual(list(self.top.c2.a), [1, 2, 3, 6, 5])
self.assertEqual(list(self.top.c2.b), [1, 2, 3, 4, 5])
self.assertEqual(list(self.top.c2.c), [2, 4, 6, 10, 10])
self.assertEqual(list(self.top.c2.d), [0, 0, 0, 2, 0])
# now see if we can connect to another entry on c2.a
self.top.connect('c1.d[2]', 'c2.a[1]')
self.top.run()
self.assertEqual(list(self.top.c1.c), [2, 4, 6, 8, 10])
self.assertEqual(list(self.top.c1.d), [0, 0, 0, 0, 0])
self.assertEqual(list(self.top.c2.a), [1, 0, 3, 6, 5])
self.assertEqual(list(self.top.c2.b), [1, 2, 3, 4, 5])
self.assertEqual(list(self.top.c2.c), [2, 2, 6, 10, 10])
self.assertEqual(list(self.top.c2.d), [0, -2, 0, 2, 0])
# make sure only one connection allowed to a particular array entry
try:
self.top.connect('c1.d[1]', 'c2.a[1]')
except Exception as err:
            self.assertEqual(str(err),
                             ": Can't connect 'c1.d[1]' to 'c2.a[1]': : 'c2.a[1]' is already connected to source 'c1.d[2]'")
        else:
            self.fail("Exception expected")
def test_invalidation(self):
vnames = ['a', 'b', 'c', 'd']
self.top.run()
self.top.connect('c1.c[2]', 'c2.a[3]')
exec_order = []
self.top.run()
exec_order = []
self.top.c1.a = [9, 9, 9, 9, 9]
self.top.run()
self.assertEqual(list(self.top.c2.a), [1, 2, 3, 12, 5])
def test_src_exprs(self):
vnames = ['a', 'b', 'c', 'd']
top = _nested_model()
top.run()
total = top.sub.comp1.c + top.sub.comp2.c + top.sub.comp3.c
top.sub.connect('comp1.c+comp2.c+comp3.c', 'comp4.a')
exec_order = []
top.run()
self.assertEqual(total, top.sub.comp4.a)
top.sub.comp2.a = 99
exec_order = []
top.sub.run()
total = top.sub.comp1.c + top.sub.comp2.c + top.sub.comp3.c
self.assertEqual(total, top.sub.comp4.a)
top.sub.comp2.a = 88
top.comp7.a = 11
top.sub.run()
total = top.sub.comp1.c + top.sub.comp2.c + top.sub.comp3.c
self.assertEqual(total, top.sub.comp4.a)
def test_float_exprs(self):
vnames = ['a', 'b', 'c', 'd']
top = _nested_model()
top.run()
total = math.sin(3.14) * top.sub.comp2.c
top.sub.connect('sin(3.14)*comp2.c', 'comp4.a')
exec_order = []
top.run()
self.assertEqual(total, top.sub.comp4.a)
top.sub.disconnect('sin(3.14)*comp2.c', 'comp4.a')
total = 3.0 * top.sub.comp1.c
top.sub.connect('3.0*comp1.c', 'comp4.a')
top.run()
self.assertEqual(total, top.sub.comp4.a)
def test_slice_exprs(self):
vnames = ['a[0:2:]', 'a', 'b', 'c', 'd']
top = self.top
top.run()
total = top.c1.c[3:]
top.connect('c1.c[3:]', 'c2.a[0:2]')
exec_order = []
top.run()
self.assertEqual(list(total), list(top.c2.a[0:2]))
def _all_nested_connections(self, obj):
"""Return a list of all connections from ExprMappers and DepGraphs all the way down."""
visited = set()
connection_set = set()
objstack = [obj]
while objstack:
obj = objstack.pop()
if obj not in visited:
visited.add(obj)
if isinstance(obj, Assembly):
connection_set.update(obj.list_connections())
connection_set.update(obj._exprmapper.list_connections())
connection_set.update(obj._depgraph.list_connections())
for name in obj.list_containers():
comp = getattr(obj, name)
if isinstance(comp, Assembly):
connection_set.update(comp._depgraph.list_connections())
if isinstance(comp, Assembly):
objstack.append(comp)
return connection_set
def test_connection_cleanup(self):
global exec_order
vnames = ['a', 'b', 'c', 'd']
top = _nested_model()
initial_connections = set(top.sub.list_connections())
top.run()
top.sub.connect('comp1.c', 'comp3.b')
self.assertEqual(set(top.sub.list_connections()) - initial_connections,
set([('comp1.c', 'comp3.b')]))
top.sub.disconnect('comp1')
self.assertEqual(set(top.sub.list_connections()) - initial_connections, set())
for u, v in self._all_nested_connections(top.sub):
self.assertTrue('comp1.' not in u and 'comp1.' not in v)
def test_connection_cleanup2(self):
top = _nested_model()
initial_connections = set(top.sub.list_connections())
top.run()
top.sub.connect('comp1.c*3.0', 'comp4.a')
top.sub.connect('comp1.c', 'comp3.b')
top.sub.disconnect('comp1.c', 'comp3.b')
self.assertEqual(set(top.sub.list_connections()) - initial_connections,
set([('_pseudo_0.out0', 'comp4.a'),
('comp1.c', '_pseudo_0.in0')]))
self.assertEqual(initial_connections - set(top.sub.list_connections()),
set())
self.assertEqual(set(top.sub.list_connections(visible_only=True, show_expressions=True)) - initial_connections,
set([('comp1.c*3.0', 'comp4.a')]))
for u, v in self._all_nested_connections(top.sub):
self.assertTrue(not ('comp1.c' in u and 'comp3.b' in v))
def test_bad_exprs(self):
top = _nested_model()
try:
top.sub.connect('comp1.c', 'comp4.a+comp4.b')
except Exception as err:
self.assertEqual(str(err),
"sub: Can't connect 'comp1.c' to 'comp4.a+comp4.b': bad connected expression 'comp4.a+comp4.b' must reference exactly one variable")
else:
self.fail("Exception expected")
try:
top.sub.connect('comp1.c', 'comp4.a[foo]')
except Exception as err:
self.assertEqual(str(err),
"sub: Can't connect 'comp1.c' to 'comp4.a[foo]': bad destination expression 'comp4.a[foo]': only constant indices are allowed for arrays and slices")
else:
self.fail("Exception expected")
try:
top.sub.connect('comp1.c', 'comp4.a(5)')
except Exception as err:
self.assertEqual(str(err),
"sub: Can't connect 'comp1.c' to 'comp4.a(5)': bad destination expression 'comp4.a(5)': not assignable")
else:
self.fail("Exception expected")
if __name__ == "__main__":
# import cProfile
# cProfile.run('unittest.main()', 'profout')
# import pstats
# p = pstats.Stats('profout')
# p.strip_dirs()
# p.sort_stats('time')
# p.print_stats()
# print '\n\n---------------------\n\n'
# p.print_callers()
# print '\n\n---------------------\n\n'
# p.print_callees()
unittest.main()
|
|
# $Id$
#
# Copyright (C) 2006 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import RDConfig
import unittest,sys,os,math
from rdkit import Chem
from rdkit.Chem.FeatMaps import FeatMaps,FeatMapParser,FeatMapUtils
from rdkit.Chem.FeatMaps.FeatMapPoint import FeatMapPoint
from rdkit.Geometry import Point3D
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<=tol
def pteq(p1,p2,tol=1e-4):
return feq((p1-p2).LengthSq(),0.0,tol)
class TestCase(unittest.TestCase):
def setUp(self):
self.paramTxt="""
BeginParams
family=Acceptor radius=0.5 profile=Box
EndParams
"""
self.p = FeatMapParser.FeatMapParser()
def test1Basics(self):
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.1, 0.0, 0.0) weight=1.0
family=Acceptor pos=(3.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==3)
self.failIf(FeatMapUtils.MergeFeatPoints(fm1))
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance))
self.failUnless(fm1.GetNumFeatures()==2)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.05,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(3.0,0,0)))
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.1, 0.0, 0.0) weight=1.0
family=Acceptor pos=(3.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance))
self.failUnless(fm1.GetNumFeatures()==2)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.05,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(3.5,0,0)))
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.2, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.3, 0.0, 0.0) weight=1.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.00,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.25,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.2, 0.0, 0.0) weight=3.0
family=Acceptor pos=(1.3, 0.0, 0.0) weight=1.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance,
mergeMethod=FeatMapUtils.MergeMethod.Average))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.00,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.25,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance,
mergeMethod=FeatMapUtils.MergeMethod.WeightedAverage))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.00,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.225,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance,
mergeMethod=FeatMapUtils.MergeMethod.UseLarger))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.00,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.2,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
def _test1BasicsRepeated(self):
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(0.7, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.2, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.3, 0.0, 0.0) weight=1.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==5)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance))
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(0.7,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.0,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(1.25,0,0)))
self.failUnless(pteq(fm1.GetFeature(3).GetPos(),Point3D(4.0,0,0)))
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(0.7,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.125,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Distance))
self.failUnless(fm1.GetNumFeatures()==2)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(0.9125,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(4.0,0,0)))
def test2ScoreBasics(self):
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.2, 0.0, 0.0) weight=3.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Overlap,
mergeMethod=FeatMapUtils.MergeMethod.Average))
self.failUnless(fm1.GetNumFeatures()==2)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.1,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(4.0,0,0)))
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.1, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.3, 0.0, 0.0) weight=3.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Overlap,
mergeMethod=FeatMapUtils.MergeMethod.Average))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.15,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.1,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
txt = self.paramTxt+"""
BeginPoints
family=Acceptor pos=(1.0, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.2, 0.0, 0.0) weight=1.0
family=Acceptor pos=(1.6, 0.0, 0.0) weight=3.0
family=Acceptor pos=(4.0, 0.0, 0.0) weight=1.0
EndPoints
"""
self.p.SetData(txt)
fm1 = self.p.Parse()
self.failUnless(fm1.GetNumFeatures()==4)
self.failUnless(FeatMapUtils.MergeFeatPoints(fm1,FeatMapUtils.MergeMetric.Overlap,
mergeMethod=FeatMapUtils.MergeMethod.Average))
self.failUnless(fm1.GetNumFeatures()==3)
self.failUnless(pteq(fm1.GetFeature(0).GetPos(),Point3D(1.0,0,0)))
self.failUnless(pteq(fm1.GetFeature(1).GetPos(),Point3D(1.4,0,0)))
self.failUnless(pteq(fm1.GetFeature(2).GetPos(),Point3D(4.0,0,0)))
if __name__ == '__main__':
unittest.main()
|
|
import re
import boto3
import logging
import requests
from datetime import datetime, timezone, timedelta
from botocore import UNSIGNED
from botocore.client import Config
from time import sleep
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from indra import get_config, has_config
from indra.util.nested_dict import NestedDict
logger = logging.getLogger(__name__)
def get_ids(job_list):
if job_list is None:
return None
return [job['jobId'] for job in job_list]
def kill_all(job_queue, reason='None given', states=None, kill_list=None):
"""Terminates/cancels all jobs on the specified queue.
Parameters
----------
job_queue : str
The name of the Batch job queue on which you wish to terminate/cancel
jobs.
reason : str
Provide a reason for the kill that will be recorded with the job's
record on AWS.
states : None or list[str]
A list of job states to remove. Possible states are 'STARTING',
'RUNNABLE', and 'RUNNING'. If None, all jobs in all states will be
ended (modulo the `kill_list` below).
kill_list : None or list[dict]
A list of job dictionaries (as returned by the submit function) that
you specifically wish to kill. All other jobs on the queue will be
ignored. If None, all jobs on the queue will be ended (modulo the
above).
Returns
-------
killed_ids : list[str]
A list of the job ids for jobs that were killed.
"""
# Default is all states.
if states is None:
states = ['STARTING', 'RUNNABLE', 'RUNNING']
# Get batch client
batch = boto3.client('batch')
# Get all other jobs, and terminate them.
killed_ids = []
for status in states:
running = batch.list_jobs(jobQueue=job_queue, jobStatus=status)
active_job_list = running.get('jobSummaryList')
if active_job_list is None:
continue
for job in active_job_list:
# Check if this is one of the specified jobs, if any specified.
ids_to_kill = get_ids(kill_list)
if ids_to_kill is not None and job['jobId'] not in ids_to_kill:
continue
# End the job.
if status == 'RUNNING':
logger.info('Terminating {jobName} ({jobId})'.format(**job))
res = batch.terminate_job(jobId=job['jobId'], reason=reason)
else:
logger.info('Canceling {jobName} ({jobId})'.format(**job))
res = batch.cancel_job(jobId=job['jobId'], reason=reason)
# Record the result of the kill
killed_ids.append(res)
return killed_ids
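# Illustrative usage sketch (the queue name matches the default used further
# below; the reason string is an assumption): cancel only jobs that have not
# yet started running, leaving RUNNING jobs untouched.
#
#   kill_all('run_reach_queue', reason='stale submissions',
#            states=['STARTING', 'RUNNABLE'])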
def tag_instance(instance_id, **tags):
"""Tag a single ec2 instance."""
logger.debug("Got request to add tags %s to instance %s."
% (str(tags), instance_id))
ec2 = boto3.resource('ec2')
instance = ec2.Instance(instance_id)
# Remove None's from `tags` and reformat to the list format that
# boto3 expects
tag_list = [{'Key': k, 'Value': v} for k, v in tags.items() if k and v]
if tag_list:
logger.info('Adding project tags "%s" to instance %s'
% (str(tag_list), instance_id))
instance.create_tags(Tags=tag_list)
vols = instance.volumes.all()
for page in vols.pages():
for vol in page:
vol.create_tags(Tags=tag_list)
else:
logger.info('No new tags from: %s' % str(tags))
instance_tags = {tag.get('Key'): tag.get('Value') for tag in instance.tags}
logger.info('Updated instance tags: %s' % instance_tags)
return
def tag_myself(project='aske', **other_tags):
"""Function run when indra is used in an EC2 instance to apply tags."""
base_url = "http://169.254.169.254"
try:
resp = requests.get(base_url + "/latest/meta-data/instance-id")
except requests.exceptions.ConnectionError:
logger.warning("Could not connect to service. Note this should only "
"be run from within a batch job.")
return
instance_id = resp.text
tag_instance(instance_id, project=project, **other_tags)
return
def get_batch_command(command_list, project=None, purpose=None):
"""Get the command appropriate for running something on batch."""
command_str = ' '.join(command_list)
ret = ['python3', '-m', 'indra.util.aws', 'run_in_batch', command_str]
if not project and has_config('DEFAULT_AWS_PROJECT'):
project = get_config('DEFAULT_AWS_PROJECT')
if project:
ret += ['--project', project]
if purpose:
ret += ['--purpose', purpose]
return ret
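# Illustrative sketch (hypothetical command and script name): the returned
# list re-invokes this module inside the batch container so tagging happens
# before the primary command runs, e.g.
#
#   get_batch_command(['python3', 'my_script.py', '--chunk', '7'],
#                     project='aske', purpose='reading')
#   == ['python3', '-m', 'indra.util.aws', 'run_in_batch',
#       'python3 my_script.py --chunk 7', '--project', 'aske',
#       '--purpose', 'reading']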
def run_in_batch(command_list, project, purpose):
from subprocess import call
tag_myself(project, purpose=purpose)
logger.info("Running command list: %s" % str(command_list))
logger.info('\n'+20*'='+' Begin Primary Command Output '+20*'='+'\n')
ret_code = call(command_list)
logger.info('\n'+21*'='+' End Primary Command Output '+21*'='+'\n')
return ret_code
def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
"""Returns a list of dicts with jobName and jobId for each job with the
given status."""
batch = boto3.client('batch')
jobs = batch.list_jobs(jobQueue=job_queue, jobStatus=job_status)
return jobs.get('jobSummaryList')
s3_path_patt = re.compile('^s3:([-a-zA-Z0-9_]+)/(.*?)$')
class JobLog(object):
"""Gets the Cloudwatch log associated with the given job.
Parameters
----------
job_info : dict
dict containing entries for 'jobName' and 'jobId', e.g., as returned
by get_jobs()
log_group_name : string
Name of the log group; defaults to '/aws/batch/job'
    After `get_lines` is called, the event messages are accumulated in
    `self.lines`, with the earliest events listed first.
    """
_suffix_base = '/part_'
def __init__(self, job_info, log_group_name='/aws/batch/job',
verbose=False, append_dumps=True):
self.job_name = job_info['jobName']
self.job_id = job_info['jobId']
self.logs_client = boto3.client('logs')
self.verbose = verbose
self.log_group_name = log_group_name
batch = boto3.client('batch')
resp = batch.describe_jobs(jobs=[self.job_id])
job_desc = resp['jobs'][0]
job_def_name = job_desc['jobDefinition'].split('/')[-1].split(':')[0]
task_arn_id = job_desc['container']['taskArn'].split('/')[-1]
self.log_stream_name = '%s/default/%s' % (job_def_name, task_arn_id)
self.latest_timestamp = None
self.lines = []
self.nextToken = None
self.__len = 0
self.append = append_dumps
return
def __len__(self):
return self.__len
def clear_lines(self):
self.lines = []
def dump(self, out_file, append=None):
"""Dump the logs in their entirety to the specified file."""
if append is None:
append = self.append
elif append != self.append:
logger.info("Overriding default append behavior. This could muddy "
"future loads.")
m = s3_path_patt.match(out_file)
if m is not None:
# If the user wants the files on s3...
bucket, prefix = m.groups()
s3 = boto3.client('s3')
# Find the largest part number among the current suffixes
if append:
max_num = 0
for key in iter_s3_keys(s3, bucket, prefix, do_retry=False):
if key[len(prefix):].startswith(self._suffix_base):
num = int(key[len(prefix + self._suffix_base):])
                        if num > max_num:
                            max_num = num
# Create the new suffix, and dump the lines to s3.
new_suffix = self._suffix_base + str(max_num + 1)
key = prefix + new_suffix
else:
key = prefix
s3.put_object(Bucket=bucket, Key=key, Body=self.dumps())
else:
# Otherwise, if they want them locally...
with open(out_file, 'wt' if append else 'w') as f:
for line in self.lines:
f.write(line)
return
def load(self, out_file):
"""Load the log lines from the cached files."""
m = s3_path_patt.match(out_file)
if m is not None:
bucket, prefix = m.groups()
s3 = boto3.client('s3')
if self.append:
prior_line_bytes = []
for key in sorted(iter_s3_keys(s3, bucket, prefix)):
if key[len(prefix):].startswith(self._suffix_base):
res = s3.get_object(Bucket=bucket, Key=key)
prior_line_bytes += res['Body'].read().splitlines()
else:
res = s3.get_object(Bucket=bucket, Key=prefix)
prior_line_bytes = res['Body'].read().splitlines()
prior_lines = [s.decode('utf-8') + '\n'
for s in prior_line_bytes]
else:
with open(out_file, 'r') as f:
prior_lines = f.readlines()
self.lines = prior_lines + self.lines
return
def dumps(self):
return ''.join(self.lines)
def get_lines(self):
kwargs = {'logGroupName': self.log_group_name,
'logStreamName': self.log_stream_name,
'startFromHead': True}
while True:
if self.nextToken is not None:
kwargs['nextToken'] = self.nextToken
response = self.logs_client.get_log_events(**kwargs)
# If we've gotten all the events already, the nextForwardToken for
# this call will be the same as the last one
if response.get('nextForwardToken') == self.nextToken:
break
else:
events = response.get('events')
if events:
for evt in events:
line = '%s: %s\n' % (evt['timestamp'], evt['message'])
self.lines.append(line)
self.latest_timestamp = \
(datetime.fromtimestamp(evt['timestamp']/1000)
.astimezone(timezone.utc)
.replace(tzinfo=None))
self.__len += 1
if self.verbose:
logger.info('%d %s' % (len(self.lines), line))
self.nextToken = response.get('nextForwardToken')
return
def dump_logs(job_queue='run_reach_queue', job_status='RUNNING'):
"""Write logs for all jobs with given the status to files."""
jobs = get_jobs(job_queue, job_status)
for job in jobs:
log = JobLog(job)
log.get_lines()
log.dump('{jobName}_{jobId}.log'.format(**job))
def get_date_from_str(date_str):
"""Get a utc datetime object from a string of format %Y-%m-%d-%H-%M-%S
Parameters
----------
date_str : str
A string of the format %Y(-%m-%d-%H-%M-%S). The string is assumed
to represent a UTC time.
Returns
-------
datetime.datetime
"""
date_format = '%Y-%m-%d-%H-%M-%S'
    # Pad date_str if it specifies less than the full format
if 1 <= len(date_str.split('-')) < 6:
# Add Jan if not present
if len(date_str.split('-')) == 1:
date_str += '-01'
# Add day after month if not present
if len(date_str.split('-')) == 2:
date_str += '-01'
# Pad with 0 hours, 0 minutes and 0 seconds
while len(date_str.split('-')) < 6:
date_str += '-0'
return datetime.strptime(
date_str, date_format).replace(
tzinfo=timezone.utc)
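# Illustrative examples of the padding behavior (not part of the original
# module); all results are timezone-aware UTC datetimes:
#
#   get_date_from_str('2020')                -> datetime(2020, 1, 1, 0, 0, 0)
#   get_date_from_str('2020-06')             -> datetime(2020, 6, 1, 0, 0, 0)
#   get_date_from_str('2020-06-15-12-30-05') -> datetime(2020, 6, 15, 12, 30, 5)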
def iter_s3_keys(s3, bucket, prefix, date_cutoff=None, after=True,
with_dt=False, do_retry=True):
"""Iterate over the keys in an s3 bucket given a prefix
Parameters
----------
s3 : boto3.client.S3
A boto3.client.S3 instance
bucket : str
The name of the bucket to list objects in
prefix : str
The prefix filtering of the objects for list
date_cutoff : str|datetime.datetime
A datestring of format %Y(-%m-%d-%H-%M-%S) or a datetime.datetime
object. The date is assumed to be in UTC. By default no filtering
is done. Default: None.
after : bool
If True, only return objects after the given date cutoff.
Otherwise, return objects before. Default: True
with_dt : bool
If True, yield a tuple (key, datetime.datetime(LastModified)) of
the s3 Key and the object's LastModified date as a
datetime.datetime object, only yield s3 key otherwise.
Default: False.
do_retry : bool
If True, and no contents appear, try again in case there was simply a
brief lag. If False, do not retry, and just accept the "directory" is
empty.
Returns
-------
iterator[key]|iterator[(key, datetime.datetime)]
An iterator over s3 keys or (key, LastModified) tuples.
"""
if date_cutoff:
date_cutoff = date_cutoff if\
isinstance(date_cutoff, datetime) else\
get_date_from_str(date_cutoff)
# Check timezone info
if date_cutoff.utcoffset() is None:
date_cutoff = date_cutoff.replace(tzinfo=timezone.utc)
if date_cutoff.utcoffset() != timedelta():
date_cutoff = date_cutoff.astimezone(timezone.utc)
is_truncated = True
marker = None
while is_truncated:
# Get the (next) batch of contents.
if marker:
resp = s3.list_objects(Bucket=bucket, Prefix=prefix, Marker=marker)
else:
resp = s3.list_objects(Bucket=bucket, Prefix=prefix)
# Handle case where no contents are found.
if not resp.get('Contents'):
if do_retry:
logger.info("Prefix \"%s\" does not seem to have children. "
"Retrying once." % prefix)
do_retry = False
sleep(0.1)
continue
else:
logger.info("No contents found for \"%s\"." % prefix)
break
# Filter by time.
for entry in resp['Contents']:
if entry['Key'] != marker:
                if (date_cutoff is None
                        or (date_cutoff and after
                            and entry['LastModified'] > date_cutoff)
                        or (date_cutoff and not after
                            and entry['LastModified'] < date_cutoff)):
yield (entry['Key'], entry['LastModified']) if with_dt \
else entry['Key']
is_truncated = resp['IsTruncated']
marker = entry['Key']
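# Minimal usage sketch (hypothetical bucket and prefix; assumes AWS
# credentials are configured): iterate over keys modified after a cutoff date.
#
#   s3 = boto3.client('s3')
#   for key in iter_s3_keys(s3, 'my-bucket', 'reach/output/',
#                           date_cutoff='2020-01-01', after=True):
#       print(key)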
def rename_s3_prefix(s3, bucket, old_prefix, new_prefix):
"""Change an s3 prefix within the same bucket."""
to_delete = []
for key in iter_s3_keys(s3, bucket, old_prefix):
# Copy the object to the new key (with prefix replaced)
new_key = key.replace(old_prefix, new_prefix)
s3.copy_object(Bucket=bucket, Key=new_key,
CopySource={'Bucket': bucket, 'Key': key},
MetadataDirective='COPY',
TaggingDirective='COPY')
# Keep track of the objects that will need to be deleted (the old keys)
to_delete.append({'Key': key})
# Delete objects in maximum batches of 1000.
if len(to_delete) >= 1000:
s3.delete_objects(Bucket=bucket,
Delete={'Objects': to_delete[:1000]})
del to_delete[:1000]
# Get any stragglers.
s3.delete_objects(Bucket=bucket,
Delete={'Objects': to_delete})
return
def get_s3_file_tree(s3, bucket, prefix, date_cutoff=None, after=True,
with_dt=False):
"""Overcome s3 response limit and return NestedDict tree of paths.
The NestedDict object also allows the user to search by the ends of a path.
The tree mimics a file directory structure, with the leave nodes being the
full unbroken key. For example, 'path/to/file.txt' would be retrieved by
ret['path']['to']['file.txt']['key']
The NestedDict object returned also has the capability to get paths that
lead to a certain value. So if you wanted all paths that lead to something
called 'file.txt', you could use
ret.get_paths('file.txt')
For more details, see the NestedDict docs.
Parameters
----------
s3 : boto3.client.S3
A boto3.client.S3 instance
bucket : str
The name of the bucket to list objects in
prefix : str
The prefix filtering of the objects for list
date_cutoff : str|datetime.datetime
A datestring of format %Y(-%m-%d-%H-%M-%S) or a datetime.datetime
object. The date is assumed to be in UTC. By default no filtering
is done. Default: None.
after : bool
If True, only return objects after the given date cutoff.
Otherwise, return objects before. Default: True
with_dt : bool
If True, yield a tuple (key, datetime.datetime(LastModified)) of
the s3 Key and the object's LastModified date as a
datetime.datetime object, only yield s3 key otherwise.
Default: False.
Returns
-------
NestedDict
        A file tree represented as a NestedDict
"""
file_tree = NestedDict()
pref_path = prefix.split('/')[:-1] # avoid the trailing empty str.
for k in iter_s3_keys(s3, bucket, prefix, date_cutoff, after, with_dt):
if with_dt:
key, dt = k
else:
key, dt = k, None
full_path = key.split('/')
relevant_path = full_path[len(pref_path):]
curr = file_tree
for step in relevant_path:
curr = curr[step]
curr['key'] = k
return file_tree
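# Minimal usage sketch (hypothetical bucket and prefix): walk the returned
# NestedDict by path segments, or search for a leaf by name as described in
# the docstring above.
#
#   s3 = boto3.client('s3')
#   tree = get_s3_file_tree(s3, 'my-bucket', 'path/')
#   key = tree['to']['file.txt']['key']     # -> 'path/to/file.txt'
#   paths = tree.get_paths('file.txt')      # all paths ending in 'file.txt'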
def get_s3_client(unsigned=True):
"""Return a boto3 S3 client with optional unsigned config.
Parameters
----------
unsigned : Optional[bool]
If True, the client will be using unsigned mode in which public
resources can be accessed without credentials. Default: True
Returns
-------
botocore.client.S3
A client object to AWS S3.
"""
if unsigned:
return boto3.client('s3', config=Config(signature_version=UNSIGNED))
else:
return boto3.client('s3')
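# Example (bucket name is a placeholder): an unsigned client can read from
# publicly readable buckets without any AWS credentials.
#
#   s3 = get_s3_client(unsigned=True)
#   resp = s3.get_object(Bucket='some-public-bucket', Key='some/key')
#   data = resp['Body'].read()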
if __name__ == '__main__':
parser = ArgumentParser(
'aws.py',
description=('Use some of INDRA\'s aws tools. For more specific help, '
                     'select one of the tasks with the `-h` option.')
)
subparsers = parser.add_subparsers(title='Task')
subparsers.required = True
subparsers.dest = 'task'
# Create parent parser classes for second layer of options
parent_run_parser = ArgumentParser(add_help=False)
parent_run_parser.add_argument(
'command',
help=('Enter the command as a single string to be run as if in a '
'batch environment.')
)
parent_run_parser.add_argument(
'--project', '-P',
default='aske',
help='Give a name for the project.'
)
parent_run_parser.add_argument(
'--purpose', '-p',
help='Give the task some meaning.'
)
parent_kill_parser = ArgumentParser(add_help=False)
parent_kill_parser.add_argument(
'queue_name',
help='Select the batch queue in which all jobs should be terminated.'
)
parent_kill_parser.add_argument(
'--reason', '-R',
help='Give a reason for killing all the jobs.'
)
    # Make the run_in_batch parser.
run_parser = subparsers.add_parser(
'run_in_batch',
parents=[parent_run_parser],
        description=('This should be called to run any command within an aws '
'batch job instance.'),
formatter_class=ArgumentDefaultsHelpFormatter
)
    # Make the kill_all parser.
kill_parser = subparsers.add_parser(
'kill_all',
parents=[parent_kill_parser],
description='Kill all the jobs running in a given queue.',
formatter_class=ArgumentDefaultsHelpFormatter
)
args = parser.parse_args()
if args.task == 'run_in_batch':
ret_code = run_in_batch(args.command.split(), args.project,
args.purpose)
if ret_code == 0:
logger.info('Job ended well.')
else:
logger.error('Job failed!')
import sys
sys.exit(ret_code)
elif args.task == 'kill_all':
kill_all(args.queue_name, args.reason)
|
|
import timeit
import unittest
import sys
import io
import time
from textwrap import dedent
from test.support import run_unittest
from test.support import captured_stdout
from test.support import captured_stderr
# timeit's default number of iterations.
DEFAULT_NUMBER = 1000000
# timeit's default number of repetitions.
DEFAULT_REPEAT = 3
# XXX: some tests are commented out that would improve the coverage but take a
# long time to run because they test the default number of loops, which is
# large. The tests could be enabled if there was a way to override the default
# number of loops during testing, but this would require changing the signature
# of some functions that use the default as a default argument.
class FakeTimer:
BASE_TIME = 42.0
def __init__(self, seconds_per_increment=1.0):
self.count = 0
self.setup_calls = 0
self.seconds_per_increment=seconds_per_increment
timeit._fake_timer = self
def __call__(self):
return self.BASE_TIME + self.count * self.seconds_per_increment
def inc(self):
self.count += 1
def setup(self):
self.setup_calls += 1
def wrap_timer(self, timer):
"""Records 'timer' and returns self as callable timer."""
self.saved_timer = timer
return self
class TestTimeit(unittest.TestCase):
def tearDown(self):
try:
del timeit._fake_timer
except AttributeError:
pass
def test_reindent_empty(self):
self.assertEqual(timeit.reindent("", 0), "")
self.assertEqual(timeit.reindent("", 4), "")
def test_reindent_single(self):
self.assertEqual(timeit.reindent("pass", 0), "pass")
self.assertEqual(timeit.reindent("pass", 4), "pass")
def test_reindent_multi_empty(self):
self.assertEqual(timeit.reindent("\n\n", 0), "\n\n")
self.assertEqual(timeit.reindent("\n\n", 4), "\n \n ")
def test_reindent_multi(self):
self.assertEqual(timeit.reindent(
"print()\npass\nbreak", 0),
"print()\npass\nbreak")
self.assertEqual(timeit.reindent(
"print()\npass\nbreak", 4),
"print()\n pass\n break")
def test_timer_invalid_stmt(self):
self.assertRaises(ValueError, timeit.Timer, stmt=None)
self.assertRaises(SyntaxError, timeit.Timer, stmt='return')
self.assertRaises(SyntaxError, timeit.Timer, stmt='yield')
self.assertRaises(SyntaxError, timeit.Timer, stmt='yield from ()')
self.assertRaises(SyntaxError, timeit.Timer, stmt='break')
self.assertRaises(SyntaxError, timeit.Timer, stmt='continue')
self.assertRaises(SyntaxError, timeit.Timer, stmt='from timeit import *')
def test_timer_invalid_setup(self):
self.assertRaises(ValueError, timeit.Timer, setup=None)
self.assertRaises(SyntaxError, timeit.Timer, setup='return')
self.assertRaises(SyntaxError, timeit.Timer, setup='yield')
self.assertRaises(SyntaxError, timeit.Timer, setup='yield from ()')
self.assertRaises(SyntaxError, timeit.Timer, setup='break')
self.assertRaises(SyntaxError, timeit.Timer, setup='continue')
self.assertRaises(SyntaxError, timeit.Timer, setup='from timeit import *')
fake_setup = "import timeit; timeit._fake_timer.setup()"
fake_stmt = "import timeit; timeit._fake_timer.inc()"
def fake_callable_setup(self):
self.fake_timer.setup()
def fake_callable_stmt(self):
self.fake_timer.inc()
def timeit(self, stmt, setup, number=None):
self.fake_timer = FakeTimer()
t = timeit.Timer(stmt=stmt, setup=setup, timer=self.fake_timer)
kwargs = {}
if number is None:
number = DEFAULT_NUMBER
else:
kwargs['number'] = number
delta_time = t.timeit(**kwargs)
self.assertEqual(self.fake_timer.setup_calls, 1)
self.assertEqual(self.fake_timer.count, number)
self.assertEqual(delta_time, number)
# Takes too long to run in debug build.
#def test_timeit_default_iters(self):
# self.timeit(self.fake_stmt, self.fake_setup)
def test_timeit_zero_iters(self):
self.timeit(self.fake_stmt, self.fake_setup, number=0)
def test_timeit_few_iters(self):
self.timeit(self.fake_stmt, self.fake_setup, number=3)
def test_timeit_callable_stmt(self):
self.timeit(self.fake_callable_stmt, self.fake_setup, number=3)
def test_timeit_callable_stmt_and_setup(self):
self.timeit(self.fake_callable_stmt,
self.fake_callable_setup, number=3)
# Takes too long to run in debug build.
#def test_timeit_function(self):
# delta_time = timeit.timeit(self.fake_stmt, self.fake_setup,
# timer=FakeTimer())
# self.assertEqual(delta_time, DEFAULT_NUMBER)
def test_timeit_function_zero_iters(self):
delta_time = timeit.timeit(self.fake_stmt, self.fake_setup, number=0,
timer=FakeTimer())
self.assertEqual(delta_time, 0)
def repeat(self, stmt, setup, repeat=None, number=None):
self.fake_timer = FakeTimer()
t = timeit.Timer(stmt=stmt, setup=setup, timer=self.fake_timer)
kwargs = {}
if repeat is None:
repeat = DEFAULT_REPEAT
else:
kwargs['repeat'] = repeat
if number is None:
number = DEFAULT_NUMBER
else:
kwargs['number'] = number
delta_times = t.repeat(**kwargs)
self.assertEqual(self.fake_timer.setup_calls, repeat)
self.assertEqual(self.fake_timer.count, repeat * number)
self.assertEqual(delta_times, repeat * [float(number)])
# Takes too long to run in debug build.
#def test_repeat_default(self):
# self.repeat(self.fake_stmt, self.fake_setup)
def test_repeat_zero_reps(self):
self.repeat(self.fake_stmt, self.fake_setup, repeat=0)
def test_repeat_zero_iters(self):
self.repeat(self.fake_stmt, self.fake_setup, number=0)
def test_repeat_few_reps_and_iters(self):
self.repeat(self.fake_stmt, self.fake_setup, repeat=3, number=5)
def test_repeat_callable_stmt(self):
self.repeat(self.fake_callable_stmt, self.fake_setup,
repeat=3, number=5)
def test_repeat_callable_stmt_and_setup(self):
self.repeat(self.fake_callable_stmt, self.fake_callable_setup,
repeat=3, number=5)
# Takes too long to run in debug build.
#def test_repeat_function(self):
# delta_times = timeit.repeat(self.fake_stmt, self.fake_setup,
# timer=FakeTimer())
# self.assertEqual(delta_times, DEFAULT_REPEAT * [float(DEFAULT_NUMBER)])
def test_repeat_function_zero_reps(self):
delta_times = timeit.repeat(self.fake_stmt, self.fake_setup, repeat=0,
timer=FakeTimer())
self.assertEqual(delta_times, [])
def test_repeat_function_zero_iters(self):
delta_times = timeit.repeat(self.fake_stmt, self.fake_setup, number=0,
timer=FakeTimer())
self.assertEqual(delta_times, DEFAULT_REPEAT * [0.0])
def assert_exc_string(self, exc_string, expected_exc_name):
exc_lines = exc_string.splitlines()
self.assertGreater(len(exc_lines), 2)
self.assertTrue(exc_lines[0].startswith('Traceback'))
self.assertTrue(exc_lines[-1].startswith(expected_exc_name))
def test_print_exc(self):
s = io.StringIO()
t = timeit.Timer("1/0")
try:
t.timeit()
except:
t.print_exc(s)
self.assert_exc_string(s.getvalue(), 'ZeroDivisionError')
MAIN_DEFAULT_OUTPUT = "10 loops, best of 3: 1 sec per loop\n"
def run_main(self, seconds_per_increment=1.0, switches=None, timer=None):
if timer is None:
timer = FakeTimer(seconds_per_increment=seconds_per_increment)
if switches is None:
args = []
else:
args = switches[:]
args.append(self.fake_stmt)
# timeit.main() modifies sys.path, so save and restore it.
orig_sys_path = sys.path[:]
with captured_stdout() as s:
timeit.main(args=args, _wrap_timer=timer.wrap_timer)
sys.path[:] = orig_sys_path[:]
return s.getvalue()
def test_main_bad_switch(self):
s = self.run_main(switches=['--bad-switch'])
self.assertEqual(s, dedent("""\
option --bad-switch not recognized
use -h/--help for command line help
"""))
def test_main_seconds(self):
s = self.run_main(seconds_per_increment=5.5)
self.assertEqual(s, "10 loops, best of 3: 5.5 sec per loop\n")
def test_main_milliseconds(self):
s = self.run_main(seconds_per_increment=0.0055)
self.assertEqual(s, "100 loops, best of 3: 5.5 msec per loop\n")
def test_main_microseconds(self):
s = self.run_main(seconds_per_increment=0.0000025, switches=['-n100'])
self.assertEqual(s, "100 loops, best of 3: 2.5 usec per loop\n")
def test_main_fixed_iters(self):
s = self.run_main(seconds_per_increment=2.0, switches=['-n35'])
self.assertEqual(s, "35 loops, best of 3: 2 sec per loop\n")
def test_main_setup(self):
s = self.run_main(seconds_per_increment=2.0,
switches=['-n35', '-s', 'print("CustomSetup")'])
self.assertEqual(s, "CustomSetup\n" * 3 +
"35 loops, best of 3: 2 sec per loop\n")
def test_main_fixed_reps(self):
s = self.run_main(seconds_per_increment=60.0, switches=['-r9'])
self.assertEqual(s, "10 loops, best of 9: 60 sec per loop\n")
def test_main_negative_reps(self):
s = self.run_main(seconds_per_increment=60.0, switches=['-r-5'])
self.assertEqual(s, "10 loops, best of 1: 60 sec per loop\n")
@unittest.skipIf(sys.flags.optimize >= 2, "need __doc__")
def test_main_help(self):
s = self.run_main(switches=['-h'])
# Note: It's not clear that the trailing space was intended as part of
# the help text, but since it's there, check for it.
self.assertEqual(s, timeit.__doc__ + ' ')
def test_main_using_time(self):
fake_timer = FakeTimer()
s = self.run_main(switches=['-t'], timer=fake_timer)
self.assertEqual(s, self.MAIN_DEFAULT_OUTPUT)
self.assertIs(fake_timer.saved_timer, time.time)
def test_main_using_clock(self):
fake_timer = FakeTimer()
s = self.run_main(switches=['-c'], timer=fake_timer)
self.assertEqual(s, self.MAIN_DEFAULT_OUTPUT)
self.assertIs(fake_timer.saved_timer, time.clock)
def test_main_verbose(self):
s = self.run_main(switches=['-v'])
self.assertEqual(s, dedent("""\
10 loops -> 10 secs
raw times: 10 10 10
10 loops, best of 3: 1 sec per loop
"""))
def test_main_very_verbose(self):
s = self.run_main(seconds_per_increment=0.000050, switches=['-vv'])
self.assertEqual(s, dedent("""\
10 loops -> 0.0005 secs
100 loops -> 0.005 secs
1000 loops -> 0.05 secs
10000 loops -> 0.5 secs
raw times: 0.5 0.5 0.5
10000 loops, best of 3: 50 usec per loop
"""))
def test_main_exception(self):
with captured_stderr() as error_stringio:
s = self.run_main(switches=['1/0'])
self.assert_exc_string(error_stringio.getvalue(), 'ZeroDivisionError')
def test_main_exception_fixed_reps(self):
with captured_stderr() as error_stringio:
s = self.run_main(switches=['-n1', '1/0'])
self.assert_exc_string(error_stringio.getvalue(), 'ZeroDivisionError')
def test_main():
run_unittest(TestTimeit)
if __name__ == '__main__':
test_main()
|
|
"""
cssselect2.tests
----------------
Test suite for cssselect2.
:copyright: (c) 2012 by Simon Sapin, 2017 by Guillaume Ayoub.
:license: BSD, see LICENSE for more details.
"""
import json
import os.path
import xml.etree.ElementTree as etree
import pytest
from cssselect2 import ElementWrapper, SelectorError, compile_selector_list
def resource(filename):
return os.path.join(os.path.dirname(__file__), filename)
def load_json(filename):
return json.load(open(resource(filename), encoding='utf-8'))
def get_test_document():
document = etree.parse(resource('content.xhtml'))
parent = next(e for e in document.iter() if e.get('id') == 'root')
# Setup namespace tests
for id in ('any-namespace', 'no-namespace'):
div = etree.SubElement(parent, '{http://www.w3.org/1999/xhtml}div')
div.set('id', id)
etree.SubElement(div, '{http://www.w3.org/1999/xhtml}div') \
.set('id', id + '-div1')
etree.SubElement(div, '{http://www.w3.org/1999/xhtml}div') \
.set('id', id + '-div2')
etree.SubElement(div, 'div').set('id', id + '-div3')
etree.SubElement(div, '{http://www.example.org/ns}div') \
.set('id', id + '-div4')
return document
TEST_DOCUMENT = get_test_document()
@pytest.mark.parametrize('test', load_json('invalid_selectors.json'))
def test_invalid_selectors(test):
if test.get('xfail'):
pytest.xfail()
try:
compile_selector_list(test['selector'])
except SelectorError:
pass
else:
raise AssertionError('Should be invalid: %(selector)r %(name)s' % test)
@pytest.mark.parametrize('test', load_json('valid_selectors.json'))
def test_valid_selectors(test):
if test.get('xfail'):
pytest.xfail()
exclude = test.get('exclude', ())
if 'document' in exclude or 'xhtml' in exclude:
return
root = ElementWrapper.from_xml_root(TEST_DOCUMENT)
result = [e.id for e in root.query_all(test['selector'])]
if result != test['expect']:
print(test['selector'])
print(result)
print('!=')
print(test['expect'])
raise AssertionError(test['name'])
def test_lang():
doc = etree.fromstring('''
<html xmlns="http://www.w3.org/1999/xhtml"></html>
''')
assert not ElementWrapper.from_xml_root(doc).matches(':lang(fr)')
doc = etree.fromstring('''
<html xmlns="http://www.w3.org/1999/xhtml">
<meta http-equiv="Content-Language" content=" fr \t"/>
</html>
''')
root = ElementWrapper.from_xml_root(doc, content_language='en')
assert root.matches(':lang(fr)')
doc = etree.fromstring('''
<html>
<meta http-equiv="Content-Language" content=" fr \t"/>
</html>
''')
root = ElementWrapper.from_xml_root(doc, content_language='en')
assert root.matches(':lang(en)')
doc = etree.fromstring('<html></html>')
root = ElementWrapper.from_xml_root(doc, content_language='en')
assert root.matches(':lang(en)')
root = ElementWrapper.from_xml_root(doc, content_language='en, es')
assert not root.matches(':lang(en)')
root = ElementWrapper.from_xml_root(doc)
assert not root.matches(':lang(en)')
doc = etree.fromstring('<html lang="eN"></html>')
root = ElementWrapper.from_html_root(doc)
assert root.matches(':lang(en)')
doc = etree.fromstring('<html lang="eN"></html>')
root = ElementWrapper.from_xml_root(doc)
assert not root.matches(':lang(en)')
def test_select():
root = etree.fromstring(HTML_IDS)
def select_ids(selector, html_only):
xml_ids = [element.etree_element.get('id', 'nil') for element in
ElementWrapper.from_xml_root(root).query_all(selector)]
html_ids = [element.etree_element.get('id', 'nil') for element in
ElementWrapper.from_html_root(root).query_all(selector)]
if html_only:
assert xml_ids == []
else:
assert xml_ids == html_ids
return html_ids
def pcss(main, *selectors, **kwargs):
html_only = kwargs.pop('html_only', False)
result = select_ids(main, html_only)
for selector in selectors:
assert select_ids(selector, html_only) == result
return result
all_ids = pcss('*')
assert all_ids[:6] == [
'html', 'nil', 'link-href', 'link-nohref', 'nil', 'outer-div']
assert all_ids[-1:] == ['foobar-span']
assert pcss('div') == ['outer-div', 'li-div', 'foobar-div']
assert pcss('DIV', html_only=True) == [
'outer-div', 'li-div', 'foobar-div'] # case-insensitive in HTML
assert pcss('div div') == ['li-div']
assert pcss('div, div div') == ['outer-div', 'li-div', 'foobar-div']
assert pcss('div , div div') == ['outer-div', 'li-div', 'foobar-div']
assert pcss('a[name]') == ['name-anchor']
assert pcss('a[NAme]', html_only=True) == [
'name-anchor'] # case-insensitive in HTML:
assert pcss('a[rel]') == ['tag-anchor', 'nofollow-anchor']
assert pcss('a[rel="tag"]') == ['tag-anchor']
assert pcss('a[href*="localhost"]') == ['tag-anchor']
assert pcss('a[href*=""]') == []
assert pcss('a[href^="http"]') == ['tag-anchor', 'nofollow-anchor']
assert pcss('a[href^="http:"]') == ['tag-anchor']
assert pcss('a[href^=""]') == []
assert pcss('a[href$="org"]') == ['nofollow-anchor']
assert pcss('a[href$=""]') == []
assert pcss('div[foobar~="bc"]', 'div[foobar~="cde"]') == [
'foobar-div']
assert pcss('[foobar~="ab bc"]',
'[foobar~=""]', '[foobar~=" \t"]') == []
assert pcss('div[foobar~="cd"]') == []
assert pcss('*[lang|="En"]', '[lang|="En-us"]') == ['second-li']
# Attribute values are case sensitive
assert pcss('*[lang|="en"]', '[lang|="en-US"]') == []
assert pcss('*[lang|="e"]') == []
# ... :lang() is not.
    assert pcss(
        ':lang(EN)', '*:lang(en-US)',
        ':lang(En)'
    ) == ['second-li', 'li-div']
assert pcss(':lang(e)' # , html_only=True
) == []
assert pcss('li:nth-child(3)') == ['third-li']
assert pcss('li:nth-child(10)') == []
assert pcss('li:nth-child(2n)', 'li:nth-child(even)',
'li:nth-child(2n+0)') == [
'second-li', 'fourth-li', 'sixth-li']
assert pcss('li:nth-child(+2n+1)', 'li:nth-child(odd)') == [
'first-li', 'third-li', 'fifth-li', 'seventh-li']
assert pcss('li:nth-child(2n+4)') == ['fourth-li', 'sixth-li']
assert pcss('li:nth-child(3n+1)') == [
'first-li', 'fourth-li', 'seventh-li']
assert pcss('li:nth-last-child(1)') == ['seventh-li']
assert pcss('li:nth-last-child(0)') == []
assert pcss('li:nth-last-child(2n+2)', 'li:nth-last-child(even)') == [
'second-li', 'fourth-li', 'sixth-li']
assert pcss('li:nth-last-child(2n+4)') == ['second-li', 'fourth-li']
assert pcss('ol:first-of-type') == ['first-ol']
assert pcss('ol:nth-child(1)') == []
assert pcss('ol:nth-of-type(2)') == ['second-ol']
assert pcss('ol:nth-last-of-type(2)') == ['first-ol']
assert pcss('span:only-child') == ['foobar-span']
assert pcss('div:only-child') == ['li-div']
assert pcss('div *:only-child') == ['li-div', 'foobar-span']
assert pcss('p *:only-of-type') == ['p-em', 'fieldset']
assert pcss('p:only-of-type') == ['paragraph']
assert pcss('a:empty', 'a:EMpty') == ['name-anchor']
assert pcss('li:empty') == [
'third-li', 'fourth-li', 'fifth-li', 'sixth-li']
assert pcss(':root', 'html:root') == ['html']
assert pcss('li:root', '* :root') == []
assert pcss('.a', '.b', '*.a', 'ol.a') == ['first-ol']
assert pcss('.c', '*.c') == ['first-ol', 'third-li', 'fourth-li']
assert pcss('ol *.c', 'ol li.c', 'li ~ li.c', 'ol > li.c') == [
'third-li', 'fourth-li']
assert pcss('#first-li', 'li#first-li', '*#first-li') == ['first-li']
assert pcss('li div', 'li > div', 'div div') == ['li-div']
assert pcss('div > div') == []
assert pcss('div>.c', 'div > .c') == ['first-ol']
assert pcss('div + div') == ['foobar-div']
assert pcss('a ~ a') == ['tag-anchor', 'nofollow-anchor']
assert pcss('a[rel="tag"] ~ a') == ['nofollow-anchor']
assert pcss('ol#first-ol li:last-child') == ['seventh-li']
assert pcss('ol#first-ol *:last-child') == ['li-div', 'seventh-li']
assert pcss('#outer-div:first-child') == ['outer-div']
assert pcss('#outer-div :first-child') == [
'name-anchor', 'first-li', 'li-div', 'p-b',
'checkbox-fieldset-disabled', 'area-href']
assert pcss('a[href]') == ['tag-anchor', 'nofollow-anchor']
assert pcss(':not(*)') == []
assert pcss('a:not([href])') == ['name-anchor']
assert pcss('ol :Not([class])') == [
'first-li', 'second-li', 'li-div',
'fifth-li', 'sixth-li', 'seventh-li']
# Invalid characters in XPath element names, should not crash
assert pcss(r'di\a0 v', r'div\[') == []
assert pcss(r'[h\a0 ref]', r'[h\]ref]') == []
assert pcss(':link') == [
'link-href', 'tag-anchor', 'nofollow-anchor', 'area-href']
assert pcss('HTML :link', html_only=True) == [
'link-href', 'tag-anchor', 'nofollow-anchor', 'area-href']
assert pcss(':visited') == []
assert pcss(':enabled') == [
'link-href', 'tag-anchor', 'nofollow-anchor',
'checkbox-unchecked', 'text-checked', 'input-hidden',
'checkbox-checked', 'area-href']
assert pcss(':disabled') == [
'checkbox-disabled', 'input-hidden-disabled',
'checkbox-disabled-checked', 'fieldset',
'checkbox-fieldset-disabled',
'hidden-fieldset-disabled']
assert pcss(':checked') == [
'checkbox-checked', 'checkbox-disabled-checked']
def test_select_shakespeare():
document = etree.fromstring(HTML_SHAKESPEARE)
body = document.find('.//{http://www.w3.org/1999/xhtml}body')
body = ElementWrapper.from_xml_root(body)
def count(selector):
return sum(1 for _ in body.query_all(selector))
# Data borrowed from http://mootools.net/slickspeed/
# # Changed from original; probably because I'm only
# # searching the body.
# assert count('*') == 252
assert count('*') == 246
# assert count('div:contains(CELIA)') == 26
assert count('div:only-child') == 22 # ?
assert count('div:nth-child(even)') == 106
assert count('div:nth-child(2n)') == 106
assert count('div:nth-child(odd)') == 137
assert count('div:nth-child(2n+1)') == 137
assert count('div:nth-child(n)') == 243
assert count('div:last-child') == 53
assert count('div:first-child') == 51
assert count('div > div') == 242
assert count('div + div') == 190
assert count('div ~ div') == 190
assert count('body') == 1
assert count('body div') == 243
assert count('div') == 243
assert count('div div') == 242
assert count('div div div') == 241
assert count('div, div, div') == 243
assert count('div, a, span') == 243
assert count('.dialog') == 51
assert count('div.dialog') == 51
assert count('div .dialog') == 51
assert count('div.character, div.dialog') == 99
assert count('div.direction.dialog') == 0
assert count('div.dialog.direction') == 0
assert count('div.dialog.scene') == 1
assert count('div.scene.scene') == 1
assert count('div.scene .scene') == 0
assert count('div.direction .dialog ') == 0
assert count('div .dialog .direction') == 4
assert count('div.dialog .dialog .direction') == 4
assert count('#speech5') == 1
assert count('div#speech5') == 1
assert count('div #speech5') == 1
assert count('div.scene div.dialog') == 49
assert count('div#scene1 div.dialog div') == 142
assert count('#scene1 #speech1') == 1
assert count('div[class]') == 103
assert count('div[class=dialog]') == 50
assert count('div[class^=dia]') == 51
assert count('div[class$=log]') == 50
assert count('div[class*=sce]') == 1
assert count('div[class|=dialog]') == 50 # ? Seems right
# assert count('div[class!=madeup]') == 243 # ? Seems right
assert count('div[class~=dialog]') == 51 # ? Seems right
HTML_IDS = open(resource('ids.html')).read()
HTML_SHAKESPEARE = open(resource('shakespeare.html')).read()
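# Minimal usage sketch of the API exercised above (assumption, for
# illustration only; the XML snippet is made up):
#   root = ElementWrapper.from_xml_root(
#       etree.fromstring('<section><p id="intro"/></section>'))
#   assert [e.id for e in root.query_all('section > p')] == ['intro']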
|
|
import numpy as np
import pytest
from scipy import sparse
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, RANSACRegressor, Ridge
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model._ransac import _dynamic_max_trials
from sklearn.exceptions import ConvergenceWarning
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert X.shape[0] == 2
assert y.shape[0] == 2
return False
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
y = rng.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0,
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert X.shape[0] == 2
assert y.shape[0] == 2
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0,
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
max_trials=0,
random_state=0,
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
    # there is a 1e-9 chance it will take this many trials. No good reason
    # 1e-2 isn't enough, can still happen
    # 2 is what ransac defines as min_samples = X.shape[1] + 1
max_trials = _dynamic_max_trials(len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2)
for i in range(50):
ransac_estimator.set_params(min_samples=2, random_state=i)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ < max_trials + 1
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
stop_n_inliers=2,
random_state=0,
)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
stop_score=0,
random_state=0,
)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100,))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=0.5, random_state=0
)
ransac_estimator.fit(X, y)
assert ransac_estimator.score(X[2:], y[2:]) == 1
assert ransac_estimator.score(X[:2], y[:2]) < 1
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100,))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=0.5, random_state=0
)
ransac_estimator.fit(X, y)
assert_array_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_residuals_threshold_no_inliers():
# When residual_threshold=nan there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=float("nan"),
random_state=0,
max_trials=5,
)
msg = "RANSAC could not find a valid consensus set"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 5
assert ransac_estimator.n_skips_invalid_data_ == 0
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_no_valid_data():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, is_data_valid=is_data_valid, max_trials=5
)
msg = "RANSAC could not find a valid consensus set"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 5
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_no_valid_model():
def is_model_valid(estimator, X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, is_model_valid=is_model_valid, max_trials=5
)
msg = "RANSAC could not find a valid consensus set"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 0
assert ransac_estimator.n_skips_invalid_model_ == 5
def test_ransac_exceed_max_skips():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, is_data_valid=is_data_valid, max_trials=5, max_skips=3
)
msg = "RANSAC skipped more iterations than `max_skips`"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 4
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_warn_exceed_max_skips():
global cause_skip
cause_skip = False
def is_data_valid(X, y):
global cause_skip
if not cause_skip:
cause_skip = True
return True
else:
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, is_data_valid=is_data_valid, max_skips=3, max_trials=5
)
warning_message = (
"RANSAC found a valid consensus set but exited "
"early due to skipping more iterations than "
"`max_skips`. See estimator attributes for "
"diagnostics."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 4
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_none_estimator = RANSACRegressor(
None, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(
ransac_estimator.predict(X), ransac_none_estimator.predict(X)
)
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator2 = RANSACRegressor(
base_estimator,
min_samples=2.0 / X.shape[0],
residual_threshold=5,
random_state=0,
)
ransac_estimator3 = RANSACRegressor(
base_estimator, min_samples=-1, residual_threshold=5, random_state=0
)
ransac_estimator4 = RANSACRegressor(
base_estimator, min_samples=5.2, residual_threshold=5, random_state=0
)
ransac_estimator5 = RANSACRegressor(
base_estimator, min_samples=2.0, residual_threshold=5, random_state=0
)
ransac_estimator6 = RANSACRegressor(
base_estimator, residual_threshold=5, random_state=0
)
ransac_estimator7 = RANSACRegressor(
base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0
)
# GH #19390
ransac_estimator8 = RANSACRegressor(
Ridge(), min_samples=None, residual_threshold=5, random_state=0
)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(
ransac_estimator1.predict(X), ransac_estimator2.predict(X)
)
assert_array_almost_equal(
ransac_estimator1.predict(X), ransac_estimator5.predict(X)
)
assert_array_almost_equal(
ransac_estimator1.predict(X), ransac_estimator6.predict(X)
)
with pytest.raises(ValueError):
ransac_estimator3.fit(X, y)
with pytest.raises(ValueError):
ransac_estimator4.fit(X, y)
with pytest.raises(ValueError):
ransac_estimator7.fit(X, y)
err_msg = "From version 1.2, `min_samples` needs to be explicitely set"
with pytest.warns(FutureWarning, match=err_msg):
ransac_estimator8.fit(X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_loss():
def loss_multi1(y_true, y_pred):
return np.sum(np.abs(y_true - y_pred), axis=1)
def loss_multi2(y_true, y_pred):
return np.sum((y_true - y_pred) ** 2, axis=1)
def loss_mono(y_true, y_pred):
return np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(
base_estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator1 = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
random_state=0,
loss=loss_multi1,
)
ransac_estimator2 = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
random_state=0,
loss=loss_multi2,
)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator1.predict(X)
)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator2.predict(X)
)
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator2.predict(X)
)
ransac_estimator3 = RANSACRegressor(
base_estimator,
min_samples=2,
residual_threshold=5,
random_state=0,
loss="squared_error",
)
ransac_estimator3.fit(X, y)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator2.predict(X)
)
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = 2
assert _dynamic_max_trials(100, 100, 2, 0.99) == 1
# e = 5%, min_samples = 2
assert _dynamic_max_trials(95, 100, 2, 0.99) == 2
# e = 10%, min_samples = 2
assert _dynamic_max_trials(90, 100, 2, 0.99) == 3
# e = 30%, min_samples = 2
assert _dynamic_max_trials(70, 100, 2, 0.99) == 7
# e = 50%, min_samples = 2
assert _dynamic_max_trials(50, 100, 2, 0.99) == 17
# e = 5%, min_samples = 8
assert _dynamic_max_trials(95, 100, 8, 0.99) == 5
# e = 10%, min_samples = 8
assert _dynamic_max_trials(90, 100, 8, 0.99) == 9
# e = 30%, min_samples = 8
assert _dynamic_max_trials(70, 100, 8, 0.99) == 78
# e = 50%, min_samples = 8
assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177
    # stop_probability edge cases (0 and 1), min_samples = 10
assert _dynamic_max_trials(1, 100, 10, 0) == 0
assert _dynamic_max_trials(1, 100, 10, 1) == float("inf")
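    # Sketch of the underlying formula (standard RANSAC theory, not quoted from
    # the sklearn source): with inlier ratio w = n_inliers / n_samples and
    # sample size m, the number of trials needed to draw at least one
    # all-inlier subset with probability p is
    #   N = log(1 - p) / log(1 - w**m)
    # e.g. w = 0.5, m = 2, p = 0.99 gives log(0.01) / log(0.75) ~= 16.01,
    # which rounds up to the 17 asserted above.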
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, stop_probability=-0.1
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
ransac_estimator = RANSACRegressor(
base_estimator, min_samples=2, stop_probability=1.1
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert ransac_estimator.inlier_mask_.shape[0] == n_samples
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(
np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0),
axis=0,
)
y_flat = np.ndarray.flatten(
np.append(
np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0,
)
)
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_)
    # check that an error is raised if base_estimator.fit does not
    # support sample_weight
base_estimator = OrthogonalMatchingPursuit()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=10)
err_msg = f"{base_estimator.__class__.__name__} does not support sample_weight."
with pytest.raises(ValueError, match=err_msg):
ransac_estimator.fit(X, y, weights)
def test_ransac_final_model_fit_sample_weight():
X, y = make_regression(n_samples=1000, random_state=10)
rng = check_random_state(42)
sample_weight = rng.randint(1, 4, size=y.shape[0])
sample_weight = sample_weight / sample_weight.sum()
ransac = RANSACRegressor(base_estimator=LinearRegression(), random_state=0)
ransac.fit(X, y, sample_weight=sample_weight)
final_model = LinearRegression()
mask_samples = ransac.inlier_mask_
final_model.fit(
X[mask_samples], y[mask_samples], sample_weight=sample_weight[mask_samples]
)
assert_allclose(ransac.estimator_.coef_, final_model.coef_, atol=1e-12)
def test_perfect_horizontal_line():
"""Check that we can fit a line where all samples are inliers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19497
"""
X = np.arange(100)[:, None]
y = np.zeros((100,))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, random_state=0)
ransac_estimator.fit(X, y)
assert_allclose(ransac_estimator.estimator_.coef_, 0.0)
assert_allclose(ransac_estimator.estimator_.intercept_, 0.0)
# TODO: Remove in v1.2
@pytest.mark.parametrize(
"old_loss, new_loss",
[
("absolute_loss", "squared_error"),
("squared_loss", "absolute_error"),
],
)
def test_loss_deprecated(old_loss, new_loss):
est1 = RANSACRegressor(loss=old_loss, random_state=0)
with pytest.warns(FutureWarning, match=f"The loss '{old_loss}' was deprecated"):
est1.fit(X, y)
est2 = RANSACRegressor(loss=new_loss, random_state=0)
est2.fit(X, y)
assert_allclose(est1.predict(X), est2.predict(X))
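# A minimal usage sketch (not part of the test suite; `X_demo`/`y_demo` are illustrative
# names). It shows the estimator attributes the tests above rely on:
#
#     import numpy as np
#     from sklearn.linear_model import RANSACRegressor
#     rng = np.random.RandomState(0)
#     X_demo = rng.normal(size=(100, 1))
#     y_demo = 3.0 * X_demo.ravel() + rng.normal(scale=0.1, size=100)
#     reg = RANSACRegressor(random_state=0).fit(X_demo, y_demo, sample_weight=np.ones(100))
#     reg.inlier_mask_        # boolean mask of the inliers found by RANSAC
#     reg.estimator_.coef_    # coefficients of the final model fit on those inliers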
|
|
from __future__ import absolute_import, unicode_literals
from stravalib import model, attributes, exc, unithelper as uh
from stravalib.client import Client
from stravalib.tests.functional import FunctionalTestBase
import datetime
import requests
class ClientTest(FunctionalTestBase):
def test_get_starred_segment(self):
"""
Test get_starred_segment
"""
i = 0
for segment in self.client.get_starred_segment(limit=5):
self.assertIsInstance(segment, model.Segment)
i+=1
self.assertGreater(i, 0) # star at least one segment
self.assertLessEqual(i, 5)
def test_get_activity(self):
""" Test basic activity fetching. """
activity = self.client.get_activity(96089609)
self.assertEquals('El Dorado County, CA, USA', activity.location_city)
self.assertIsInstance(activity.start_latlng, attributes.LatLon)
self.assertAlmostEquals(-120.4357631, activity.start_latlng.lon, places=2)
self.assertAlmostEquals(38.74263759999999, activity.start_latlng.lat, places=2)
self.assertIsInstance(activity.map, model.Map)
self.assertIsInstance(activity.athlete, model.Athlete)
self.assertEquals(1513, activity.athlete.id)
#self.assertAlmostEqual(first, second, places, msg, delta)
# Ensure that it was read in with correct units
self.assertEquals(22.5308, float(uh.kilometers(activity.distance)))
def test_get_activity_and_segments(self):
""" Test include_all_efforts parameter on activity fetching. """
if not self.activity_id:
self.fail("Include an activity_id in test.ini to test segment_efforts")
activity = self.client.get_activity(self.activity_id, include_all_efforts=True)
self.assertTrue(isinstance(activity.segment_efforts, list))
# Check also when we have no parameters segment_efforts is None
activity_no_segments = self.client.get_activity(self.activity_id)
self.assertIsNone(activity_no_segments.segment_efforts)
def test_get_activity_laps(self):
activity = self.client.get_activity(165094211)
laps = list(self.client.get_activity_laps(165094211))
self.assertEquals(5, len(laps))
# This obviously is far from comprehensive, just a sanity check
self.assertEquals(u'Lap 1', laps[0].name)
self.assertEquals(178.0, laps[0].max_heartrate)
def test_get_activity_zones(self):
"""
Test loading zones for activity.
"""
zones = self.client.get_activity_zones(99895560)
print(zones)
self.assertEquals(1, len(zones))
self.assertIsInstance(zones[0], model.PaceActivityZone)
# Indirectly
activity = self.client.get_activity(99895560)
self.assertEquals(len(zones), len(activity.zones))
self.assertEquals(zones[0].score, activity.zones[0].score)
def test_activity_comments(self):
"""
Test loading comments for already-loaded activity.
"""
activity = self.client.get_activity(2290897)
self.assertTrue(activity.comment_count > 0)
comments = list(activity.comments)
self.assertEquals(3, len(comments))
self.assertEquals("I love Gordo's. I've been eating there for 20 years!", comments[0].text)
def test_activity_photos(self):
"""
Test photos on activity
"""
activity = self.client.get_activity(152668627)
self.assertTrue(activity.total_photo_count > 0)
photos = list(activity.full_photos)
self.assertEqual(len(photos), 1)
self.assertEqual(len(photos), activity.total_photo_count)
self.assertIsInstance(photos[0], model.ActivityPhoto)
def test_activity_kudos(self):
"""
Test kudos on activity
"""
activity = self.client.get_activity(152668627)
self.assertTrue(activity.kudos_count > 0)
kudos = list(activity.kudos)
self.assertGreater(len(kudos), 6)
self.assertEqual(len(kudos), activity.kudos_count)
self.assertIsInstance(kudos[0], model.ActivityKudos)
def test_activity_streams(self):
"""
Test activity streams
"""
stypes = ['time', 'latlng', 'distance','altitude', 'velocity_smooth',
'heartrate', 'cadence', 'watts', 'temp', 'moving',
'grade_smooth']
streams = self.client.get_activity_streams(152668627, stypes, 'low')
self.assertGreater(len(streams.keys()), 3)
for k in streams.keys():
self.assertIn(k, stypes)
# time stream
self.assertIsInstance(streams['time'].data[0], int)
self.assertGreater(streams['time'].original_size, 100)
self.assertEqual(streams['time'].resolution, 'low')
self.assertEqual(len(streams['time'].data), 100)
# latlng stream
self.assertIsInstance(streams['latlng'].data, list)
self.assertIsInstance(streams['latlng'].data[0][0], float)
def test_related_activities(self):
"""
Test get_related_activities on an activity and related property of Activity
"""
activity_id = 152668627
activity = self.client.get_activity(activity_id)
related_activities = list(self.client.get_related_activities(activity_id))
# Check the number of related_activities matches what activity would expect
self.assertEqual(len(related_activities), activity.athlete_count-1)
# Check the related property gives the same result
related_activities_from_property = list(activity.related)
self.assertEqual(related_activities, related_activities_from_property)
def test_effort_streams(self):
"""
Test effort streams
"""
stypes = ['distance']
activity = self.client.get_activity(165479860) #152668627)
streams = self.client.get_effort_streams(activity.segment_efforts[0].id,
stypes, 'medium')
self.assertIn('distance', streams.keys())
# distance stream
self.assertIsInstance(streams['distance'].data[0], float) #xxx
self.assertEqual(streams['distance'].resolution, 'medium')
self.assertEqual(len(streams['distance'].data),
min(1000, streams['distance'].original_size))
def test_get_curr_athlete(self):
athlete = self.client.get_athlete()
# Just some basic sanity checks here
self.assertTrue(len(athlete.firstname) > 0)
self.assertTrue(athlete.athlete_type in ["runner", "cyclist"])
def test_get_athlete_clubs(self):
clubs = self.client.get_athlete_clubs()
self.assertEquals(3, len(clubs))
self.assertEquals('Team Roaring Mouse', clubs[0].name)
self.assertEquals('Team Strava Cycling', clubs[1].name)
self.assertEquals('Team Strava Cyclocross', clubs[2].name)
clubs_indirect = self.client.get_athlete().clubs
self.assertEquals(3, len(clubs_indirect))
self.assertEquals(clubs[0].name, clubs_indirect[0].name)
self.assertEquals(clubs[1].name, clubs_indirect[1].name)
self.assertEquals(clubs[2].name, clubs_indirect[2].name)
def test_get_gear(self):
g = self.client.get_gear("g69911")
self.assertTrue(float(g.distance) >= 3264.67)
self.assertEquals('Salomon XT Wings 2', g.name)
self.assertEquals('Salomon', g.brand_name)
self.assertTrue(g.primary)
self.assertEquals(model.DETAILED, g.resource_state)
self.assertEquals('g69911', g.id)
self.assertEquals('XT Wings 2', g.model_name)
self.assertEquals('', g.description)
def test_get_segment_leaderboard(self):
lb = self.client.get_segment_leaderboard(229781)
print(lb.effort_count)
print(lb.entry_count)
for i,e in enumerate(lb):
print('{0}: {1}'.format(i, e))
self.assertEquals(10, len(lb.entries)) # 10 top results
self.assertIsInstance(lb.entries[0], model.SegmentLeaderboardEntry)
self.assertEquals(1, lb.entries[0].rank)
self.assertTrue(lb.effort_count > 8000) # At time of writing 8206
# Check the relationships
athlete = lb[0].athlete
print(athlete)
self.assertEquals(lb[0].athlete_name, "{0} {1}".format(athlete.firstname, athlete.lastname))
effort = lb[0].effort
print(effort)
self.assertIsInstance(effort, model.SegmentEffort)
self.assertEquals('Hawk Hill', effort.name)
activity = lb[0].activity
self.assertIsInstance(activity, model.Activity)
# Can't assert much since #1 ranked activity will likely change in the future.
def test_get_segment(self):
segment = self.client.get_segment(229781)
self.assertIsInstance(segment, model.Segment)
print(segment)
self.assertEquals('Hawk Hill', segment.name)
self.assertAlmostEqual(2.68, float(uh.kilometers(segment.distance)), places=2)
# Fetch leaderboard
lb = segment.leaderboard
self.assertEquals(10, len(lb)) # 10 top results, 5 bottom results
def test_get_segment_efforts(self):
# test with string
efforts = self.client.get_segment_efforts(4357415,
start_date_local = "2012-12-23T00:00:00Z",
end_date_local = "2012-12-23T11:00:00Z",)
print(efforts)
i = 0
for effort in efforts:
print(effort)
self.assertEqual(4357415, effort.segment.id)
self.assertIsInstance(effort, model.BaseEffort)
effort_date = effort.start_date_local
self.assertEqual(effort_date.strftime("%Y-%m-%d"), "2012-12-23")
i+=1
print(i)
self.assertGreater(i, 2)
# also test with datetime object
start_date = datetime.datetime(2012, 12, 31, 6, 0)
end_date = start_date + datetime.timedelta(hours=12)
efforts = self.client.get_segment_efforts(4357415,
start_date_local = start_date,
end_date_local = end_date,)
print(efforts)
i = 0
for effort in efforts:
print(effort)
self.assertEqual(4357415, effort.segment.id)
self.assertIsInstance(effort, model.BaseEffort)
effort_date = effort.start_date_local
self.assertEqual(effort_date.strftime("%Y-%m-%d"), "2012-12-31")
i+=1
print(i)
self.assertGreater(i, 2)
def test_segment_explorer(self):
bounds = (37.821362,-122.505373,37.842038,-122.465977)
results = self.client.explore_segments(bounds)
# This might be brittle
self.assertEquals('Hawk Hill', results[0].name)
# Fetch full segment
segment = results[0].segment
self.assertEquals(results[0].name, segment.name)
# For some reason these don't follow the simple math rules one might expect (so we round to int)
self.assertAlmostEqual(results[0].elev_difference, segment.elevation_high - segment.elevation_low, places=0)
class AuthenticatedAthleteTest(FunctionalTestBase):
"""
Tests the function is_authenticated_athlete in model.Athlete
"""
def test_caching(self):
a = model.Athlete()
a._is_authenticated = "Not None"
self.assertEqual(a.is_authenticated_athlete(), "Not None")
def test_correct_athlete_returns_true(self):
a = self.client.get_athlete()
self.assertTrue(a.is_authenticated_athlete())
def test_detailed_resource_state_means_true(self):
a = model.Athlete()
a.resource_state = attributes.DETAILED
self.assertTrue(a.is_authenticated_athlete())
def test_correct_athlete_not_detailed_returns_true(self):
a = self.client.get_athlete()
a.resource_state = attributes.SUMMARY
# Now will have to do a look up for the authenticated athlete and check the ids match
self.assertTrue(a.is_authenticated_athlete())
def test_not_authenticated_athlete_is_false(self):
CAV_ID = 1353775
a = self.client.get_athlete(CAV_ID)
self.assertEqual(a.resource_state, attributes.SUMMARY)
self.assertFalse(a.is_authenticated_athlete())
class AthleteStatsTest(FunctionalTestBase):
"""
Tests the functionality for collecting athlete statistics
http://strava.github.io/api/v3/athlete/#stats
"""
def test_basic_get_from_client(self):
stats = self.client.get_athlete_stats()
self.assertIsInstance(stats, model.AthleteStats)
self.assertIsInstance(stats.recent_ride_totals, model.ActivityTotals)
# Check biggest_climb_elevation_gain has been set
self.assertTrue(uh.meters(stats.biggest_climb_elevation_gain) >= uh.meters(0))
def test_get_from_client_with_authenticated_id(self):
athlete_id = self.client.get_athlete().id
stats = self.client.get_athlete_stats(athlete_id)
self.assertIsInstance(stats, model.AthleteStats)
# Check same as before
self.assertEqual(stats.biggest_climb_elevation_gain, self.client.get_athlete_stats().biggest_climb_elevation_gain)
def test_get_from_client_with_wrong_id(self):
CAV_ID = 1353775
# Currently raises a requests.exceptions.HTTPError, TODO: better error handling
self.assertRaises(requests.exceptions.HTTPError, self.client.get_athlete_stats, CAV_ID)
def test_athlete_stats_property_option(self):
a = self.client.get_athlete()
stats = a.stats
self.assertIsInstance(stats, model.AthleteStats)
def test_athlete_stats_cached(self):
a = self.client.get_athlete()
a._stats = "Not None"
stats = a.stats
self.assertEqual(stats, "Not None")
def test_athlete_property_not_authenticated(self):
cav = self.client.get_athlete(1353775)
with self.assertRaises(exc.NotAuthenticatedAthlete):
cav.stats
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Resource expression lexer.
This class is used to parse resource keys, quoted tokens, and operator strings
and characters from resource filter and projection expression strings. Tokens
are defined by isspace() and caller specified per-token terminator characters.
" or ' quotes are supported, with these literal escapes: \\ => \, \' => ',
\" => ", and \<any-other-character> => \<any-other-character>.
Typical resource usage:
# Initialize a lexer with the expression string.
lex = resource_lex.Lexer(expression_string)
# isspace() separated tokens. lex.SkipSpace() returns False at end of input.
while lex.SkipSpace():
# Save the expression string position for syntax error annotation.
here = lex.GetPosition()
# The next token must be a key.
key = lex.Key()
if not key:
if lex.EndOfInput():
# End of input is OK here.
break
# There were some characters in the input that did not form a valid key.
raise resource_exceptions.ExpressionSyntaxError(
'key expected [{0}].'.format(lex.Annotate(here)))
# Check if the key is a function call.
if lex.IsCharacter('('):
# Collect the actual args and convert numeric args to float or int.
args = lex.Args(convert=True)
else:
args = None
# Skip any isspace() characters. End of input will fail with an
# 'Operator expected [...]' resource_exceptions.ExpressionSyntaxError.
lex.SkipSpace(token='Operator')
# The next token must be one of these operators ...
operator = lex.IsCharacter('+-*/&|')
if not operator:
# ... one of the operator names.
if lex.IsString('AND'):
operator = '&'
elif lex.IsString('OR'):
operator = '|'
else:
raise resource_exceptions.ExpressionSyntaxError(
'Operator expected [{0}].'.format(lex.Annotate()))
# The next token must be an operand. Convert to float or int if possible.
# lex.Token() by default eats leading isspace().
operand = lex.Token(convert=True)
if not operand:
raise resource_exceptions.ExpressionSyntaxError(
'Operand expected [{0}].'.format(lex.Annotate()))
# Process the key, args, operator and operand.
Process(key, args, operator, operand)
"""
import copy
import re
from googlecloudsdk.core.resource import resource_exceptions
from googlecloudsdk.core.resource import resource_projection_spec
from googlecloudsdk.core.resource import resource_property
from googlecloudsdk.core.resource import resource_transform
# Resource keys cannot contain unquoted operator characters.
OPERATOR_CHARS = ':=!<>~()'
# Reserved operator characters. Resource keys cannot contain unquoted reserved
# operator characters. This prevents key/operator clashes in expressions.
_RESERVED_OPERATOR_CHARS = OPERATOR_CHARS + '[].{},+*/%&|^@#;?'
class _TransformCall(object):
"""A key transform function call with actual args.
Attributes:
name: The transform function name.
func: The transform function.
active: The parent projection active level. A transform is active if
transform.active is None or equal to the caller active level.
map_transform: If r is a list then apply the transform to each list item
up to map_transform times. map_transform>1 handles nested lists.
args: List of function call actual arg strings.
kwargs: List of function call actual keyword arg strings.
"""
def __init__(self, name, func, active=0, map_transform=0, args=None,
kwargs=None):
self.name = name
self.func = func
self.active = active
self.map_transform = map_transform
self.args = args or []
self.kwargs = kwargs or {}
def __str__(self):
args = ['<projection>' if isinstance(
arg, resource_projection_spec.ProjectionSpec) else arg
for arg in self.args]
if self.map_transform > 1:
prefix = 'map({0}).'.format(self.map_transform)
elif self.map_transform == 1:
prefix = 'map().'
else:
prefix = ''
return '{0}{1}({2})'.format(prefix, self.name, ','.join(args))
def __deepcopy__(self, memo):
# This avoids recursive ProjectionSpec transforms that deepcopy chokes on.
return copy.copy(self)
class _Transform(object):
"""An object that contains an ordered list of _TransformCall objects.
Attributes:
_conditional: The resource_filter expression string for the if() transform.
_transforms: The list of _TransformCall objects.
"""
def __init__(self):
self._conditional = None
self._transforms = []
def __str__(self):
return '[{0}]'.format('.'.join(map(str, self._transforms)))
@property
def active(self):
"""The transform active level or None if always active."""
return self._transforms[0].active if self._transforms else None
@property
def conditional(self):
"""The if() transform conditional expression string."""
return self._conditional
@property
def name(self):
"""The name of the last transform."""
return self._transforms[-1].name if self._transforms else ''
def IsActive(self, active):
"""Returns True if the Transform active level is None or active."""
return self._transforms and self.active in (None, active)
def Add(self, transform):
"""Adds a transform to the list."""
self._transforms.append(transform)
def SetConditional(self, expr):
"""Sets the conditional expression string."""
self._conditional = expr
def Evaluate(self, obj):
"""Apply the list of transforms to obj and return the transformed value."""
for transform in self._transforms:
if transform.map_transform and resource_property.IsListLike(obj):
# A transform mapped on a list - transform each list item.
# map_transform > 1 for nested lists. For example:
# abc[].def[].ghi[].map(3)
# iterates over the items in ghi[] for all abc[] and def[].
items = obj
for _ in range(transform.map_transform - 1):
nested = []
try:
# Stop if items is not a list.
for item in items:
nested.extend(item)
except TypeError:
break
items = nested
obj = []
for item in items:
obj.append(transform.func(item, *transform.args, **transform.kwargs))
elif obj or not transform.map_transform:
obj = transform.func(obj, *transform.args, **transform.kwargs)
return obj
def MakeTransform(func_name, func, args=None, kwargs=None):
"""Returns a transform call object for func(*args, **kwargs).
Args:
func_name: The function name.
func: The function object.
args: The actual call args.
kwargs: The actual call kwargs.
Returns:
A transform call object for func(obj, *args, **kwargs).
"""
calls = _Transform()
calls.Add(_TransformCall(func_name, func, args=args, kwargs=kwargs))
return calls
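# A minimal sketch (illustrative only, not part of the module): wrapping a plain
# callable with MakeTransform() and applying it through _Transform.Evaluate().
#
#     upper = MakeTransform('upper', lambda r: str(r).upper())
#     upper.Evaluate('abc')   # => 'ABC'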
class Lexer(object):
"""Resource expression lexer.
This lexer handles simple and compound tokens. Compound tokens returned by
Key() and Args() below are not strictly lexical items (i.e., they are parsed
against simple grammars), but treating them as tokens here simplifies the
resource expression parsers that use this class and avoids code replication.
Attributes:
_ESCAPE: The quote escape character.
_QUOTES: The quote characters.
_defaults: ProjectionSpec object for aliases and symbols defaults.
_expr: The expression string.
_position: The index of the next character in _expr to parse.
"""
_ESCAPE = '\\'
_QUOTES = '\'"'
def __init__(self, expression, defaults=None):
"""Initializes a resource lexer.
Args:
expression: The expression string.
defaults: ProjectionSpec object for aliases and symbols defaults.
"""
self._expr = expression or ''
self._position = 0
self._defaults = defaults or resource_projection_spec.ProjectionSpec()
def EndOfInput(self, position=None):
"""Checks if the current expression string position is at the end of input.
Args:
position: Checks position instead of the current expression position.
Returns:
True if the expression string position is at the end of input.
"""
if position is None:
position = self._position
return position >= len(self._expr)
def GetPosition(self):
"""Returns the current expression position.
Returns:
The current expression position.
"""
return self._position
def SetPosition(self, position):
"""Sets the current expression position.
Args:
position: Sets the current position to position. Position should be 0 or a
previous value returned by GetPosition().
"""
self._position = position
def Annotate(self, position=None):
"""Returns the expression string annotated for syntax error messages.
The current position is marked by '*HERE*' for visual effect.
Args:
position: Uses position instead of the current expression position.
Returns:
The expression string with current position annotated.
"""
here = position if position is not None else self._position
cursor = '*HERE*' # For visual effect only.
if here > 0 and not self._expr[here - 1].isspace():
cursor = ' ' + cursor
if here < len(self._expr) and not self._expr[here].isspace():
cursor += ' '
return '{0}{1}{2}'.format(self._expr[0:here], cursor, self._expr[here:])
def SkipSpace(self, token=None, terminators=''):
"""Skips spaces in the expression string.
Args:
token: The expected next token description string, None if end of input is
OK. This string is used in the exception message. It is not used to
validate the type of the next token.
terminators: Space characters in this string will not be skipped.
Raises:
ExpressionSyntaxError: End of input reached after skipping and a token is
expected.
Returns:
True if the expression is not at end of input.
"""
while not self.EndOfInput():
c = self._expr[self._position]
if not c.isspace() or c in terminators:
return True
self._position += 1
if token:
raise resource_exceptions.ExpressionSyntaxError(
'{0} expected [{1}].'.format(token, self.Annotate()))
return False
def IsCharacter(self, characters, peek=False, eoi_ok=False):
"""Checks if the next character is in characters and consumes it if it is.
Args:
characters: A set of characters to check for. It may be a string, tuple,
list or set.
peek: Does not consume a matching character if True.
eoi_ok: True if end of input is OK. Returns None if at end of input.
Raises:
ExpressionSyntaxError: End of input reached and peek and eoi_ok are False.
Returns:
The matching character or None if no match.
"""
if self.EndOfInput():
if peek or eoi_ok:
return None
raise resource_exceptions.ExpressionSyntaxError(
'More tokens expected [{0}].'.format(self.Annotate()))
c = self._expr[self._position]
if c not in characters:
return None
if not peek:
self._position += 1
return c
def IsString(self, name, peek=False):
"""Skips leading space and checks if the next token is name.
One of space, '(', or end of input terminates the next token.
Args:
name: The token name to check.
peek: Does not consume the string on match if True.
Returns:
True if the next space or ( separated token is name.
"""
if not self.SkipSpace():
return False
i = self.GetPosition()
if not self._expr[i:].startswith(name):
return False
i += len(name)
if self.EndOfInput(i) or self._expr[i].isspace() or self._expr[i] == '(':
if not peek:
self.SetPosition(i)
return True
return False
def Token(self, terminators='', balance_parens=False, space=True,
convert=False):
"""Parses a possibly quoted token from the current expression position.
The quote characters are in _QUOTES. The _ESCAPE character can prefix
an _ESCAPE or _QUOTE character to treat it as a normal character. If
_ESCAPE is at end of input, or is followed by any other character, then it
is treated as a normal character.
Quotes may be adjacent ("foo"" & ""bar" => "foo & bar") and they may appear
mid token (foo" & "bar => "foo & bar").
Args:
terminators: A set of characters that terminate the token. isspace()
characters always terminate the token. It may be a string, tuple, list
or set. Terminator characters are not consumed.
balance_parens: True if (...) must be balanced.
space: True if space characters should be skipped after the token. Space
characters are always skipped before the token.
convert: Converts unquoted numeric string tokens to numbers if True.
Raises:
ExpressionSyntaxError: The expression has a syntax error.
Returns:
None if there is no token, the token string if convert is False or the
token is quoted, otherwise the converted float / int / string value of
the token.
"""
quote = None # The current quote character, None if not in quote.
quoted = False # True if the token is constructed from quoted parts.
token = None # The token char list, None for no token, [] for empty token.
paren_count = 0
i = self.GetPosition()
while not self.EndOfInput(i):
c = self._expr[i]
if c == self._ESCAPE and not self.EndOfInput(i + 1):
# Only _ESCAPE, the current quote or _QUOTES are escaped.
c = self._expr[i + 1]
if token is None:
token = []
if (c != self._ESCAPE and c != quote and
(quote or c not in self._QUOTES)):
token.append(self._ESCAPE)
token.append(c)
i += 1
elif c == quote:
# The end of the current quote.
quote = None
elif not quote and c in self._QUOTES:
# The start of a new quote.
quote = c
quoted = True
if token is None:
token = []
elif not quote and c.isspace() and token is None:
pass
elif not quote and balance_parens and c in '()':
if c == '(':
paren_count += 1
else:
if c in terminators and not paren_count:
break
paren_count -= 1
# Append c to the token string.
if token is None:
token = []
token.append(c)
elif not quote and not paren_count and c in terminators:
# Only unquoted terminators terminate the token.
break
elif quote or not c.isspace() or token is not None and balance_parens:
# Append c to the token string.
if token is None:
token = []
token.append(c)
elif token is not None:
# A space after any token characters is a terminator.
break
i += 1
if quote:
raise resource_exceptions.ExpressionSyntaxError(
'Unterminated [{0}] quote [{1}].'.format(quote, self.Annotate()))
self.SetPosition(i)
if space:
self.SkipSpace(terminators=terminators)
if token is not None:
# Convert the list of token chars to a string.
token = ''.join(token)
if convert and token and not quoted:
# Only unquoted tokens are converted.
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
pass
return token
def Args(self, convert=False, separators=','):
"""Parses a separators-separated, )-terminated arg list.
The initial '(' has already been consumed by the caller. The arg list may
be empty. Otherwise the first ',' must be preceded by a non-empty argument,
and every ',' must be followed by a non-empty argument.
Args:
convert: Converts unquoted numeric string args to numbers if True.
separators: A string of argument separator characters.
Raises:
ExpressionSyntaxError: The expression has a syntax error.
Returns:
[...]: The arg list.
"""
required = False # True if there must be another argument token.
args = []
terminators = separators + ')' # The closing ')' also terminates an arg.
while True:
here = self.GetPosition()
arg = self.Token(terminators, balance_parens=True, convert=convert)
end = self.IsCharacter(')')
if end:
sep = end
else:
sep = self.IsCharacter(separators, eoi_ok=True)
if not sep:
# This branch "cannot happen". End of input, separators and
# terminators have already been handled. Retained to guard against
# future ingenuity.
here = self.GetPosition()
raise resource_exceptions.ExpressionSyntaxError(
'Closing ) expected in argument list [{0}].'.format(
self.Annotate(here)))
if arg is not None:
# No empty args with space separators.
if arg or not sep.isspace():
args.append(arg)
elif required or not end:
raise resource_exceptions.ExpressionSyntaxError(
'Argument expected [{0}].'.format(self.Annotate(here)))
if end:
break
required = not sep.isspace()
return args
def Key(self):
"""Parses a resource key from the expression.
A resource key is a '.' separated list of names with optional [] slice or
[NUMBER] array indices. A parsed key is encoded as an ordered list of
tokens, where each token may be:
KEY VALUE PARSED VALUE DESCRIPTION
--------- ------------ -----------
name string A dotted name list element.
[NUMBER] NUMBER An array index.
[] None An array slice.
For example, the key 'abc.def[123].ghi[].jkl' parses to this encoded list:
['abc', 'def', 123, 'ghi', None, 'jkl']
Raises:
ExpressionKeyError: The expression has a key syntax error.
Returns:
The parsed key which is a list of string, int and/or None elements.
"""
key = []
while not self.EndOfInput():
here = self.GetPosition()
name = self.Token(_RESERVED_OPERATOR_CHARS, space=False)
if name:
is_not_function = not self.IsCharacter('(', peek=True, eoi_ok=True)
if not key and is_not_function and name in self._defaults.aliases:
key.extend(self._defaults.aliases[name])
else:
key.append(name)
elif not self.IsCharacter('[', peek=True):
# A single . is a valid key that names the top level resource.
if (not key and
self.IsCharacter('.') and
not self.IsCharacter('.', peek=True, eoi_ok=True) and (
self.EndOfInput() or self.IsCharacter(
_RESERVED_OPERATOR_CHARS, peek=True, eoi_ok=True))):
break
raise resource_exceptions.ExpressionSyntaxError(
'Non-empty key name expected [{0}].'.format(self.Annotate(here)))
if self.EndOfInput():
break
if self.IsCharacter(']'):
raise resource_exceptions.ExpressionSyntaxError(
'Unmatched ] in key [{0}].'.format(self.Annotate(here)))
while self.IsCharacter('[', eoi_ok=True):
# [] slice or [NUMBER] array index.
index = self.Token(']', convert=True)
self.IsCharacter(']')
key.append(index)
if not self.IsCharacter('.', eoi_ok=True):
break
if self.EndOfInput():
# Dangling '.' is not allowed.
raise resource_exceptions.ExpressionSyntaxError(
'Non-empty key name expected [{0}].'.format(self.Annotate()))
return key
def _ParseSynthesize(self, args):
"""Parses the synthesize() transform args and returns a new transform.
The args are a list of tuples. Each tuple is a schema that defines the
synthesis of one resource list item. Each schema item is an attribute
that defines the synthesis of one synthesized_resource attribute from
an original_resource attribute.
There are three kinds of attributes:
name:literal
The value for the name attribute in the synthesized resource is the
literal value.
name=key
The value for the name attribute in the synthesized_resource is the
value of key in the original_resource.
key:
All the attributes of the value of key in the original_resource are
added to the attributes in the synthesized_resource.
Args:
args: The original synthesize transform args.
Returns:
A synthesize transform function that uses the schema from the parsed
args.
Example:
This returns a list of two resource items:
synthesize((name:up, upInfo), (name:down, downInfo))
If upInfo and downInfo serialize to
{"foo": 1, "bar": "yes"}
and
{"foo": 0, "bar": "no"}
then the synthesized resource list is
[{"name": "up", "foo": 1, "bar": "yes"},
{"name": "down", "foo": 0, "bar": "no"}]
which could be displayed by a nested table using
synthesize(...):format="table(name, foo, bar)"
"""
schemas = []
for arg in args:
lex = Lexer(arg)
if not lex.IsCharacter('('):
raise resource_exceptions.ExpressionSyntaxError(
'(...) args expected in synthesize() transform')
schema = []
for attr in lex.Args():
if ':' in attr:
name, literal = attr.split(':', 1)
key = None
elif '=' in attr:
name, value = attr.split('=', 1)
key = Lexer(value).Key()
literal = None
else:
key = Lexer(attr).Key()
name = None
literal = None
schema.append((name, key, literal))
schemas.append(schema)
def _Synthesize(r):
"""Synthesize a new resource list from the original resource r.
Args:
r: The original resource.
Returns:
The synthesized resource list.
"""
synthesized_resource_list = []
for schema in schemas:
synthesized_resource = {}
for attr in schema:
name, key, literal = attr
value = resource_property.Get(r, key, None) if key else literal
if name:
synthesized_resource[name] = value
elif isinstance(value, dict):
synthesized_resource.update(value)
synthesized_resource_list.append(synthesized_resource)
return synthesized_resource_list
return _Synthesize
def _ParseTransform(self, func_name, active=0, map_transform=None):
"""Parses a transform function call.
The cursor is positioned at the '(' after func_name.
Args:
func_name: The transform function name.
active: The transform active level or None if always active.
map_transform: Apply the transform to each resource list item this many
times.
Returns:
A _TransformCall object. The caller appends these to a list that is used
to apply the transform functions.
Raises:
ExpressionSyntaxError: The expression has a syntax error.
"""
here = self.GetPosition()
if func_name not in self._defaults.symbols:
raise resource_exceptions.ExpressionSyntaxError(
'Unknown transform function {0} [{1}].'.format(
func_name, self.Annotate(here)))
func = self._defaults.symbols[func_name]
args = []
kwargs = {}
doc = getattr(func, 'func_doc', None)
if doc and resource_projection_spec.PROJECTION_ARG_DOC in doc:
# The second transform arg is the caller projection.
args.append(self._defaults)
if getattr(func, 'func_defaults', None):
# Separate the args from the kwargs.
for arg in self.Args():
name, sep, val = arg.partition('=')
if sep:
kwargs[name] = val
else:
args.append(arg)
else:
# No kwargs.
args += self.Args()
return _TransformCall(func_name, func, active=active,
map_transform=map_transform, args=args, kwargs=kwargs)
def Transform(self, func_name, active=0):
"""Parses one or more transform calls and returns a _Transform call object.
The cursor is positioned at the '(' just after the transform name.
Args:
func_name: The name of the first transform function.
active: The transform active level, None for always active.
Returns:
The _Transform object containing the ordered list of transform calls.
"""
here = self.GetPosition()
calls = _Transform()
map_transform = 0
while True:
transform = self._ParseTransform(func_name, active=active,
map_transform=map_transform)
if transform.func == resource_transform.TransformAlways:
active = None # Always active.
func_name = None
elif transform.func == resource_transform.TransformMap:
map_transform = int(transform.args[0]) if transform.args else 1
func_name = None
elif transform.func == resource_transform.TransformIf:
if len(transform.args) != 1:
raise resource_exceptions.ExpressionSyntaxError(
'Conditional filter expression expected [{0}].'.format(
self.Annotate(here)))
calls.SetConditional(transform.args[0])
elif transform.func == resource_transform.TransformSynthesize:
transform.func = self._ParseSynthesize(transform.args)
transform.args = []
transform.kwargs = {}
calls.Add(transform)
else:
# always() applies to all transforms for key.
# map() applies to the next transform.
map_transform = 0
calls.Add(transform)
if not self.IsCharacter('.', eoi_ok=True):
break
call = self.Key()
here = self.GetPosition()
if not self.IsCharacter('('):
raise resource_exceptions.ExpressionSyntaxError(
'Transform function expected [{0}].'.format(
self.Annotate(here)))
if len(call) != 1:
raise resource_exceptions.ExpressionSyntaxError(
'Unknown transform function {0} [{1}].'.format(
'.'.join(call), self.Annotate(here)))
func_name = call.pop()
return calls
def ParseKey(name):
"""Returns a parsed key for the dotted resource name string.
This is an encapsulation of Lexer.Key(). That docstring has the input/output
details for this function.
Args:
name: A resource name string that may contain dotted components and
multi-value indices.
Raises:
ExpressionSyntaxError: If there are unexpected tokens after the key name.
Returns:
A parsed key for the dotted resource name string.
"""
lex = Lexer(name)
key = lex.Key()
if not lex.EndOfInput():
raise resource_exceptions.ExpressionSyntaxError(
'Unexpected tokens [{0}] in key.'.format(lex.Annotate()))
return key
def GetKeyName(key, quote=True):
"""Returns the string representation for a parsed key.
This is the inverse of Lexer.Key(). That docstring has the input/output
details for this function.
Args:
key: A parsed key, which is an ordered list of key names/indices. Each
element in the list may be one of:
str - A resource property name. This could be a class attribute name or
a dict index.
int - A list index. Selects one member of the list. Negative indices
count from the end of the list, starting with -1 for the last element
in the list. An out of bounds index is not an error; it produces the
value None.
None - A list slice. Selects all members of a list or dict like object.
A slice of an empty dict or list is an empty dict or list.
quote: "..." the key name if it contains non-alphanum characters.
Returns:
The string representation of the parsed key.
"""
parts = []
for part in key:
if part is None:
part = '[]'
if parts:
parts[-1] += part
continue
elif isinstance(part, (int, long)):
part = '[{part}]'.format(part=part)
if parts:
parts[-1] += part
continue
elif quote and re.search(r'\W', part):
part = part.replace('\\', '\\\\')
part = part.replace('"', '\\"')
part = u'"{part}"'.format(part=part)
parts.append(part)
return '.'.join(parts) if parts else '.'
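# A minimal round-trip sketch (illustrative, not part of the module), using the
# example key from the Lexer.Key() docstring:
#
#     key = ParseKey('abc.def[123].ghi[].jkl')
#     # => ['abc', 'def', 123, 'ghi', None, 'jkl']
#     GetKeyName(key)
#     # => 'abc.def[123].ghi[].jkl'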
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnConnectionsOperations:
"""VpnConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.VpnConnection":
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.VpnConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
vpn_connection_parameters: "_models.VpnConnection",
**kwargs: Any
) -> "_models.VpnConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
vpn_connection_parameters: "_models.VpnConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.VpnConnection"]:
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist else updates the
existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or Update a VPN Connection.
:type vpn_connection_parameters: ~azure.mgmt.network.v2018_04_01.models.VpnConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_04_01.models.VpnConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def list_by_vpn_gateway(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnConnectionsResult"]:
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.ListVpnConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'} # type: ignore
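# A minimal usage sketch (not part of the generated code). It assumes an already
# constructed management client that exposes this operations group as
# `vpn_connections`; `network_client` is an illustrative name:
#
#     poller = await network_client.vpn_connections.begin_create_or_update(
#         resource_group_name, gateway_name, connection_name, vpn_connection_parameters)
#     connection = await poller.result()
#     async for item in network_client.vpn_connections.list_by_vpn_gateway(
#             resource_group_name, gateway_name):
#         print(item)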
|
|
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
import re
from sys import stderr
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QBrush, QPen, QGraphicsRectItem
from PyQt4.QtGui import QPrinter
from PyQt4.QtCore import QThread, SIGNAL
try:
from PyQt4 import QtOpenGL
USE_GL = True
USE_GL = False # Temporarily disabled
except ImportError:
USE_GL = False
import _mainwindow, _search_dialog, _show_newick, _open_newick, _about
from main import TreeStyle, save, _leaf
from svg_colors import random_color
from qt4_render import render
from ete2._ph import new_version
from ete2 import Tree, TreeStyle
import time
class _SelectorItem(QtGui.QGraphicsRectItem):
def __init__(self, parent=None):
self.Color = QtGui.QColor("blue")
self._active = False
QtGui.QGraphicsRectItem.__init__(self, 0, 0, 0, 0)
if parent:
self.setParentItem(parent)
def paint(self, p, option, widget):
p.setPen(self.Color)
p.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
p.drawRect(self.rect().x(),self.rect().y(),self.rect().width(),self.rect().height())
return
# Draw info text
font = QtGui.QFont("Arial",13)
text = "%d selected." % len(self.get_selected_nodes())
textR = QtGui.QFontMetrics(font).boundingRect(text)
if self.rect().width() > textR.width() and \
self.rect().height() > textR.height()/2 and 0: # OJO !!!!
p.setPen(QtGui.QPen(self.Color))
p.setFont(QtGui.QFont("Arial",13))
p.drawText(self.rect().bottomLeft().x(),self.rect().bottomLeft().y(),text)
def get_selected_nodes(self):
selPath = QtGui.QPainterPath()
selPath.addRect(self.rect())
self.scene().setSelectionArea(selPath)
return [i.node for i in self.scene().selectedItems()]
def setActive(self,bool):
self._active = bool
def isActive(self):
return self._active
def etime(f):
    def a_wrapper_accepting_arguments(*args, **kargs):
        # Time the wrapped call, print the elapsed wall-clock seconds and
        # return the wrapped function's result.
        t1 = time.time()
        result = f(*args, **kargs)
        print time.time() - t1
        return result
    return a_wrapper_accepting_arguments
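# Usage sketch (added): "etime" can decorate any function to print its wall-clock
# runtime; the decorated function below is hypothetical.
#
#   @etime
#   def expensive_redraw(scene):
#       scene.draw()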
class CheckUpdates(QThread):
def run(self):
try:
current, latest, tag = new_version()
if tag is None:
tag = ""
msg = ""
if current and latest:
if current < latest:
msg = "New version available (rev%s): %s More info at http://etetoolkit.org." %\
(latest, tag)
elif current == latest:
msg = "Up to date"
self.emit(SIGNAL("output(QString)"), msg)
except Exception:
pass
class _GUI(QtGui.QMainWindow):
def _updatestatus(self, msg):
self.main.statusbar.showMessage(msg)
def redraw(self):
self.scene.draw()
self.view.init_values()
def __init__(self, scene, *args):
QtGui.QMainWindow.__init__(self, *args)
self.main = _mainwindow.Ui_MainWindow()
self.main.setupUi(self)
self.setWindowTitle("ETE Tree Browser")
self.scene = scene
self.scene.GUI = self
self.view = _TreeView(scene)
scene.view = self.view
self.node_properties = _PropertiesDialog(scene)
self.view.prop_table = self.node_properties
#self.view.centerOn(0,0)
if scene.img.show_branch_length:
self.main.actionBranchLength.setChecked(True)
if scene.img.show_branch_support:
self.main.actionBranchSupport.setChecked(True)
if scene.img.show_leaf_name:
self.main.actionLeafName.setChecked(True)
if scene.img.force_topology:
self.main.actionForceTopology.setChecked(True)
splitter = QtGui.QSplitter()
splitter.addWidget(self.view)
splitter.addWidget(self.node_properties)
self.setCentralWidget(splitter)
# I create a single dialog to keep the last search options
self.searchDialog = QtGui.QDialog()
# Don't know if this is the best way to set up the dialog and
# its variables
self.searchDialog._conf = _search_dialog.Ui_Dialog()
self.searchDialog._conf.setupUi(self.searchDialog)
self.scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
# Shows the whole tree by default
#self.view.fitInView(self.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
splitter.setCollapsible(1, True)
splitter.setSizes([self.scene.sceneRect().width(), 10])
self.view.fitInView(0, 0, self.scene.sceneRect().width(), 200, QtCore.Qt.KeepAspectRatio)
# Check for updates
self.check = CheckUpdates()
#self.check.start()
#self.connect(self.check, SIGNAL("output(QString)"), self._updatestatus)
@QtCore.pyqtSignature("")
def on_actionETE_triggered(self):
try:
__VERSION__
except:
__VERSION__= "development branch"
d = QtGui.QDialog()
d._conf = _about.Ui_About()
d._conf.setupUi(d)
d._conf.version.setText("Version: %s" %__VERSION__)
d._conf.version.setAlignment(QtCore.Qt.AlignHCenter)
d.exec_()
@QtCore.pyqtSignature("")
def on_actionZoomOut_triggered(self):
self.view.safe_scale(0.8,0.8)
@QtCore.pyqtSignature("")
def on_actionZoomIn_triggered(self):
self.view.safe_scale(1.2,1.2)
@QtCore.pyqtSignature("")
def on_actionZoomInX_triggered(self):
self.scene.img._scale += self.scene.img._scale * 0.05
self.redraw()
@QtCore.pyqtSignature("")
def on_actionZoomOutX_triggered(self):
self.scene.img._scale -= self.scene.img._scale * 0.05
self.redraw()
@QtCore.pyqtSignature("")
def on_actionZoomInY_triggered(self):
self.scene.img.branch_vertical_margin += 5
self.scene.img._scale = None
self.redraw()
@QtCore.pyqtSignature("")
def on_actionZoomOutY_triggered(self):
if self.scene.img.branch_vertical_margin > 0:
margin = self.scene.img.branch_vertical_margin - 5
if margin > 0:
self.scene.img.branch_vertical_margin = margin
else:
self.scene.img.branch_vertical_margin = 0.0
self.scene.img._scale = None
self.redraw()
@QtCore.pyqtSignature("")
def on_actionFit2tree_triggered(self):
self.view.fitInView(self.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
@QtCore.pyqtSignature("")
def on_actionFit2region_triggered(self):
R = self.view.selector.rect()
if R.width()>0 and R.height()>0:
self.view.fitInView(R.x(), R.y(), R.width(),\
R.height(), QtCore.Qt.KeepAspectRatio)
@QtCore.pyqtSignature("")
def on_actionSearchNode_triggered(self):
setup = self.searchDialog._conf
setup.attrValue.setFocus()
ok = self.searchDialog.exec_()
if ok:
mType = setup.attrType.currentIndex()
aName = str(setup.attrName.text())
if mType >= 2 and mType <=6:
try:
aValue = float(setup.attrValue.text())
except ValueError:
QtGui.QMessageBox.information(self, "!",\
"A numeric value is expected")
return
elif mType == 7:
aValue = re.compile(str(setup.attrValue.text()))
elif mType == 0 or mType == 1:
aValue = str(setup.attrValue.text())
if mType == 1 or mType == 2: #"is or =="
cmpFn = lambda x,y: x == y
elif mType == 0: # "contains"
cmpFn = lambda x,y: y in x
elif mType == 3:
cmpFn = lambda x,y: x >= y
elif mType == 4:
cmpFn = lambda x,y: x > y
elif mType == 5:
cmpFn = lambda x,y: x <= y
elif mType == 6:
cmpFn = lambda x,y: x < y
elif mType == 7:
cmpFn = lambda x,y: re.search(y, x)
last_match_node = None
for n in self.scene.tree.traverse(is_leaf_fn=_leaf):
if setup.leaves_only.isChecked() and not _leaf(n):
continue
if hasattr(n, aName) \
and cmpFn(getattr(n, aName), aValue ):
self.scene.view.highlight_node(n)
last_match_node = n
if last_match_node:
item = self.scene.n2i[last_match_node]
R = item.mapToScene(item.fullRegion).boundingRect()
R.adjust(-60, -60, 60, 60)
self.view.fitInView(R.x(), R.y(), R.width(),\
R.height(), QtCore.Qt.KeepAspectRatio)
@QtCore.pyqtSignature("")
def on_actionClear_search_triggered(self):
# This could be much more efficient
for n in self.view.n2hl.keys():
self.scene.view.unhighlight_node(n)
@QtCore.pyqtSignature("")
def on_actionBranchLength_triggered(self):
self.scene.img.show_branch_length ^= True
self.scene.img._scale = None
self.redraw()
self.view.centerOn(0,0)
@QtCore.pyqtSignature("")
def on_actionBranchSupport_triggered(self):
self.scene.img.show_branch_support ^= True
self.scene.img._scale = None
self.redraw()
self.view.centerOn(0,0)
@QtCore.pyqtSignature("")
def on_actionLeafName_triggered(self):
self.scene.img.show_leaf_name ^= True
self.scene.img._scale = None
self.redraw()
self.view.centerOn(0,0)
@QtCore.pyqtSignature("")
def on_actionForceTopology_triggered(self):
self.scene.img.force_topology ^= True
self.scene.img._scale = None
self.redraw()
self.view.centerOn(0,0)
@QtCore.pyqtSignature("")
def on_actionShow_newick_triggered(self):
d = NewickDialog(self.scene.tree)
d._conf = _show_newick.Ui_Newick()
d._conf.setupUi(d)
d.update_newick()
d.exec_()
@QtCore.pyqtSignature("")
def on_actionChange_orientation_triggered(self):
self.scene.props.orientation ^= 1
self.redraw()
@QtCore.pyqtSignature("")
def on_actionShow_phenogram_triggered(self):
self.scene.props.style = 0
self.redraw()
@QtCore.pyqtSignature("")
def on_actionShowCladogram_triggered(self):
self.scene.props.style = 1
self.redraw()
@QtCore.pyqtSignature("")
def on_actionOpen_triggered(self):
d = QtGui.QFileDialog()
d._conf = _open_newick.Ui_OpenNewick()
d._conf.setupUi(d)
d.exec_()
return
fname = QtGui.QFileDialog.getOpenFileName(self ,"Open File",
"/home",
)
try:
t = Tree(str(fname))
except Exception, e:
print e
else:
self.scene.tree = t
self.img = TreeStyle()
self.redraw()
@QtCore.pyqtSignature("")
def on_actionSave_newick_triggered(self):
fname = QtGui.QFileDialog.getSaveFileName(self ,"Save File",
"/home",
"Newick (*.nh *.nhx *.nw )")
nw = self.scene.tree.write()
try:
OUT = open(fname,"w")
except Exception, e:
print e
else:
OUT.write(nw)
OUT.close()
@QtCore.pyqtSignature("")
def on_actionRenderPDF_triggered(self):
F = QtGui.QFileDialog(self)
if F.exec_():
imgName = str(F.selectedFiles()[0])
if not imgName.endswith(".pdf"):
imgName += ".pdf"
save(self.scene, imgName)
@QtCore.pyqtSignature("")
def on_actionRender_selected_region_triggered(self):
if not self.scene.selector.isVisible():
return QtGui.QMessageBox.information(self, "!",\
"You must select a region first")
F = QtGui.QFileDialog(self)
if F.exec_():
imgName = str(F.selectedFiles()[0])
if not imgName.endswith(".pdf"):
imgName += ".pdf"
save(imgName, take_region=True)
@QtCore.pyqtSignature("")
def on_actionPaste_newick_triggered(self):
text,ok = QtGui.QInputDialog.getText(self,\
"Paste Newick",\
"Newick:")
if ok:
try:
t = Tree(str(text))
except Exception,e:
print e
else:
self.scene.tree = t
self.redraw()
self.view.centerOn(0,0)
def keyPressEvent(self,e):
key = e.key()
control = e.modifiers() & QtCore.Qt.ControlModifier
if key == 77:
if self.isMaximized():
self.showNormal()
else:
self.showMaximized()
elif key >= 49 and key <= 58:
key = key - 48
m = self.view.matrix()
m.reset()
self.view.setMatrix(m)
self.view.scale(key, key)
# This function should be reviewed. Probably there are better ways to
# do the same, or at least less messy ways... So far this is what I
# have
class _TableItem(QtGui.QItemDelegate):
def __init__(self, parent=None):
QtGui.QItemDelegate.__init__(self, parent)
self.propdialog = parent
def paint(self, painter, style, index):
self.propdialog.tableView.setRowHeight(index.row(), 18)
val = index.data()
if getattr(val, "background", None):
painter.fillRect(style.rect, QtGui.QColor(val.background))
QtGui.QItemDelegate.paint(self, painter, style, index)
def createEditor(self, parent, option, index):
# Edit only values, not property names
if index.column() != 1:
return None
originalValue = index.model().data(index)
if not self.isSupportedType(originalValue.type()):
return None
if re.search("^#[0-9ABCDEFabcdef]{6}$",str(originalValue.toString())):
origc = QtGui.QColor(str(originalValue.toString()))
color = QtGui.QColorDialog.getColor(origc)
if color.isValid():
self.propdialog._edited_indexes.add( (index.row(), index.column()) )
index.model().setData(index,QtCore.QVariant(color.name()))
self.propdialog.apply_changes()
return None
else:
editField = QtGui.QLineEdit(parent)
editField.setFrame(False)
validator = QtGui.QRegExpValidator(QtCore.QRegExp(".+"), editField)
editField.setValidator(validator)
self.connect(editField, QtCore.SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
self.connect(editField, QtCore.SIGNAL("returnPressed()"),
self.propdialog.apply_changes)
self.propdialog._edited_indexes.add( (index.row(), index.column()) )
return editField
def setEditorData(self, editor, index):
value = index.model().data(index)
if editor is not None:
editor.setText(self.displayText(value))
def isSupportedType(valueType):
return True
isSupportedType = staticmethod(isSupportedType)
def displayText(self,value):
return value.toString()
def commitAndCloseEditor(self):
editor = self.sender()
self.emit(QtCore.SIGNAL("commitData(QWidget *)"), editor)
self.emit(QtCore.SIGNAL("closeEditor(QWidget *)"), editor)
class _PropModeChooser(QtGui.QWidget):
def __init__(self,scene, *args):
QtGui.QWidget.__init__(self,*args)
class _PropertiesDialog(QtGui.QWidget):
def __init__(self, scene, *args):
QtGui.QWidget.__init__(self,*args)
self.scene = scene
self._mode = 0
self.layout = QtGui.QVBoxLayout()
self.tableView = QtGui.QTableView()
self.tableView.verticalHeader().setVisible(False)
#self.tableView.horizontalHeader().setVisible(True)
#self.tableView.setVerticalHeader(None)
self.layout.addWidget(self.tableView)
self.setLayout(self.layout)
self.tableView.setGeometry (0, 0, 200,200)
def update_properties(self, node):
self.node = node
self._edited_indexes = set([])
self._style_indexes = set([])
self._prop_indexes = set([])
self.get_prop_table(node)
def get_props_in_nodes(self, nodes):
# sorts properties and faces of selected nodes
self.prop2nodes = {}
self.prop2values = {}
self.style2nodes = {}
self.style2values = {}
for n in nodes:
for pname in n.features:
pvalue = getattr(n,pname)
if type(pvalue) == int or \
type(pvalue) == float or \
type(pvalue) == str :
self.prop2nodes.setdefault(pname,[]).append(n)
self.prop2values.setdefault(pname,[]).append(pvalue)
for pname,pvalue in n.img_style.iteritems():
if type(pvalue) == int or \
type(pvalue) == float or \
type(pvalue) == str :
self.style2nodes.setdefault(pname,[]).append(n)
self.style2values.setdefault(pname,[]).append(pvalue)
def get_prop_table(self, node):
if self._mode == 0: # node
self.get_props_in_nodes([node])
elif self._mode == 1: # childs
self.get_props_in_nodes(node.get_leaves())
elif self._mode == 2: # partition
self.get_props_in_nodes([node]+node.get_descendants())
total_props = len(self.prop2nodes) + len(self.style2nodes.keys())
self.model = QtGui.QStandardItemModel(total_props, 2)
self.model.setHeaderData(0, QtCore.Qt.Horizontal, "Feature")
self.model.setHeaderData(1, QtCore.Qt.Horizontal, "Value")
self.tableView.setModel(self.model)
self.delegate = _TableItem(self)
self.tableView.setItemDelegate(self.delegate)
row = 0
items = self.prop2nodes.items()
for name, nodes in sorted(items):
value= getattr(nodes[0],name)
index1 = self.model.index(row, 0, QtCore.QModelIndex())
index2 = self.model.index(row, 1, QtCore.QModelIndex())
f = QtCore.QVariant(name)
v = QtCore.QVariant(value)
self.model.setData(index1, f)
self.model.setData(index2, v)
self._prop_indexes.add( (index1, index2) )
row +=1
keys = self.style2nodes.keys()
for name in sorted(keys):
value= self.style2values[name][0]
index1 = self.model.index(row, 0, QtCore.QModelIndex())
index2 = self.model.index(row, 1, QtCore.QModelIndex())
self.model.setData(index1, QtCore.QVariant(name))
v = QtCore.QVariant(value)
self.model.setData(index2, v)
# Creates a variant element
self._style_indexes.add( (index1, index2) )
row +=1
return
def apply_changes(self):
# Apply changes on styles
for i1, i2 in self._style_indexes:
if (i2.row(), i2.column()) not in self._edited_indexes:
continue
name = str(self.model.data(i1).toString())
value = str(self.model.data(i2).toString())
for n in self.style2nodes[name]:
typedvalue = type(n.img_style[name])(value)
try:
n.img_style[name] = typedvalue
except:
#logger(-1, "Wrong format for attribute:", name)
break
# Apply changes on properties
for i1, i2 in self._prop_indexes:
if (i2.row(), i2.column()) not in self._edited_indexes:
continue
name = str(self.model.data(i1).toString())
value = str(self.model.data(i2).toString())
for n in self.prop2nodes[name]:
try:
setattr(n, name, type(getattr(n,name))(value))
except Exception, e:
#logger(-1, "Wrong format for attribute:", name)
print e
break
self.update_properties(self.node)
self.scene.img._scale = None
self.scene.GUI.redraw()
return
class NewickDialog(QtGui.QDialog):
def __init__(self, node, *args):
QtGui.QDialog.__init__(self, *args)
self.node = node
def update_newick(self):
f= int(self._conf.nwFormat.currentText())
self._conf.features_list.selectAll()
if self._conf.useAllFeatures.isChecked():
features = []
elif self._conf.features_list.count()==0:
features = None
else:
features = set()
for i in self._conf.features_list.selectedItems():
features.add(str(i.text()))
nw = self.node.write(format=f, features=features)
self._conf.newickBox.setText(nw)
def add_feature(self):
aName = str(self._conf.attrName.text()).strip()
if aName != '':
self._conf.features_list.addItem(aName)
self.update_newick()
def del_feature(self):
r = self._conf.features_list.currentRow()
self._conf.features_list.takeItem(r)
self.update_newick()
def set_custom_features(self):
state = self._conf.useAllFeatures.isChecked()
self._conf.features_list.setDisabled(state)
self._conf.attrName.setDisabled(state)
self.update_newick()
class _TreeView(QtGui.QGraphicsView):
def __init__(self,*args):
QtGui.QGraphicsView.__init__(self,*args)
self.buffer_node = None
self.init_values()
if USE_GL:
print "USING GL"
F = QtOpenGL.QGLFormat()
F.setSampleBuffers(True)
print F.sampleBuffers()
self.setViewport(QtOpenGL.QGLWidget(F))
self.setRenderHints(QtGui.QPainter.Antialiasing)
else:
            self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
self.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
        self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
#self.setViewportUpdateMode(QtGui.QGraphicsView.NoViewportUpdate)
self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
self.setResizeAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
#self.setOptimizationFlag (QtGui.QGraphicsView.DontAdjustForAntialiasing)
self.setOptimizationFlag (QtGui.QGraphicsView.DontSavePainterState)
#self.setOptimizationFlag (QtGui.QGraphicsView.DontClipPainter)
#self.scene().setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
#self.scene().setBspTreeDepth(24)
def init_values(self):
master_item = self.scene().master_item
self.n2hl = {}
self.focus_highlight = QtGui.QGraphicsRectItem(master_item)
#self.buffer_node = None
self.focus_node = None
self.selector = _SelectorItem(master_item)
def resizeEvent(self, e):
QtGui.QGraphicsView.resizeEvent(self, e)
def safe_scale(self, xfactor, yfactor):
self.setTransformationAnchor(self.AnchorUnderMouse)
xscale = self.matrix().m11()
yscale = self.matrix().m22()
srect = self.sceneRect()
if (xfactor>1 and xscale>200000) or \
(yfactor>1 and yscale>200000):
QtGui.QMessageBox.information(self, "!",\
"I will take the microscope!")
return
        # Do not allow to reduce scale to a value producing a height or width smaller than 20 pixels
# No restrictions to zoom in
        if (yfactor<1 and srect.height() * yscale < 20):
pass
elif (xfactor<1 and srect.width() * xscale < 20):
pass
else:
self.scale(xfactor, yfactor)
def highlight_node(self, n, fullRegion=False, fg="red", bg="gray", permanent=False):
self.unhighlight_node(n)
item = self.scene().n2i[n]
hl = QtGui.QGraphicsRectItem(item.content)
if fullRegion:
hl.setRect(item.fullRegion)
else:
hl.setRect(item.nodeRegion)
hl.setPen(QtGui.QColor(fg))
hl.setBrush(QtGui.QColor(bg))
hl.setOpacity(0.2)
# save info in Scene
self.n2hl[n] = hl
if permanent:
item.highlighted = True
def unhighlight_node(self, n, reset=False):
if n in self.n2hl:
item = self.scene().n2i[n]
if not item.highlighted:
self.scene().removeItem(self.n2hl[n])
del self.n2hl[n]
elif reset:
self.scene().removeItem(self.n2hl[n])
del self.n2hl[n]
item.highlighted = False
else:
pass
def wheelEvent(self,e):
factor = (-e.delta() / 360.0)
if abs(factor)>=1:
factor = 0.0
# Ctrl+Shift -> Zoom in X
if (e.modifiers() & QtCore.Qt.ControlModifier) and (e.modifiers() & QtCore.Qt.ShiftModifier):
self.safe_scale(1+factor, 1)
        # Ctrl+Alt -> Zoom in Y
elif (e.modifiers() & QtCore.Qt.ControlModifier) and (e.modifiers() & QtCore.Qt.AltModifier):
self.safe_scale(1,1+factor)
# Ctrl -> Zoom X,Y
elif e.modifiers() & QtCore.Qt.ControlModifier:
self.safe_scale(1-factor, 1-factor)
# Shift -> Horizontal scroll
elif e.modifiers() & QtCore.Qt.ShiftModifier:
if e.delta()>0:
self.horizontalScrollBar().setValue(self.horizontalScrollBar().value()-20 )
else:
self.horizontalScrollBar().setValue(self.horizontalScrollBar().value()+20 )
        # No modifiers -> Vertical scroll
else:
if e.delta()>0:
self.verticalScrollBar().setValue(self.verticalScrollBar().value()-20 )
else:
self.verticalScrollBar().setValue(self.verticalScrollBar().value()+20 )
def set_focus(self, node):
i = self.scene().n2i[node]
self.focus_highlight.setPen(QtGui.QColor("red"))
self.focus_highlight.setBrush(QtGui.QColor("SteelBlue"))
self.focus_highlight.setOpacity(0.2)
self.focus_highlight.setParentItem(i.content)
self.focus_highlight.setRect(i.fullRegion)
self.focus_highlight.setVisible(True)
self.prop_table.update_properties(node)
#self.focus_highlight.setRect(i.nodeRegion)
self.focus_node = node
self.update()
def hide_focus(self):
return
#self.focus_highlight.setVisible(False)
def keyPressEvent(self,e):
key = e.key()
control = e.modifiers() & QtCore.Qt.ControlModifier
#print >>sys.stderr, "****** Pressed key: ", key, QtCore.Qt.LeftArrow
if control:
if key == QtCore.Qt.Key_Left:
self.horizontalScrollBar().setValue(self.horizontalScrollBar().value()-20 )
self.update()
elif key == QtCore.Qt.Key_Right:
self.horizontalScrollBar().setValue(self.horizontalScrollBar().value()+20 )
self.update()
elif key == QtCore.Qt.Key_Up:
self.verticalScrollBar().setValue(self.verticalScrollBar().value()-20 )
self.update()
elif key == QtCore.Qt.Key_Down:
self.verticalScrollBar().setValue(self.verticalScrollBar().value()+20 )
self.update()
else:
if not self.focus_node:
self.focus_node = self.scene().tree
if key == QtCore.Qt.Key_Left:
if self.focus_node.up:
new_focus_node = self.focus_node.up
self.set_focus(new_focus_node)
elif key == QtCore.Qt.Key_Right:
if self.focus_node.children:
new_focus_node = self.focus_node.children[0]
self.set_focus(new_focus_node)
elif key == QtCore.Qt.Key_Up:
if self.focus_node.up:
i = self.focus_node.up.children.index(self.focus_node)
if i>0:
new_focus_node = self.focus_node.up.children[i-1]
self.set_focus(new_focus_node)
elif self.focus_node.up:
self.set_focus(self.focus_node.up)
elif key == QtCore.Qt.Key_Down:
if self.focus_node.up:
i = self.focus_node.up.children.index(self.focus_node)
if i < len(self.focus_node.up.children)-1:
new_focus_node = self.focus_node.up.children[i+1]
self.set_focus(new_focus_node)
elif self.focus_node.up:
self.set_focus(self.focus_node.up)
elif key == QtCore.Qt.Key_Escape:
self.hide_focus()
elif key == QtCore.Qt.Key_Enter or\
key == QtCore.Qt.Key_Return:
self.prop_table.tableView.setFocus()
elif key == QtCore.Qt.Key_Space:
self.highlight_node(self.focus_node, fullRegion=True,
bg=random_color(l=0.5, s=0.5),
permanent=True)
QtGui.QGraphicsView.keyPressEvent(self,e)
def mouseReleaseEvent(self, e):
self.scene().view.hide_focus()
curr_pos = self.mapToScene(e.pos())
x = min(self.selector.startPoint.x(),curr_pos.x())
y = min(self.selector.startPoint.y(),curr_pos.y())
w = max(self.selector.startPoint.x(),curr_pos.x()) - x
h = max(self.selector.startPoint.y(),curr_pos.y()) - y
if self.selector.startPoint == curr_pos:
self.selector.setVisible(False)
self.selector.setActive(False)
QtGui.QGraphicsView.mouseReleaseEvent(self,e)
def mousePressEvent(self,e):
pos = self.mapToScene(e.pos())
x, y = pos.x(), pos.y()
self.selector.setRect(x, y, 0,0)
self.selector.startPoint = QtCore.QPointF(x, y)
self.selector.setActive(True)
self.selector.setVisible(True)
QtGui.QGraphicsView.mousePressEvent(self,e)
def mouseMoveEvent(self,e):
curr_pos = self.mapToScene(e.pos())
if self.selector.isActive():
x = min(self.selector.startPoint.x(),curr_pos.x())
y = min(self.selector.startPoint.y(),curr_pos.y())
w = max(self.selector.startPoint.x(),curr_pos.x()) - x
h = max(self.selector.startPoint.y(),curr_pos.y()) - y
self.selector.setRect(x,y,w,h)
QtGui.QGraphicsView.mouseMoveEvent(self, e)
class _BasicNodeActions(object):
""" Should be added as ActionDelegator """
@staticmethod
def init(obj):
obj.setCursor(QtCore.Qt.PointingHandCursor)
obj.setAcceptsHoverEvents(True)
@staticmethod
def hoverEnterEvent (obj, e):
print "HOLA"
@staticmethod
def hoverLeaveEvent(obj, e):
print "ADIOS"
@staticmethod
def mousePressEvent(obj, e):
print "Click"
@staticmethod
def mouseReleaseEvent(obj, e):
if e.button() == QtCore.Qt.RightButton:
obj.showActionPopup()
elif e.button() == QtCore.Qt.LeftButton:
obj.scene().view.set_focus(obj.node)
#obj.scene().view.prop_table.update_properties(obj.node)
@staticmethod
def hoverEnterEvent (self, e):
self.scene().view.highlight_node(self.node, fullRegion=True)
@staticmethod
def hoverLeaveEvent(self,e):
self.scene().view.unhighlight_node(self.node)
|
|
import SocketServer
import trajcomp;
import json;
import sys;
import os, shutil
cfg_numdir = 2;
cfg_location = "/home/jan/Desktop/Masterarbeit/trajcomputing_code/v_2/libtrajcomp-src/data/Geolife";
gN_location = "/home/jan/Desktop/Masterarbeit/trajcomputing_code/v_2/libtrajcomp-src/doc/md/data/GoogleNow";
gN_numdir = 1;
elki_location = "/home/jan/elki.jar";
resample_location = "/home/jan/Desktop/Masterarbeit/trajcomputing_code/v_2/libtrajcomp-src/doc/md/resample/"
def top_k_jaccard_id(theid):
s = trajcomp.jaccard_id(theid);
print min(s)
print max(s)
# Retrieve top k
k=10
    # First sort and keep the original indices
i = [i[0] for i in sorted(enumerate(s), key=lambda x:x[1])]
i = i[:k] #[-k,:]
for idx in i:
print "Taking "+str(s[idx]);
return i;
def top_k_jaccard(sl):
s = trajcomp.jaccard_stringlist(sl);
print min(s)
print max(s)
# Retrieve top k
k=10
    # First sort and keep the original indices
i = [i[0] for i in sorted(enumerate(s), key=lambda x:x[1])]
print i[0]
i = i[:k] #[-k,:]
print i[0]
for idx in i:
print "Taking "+str(s[idx]);
return i
def top_k_intersect(sl):
s = trajcomp.intersect_stringlist(sl);
print min(s)
print max(s)
tau = min(s) + 0.75 *(max(s)-min(s));
# Retrieve top k
k=10
    # First sort and keep the original indices
i = [i[0] for i in sorted(enumerate(s), key=lambda x:x[1])]
# i = i[-k:]
r=[];
for idx in i:
if(s[idx] > tau):
r.append(idx);
print "Left "+str(s);
return r
def top_k_subset_correlation(sl):
s = trajcomp.relative_subset_contradicts_stringlist(sl);
# print s
# subset correlation: take only zero, but double == zero is fabs
i=[i for i, j in enumerate(s) if j <=10E-6 ]
# Retrieve up to
# print i
print "Found " +str(len(i)) + " fully correlating. Limit to k=10"
k=10
i = i[:k] #[-k,:]
sys.stdout.flush()
return i
def dbscan_segmentation(d, m, e):
print "dbscan backend server";
print d;
print m;
print e;
print elki_location;
r = trajcomp.dbscan(float(e),int(m), "Test", d, elki_location);
return r;
def douglas_peucker(d, e):
r = trajcomp.douglas_peucker_online(d,float(e));
return r;
def resample_traj(d, h):
r = trajcomp.resample(d, h, resample_location);
print "got back:"
print r
#Delete created files
#for the_file in os.listdir(resample_location):
# file_path = os.path.join(resample_location, the_file)
# try:
# if os.path.isfile(file_path):
# os.unlink(file_path)
# #elif os.path.isdir(file_path): shutil.rmtree(file_path)
# except Exception, e:
# print e
return r;
def getID(i):
p = "../../doc/md/data/processed/GoogleNow/Jan/" + str(i) + ".csv";
pt = "../../doc/md/data/processed/GoogleNow/Jan/times/" + str(i) + ".csv";
r = trajcomp.get_g_id(p, pt)
print "got back: "
return r;
class MyTCPHandler(SocketServer.StreamRequestHandler):
"""
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# self.rfile is a file-like object created by the handler;
# we can now use e.g. readline() instead of raw recv() calls
self.data="";
while(self.data != "QUIT"):
self.data = self.rfile.readline().strip()
C=self.data.split();
if C[0] == "GET":
T = trajcomp.get(int(C[1]))
self.wfile.write(json.dumps(T)+"\n")
if C[0] == "SUBSETCORR":
answer = top_k_subset_correlation(C[1:]);
self.wfile.write(json.dumps(answer)+"\n");
if C[0] == "JACCARD":
answer = top_k_jaccard(C[1:]);
self.wfile.write(json.dumps(answer)+"\n");
if C[0] == "JACCARDID":
answer = top_k_jaccard_id(int(C[1]));
self.wfile.write(json.dumps(answer)+"\n");
if C[0] == "INTERSECT":
#print C[1:];
answer = top_k_intersect(C[1:]);
self.wfile.write(json.dumps(answer)+"\n");
if C[0] == "DBSCAN":
print C[1:];
answer = dbscan_segmentation(C[1], C[2], C[3]);
self.wfile.write(json.dumps(answer)+"\n");
if C[0] == "DOUGLAS":
print "Backendserver";
print C[1:];
T = trajcomp.douglas_peucker_online(C[1],float(C[2]));
print "Result DP";
print T;
self.wfile.write(json.dumps(T)+"\n");
if C[0] == "RESAMPLE":
print "Resample backend";
print C[1:];
answer = resample_traj(C[1], int(C[2]));
self.wfile.write(json.dumps(answer)+"\n");
if C[0] == "THRESHOLD":
print "Threshold backend";
print C[1:];
T = trajcomp.threshold(float(C[1]), float(C[2]), "../TestTraj.csv", "../TestTrajTimes.csv");
self.wfile.write(json.dumps(T)+"\n");
if C[0] == "PERSISTENCE":
print "Peristence backend";
print C[1:];
T = trajcomp.persistence(float(C[1]), "../TestTraj.csv", "../TestTrajTimes.csv");
self.wfile.write(json.dumps(T)+"\n");
if C[0] == "GETGID":
print "GETID backend";
print C[1:];
T = getID(C[1]);
self.wfile.write(json.dumps(T)+"\n");
if self.data=="HELO":
self.wfile.write("HELO too\n");
if self.data=="KILL":
self.server.shutdown();
self.data="QUIT";
class ThreadingServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): pass
if __name__ == "__main__":
HOST, PORT = "localhost", 9999
# Create the server, binding to localhost on port 9999
server = ThreadingServer((HOST, PORT), MyTCPHandler)
print ("Loading first set of GeoLife Dataset");
print cfg_location;
#A=trajcomp.geolife(cfg_numdir, cfg_location)
# handles = range(A,trajcomp.size());
#print "Found " + str(trajcomp.size())
# Create the index
#trajcomp.create_bloomindex();
server.serve_forever()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fcntl
import os
import shutil
import tempfile
import threading
import eventlet
from eventlet import greenpool
from eventlet import greenthread
from eventlet import semaphore
from oslo.config import cfg
from openstack.common.fixture import config
from openstack.common import lockutils
from openstack.common import test
class TestFileLocks(test.BaseTestCase):
def test_concurrent_green_lock_succeeds(self):
"""Verify spawn_n greenthreads with two locks run concurrently."""
tmpdir = tempfile.mkdtemp()
try:
self.completed = False
def locka(wait):
a = lockutils.InterProcessLock(os.path.join(tmpdir, 'a'))
with a:
wait.wait()
self.completed = True
def lockb(wait):
b = lockutils.InterProcessLock(os.path.join(tmpdir, 'b'))
with b:
wait.wait()
wait1 = eventlet.event.Event()
wait2 = eventlet.event.Event()
pool = greenpool.GreenPool()
pool.spawn_n(locka, wait1)
pool.spawn_n(lockb, wait2)
wait2.send()
eventlet.sleep(0)
wait1.send()
pool.waitall()
self.assertTrue(self.completed)
finally:
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
class LockTestCase(test.BaseTestCase):
def setUp(self):
super(LockTestCase, self).setUp()
self.config = self.useFixture(config.Config()).config
def test_synchronized_wrapped_function_metadata(self):
@lockutils.synchronized('whatever', 'test-')
def foo():
"""Bar"""
pass
self.assertEqual(foo.__doc__, 'Bar', "Wrapped function's docstring "
"got lost")
self.assertEqual(foo.__name__, 'foo', "Wrapped function's name "
"got mangled")
def test_lock_internally(self):
"""We can lock across multiple green threads."""
saved_sem_num = len(lockutils._semaphores)
seen_threads = list()
def f(_id):
with lockutils.lock('testlock2', 'test-', external=False):
for x in range(10):
seen_threads.append(_id)
greenthread.sleep(0)
threads = []
pool = greenpool.GreenPool(10)
for i in range(10):
threads.append(pool.spawn(f, i))
for thread in threads:
thread.wait()
self.assertEqual(len(seen_threads), 100)
# Looking at the seen threads, split it into chunks of 10, and verify
# that the last 9 match the first in each chunk.
for i in range(10):
for j in range(9):
self.assertEqual(seen_threads[i * 10],
seen_threads[i * 10 + 1 + j])
self.assertEqual(saved_sem_num, len(lockutils._semaphores),
"Semaphore leak detected")
def test_nested_synchronized_external_works(self):
"""We can nest external syncs."""
tempdir = tempfile.mkdtemp()
try:
self.config(lock_path=tempdir)
sentinel = object()
@lockutils.synchronized('testlock1', 'test-', external=True)
def outer_lock():
@lockutils.synchronized('testlock2', 'test-', external=True)
def inner_lock():
return sentinel
return inner_lock()
self.assertEqual(sentinel, outer_lock())
finally:
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
def _do_test_lock_externally(self):
"""We can lock across multiple processes."""
def lock_files(handles_dir):
with lockutils.lock('external', 'test-', external=True):
# Open some files we can use for locking
handles = []
for n in range(50):
path = os.path.join(handles_dir, ('file-%s' % n))
handles.append(open(path, 'w'))
# Loop over all the handles and try locking the file
# without blocking, keep a count of how many files we
# were able to lock and then unlock. If the lock fails
# we get an IOError and bail out with bad exit code
count = 0
for handle in handles:
try:
fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
count += 1
fcntl.flock(handle, fcntl.LOCK_UN)
except IOError:
os._exit(2)
finally:
handle.close()
# Check if we were able to open all files
self.assertEqual(50, count)
handles_dir = tempfile.mkdtemp()
try:
children = []
for n in range(50):
pid = os.fork()
if pid:
children.append(pid)
else:
try:
lock_files(handles_dir)
finally:
os._exit(0)
for i, child in enumerate(children):
(pid, status) = os.waitpid(child, 0)
if pid:
self.assertEqual(0, status)
finally:
if os.path.exists(handles_dir):
shutil.rmtree(handles_dir, ignore_errors=True)
def test_lock_externally(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir)
try:
self._do_test_lock_externally()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_lock_externally_lock_dir_not_exist(self):
lock_dir = tempfile.mkdtemp()
os.rmdir(lock_dir)
self.config(lock_path=lock_dir)
try:
self._do_test_lock_externally()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_synchronized_with_prefix(self):
lock_name = 'mylock'
lock_pfix = 'mypfix-'
foo = lockutils.synchronized_with_prefix(lock_pfix)
@foo(lock_name, external=True)
def bar(dirpath, pfix, name):
filepath = os.path.join(dirpath, '%s%s' % (pfix, name))
return os.path.isfile(filepath)
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir)
self.assertTrue(bar(lock_dir, lock_pfix, lock_name))
def test_synchronized_without_prefix(self):
lock_dir = tempfile.mkdtemp()
@lockutils.synchronized('lock', external=True, lock_path=lock_dir)
def test_without_prefix():
path = os.path.join(lock_dir, "lock")
self.assertTrue(os.path.exists(path))
try:
test_without_prefix()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_synchronized_prefix_without_hypen(self):
lock_dir = tempfile.mkdtemp()
@lockutils.synchronized('lock', 'hypen', True, lock_dir)
def test_without_hypen():
path = os.path.join(lock_dir, "hypen-lock")
self.assertTrue(os.path.exists(path))
try:
test_without_hypen()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_contextlock(self):
lock_dir = tempfile.mkdtemp()
try:
# Note(flaper87): Lock is not external, which means
# a semaphore will be yielded
with lockutils.lock("test") as sem:
semaphores = (semaphore.Semaphore, threading._Semaphore)
self.assertTrue(isinstance(sem, semaphores))
# NOTE(flaper87): Lock is external so an InterProcessLock
# will be yielded.
with lockutils.lock("test2", external=True,
lock_path=lock_dir):
path = os.path.join(lock_dir, "test2")
self.assertTrue(os.path.exists(path))
with lockutils.lock("test1",
external=True,
lock_path=lock_dir) as lock1:
self.assertTrue(isinstance(lock1,
lockutils.InterProcessLock))
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_contextlock_unlocks(self):
lock_dir = tempfile.mkdtemp()
sem = None
try:
with lockutils.lock("test") as sem:
semaphores = (semaphore.Semaphore, threading._Semaphore)
self.assertTrue(isinstance(sem, semaphores))
with lockutils.lock("test2", external=True,
lock_path=lock_dir):
path = os.path.join(lock_dir, "test2")
self.assertTrue(os.path.exists(path))
# NOTE(flaper87): Lock should be free
with lockutils.lock("test2", external=True,
lock_path=lock_dir):
path = os.path.join(lock_dir, "test2")
self.assertTrue(os.path.exists(path))
# NOTE(flaper87): Lock should be free
# but semaphore should already exist.
with lockutils.lock("test") as sem2:
self.assertEqual(sem, sem2)
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_synchronized_externally_without_lock_path(self):
self.config(lock_path=None)
@lockutils.synchronized('external', 'test-', external=True)
def foo():
pass
self.assertRaises(cfg.RequiredOptError, foo)
|
|
from direct.showbase.DirectObject import DirectObject
from direct.directnotify.DirectNotifyGlobal import directNotify
from panda3d.core import *
from .PhasedObject import PhasedObject
class DistancePhasedNode(PhasedObject, DirectObject, NodePath):
"""
This class defines a PhasedObject,NodePath object that will handle the phasing
of an object in the scene graph according to its distance from some
    other collider object (such as an avatar).
Since it's a NodePath, you can parent it to another object in the
scene graph, or even inherit from this class to get its functionality.
What you will need to define to use this class:
- The distances at which you want the phases to load/unload
- Whether you want the object to clean itself up or not when
      exiting the largest distance sphere
- What the load/unload functions are
- What sort of events to listen for when a collision occurs
- (Optional) A collision bitmask for the phase collision spheres
- (Optional) A 'from' collision node to collide into our 'into' spheres
You specify the distances and function names by the phaseParamMap
parameter to __init__(). For example:
phaseParamMap = {'Alias': distance, ...}
...
def loadPhaseAlias(self):
pass
def unloadPhaseAlias(self):
pass
If the 'fromCollideNode' is supplied, we will set up our own
traverser and only traverse below this node. It will send out
events of the form '<enterPrefix>%in' and '<exitPrefix>%in' in
order to match the main collision traverser's patterns. Note
that this will only be used after a reset or phase change in
order to fully transition to the correct phase in a single pass.
Most of the time, it will be reacting to events from the main
collision traverser.
IMPORTANT!: The following only applies when autoCleanup == True:
If you unload the last phase, by either calling
cleanup() or by exitting the last phase's distance,
you will need to explicitly call reset() to get the
distance phasing to work again. This was done so if
either this node or the collider is removed from the
        scene graph (e.g. avatar teleport), the phased object
will clean itself up automatically.
"""
notify = directNotify.newCategory("DistancePhasedObject")
__InstanceSequence = 0
__InstanceDeque = []
@staticmethod
def __allocateId():
"""
Give each phase node a unique id in order to filter out
collision events from other phase nodes. We do it in
this manner so the client doesn't need to worry about
giving each phase node a unique name.
"""
if DistancePhasedNode.__InstanceDeque:
return DistancePhasedNode.__InstanceDeque.pop(0)
else:
id = DistancePhasedNode.__InstanceSequence
DistancePhasedNode.__InstanceSequence += 1
DistancePhasedNode.__InstanceSequence &= 65535
return id
@staticmethod
def __deallocateId(id):
"""
Reuse abandoned ids.
"""
DistancePhasedNode.__InstanceDeque.append(id)
def __init__(self, name, phaseParamMap = {},
autoCleanup = True,
enterPrefix = 'enter', exitPrefix = 'exit',
phaseCollideMask = BitMask32.allOn(),
fromCollideNode = None):
NodePath.__init__(self, name)
self.phaseParamMap = phaseParamMap
self.phaseParamList = sorted(list(phaseParamMap.items()),
key = lambda x: x[1],
reverse = True)
PhasedObject.__init__(self,
dict([(alias,phase) for (phase,alias) in enumerate([item[0] for item in self.phaseParamList])]))
self.__id = self.__allocateId()
self.autoCleanup = autoCleanup
self.enterPrefix = enterPrefix
self.exitPrefix = exitPrefix
self.phaseCollideMask = phaseCollideMask
self.cTrav = base.cTrav
self.fromCollideNode = fromCollideNode
self._colSpheres = []
self.reset()
def __del__(self):
self.__deallocateId(self.__id)
def __repr__(self):
outStr = 'DistancePhasedObject('
outStr += '%s' % repr(self.getName())
for param, value in zip(('phaseParamMap', 'autoCleanup', 'enterPrefix', 'exitPrefix', 'phaseCollideMask', 'fromCollideNode'),
('{}', 'True','\'enter\'','\'exit\'','BitMask32.allOn()','None')):
outStr += eval('(\', ' + param + ' = %s\' % repr(self.' + param + '),\'\')[self.' + param + ' == ' + value + ']')
outStr += ')'
return outStr
def __str__(self):
return '%s in phase \'%s\'' % (NodePath.__str__(self), self.getPhase())
def cleanup(self):
"""
Disables all collisions.
Ignores all owned event listeners.
Unloads all unloaded phases.
"""
self.__disableCollisions(cleanup = True)
for sphere in self._colSpheres:
sphere.remove()
self._colSpheres = []
PhasedObject.cleanup(self)
def setPhaseCollideMask(self, mask):
"""
Sets the intoCollideMasks for our collision spheres.
"""
self.phaseCollideMask = mask
for sphere in self._colSpheres:
            sphere.node().setIntoCollideMask(self.phaseCollideMask)
def reset(self):
"""
Unloads all loaded phases and puts the phase node
        in the startup state, as if it had just been initialized.
"""
self.cleanup()
self.__oneTimeCollide()
for name, dist in self.phaseParamList:
cSphere = CollisionSphere(0.0, 0.0, 0.0, dist)
cSphere.setTangible(0)
cName = 'PhaseNode%s-%d' % (name, self.__id)
cSphereNode = CollisionNode(cName)
cSphereNode.setIntoCollideMask(self.phaseCollideMask)
cSphereNode.setFromCollideMask(BitMask32.allOff())
cSphereNode.addSolid(cSphere)
cSphereNodePath = self.attachNewNode(cSphereNode)
cSphereNodePath.stash()
# cSphereNodePath.show() # For debugging
self._colSpheres.append(cSphereNodePath)
if self.fromCollideNode:
self.cTrav = CollisionTraverser()
cHandler = CollisionHandlerEvent()
cHandler.addInPattern(self.enterPrefix + '%in')
cHandler.addOutPattern(self.exitPrefix + '%in')
self.cTrav.addCollider(self.fromCollideNode,cHandler)
self.__enableCollisions(-1)
def setPhase(self, aPhase):
"""
See PhasedObject.setPhase()
"""
phase = self.getAliasPhase(aPhase)
PhasedObject.setPhase(self, aPhase)
self.__disableCollisions()
self.__enableCollisions(phase)
if phase == -1 and self.autoCleanup:
self.cleanup()
else:
self.__oneTimeCollide()
def __getEnterEvent(self, phaseName):
return '%sPhaseNode%s-%d' % (self.enterPrefix, phaseName, self.__id)
def __getExitEvent(self, phaseName):
return '%sPhaseNode%s-%d' % (self.exitPrefix, phaseName, self.__id)
def __enableCollisions(self, phase):
"""
Turns on collisions for the spheres bounding this
phase zone by unstashing their geometry. Enables
the exit event for the larger and the enter event
for the smaller. Handles the extreme(end) phases
gracefully.
"""
if 0 <= phase:
phaseName = self.getPhaseAlias(phase)
self.accept(self.__getExitEvent(phaseName),
self.__handleExitEvent,
extraArgs = [phaseName])
self._colSpheres[phase].unstash()
if 0 <= phase+1 < len(self._colSpheres):
phaseName = self.getPhaseAlias(phase+1)
self.accept(self.__getEnterEvent(phaseName),
self.__handleEnterEvent,
extraArgs = [phaseName])
self._colSpheres[phase+1].unstash()
def __disableCollisions(self, cleanup = False):
"""
Disables all collision geometry by stashing
the geometry. If autoCleanup == True and we're
not currently cleaning up, leave the exit event
and collision sphere active for the largest(thus lowest)
phase. This is so that we can still cleanup if
the phase node exits the largest sphere.
"""
for x,sphere in enumerate(self._colSpheres):
phaseName = self.getPhaseAlias(x)
self.ignore(self.__getEnterEvent(phaseName))
if x > 0 or not self.autoCleanup or cleanup:
sphere.stash()
self.ignore(self.__getExitEvent(phaseName))
def __handleEnterEvent(self, phaseName, cEntry):
self.setPhase(phaseName)
def __handleExitEvent(self, phaseName, cEntry):
phase = self.getAliasPhase(phaseName) - 1
self.setPhase(phase)
def __oneTimeCollide(self):
"""
Fire off a one-time collision traversal of the
scene graph. This allows us to process our entire
phasing process in one frame in the cases where
we cross more than one phase border.
"""
if self.cTrav:
if self.cTrav is base.cTrav:
                # we use 'render' here since if we only try to
# traverse ourself, we end up calling exit
# events for the rest of the eventHandlers.
# Consider supplying the fromCollideNode parameter.
self.cTrav.traverse(render)
else:
# Only traverse ourself
self.cTrav.traverse(self)
base.eventMgr.doEvents()
class BufferedDistancePhasedNode(DistancePhasedNode):
"""
This class is similar to DistancePhasedNode except you can also
specify a buffer distance for each phase. Upon entering that phase,
its distance will be increased by the buffer amount. Conversely,
the distance will be decremented by that amount, back to its
original size, upon leaving. In this manner, you can avoid the problem
of 'phase flicker' as someone repeatedly steps across a static phase
border.
You specify the buffer amount in the bufferParamMap parameter
to __init__(). It has this format:
bufferParamMap = {'alias':(distance, bufferAmount), ...}
"""
notify = directNotify.newCategory("BufferedDistancePhasedObject")
def __init__(self, name, bufferParamMap = {}, autoCleanup = True,
enterPrefix = 'enter', exitPrefix = 'exit', phaseCollideMask = BitMask32.allOn(), fromCollideNode = None):
self.bufferParamMap = bufferParamMap
self.bufferParamList = sorted(list(bufferParamMap.items()),
key = lambda x: x[1],
reverse = True)
sParams = dict(bufferParamMap)
for key in sParams:
sParams[key] = sParams[key][0]
DistancePhasedNode.__init__(self, name = name,
phaseParamMap = sParams,
autoCleanup = autoCleanup,
enterPrefix = enterPrefix,
exitPrefix = exitPrefix,
phaseCollideMask = phaseCollideMask,
fromCollideNode = fromCollideNode)
def __repr__(self):
outStr = 'BufferedDistancePhasedNode('
outStr += '%s' % repr(self.getName())
for param, value in zip(('bufferParamMap', 'autoCleanup', 'enterPrefix', 'exitPrefix', 'phaseCollideMask', 'fromCollideNode'),
('{}', 'True','\'enter\'','\'exit\'','BitMask32.allOn()', 'None')):
outStr += eval('(\', ' + param + ' = %s\' % repr(self.' + param + '),\'\')[self.' + param + ' == ' + value + ']')
outStr += ')'
return outStr
def __str__(self):
return '%s in phase \'%s\'' % (NodePath.__str__(self), self.getPhase())
def setPhase(self, aPhase):
"""
see DistancePhasedNode.setPhase()
"""
DistancePhasedNode.setPhase(self, aPhase)
phase = self.getAliasPhase(aPhase)
self.__adjustCollisions(phase)
def __adjustCollisions(self, phase):
for x,sphere in enumerate(self._colSpheres[:phase+1]):
sphere.node().modifySolid(0).setRadius(self.bufferParamList[x][1][1])
sphere.node().markInternalBoundsStale()
for x,sphere in enumerate(self._colSpheres[phase+1:]):
sphere.node().modifySolid(0).setRadius(self.bufferParamList[x+phase+1][1][0])
sphere.node().markInternalBoundsStale()
if __debug__ and 0:
cSphere = CollisionSphere(0,0,0,0.1)
cNode = CollisionNode('camCol')
cNode.addSolid(cSphere)
cNodePath = NodePath(cNode)
cNodePath.reparentTo(base.cam)
# cNodePath.show()
# cNodePath.setPos(25,0,0)
base.cTrav = CollisionTraverser()
eventHandler = CollisionHandlerEvent()
eventHandler.addInPattern('enter%in')
eventHandler.addOutPattern('exit%in')
# messenger.toggleVerbose()
base.cTrav.addCollider(cNodePath,eventHandler)
p = BufferedDistancePhasedNode('p',{'At':(10,20),'Near':(100,200),'Far':(1000, 1020)},
autoCleanup = False,
fromCollideNode = cNodePath,
)
p.reparentTo(render)
p._DistancePhasedNode__oneTimeCollide()
base.eventMgr.doEvents()
|
|
import os
import textwrap
from optparse import OptionConflictError
from warnings import warn
from nose.util import tolist
class Plugin(object):
"""Base class for nose plugins. It's recommended but not *necessary* to
subclass this class to create a plugin, but all plugins *must* implement
`options(self, parser, env)` and `configure(self, options, conf)`, and
must have the attributes `enabled`, `name` and `score`. The `name`
attribute may contain hyphens ('-').
Plugins should not be enabled by default.
Subclassing Plugin (and calling the superclass methods in
__init__, configure, and options, if you override them) will give
your plugin some friendly default behavior:
* A --with-$name option will be added to the command line interface
to enable the plugin, and a corresponding environment variable
will be used as the default value. The plugin class's docstring
will be used as the help for this option.
* The plugin will not be enabled unless this option is selected by
the user.
"""
can_configure = False
enabled = False
enableOpt = None
name = None
score = 100
def __init__(self):
if self.name is None:
self.name = self.__class__.__name__.lower()
if self.enableOpt is None:
self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
def addOptions(self, parser, env=None):
"""Add command-line options for this plugin.
The base plugin class adds --with-$name by default, used to enable the
plugin.
.. warning :: Don't implement addOptions unless you want to override
all default option handling behavior, including
warnings for conflicting options. Implement
:meth:`options
<nose.plugins.base.IPluginInterface.options>`
instead.
"""
self.add_options(parser, env)
def add_options(self, parser, env=None):
"""Non-camel-case version of func name for backwards compatibility.
.. warning ::
DEPRECATED: Do not use this method,
use :meth:`options <nose.plugins.base.IPluginInterface.options>`
instead.
"""
# FIXME raise deprecation warning if wasn't called by wrapper
if env is None:
env = os.environ
try:
self.options(parser, env)
self.can_configure = True
except OptionConflictError, e:
warn("Plugin %s has conflicting option string: %s and will "
"be disabled" % (self, e), RuntimeWarning)
self.enabled = False
self.can_configure = False
def options(self, parser, env):
"""Register commandline options.
Implement this method for normal options behavior with protection from
OptionConflictErrors. If you override this method and want the default
--with-$name option to be registered, be sure to call super().
"""
env_opt = 'NOSE_WITH_%s' % self.name.upper()
env_opt = env_opt.replace('-', '_')
parser.add_option("--with-%s" % self.name,
action="store_true",
dest=self.enableOpt,
default=env.get(env_opt),
help="Enable plugin %s: %s [%s]" %
(self.__class__.__name__, self.help(), env_opt))
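    # Sketch (added; MyPlugin and the option name are hypothetical): overriding
    # options() in a subclass while keeping the default --with-<name> switch by
    # calling the superclass first.
    #
    #   def options(self, parser, env):
    #       super(MyPlugin, self).options(parser, env)
    #       parser.add_option('--example-report', dest='example_report',
    #                         default=env.get('NOSE_EXAMPLE_REPORT'))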
def configure(self, options, conf):
"""Configure the plugin and system, based on selected options.
The base plugin class sets the plugin to enabled if the enable option
for the plugin (self.enableOpt) is true.
"""
if not self.can_configure:
return
self.conf = conf
if hasattr(options, self.enableOpt):
self.enabled = getattr(options, self.enableOpt)
def help(self):
"""Return help for this plugin. This will be output as the help
section of the --with-$name option that enables the plugin.
"""
if self.__class__.__doc__:
# doc sections are often indented; compress the spaces
return textwrap.dedent(self.__class__.__doc__)
return "(no help available)"
    # Compatibility shim
def tolist(self, val):
warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
DeprecationWarning)
return tolist(val)
class IPluginInterface(object):
"""
IPluginInterface describes the plugin API. Do not subclass or use this
class directly.
"""
def __new__(cls, *arg, **kw):
raise TypeError("IPluginInterface class is for documentation only")
def addOptions(self, parser, env):
"""Called to allow plugin to register command-line options with the
parser. DO NOT return a value from this method unless you want to stop
all other plugins from setting their options.
.. warning ::
DEPRECATED -- implement
:meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
"""
pass
add_options = addOptions
add_options.deprecated = True
def addDeprecated(self, test):
"""Called when a deprecated test is seen. DO NOT return a value
unless you want to stop other plugins from seeing the deprecated
test.
.. warning :: DEPRECATED -- check error class in addError instead
"""
pass
addDeprecated.deprecated = True
def addError(self, test, err):
"""Called when a test raises an uncaught exception. DO NOT return a
value unless you want to stop other plugins from seeing that the
test has raised an error.
:param test: the test case
:type test: :class:`nose.case.Test`
:param err: sys.exc_info() tuple
:type err: 3-tuple
"""
pass
addError.changed = True
def addFailure(self, test, err):
"""Called when a test fails. DO NOT return a value unless you
want to stop other plugins from seeing that the test has failed.
:param test: the test case
:type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
"""
pass
addFailure.changed = True
def addSkip(self, test):
"""Called when a test is skipped. DO NOT return a value unless
you want to stop other plugins from seeing the skipped test.
.. warning:: DEPRECATED -- check error class in addError instead
"""
pass
addSkip.deprecated = True
def addSuccess(self, test):
"""Called when a test passes. DO NOT return a value unless you
want to stop other plugins from seeing the passing test.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
addSuccess.changed = True
def afterContext(self):
"""Called after a context (generally a module) has been
lazy-loaded, imported, setup, had its tests loaded and
executed, and torn down.
"""
pass
afterContext._new = True
def afterDirectory(self, path):
"""Called after all tests have been loaded from directory at path
and run.
:param path: the directory that has finished processing
:type path: string
"""
pass
afterDirectory._new = True
def afterImport(self, filename, module):
"""Called after module is imported from filename. afterImport
is called even if the import failed.
:param filename: The file that was loaded
:type filename: string
        :param module: The name of the module
:type module: string
"""
pass
afterImport._new = True
def afterTest(self, test):
"""Called after the test has been run and the result recorded
(after stopTest).
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
afterTest._new = True
def beforeContext(self):
"""Called before a context (generally a module) is
examined. Because the context is not yet loaded, plugins don't
get to know what the context is; so any context operations
should use a stack that is pushed in `beforeContext` and popped
in `afterContext` to ensure they operate symmetrically.
`beforeContext` and `afterContext` are mainly useful for tracking
and restoring global state around possible changes from within a
context, whatever the context may be. If you need to operate on
contexts themselves, see `startContext` and `stopContext`, which
are passed the context in question, but are called after
it has been loaded (imported in the module case).
"""
pass
beforeContext._new = True
def beforeDirectory(self, path):
"""Called before tests are loaded from directory at path.
:param path: the directory that is about to be processed
"""
pass
beforeDirectory._new = True
def beforeImport(self, filename, module):
"""Called before module is imported from filename.
:param filename: The file that will be loaded
:param module: The name of the module found in file
:type module: string
"""
beforeImport._new = True
def beforeTest(self, test):
"""Called before the test is run (before startTest).
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
beforeTest._new = True
def begin(self):
"""Called before any tests are collected or run. Use this to
perform any setup needed before testing begins.
"""
pass
def configure(self, options, conf):
"""Called after the command line has been parsed, with the
parsed options and the config container. Here, implement any
config storage or changes to state or operation that are set
by command line options.
DO NOT return a value from this method unless you want to
stop all other plugins from being configured.
"""
pass
def finalize(self, result):
"""Called after all report output, including output from all
plugins, has been sent to the stream. Use this to print final
test results or perform final cleanup. Return None to allow
other plugins to continue printing, or any other value to stop
them.
:param result: test result object
.. Note:: When tests are run under a test runner other than
:class:`nose.core.TextTestRunner`, such as
via ``python setup.py test``, this method may be called
**before** the default report output is sent.
"""
pass
def describeTest(self, test):
"""Return a test description.
Called by :meth:`nose.case.Test.shortDescription`.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
describeTest._new = True
def formatError(self, test, err):
"""Called in result.addError, before plugin.addError. If you
want to replace or modify the error tuple, return a new error
tuple.
:param test: the test case
:type test: :class:`nose.case.Test`
:param err: sys.exc_info() tuple
:type err: 3-tuple
"""
pass
formatError._new = True
formatError.chainable = True
# test arg is not chainable
formatError.static_args = (True, False)
def formatFailure(self, test, err):
"""Called in result.addFailure, before plugin.addFailure. If you
want to replace or modify the error tuple, return a new error
tuple. Because this method is chainable, you must return the
test as well, so you'll return something like::
return (test, err)
:param test: the test case
:type test: :class:`nose.case.Test`
:param err: sys.exc_info() tuple
:type err: 3-tuple
"""
pass
formatFailure._new = True
formatFailure.chainable = True
# test arg is not chainable
formatFailure.static_args = (True, False)
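# --- Illustrative sketch (not part of the interface above) ---
# Because formatFailure is chainable, a plugin that rewrites the failure
# traceback must return the (unchanged) test along with the new err tuple so
# the next plugin in the chain receives both. The plugin name below is made up.
class ShortTracebackSketch(object):
    def formatFailure(self, test, err):
        exc_class, exc_value, tb = err
        # Keep only the innermost traceback frame (purely to show the shape
        # of the chainable return value).
        while tb is not None and tb.tb_next is not None:
            tb = tb.tb_next
        return (test, (exc_class, exc_value, tb))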
def handleError(self, test, err):
"""Called on addError. To handle the error yourself and prevent normal
error processing, return a true value.
:param test: the test case
:type test: :class:`nose.case.Test`
:param err: sys.exc_info() tuple
:type err: 3-tuple
"""
pass
handleError._new = True
def handleFailure(self, test, err):
"""Called on addFailure. To handle the failure yourself and
prevent normal failure processing, return a true value.
:param test: the test case
:type test: :class:`nose.case.Test`
:param err: sys.exc_info() tuple
:type err: 3-tuple
"""
pass
handleFailure._new = True
def loadTestsFromDir(self, path):
"""Return iterable of tests from a directory. May be a
generator. Each item returned must be a runnable
unittest.TestCase (or subclass) instance or suite instance.
Return None if your plugin cannot collect any tests from
directory.
:param path: The path to the directory.
"""
pass
loadTestsFromDir.generative = True
loadTestsFromDir._new = True
def loadTestsFromModule(self, module, path=None):
"""Return iterable of tests in a module. May be a
generator. Each item returned must be a runnable
unittest.TestCase (or subclass) instance.
Return None if your plugin cannot
collect any tests from module.
:param module: The module object
:type module: python module
:param path: the path of the module to search, to distinguish from
namespace package modules
.. note::
NEW. The ``path`` parameter will only be passed by nose 0.11
or above.
"""
pass
loadTestsFromModule.generative = True
def loadTestsFromName(self, name, module=None, importPath=None):
"""Return tests in this file or module. Return None if you are not able
to load any tests, or an iterable if you are. May be a
generator.
:param name: The test name. May be a file or module name plus a test
callable. Use split_test_name to split into parts. Or it might
be some crazy name of your own devising, in which case, do
whatever you want.
:param module: Module from which the name is to be loaded
:param importPath: Path from which file (must be a python module) was
found
.. warning:: DEPRECATED: this argument will NOT be passed.
"""
pass
loadTestsFromName.generative = True
def loadTestsFromNames(self, names, module=None):
"""Return a tuple of (tests loaded, remaining names). Return
None if you are not able to load any tests. Multiple plugins
may implement loadTestsFromNames; the remaining name list from
each will be passed to the next as input.
:param names: List of test names.
:type names: iterable
:param module: Module from which the names are to be loaded
"""
pass
loadTestsFromNames._new = True
loadTestsFromNames.chainable = True
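# --- Illustrative sketch (hypothetical plugin, not part of nose itself) ---
# loadTestsFromNames is chainable: a plugin consumes the names it understands
# and returns (tests, remaining_names); the remaining names are handed to the
# next plugin. _load_doctest_file below is an assumed helper, not a nose API.
class TextFileNamesSketch(object):
    def loadTestsFromNames(self, names, module=None):
        mine = [n for n in names if n.endswith(".txt")]
        rest = [n for n in names if not n.endswith(".txt")]
        if not mine:
            return None  # nothing for us; let other plugins see all names
        tests = [self._load_doctest_file(n) for n in mine]
        return (tests, rest)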
def loadTestsFromFile(self, filename):
"""Return tests in this file. Return None if you are not
interested in loading any tests, or an iterable if you are and
can load some. May be a generator. *If you are interested in
loading tests from the file and encounter no errors, but find
no tests, yield False or return [False].*
.. Note:: This method replaces loadTestsFromPath from the 0.9
API.
:param filename: The full path to the file or directory.
"""
pass
loadTestsFromFile.generative = True
loadTestsFromFile._new = True
def loadTestsFromPath(self, path):
"""
.. warning:: DEPRECATED -- use loadTestsFromFile instead
"""
pass
loadTestsFromPath.deprecated = True
def loadTestsFromTestCase(self, cls):
"""Return tests in this test case class. Return None if you are
not able to load any tests, or an iterable if you are. May be a
generator.
:param cls: The test case class. Must be a subclass of
:class:`unittest.TestCase`.
"""
pass
loadTestsFromTestCase.generative = True
def loadTestsFromTestClass(self, cls):
"""Return tests in this test class. Class will *not* be a
unittest.TestCase subclass. Return None if you are not able to
load any tests, an iterable if you are. May be a generator.
:param cls: The test class. Must **not** be a subclass of
:class:`unittest.TestCase`.
"""
pass
loadTestsFromTestClass._new = True
loadTestsFromTestClass.generative = True
def makeTest(self, obj, parent):
"""Given an object and its parent, return or yield one or more
test cases. Each test must be a unittest.TestCase (or subclass)
instance. This is called before default test loading to allow
plugins to load an alternate test case or cases for an
object. May be a generator.
:param obj: The object to be made into a test
:param parent: The parent of obj (eg, for a method, the class)
"""
pass
makeTest._new = True
makeTest.generative = True
def options(self, parser, env):
"""Called to allow plugin to register command line
options with the parser.
DO NOT return a value from this method unless you want to stop
all other plugins from setting their options.
:param parser: options parser instance
:type parser: :class:`ConfigParser.ConfigParser`
:param env: environment, default is os.environ
"""
pass
options._new = True
def prepareTest(self, test):
"""Called before the test is run by the test runner. Please
note the article *the* in the previous sentence: prepareTest
is called *only once*, and is passed the test case or test
suite that the test runner will execute. It is *not* called
for each individual test case. If you return a non-None value,
that return value will be run as the test. Use this hook to
wrap or decorate the test with another function. If you need
to modify or wrap individual test cases, use `prepareTestCase`
instead.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
def prepareTestCase(self, test):
"""Prepare or wrap an individual test case. Called before
execution of the test. The test passed here is a
nose.case.Test instance; the case to be executed is in the
test attribute of the passed case. To modify the test to be
run, you should return a callable that takes one argument (the
test result object) -- it is recommended that you *do not*
side-effect the nose.case.Test instance you have been passed.
Keep in mind that when you replace the test callable you are
replacing the run() method of the test case -- including the
exception handling and result calls, etc.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
prepareTestCase._new = True
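# --- Illustrative sketch (hypothetical) ---
# prepareTestCase may return a callable taking the result object; wrapping the
# underlying test (available as test.test, per the docstring above) lets a
# plugin time each case without side-effecting the nose.case.Test instance.
import time
class TimingSketch(object):
    def prepareTestCase(self, test):
        def run_with_timer(result):
            start = time.time()
            test.test(result)  # run the wrapped test case with the result
            print("%s took %.3fs" % (test, time.time() - start))
        return run_with_timer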
def prepareTestLoader(self, loader):
"""Called before tests are loaded. To replace the test loader,
return a test loader. To allow other plugins to process the
test loader, return None. Only one plugin may replace the test
loader. Only valid when using nose.TestProgram.
:param loader: :class:`nose.loader.TestLoader`
(or other loader) instance
"""
pass
prepareTestLoader._new = True
def prepareTestResult(self, result):
"""Called before the first test is run. To use a different
test result handler for all tests than the given result,
return a test result handler. NOTE however that this handler
will only be seen by tests, that is, inside of the result
proxy system. The TestRunner and TestProgram -- whether nose's
or other -- will continue to see the original result
handler. For this reason, it is usually better to monkeypatch
the result (for instance, if you want to handle some
exceptions in a unique way). Only one plugin may replace the
result, but many may monkeypatch it. If you want to
monkeypatch and stop other plugins from doing so, monkeypatch
and return the patched result.
:param result: :class:`nose.result.TextTestResult`
(or other result) instance
"""
pass
prepareTestResult._new = True
def prepareTestRunner(self, runner):
"""Called before tests are run. To replace the test runner,
return a test runner. To allow other plugins to process the
test runner, return None. Only valid when using nose.TestProgram.
:param runner: :class:`nose.core.TextTestRunner`
(or other runner) instance
"""
pass
prepareTestRunner._new = True
def report(self, stream):
"""Called after all error output has been printed. Print your
plugin's report to the provided stream. Return None to allow
other plugins to print reports, any other value to stop them.
:param stream: stream object; send your output here
:type stream: file-like object
"""
pass
def setOutputStream(self, stream):
"""Called before test output begins. To direct test output to a
new stream, return a stream object, which must implement a
`write(msg)` method. If you only want to note the stream, not
capture or redirect it, then return None.
:param stream: stream object; send your output here
:type stream: file-like object
"""
def startContext(self, context):
"""Called before context setup and the running of tests in the
context. Note that tests have already been *loaded* from the
context before this call.
:param context: the context about to be setup. May be a module or
class, or any other object that contains tests.
"""
pass
startContext._new = True
def startTest(self, test):
"""Called before each test is run. DO NOT return a value unless
you want to stop other plugins from seeing the test start.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
def stopContext(self, context):
"""Called after the tests in a context have run and the
context has been torn down.
:param context: the context that has just been torn down. May be a module or
class, or any other object that contains tests.
"""
pass
stopContext._new = True
def stopTest(self, test):
"""Called after each test is run. DO NOT return a value unless
you want to stop other plugins from seeing that the test has stopped.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
def testName(self, test):
"""Return a short test name. Called by `nose.case.Test.__str__`.
:param test: the test case
:type test: :class:`nose.case.Test`
"""
pass
testName._new = True
def wantClass(self, cls):
"""Return true if you want the main test selector to collect
tests from this class, false if you don't, and None if you don't
care.
:param cls: The class being examined by the selector
"""
pass
def wantDirectory(self, dirname):
"""Return true if you want test collection to descend into this
directory, false if you do not, and None if you don't care.
:param dirname: Full path to directory being examined by the selector
"""
pass
def wantFile(self, file):
"""Return true if you want to collect tests from this file,
false if you do not and None if you don't care.
Change from 0.9: The optional package parameter is no longer passed.
:param file: Full path to file being examined by the selector
"""
pass
def wantFunction(self, function):
"""Return true to collect this function as a test, false to
prevent it from being collected, and None if you don't care.
:param function: The function object being examined by the selector
"""
pass
def wantMethod(self, method):
"""Return true to collect this method as a test, false to
prevent it from being collected, and None if you don't care.
:param method: The method object being examined by the selector
:type method: unbound method
"""
pass
def wantModule(self, module):
"""Return true if you want to collection to descend into this
module, false to prevent the collector from descending into the
module, and None if you don't care.
:param module: The module object being examined by the selector
:type module: python module
"""
pass
def wantModuleTests(self, module):
"""
.. warning:: DEPRECATED -- this method will not be called, it has
been folded into wantModule.
"""
pass
wantModuleTests.deprecated = True
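# --- Illustrative sketch only ---
# A minimal plugin wired through a few of the hooks documented above. It
# subclasses nose.plugins.Plugin, which supplies default options()/configure()
# behaviour keyed off the plugin name (--with-<name>). The plugin name, the
# "smoke" file convention and the report format are assumptions for the sketch.
from nose.plugins import Plugin

class SmokeOnlySketch(Plugin):
    name = "smoke-only"

    def __init__(self):
        super(SmokeOnlySketch, self).__init__()
        self.counts = {"pass": 0, "fail": 0, "error": 0}

    def wantFile(self, file):
        # Collect only files that look like smoke tests; return None for
        # everything else so the default selector and other plugins still apply.
        return True if "smoke" in file else None

    def addSuccess(self, test):
        self.counts["pass"] += 1

    def addFailure(self, test, err):
        self.counts["fail"] += 1

    def addError(self, test, err):
        self.counts["error"] += 1

    def report(self, stream):
        stream.write("smoke-only: %(pass)d passed, %(fail)d failed, "
                     "%(error)d errored\n" % self.counts)
        # Returning None lets other plugins print their reports as well.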
|
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains handlers for federation events."""
from ._base import BaseHandler
from synapse.api.errors import (
AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
)
from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor
from synapse.util.frozenutils import unfreeze
from synapse.crypto.event_signing import (
compute_event_signature, add_hashes_and_signatures,
)
from synapse.types import UserID
from synapse.events.utils import prune_event
from synapse.util.retryutils import NotRetryingDestination
from twisted.internet import defer
import itertools
import logging
logger = logging.getLogger(__name__)
class FederationHandler(BaseHandler):
"""Handles events that originated from federation.
Responsible for:
a) handling received Pdus before handing them on as Events to the rest
of the home server (including auth and state conflict resolution)
b) converting events that were produced by local clients that may need
to be sent to remote home servers.
c) doing the necessary dances to invite remote users and join remote
rooms.
"""
def __init__(self, hs):
super(FederationHandler, self).__init__(hs)
self.hs = hs
self.distributor.observe(
"user_joined_room",
self._on_user_joined
)
self.waiting_for_join_list = {}
self.store = hs.get_datastore()
self.replication_layer = hs.get_replication_layer()
self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname
self.keyring = hs.get_keyring()
self.replication_layer.set_handler(self)
# When joining a room we need to queue any events for that room up
self.room_queues = {}
def handle_new_event(self, event, destinations):
""" Takes in an event from the client to server side, that has already
been authed and handled by the state module, and sends it to any
remote home servers that may be interested.
Args:
event: The event to send
destinations: A list of destinations to send it to
Returns:
Deferred: Resolved when it has successfully been queued for
processing.
"""
return self.replication_layer.send_pdu(event, destinations)
@log_function
@defer.inlineCallbacks
def on_receive_pdu(self, origin, pdu, backfilled, state=None,
auth_chain=None):
""" Called by the ReplicationLayer when we have a new pdu. We need to
do auth checks and put it through the StateHandler.
"""
event = pdu
logger.debug("Got event: %s", event.event_id)
# If we are currently in the process of joining this room, then we
# queue up events for later processing.
if event.room_id in self.room_queues:
self.room_queues[event.room_id].append((pdu, origin))
return
logger.debug("Processing event: %s", event.event_id)
logger.debug("Event: %s", event)
# FIXME (erikj): Awful hack to make the case where we are not currently
# in the room work
current_state = None
is_in_room = yield self.auth.check_host_in_room(
event.room_id,
self.server_name
)
if not is_in_room and not event.internal_metadata.is_outlier():
logger.debug("Got event for room we're not in.")
try:
event_stream_id, max_stream_id = yield self._persist_auth_tree(
auth_chain, state, event
)
except AuthError as e:
raise FederationError(
"ERROR",
e.code,
e.msg,
affected=event.event_id,
)
else:
event_ids = set()
if state:
event_ids |= {e.event_id for e in state}
if auth_chain:
event_ids |= {e.event_id for e in auth_chain}
seen_ids = set(
(yield self.store.have_events(event_ids)).keys()
)
if state and auth_chain is not None:
# If we have any state or auth_chain given to us by the replication
# layer, then we should handle them (if we haven't before.)
event_infos = []
for e in itertools.chain(auth_chain, state):
if e.event_id in seen_ids:
continue
e.internal_metadata.outlier = True
auth_ids = [e_id for e_id, _ in e.auth_events]
auth = {
(e.type, e.state_key): e for e in auth_chain
if e.event_id in auth_ids or e.type == EventTypes.Create
}
event_infos.append({
"event": e,
"auth_events": auth,
})
seen_ids.add(e.event_id)
yield self._handle_new_events(
origin,
event_infos,
outliers=True
)
try:
_, event_stream_id, max_stream_id = yield self._handle_new_event(
origin,
event,
state=state,
backfilled=backfilled,
current_state=current_state,
)
except AuthError as e:
raise FederationError(
"ERROR",
e.code,
e.msg,
affected=event.event_id,
)
# if we're receiving valid events from an origin,
# it's probably a good idea to mark it as not in retry-state
# for sending (although this is a bit of a leap)
retry_timings = yield self.store.get_destination_retry_timings(origin)
if retry_timings and retry_timings["retry_last_ts"]:
self.store.set_destination_retry_timings(origin, 0, 0)
room = yield self.store.get_room(event.room_id)
if not room:
try:
yield self.store.store_room(
room_id=event.room_id,
room_creator_user_id="",
is_public=False,
)
except StoreError:
logger.exception("Failed to store room.")
if not backfilled:
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
user = UserID.from_string(event.state_key)
yield self.distributor.fire(
"user_joined_room", user=user, room_id=event.room_id
)
@defer.inlineCallbacks
def _filter_events_for_server(self, server_name, room_id, events):
event_to_state = yield self.store.get_state_for_events(
frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, None),
)
)
def redact_disallowed(event, state):
if not state:
return event
history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
if history:
visibility = history.content.get("history_visibility", "shared")
if visibility in ["invited", "joined"]:
# We now loop through all state events looking for
# membership states for the requesting server to determine
# if the server is either in the room or has been invited
# into the room.
for ev in state.values():
if ev.type != EventTypes.Member:
continue
try:
domain = UserID.from_string(ev.state_key).domain
except:
continue
if domain != server_name:
continue
memtype = ev.membership
if memtype == Membership.JOIN:
return event
elif memtype == Membership.INVITE:
if visibility == "invited":
return event
else:
return prune_event(event)
return event
defer.returnValue([
redact_disallowed(e, event_to_state[e.event_id])
for e in events
])
@log_function
@defer.inlineCallbacks
def backfill(self, dest, room_id, limit, extremities=[]):
""" Trigger a backfill request to `dest` for the given `room_id`
"""
if not extremities:
extremities = yield self.store.get_oldest_events_in_room(room_id)
events = yield self.replication_layer.backfill(
dest,
room_id,
limit=limit,
extremities=extremities,
)
event_map = {e.event_id: e for e in events}
event_ids = set(e.event_id for e in events)
edges = [
ev.event_id
for ev in events
if set(e_id for e_id, _ in ev.prev_events) - event_ids
]
logger.info(
"backfill: Got %d events with %d edges",
len(events), len(edges),
)
# For each edge get the current state.
auth_events = {}
state_events = {}
events_to_state = {}
for e_id in edges:
state, auth = yield self.replication_layer.get_state_for_room(
destination=dest,
room_id=room_id,
event_id=e_id
)
auth_events.update({a.event_id: a for a in auth})
auth_events.update({s.event_id: s for s in state})
state_events.update({s.event_id: s for s in state})
events_to_state[e_id] = state
seen_events = yield self.store.have_events(
set(auth_events.keys()) | set(state_events.keys())
)
all_events = events + state_events.values() + auth_events.values()
required_auth = set(
a_id for event in all_events for a_id, _ in event.auth_events
)
missing_auth = required_auth - set(auth_events)
results = yield defer.gatherResults(
[
self.replication_layer.get_pdu(
[dest],
event_id,
outlier=True,
timeout=10000,
)
for event_id in missing_auth
],
consumeErrors=True
).addErrback(unwrapFirstError)
auth_events.update({a.event_id: a for a in results})
ev_infos = []
for a in auth_events.values():
if a.event_id in seen_events:
continue
ev_infos.append({
"event": a,
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id, _ in a.auth_events
}
})
for e_id in events_to_state:
ev_infos.append({
"event": event_map[e_id],
"state": events_to_state[e_id],
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id, _ in event_map[e_id].auth_events
}
})
events.sort(key=lambda e: e.depth)
for event in events:
if event in events_to_state:
continue
ev_infos.append({
"event": event,
})
yield self._handle_new_events(
dest, ev_infos,
backfilled=True,
)
defer.returnValue(events)
@defer.inlineCallbacks
def maybe_backfill(self, room_id, current_depth):
"""Checks the database to see if we should backfill before paginating,
and if so, does so.
"""
extremities = yield self.store.get_oldest_events_with_depth_in_room(
room_id
)
if not extremities:
logger.debug("Not backfilling as no extremeties found.")
return
# Check if we reached a point where we should start backfilling.
sorted_extremeties_tuple = sorted(
extremities.items(),
key=lambda e: -int(e[1])
)
max_depth = sorted_extremeties_tuple[0][1]
if current_depth > max_depth:
logger.debug(
"Not backfilling as we don't need to. %d < %d",
max_depth, current_depth,
)
return
# Now we need to decide which hosts to hit first.
# First we try hosts that are already in the room
# TODO: HEURISTIC ALERT.
curr_state = yield self.state_handler.get_current_state(room_id)
def get_domains_from_state(state):
joined_users = [
(state_key, int(event.depth))
for (e_type, state_key), event in state.items()
if e_type == EventTypes.Member
and event.membership == Membership.JOIN
]
joined_domains = {}
for u, d in joined_users:
try:
dom = UserID.from_string(u).domain
old_d = joined_domains.get(dom)
if old_d:
joined_domains[dom] = min(d, old_d)
else:
joined_domains[dom] = d
except:
pass
return sorted(joined_domains.items(), key=lambda d: d[1])
curr_domains = get_domains_from_state(curr_state)
likely_domains = [
domain for domain, depth in curr_domains
if domain is not self.server_name
]
@defer.inlineCallbacks
def try_backfill(domains):
# TODO: Should we try multiple of these at a time?
for dom in domains:
try:
events = yield self.backfill(
dom, room_id,
limit=100,
extremities=[e for e in extremities.keys()]
)
except SynapseError as e:
logger.info(
"Failed to backfill from %s because %s",
dom, e,
)
continue
except CodeMessageException as e:
if 400 <= e.code < 500:
raise
logger.info(
"Failed to backfill from %s because %s",
dom, e,
)
continue
except NotRetryingDestination as e:
logger.info(e.message)
continue
except Exception as e:
logger.exception(
"Failed to backfill from %s because %s",
dom, e,
)
continue
if events:
defer.returnValue(True)
defer.returnValue(False)
success = yield try_backfill(likely_domains)
if success:
defer.returnValue(True)
# Huh, well *those* domains didn't work out. Lets try some domains
# from the time.
tried_domains = set(likely_domains)
tried_domains.add(self.server_name)
event_ids = list(extremities.keys())
states = yield defer.gatherResults([
self.state_handler.resolve_state_groups(room_id, [e])
for e in event_ids
])
states = dict(zip(event_ids, [s[1] for s in states]))
for e_id, _ in sorted_extremeties_tuple:
likely_domains = get_domains_from_state(states[e_id])
success = yield try_backfill([
dom for dom in likely_domains
if dom not in tried_domains
])
if success:
defer.returnValue(True)
tried_domains.update(likely_domains)
defer.returnValue(False)
@defer.inlineCallbacks
def send_invite(self, target_host, event):
""" Sends the invite to the remote server for signing.
Invites must be signed by the invitee's server before distribution.
"""
pdu = yield self.replication_layer.send_invite(
destination=target_host,
room_id=event.room_id,
event_id=event.event_id,
pdu=event
)
defer.returnValue(pdu)
@defer.inlineCallbacks
def on_event_auth(self, event_id):
auth = yield self.store.get_auth_chain([event_id])
for event in auth:
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
)
)
defer.returnValue([e for e in auth])
@log_function
@defer.inlineCallbacks
def do_invite_join(self, target_hosts, room_id, joinee, content):
""" Attempts to join the `joinee` to the room `room_id` via the
server `target_host`.
This first triggers a /make_join/ request that returns a partial
event that we can fill out and sign. This is then sent to the
remote server via /send_join/ which responds with the state at that
event and the auth_chains.
We suspend processing of any received events from this room until we
have finished processing the join.
"""
logger.debug("Joining %s to %s", joinee, room_id)
yield self.store.clean_room_for_join(room_id)
origin, event = yield self._make_and_verify_event(
target_hosts,
room_id,
joinee,
"join",
content,
)
self.room_queues[room_id] = []
handled_events = set()
try:
new_event = self._sign_event(event)
# Try the host we successfully got a response to /make_join/
# request first.
try:
target_hosts.remove(origin)
target_hosts.insert(0, origin)
except ValueError:
pass
ret = yield self.replication_layer.send_join(target_hosts, new_event)
origin = ret["origin"]
state = ret["state"]
auth_chain = ret["auth_chain"]
auth_chain.sort(key=lambda e: e.depth)
handled_events.update([s.event_id for s in state])
handled_events.update([a.event_id for a in auth_chain])
handled_events.add(new_event.event_id)
logger.debug("do_invite_join auth_chain: %s", auth_chain)
logger.debug("do_invite_join state: %s", state)
logger.debug("do_invite_join event: %s", new_event)
try:
yield self.store.store_room(
room_id=room_id,
room_creator_user_id="",
is_public=False
)
except:
# FIXME
pass
event_stream_id, max_stream_id = yield self._persist_auth_tree(
auth_chain, state, event
)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
new_event, event_stream_id, max_stream_id,
extra_users=[joinee]
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
new_event.event_id, f.value
)
d.addErrback(log_failure)
logger.debug("Finished joining %s to %s", joinee, room_id)
finally:
room_queue = self.room_queues[room_id]
del self.room_queues[room_id]
for p, origin in room_queue:
if p.event_id in handled_events:
continue
try:
self.on_receive_pdu(origin, p, backfilled=False)
except:
logger.exception("Couldn't handle pdu")
defer.returnValue(True)
@defer.inlineCallbacks
@log_function
def on_make_join_request(self, room_id, user_id):
""" We've received a /make_join/ request, so we create a partial
join event for the room and return that. We do *not* persist or
process it until the other server has signed it and sent it back.
"""
event_content = {"membership": Membership.JOIN}
builder = self.event_builder_factory.new({
"type": EventTypes.Member,
"content": event_content,
"room_id": room_id,
"sender": user_id,
"state_key": user_id,
})
event, context = yield self._create_new_client_event(
builder=builder,
)
self.auth.check(event, auth_events=context.current_state)
defer.returnValue(event)
@defer.inlineCallbacks
@log_function
def on_send_join_request(self, origin, pdu):
""" We have received a join event for a room. Fully process it and
respond with the current state and auth chains.
"""
event = pdu
logger.debug(
"on_send_join_request: Got event: %s, signatures: %s",
event.event_id,
event.signatures,
)
event.internal_metadata.outlier = False
context, event_stream_id, max_stream_id = yield self._handle_new_event(
origin, event
)
logger.debug(
"on_send_join_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
event.signatures,
)
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
yield self.distributor.fire(
"user_joined_room", user=user, room_id=event.room_id
)
new_pdu = event
destinations = set()
for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
if s.content["membership"] == Membership.JOIN:
destinations.add(
UserID.from_string(s.state_key).domain
)
except:
logger.warn(
"Failed to get destination from event %s", s.event_id
)
destinations.discard(origin)
logger.debug(
"on_send_join_request: Sending event: %s, signatures: %s",
event.event_id,
event.signatures,
)
self.replication_layer.send_pdu(new_pdu, destinations)
state_ids = [e.event_id for e in context.current_state.values()]
auth_chain = yield self.store.get_auth_chain(set(
[event.event_id] + state_ids
))
defer.returnValue({
"state": context.current_state.values(),
"auth_chain": auth_chain,
})
@defer.inlineCallbacks
def on_invite_request(self, origin, pdu):
""" We've got an invite event. Process and persist it. Sign it.
Respond with the now signed event.
"""
event = pdu
event.internal_metadata.outlier = True
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
)
)
context = yield self.state_handler.compute_event_context(event)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=False,
)
target_user = UserID.from_string(event.state_key)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
defer.returnValue(event)
@defer.inlineCallbacks
def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
origin, event = yield self._make_and_verify_event(
target_hosts,
room_id,
user_id,
"leave"
)
signed_event = self._sign_event(event)
# Try the host we successfully got a response to /make_leave/
# request first.
try:
target_hosts.remove(origin)
target_hosts.insert(0, origin)
except ValueError:
pass
yield self.replication_layer.send_leave(
target_hosts,
signed_event
)
defer.returnValue(None)
@defer.inlineCallbacks
def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
content={},):
origin, pdu = yield self.replication_layer.make_membership_event(
target_hosts,
room_id,
user_id,
membership,
content,
)
logger.debug("Got response to make_%s: %s", membership, pdu)
event = pdu
# We should assert some things.
# FIXME: Do this in a nicer way
assert(event.type == EventTypes.Member)
assert(event.user_id == user_id)
assert(event.state_key == user_id)
assert(event.room_id == room_id)
defer.returnValue((origin, event))
def _sign_event(self, event):
event.internal_metadata.outlier = False
builder = self.event_builder_factory.new(
unfreeze(event.get_pdu_json())
)
builder.event_id = self.event_builder_factory.create_event_id()
builder.origin = self.hs.hostname
if not hasattr(event, "signatures"):
builder.signatures = {}
add_hashes_and_signatures(
builder,
self.hs.hostname,
self.hs.config.signing_key[0],
)
return builder.build()
@defer.inlineCallbacks
@log_function
def on_make_leave_request(self, room_id, user_id):
""" We've received a /make_leave/ request, so we create a partial
leave event for the room and return that. We do *not* persist or
process it until the other server has signed it and sent it back.
"""
builder = self.event_builder_factory.new({
"type": EventTypes.Member,
"content": {"membership": Membership.LEAVE},
"room_id": room_id,
"sender": user_id,
"state_key": user_id,
})
event, context = yield self._create_new_client_event(
builder=builder,
)
self.auth.check(event, auth_events=context.current_state)
defer.returnValue(event)
@defer.inlineCallbacks
@log_function
def on_send_leave_request(self, origin, pdu):
""" We have received a leave event for a room. Fully process it."""
event = pdu
logger.debug(
"on_send_leave_request: Got event: %s, signatures: %s",
event.event_id,
event.signatures,
)
event.internal_metadata.outlier = False
context, event_stream_id, max_stream_id = yield self._handle_new_event(
origin, event
)
logger.debug(
"on_send_leave_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
event.signatures,
)
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
new_pdu = event
destinations = set()
for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
if s.content["membership"] == Membership.LEAVE:
destinations.add(
UserID.from_string(s.state_key).domain
)
except:
logger.warn(
"Failed to get destination from event %s", s.event_id
)
destinations.discard(origin)
logger.debug(
"on_send_leave_request: Sending event: %s, signatures: %s",
event.event_id,
event.signatures,
)
self.replication_layer.send_pdu(new_pdu, destinations)
defer.returnValue(None)
@defer.inlineCallbacks
def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
yield run_on_reactor()
if do_auth:
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
state_groups = yield self.store.get_state_groups(
room_id, [event_id]
)
if state_groups:
_, state = state_groups.items().pop()
results = {
(e.type, e.state_key): e for e in state
}
event = yield self.store.get_event(event_id)
if event and event.is_state():
# Get previous state
if "replaces_state" in event.unsigned:
prev_id = event.unsigned["replaces_state"]
if prev_id != event.event_id:
prev_event = yield self.store.get_event(prev_id)
results[(event.type, event.state_key)] = prev_event
else:
del results[(event.type, event.state_key)]
res = results.values()
for event in res:
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
)
)
defer.returnValue(res)
else:
defer.returnValue([])
@defer.inlineCallbacks
@log_function
def on_backfill_request(self, origin, room_id, pdu_list, limit):
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
events = yield self.store.get_backfill_events(
room_id,
pdu_list,
limit
)
events = yield self._filter_events_for_server(origin, room_id, events)
defer.returnValue(events)
@defer.inlineCallbacks
@log_function
def get_persisted_pdu(self, origin, event_id, do_auth=True):
""" Get a PDU from the database with given origin and id.
Returns:
Deferred: Results in a `Pdu`.
"""
event = yield self.store.get_event(
event_id,
allow_none=True,
allow_rejected=True,
)
if event:
# FIXME: This is a temporary work around where we occasionally
# return events slightly differently than when they were
# originally signed
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
)
)
if do_auth:
in_room = yield self.auth.check_host_in_room(
event.room_id,
origin
)
if not in_room:
raise AuthError(403, "Host not in room.")
defer.returnValue(event)
else:
defer.returnValue(None)
@log_function
def get_min_depth_for_context(self, context):
return self.store.get_min_depth(context)
@log_function
def _on_user_joined(self, user, room_id):
waiters = self.waiting_for_join_list.get(
(user.to_string(), room_id),
[]
)
while waiters:
waiters.pop().callback(None)
@defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, backfilled=False,
current_state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()
context = yield self._prep_event(
origin, event,
state=state,
auth_events=auth_events,
)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
is_new_state=(not outlier and not backfilled),
current_state=current_state,
)
defer.returnValue((context, event_stream_id, max_stream_id))
@defer.inlineCallbacks
def _handle_new_events(self, origin, event_infos, backfilled=False,
outliers=False):
contexts = yield defer.gatherResults(
[
self._prep_event(
origin,
ev_info["event"],
state=ev_info.get("state"),
auth_events=ev_info.get("auth_events"),
)
for ev_info in event_infos
]
)
yield self.store.persist_events(
[
(ev_info["event"], context)
for ev_info, context in itertools.izip(event_infos, contexts)
],
backfilled=backfilled,
is_new_state=(not outliers and not backfilled),
)
@defer.inlineCallbacks
def _persist_auth_tree(self, auth_events, state, event):
"""Checks the auth chain is valid (and passes auth checks) for the
state and event. Then persists the auth chain and state atomically.
Persists the event separately.
Returns:
2-tuple of (event_stream_id, max_stream_id) from the persist_event
call for `event`
"""
events_to_context = {}
for e in itertools.chain(auth_events, state):
ctx = yield self.state_handler.compute_event_context(
e, outlier=True,
)
events_to_context[e.event_id] = ctx
e.internal_metadata.outlier = True
event_map = {
e.event_id: e
for e in auth_events
}
create_event = None
for e in auth_events:
if (e.type, e.state_key) == (EventTypes.Create, ""):
create_event = e
break
for e in itertools.chain(auth_events, state, [event]):
auth_for_e = {
(event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
for e_id, _ in e.auth_events
}
if create_event:
auth_for_e[(EventTypes.Create, "")] = create_event
try:
self.auth.check(e, auth_events=auth_for_e)
except AuthError as err:
logger.warn(
"Rejecting %s because %s",
e.event_id, err.msg
)
if e == event:
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
yield self.store.persist_events(
[
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
],
is_new_state=False,
)
new_event_context = yield self.state_handler.compute_event_context(
event, old_state=state, outlier=False,
)
event_stream_id, max_stream_id = yield self.store.persist_event(
event, new_event_context,
backfilled=False,
is_new_state=True,
current_state=state,
)
defer.returnValue((event_stream_id, max_stream_id))
@defer.inlineCallbacks
def _prep_event(self, origin, event, state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()
context = yield self.state_handler.compute_event_context(
event, old_state=state, outlier=outlier,
)
if not auth_events:
auth_events = context.current_state
# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
if event.type == EventTypes.Member and not event.auth_events:
if len(event.prev_events) == 1 and event.depth < 5:
c = yield self.store.get_event(
event.prev_events[0][0],
allow_none=True,
)
if c and c.type == EventTypes.Create:
auth_events[(c.type, c.state_key)] = c
try:
yield self.do_auth(
origin, event, context, auth_events=auth_events
)
except AuthError as e:
logger.warn(
"Rejecting %s because %s",
event.event_id, e.msg
)
context.rejected = RejectedReason.AUTH_ERROR
if event.type == EventTypes.GuestAccess:
full_context = yield self.store.get_current_state(room_id=event.room_id)
yield self.maybe_kick_guest_users(event, full_context)
defer.returnValue(context)
@defer.inlineCallbacks
def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
missing):
# Just go through and process each event in `remote_auth_chain`. We
# don't want to fall into the trap of `missing` being wrong.
for e in remote_auth_chain:
try:
yield self._handle_new_event(origin, e)
except AuthError:
pass
# Now get the current auth_chain for the event.
local_auth_chain = yield self.store.get_auth_chain([event_id])
# TODO: Check if we would now reject event_id. If so we need to tell
# everyone.
ret = yield self.construct_auth_difference(
local_auth_chain, remote_auth_chain
)
for event in ret["auth_chain"]:
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
)
)
logger.debug("on_query_auth returning: %s", ret)
defer.returnValue(ret)
@defer.inlineCallbacks
def on_get_missing_events(self, origin, room_id, earliest_events,
latest_events, limit, min_depth):
in_room = yield self.auth.check_host_in_room(
room_id,
origin
)
if not in_room:
raise AuthError(403, "Host not in room.")
limit = min(limit, 20)
min_depth = max(min_depth, 0)
missing_events = yield self.store.get_missing_events(
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
min_depth=min_depth,
)
defer.returnValue(missing_events)
@defer.inlineCallbacks
@log_function
def do_auth(self, origin, event, context, auth_events):
# Check if we have all the auth events.
current_state = set(e.event_id for e in auth_events.values())
event_auth_events = set(e_id for e_id, _ in event.auth_events)
if event_auth_events - current_state:
have_events = yield self.store.have_events(
event_auth_events - current_state
)
else:
have_events = {}
have_events.update({
e.event_id: ""
for e in auth_events.values()
})
seen_events = set(have_events.keys())
missing_auth = event_auth_events - seen_events - current_state
if missing_auth:
logger.info("Missing auth: %s", missing_auth)
# If we don't have all the auth events, we need to get them.
try:
remote_auth_chain = yield self.replication_layer.get_event_auth(
origin, event.room_id, event.event_id
)
seen_remotes = yield self.store.have_events(
[e.event_id for e in remote_auth_chain]
)
for e in remote_auth_chain:
if e.event_id in seen_remotes.keys():
continue
if e.event_id == event.event_id:
continue
try:
auth_ids = [e_id for e_id, _ in e.auth_events]
auth = {
(e.type, e.state_key): e for e in remote_auth_chain
if e.event_id in auth_ids or e.type == EventTypes.Create
}
e.internal_metadata.outlier = True
logger.debug(
"do_auth %s missing_auth: %s",
event.event_id, e.event_id
)
yield self._handle_new_event(
origin, e, auth_events=auth
)
if e.event_id in event_auth_events:
auth_events[(e.type, e.state_key)] = e
except AuthError:
pass
have_events = yield self.store.have_events(
[e_id for e_id, _ in event.auth_events]
)
seen_events = set(have_events.keys())
except:
# FIXME:
logger.exception("Failed to get auth chain")
# FIXME: Assumes we have and stored all the state for all the
# prev_events
current_state = set(e.event_id for e in auth_events.values())
different_auth = event_auth_events - current_state
if different_auth and not event.internal_metadata.is_outlier():
# Do auth conflict res.
logger.info("Different auth: %s", different_auth)
different_events = yield defer.gatherResults(
[
self.store.get_event(
d,
allow_none=True,
allow_rejected=False,
)
for d in different_auth
if d in have_events and not have_events[d]
],
consumeErrors=True
).addErrback(unwrapFirstError)
if different_events:
local_view = dict(auth_events)
remote_view = dict(auth_events)
remote_view.update({
(d.type, d.state_key): d for d in different_events
})
new_state, prev_state = self.state_handler.resolve_events(
[local_view.values(), remote_view.values()],
event
)
auth_events.update(new_state)
current_state = set(e.event_id for e in auth_events.values())
different_auth = event_auth_events - current_state
context.current_state.update(auth_events)
context.state_group = None
if different_auth and not event.internal_metadata.is_outlier():
logger.info("Different auth after resolution: %s", different_auth)
# Only do auth resolution if we have something new to say.
# We can't prove an auth failure.
do_resolution = False
provable = [
RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
]
for e_id in different_auth:
if e_id in have_events:
if have_events[e_id] in provable:
do_resolution = True
break
if do_resolution:
# 1. Get what we think is the auth chain.
auth_ids = self.auth.compute_auth_events(
event, context.current_state
)
local_auth_chain = yield self.store.get_auth_chain(auth_ids)
try:
# 2. Get remote difference.
result = yield self.replication_layer.query_auth(
origin,
event.room_id,
event.event_id,
local_auth_chain,
)
seen_remotes = yield self.store.have_events(
[e.event_id for e in result["auth_chain"]]
)
# 3. Process any remote auth chain events we haven't seen.
for ev in result["auth_chain"]:
if ev.event_id in seen_remotes.keys():
continue
if ev.event_id == event.event_id:
continue
try:
auth_ids = [e_id for e_id, _ in ev.auth_events]
auth = {
(e.type, e.state_key): e
for e in result["auth_chain"]
if e.event_id in auth_ids
or event.type == EventTypes.Create
}
ev.internal_metadata.outlier = True
logger.debug(
"do_auth %s different_auth: %s",
event.event_id, e.event_id
)
yield self._handle_new_event(
origin, ev, auth_events=auth
)
if ev.event_id in event_auth_events:
auth_events[(ev.type, ev.state_key)] = ev
except AuthError:
pass
except:
# FIXME:
logger.exception("Failed to query auth chain")
# 4. Look at rejects and their proofs.
# TODO.
context.current_state.update(auth_events)
context.state_group = None
try:
self.auth.check(event, auth_events=auth_events)
except AuthError:
raise
@defer.inlineCallbacks
def construct_auth_difference(self, local_auth, remote_auth):
""" Given a local and remote auth chain, find the differences. This
assumes that we have already processed all events in remote_auth
Params:
local_auth (list)
remote_auth (list)
Returns:
dict
"""
logger.debug("construct_auth_difference Start!")
# TODO: Make sure we are OK with local_auth or remote_auth having more
# auth events in them than strictly necessary.
def sort_fun(ev):
return ev.depth, ev.event_id
logger.debug("construct_auth_difference after sort_fun!")
# We find the differences by starting at the "bottom" of each list
# and iterating up on both lists. The lists are ordered by depth and
# then event_id, we iterate up both lists until we find the event ids
# don't match. Then we look at depth/event_id to see which side is
# missing that event, and iterate only up that list. Repeat.
remote_list = list(remote_auth)
remote_list.sort(key=sort_fun)
local_list = list(local_auth)
local_list.sort(key=sort_fun)
local_iter = iter(local_list)
remote_iter = iter(remote_list)
logger.debug("construct_auth_difference before get_next!")
def get_next(it, opt=None):
try:
return it.next()
except:
return opt
current_local = get_next(local_iter)
current_remote = get_next(remote_iter)
logger.debug("construct_auth_difference before while")
missing_remotes = []
missing_locals = []
while current_local or current_remote:
if current_remote is None:
missing_locals.append(current_local)
current_local = get_next(local_iter)
continue
if current_local is None:
missing_remotes.append(current_remote)
current_remote = get_next(remote_iter)
continue
if current_local.event_id == current_remote.event_id:
current_local = get_next(local_iter)
current_remote = get_next(remote_iter)
continue
if current_local.depth < current_remote.depth:
missing_locals.append(current_local)
current_local = get_next(local_iter)
continue
if current_local.depth > current_remote.depth:
missing_remotes.append(current_remote)
current_remote = get_next(remote_iter)
continue
# They have the same depth, so we fall back to the event_id order
if current_local.event_id < current_remote.event_id:
missing_locals.append(current_local)
current_local = get_next(local_iter)
continue
if current_local.event_id > current_remote.event_id:
missing_remotes.append(current_remote)
current_remote = get_next(remote_iter)
continue
logger.debug("construct_auth_difference after while")
# missing locals should be sent to the server
# We should find why we are missing remotes, as they will have been
# rejected.
# Remove events from missing_remotes if they are referencing a missing
# remote. We only care about the "root" rejected ones.
missing_remote_ids = [e.event_id for e in missing_remotes]
base_remote_rejected = list(missing_remotes)
for e in missing_remotes:
for e_id, _ in e.auth_events:
if e_id in missing_remote_ids:
try:
base_remote_rejected.remove(e)
except ValueError:
pass
reason_map = {}
for e in base_remote_rejected:
reason = yield self.store.get_rejection_reason(e.event_id)
if reason is None:
# TODO: e is not in the current state, so we should
# construct some proof of that.
continue
reason_map[e.event_id] = reason
if reason == RejectedReason.AUTH_ERROR:
pass
elif reason == RejectedReason.REPLACED:
# TODO: Get proof
pass
elif reason == RejectedReason.NOT_ANCESTOR:
# TODO: Get proof.
pass
logger.debug("construct_auth_difference returning")
defer.returnValue({
"auth_chain": local_auth,
"rejects": {
e.event_id: {
"reason": reason_map[e.event_id],
"proof": None,
}
for e in base_remote_rejected
},
"missing": [e.event_id for e in missing_locals],
})
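# --- Illustrative sketch (standalone, not Synapse code) ---
# The merge-walk above iterates two auth chains sorted by (depth, event_id) and
# collects what each side is missing. Modelling events as plain (depth, id)
# tuples shows the same bookkeeping without any Synapse types.
def _auth_difference_sketch(local, remote):
    local, remote = sorted(local), sorted(remote)
    missing_locals, missing_remotes = [], []
    i = j = 0
    while i < len(local) or j < len(remote):
        if j >= len(remote):
            missing_locals.append(local[i]); i += 1
        elif i >= len(local):
            missing_remotes.append(remote[j]); j += 1
        elif local[i] == remote[j]:
            i += 1; j += 1
        elif local[i] < remote[j]:
            missing_locals.append(local[i]); i += 1
        else:
            missing_remotes.append(remote[j]); j += 1
    return missing_locals, missing_remotes
# e.g. _auth_difference_sketch([(1, "A"), (2, "B")], [(1, "A"), (2, "C")])
# returns ([(2, "B")], [(2, "C")])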
@defer.inlineCallbacks
@log_function
def exchange_third_party_invite(self, invite):
sender = invite["sender"]
room_id = invite["room_id"]
event_dict = {
"type": EventTypes.Member,
"content": {
"membership": Membership.INVITE,
"third_party_invite": invite,
},
"room_id": room_id,
"sender": sender,
"state_key": invite["mxid"],
}
if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
event, context = yield self._create_new_client_event(builder=builder)
self.auth.check(event, context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
else:
destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)])
yield self.replication_layer.forward_third_party_invite(
destinations,
room_id,
event_dict,
)
@defer.inlineCallbacks
@log_function
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
builder = self.event_builder_factory.new(event_dict)
event, context = yield self._create_new_client_event(
builder=builder,
)
self.auth.check(event, auth_events=context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)
returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
@defer.inlineCallbacks
def _validate_keyserver(self, event, auth_events):
token = event.content["third_party_invite"]["signed"]["token"]
invite_event = auth_events.get(
(EventTypes.ThirdPartyInvite, token,)
)
try:
response = yield self.hs.get_simple_http_client().get_json(
invite_event.content["key_validity_url"],
{"public_key": invite_event.content["public_key"]}
)
except Exception:
raise SynapseError(
502,
"Third party certificate could not be checked"
)
if "valid" not in response or not response["valid"]:
raise AuthError(403, "Third party certificate was invalid")
|
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import pandas as pd
import random
import tempfile
import os
import shutil
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.data import TSDataset
from pandas.testing import assert_frame_equal
from numpy.testing import assert_array_almost_equal
def get_ts_df():
sample_num = np.random.randint(100, 200)
train_df = pd.DataFrame({"datetime": pd.date_range('1/1/2019', periods=sample_num),
"value": np.random.randn(sample_num),
"id": np.array(['00']*sample_num),
"extra feature": np.random.randn(sample_num)})
return train_df
def get_multi_id_ts_df():
sample_num = 100
train_df = pd.DataFrame({"value": np.random.randn(sample_num),
"id": np.array(['00']*50 + ['01']*50),
"extra feature": np.random.randn(sample_num)})
train_df["datetime"] = pd.date_range('1/1/2019', periods=sample_num)
train_df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
return train_df
def get_ugly_ts_df():
data = np.random.random_sample((100, 5))
mask = np.random.random_sample((100, 5))
newmask = mask.copy()
mask[newmask >= 0.4] = 2
mask[newmask < 0.4] = 1
mask[newmask < 0.2] = 0
data[mask == 0] = None
data[mask == 1] = np.nan
df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
df['a'][0] = np.nan # make sure column 'a' has an N/A value
df["datetime"] = pd.date_range('1/1/2019', periods=100)
df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
df["id"] = np.array(['00']*50 + ['01']*50)
return df
def get_int_target_df():
sample_num = np.random.randint(100, 200)
train_df = pd.DataFrame({"datetime": pd.date_range('1/1/2019', periods=sample_num),
"value": np.array(sample_num),
"id": np.array(['00']*sample_num),
"extra feature": np.random.randn(sample_num)})
return train_df
def get_non_dt():
df = pd.DataFrame({"datetime": np.arange(100),
"id": np.array(['00']*100),
"value": np.random.randn(100),
"extra feature": np.random.randn(100)})
return df
def get_not_aligned_df():
df_val = pd.DataFrame({"id": np.array(['00']*20+['01']*30+['02']*50),
"value": np.random.randn(100),
"extra feature": np.random.randn(100)})
data_sec = pd.DataFrame({"datetime": pd.date_range(
start='1/1/2019 00:00:00', periods=20, freq='S')})
data_min = pd.DataFrame({"datetime": pd.date_range(
start='1/2/2019 00:00:00', periods=30, freq='H')})
data_hou = pd.DataFrame({"datetime": pd.date_range(
start='1/3/2019 00:00:00', periods=50, freq='D')})
dt_val = pd.concat([data_sec, data_min, data_hou],
axis=0, ignore_index=True)
df = pd.merge(left=dt_val, right=df_val, left_index=True, right_index=True)
return df
class TestTSDataset(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_tsdataset_initialization(self):
df = get_ts_df()
# legal input
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == ['00']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == ['00']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df.drop(columns=["id"]), dt_col="datetime",
target_col=["value"], extra_feature_col="extra feature")
assert tsdata._id_list == ['0']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
# illegal input
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col=0)
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col=0, target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=0,
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(0, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value1"],
extra_feature_col="extra feature", id_col="id")
def test_tsdataset_from_parquet(self):
df = get_ts_df()
configs = dict(dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature"],
id_col="id")
tsdata_pd = TSDataset.from_pandas(df, **configs)
temp = tempfile.mkdtemp()
try:
path = os.path.join(temp, "test.parquet")
df.to_parquet(path)
tsdata_pq = TSDataset.from_parquet(path, **configs)
pd.testing.assert_frame_equal(tsdata_pd.to_pandas(), tsdata_pq.to_pandas(),
check_like=True)
finally:
shutil.rmtree(temp)
def test_tsdataset_initialization_multiple(self):
df = get_multi_id_ts_df()
# legal input
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == ['00', '01']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == ['00', '01']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df.drop(columns=["id"]), dt_col="datetime",
target_col=["value"], extra_feature_col="extra feature")
assert tsdata._id_list == ['0']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
        # illegal input
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col=0)
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col=0, target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=0,
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(0, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value1"],
extra_feature_col="extra feature", id_col="id")
def test_tsdataset_roll_single_id(self):
df = get_ts_df()
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.to_numpy()
        # roll for training with different argument styles.
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
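        # A single id yields len(df)-lookback-horizon+1 rolling windows; x carries
        # target + extra feature (2 columns), y carries the target only.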
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
        # explicitly pass feature_col and target_col.
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=["extra feature"], target_col="value")
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=["extra feature"], target_col="value", id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=[], target_col="value")
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 1)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=[], target_col="value", id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 1)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
        # roll for inference: horizon=0 yields features only (y is None).
horizon = 0
lookback = random.randint(1, 20)
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y is None
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y is None
tsdata._check_basic_invariants()
def test_tsdataset_roll_multi_id(self):
df = get_multi_id_ts_df()
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
# test train
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
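        # id_sensitive=True concatenates the two ids along the feature axis
        # (2 columns x 2 ids = 4); id_sensitive=False (below) instead stacks the
        # ids along the sample axis, doubling the number of windows.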
assert x.shape == ((50-lookback-horizon+1), lookback, 4)
assert y.shape == ((50-lookback-horizon+1), horizon, 2)
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
# horizon list.
horizon_list = [1, 3, 5]
tsdata.roll(lookback=lookback, horizon=horizon_list)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-max(horizon_list)+1)*2, lookback, 2)
assert y.shape == ((50-lookback-max(horizon_list)+1)*2, len(horizon_list), 1)
horizon_list = [1, 5, 9]
tsdata.roll(lookback=lookback, horizon=horizon_list, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-max(horizon_list)+1), lookback, 4)
assert y.shape == ((50-lookback-max(horizon_list)+1), len(horizon_list), 2)
        # multiple target columns.
tsdata = TSDataset.from_pandas(df,
dt_col="datetime",
target_col=["value", "extra feature"],
id_col="id")
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=False)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 2)
tsdata._check_basic_invariants()
def test_tsdataset_roll_order(self):
df = pd.DataFrame({"datetime": np.array(['1/1/2019', '1/1/2019', '1/2/2019', '1/2/2019']),
"value": np.array([1.9, 2.3, 2.4, 2.6]),
"id": np.array(['00', '01', '00', '01']),
"extra feature1": np.array([1, 0, 3, 0]),
"extra feature2": np.array([2, 9, 4, 2])})
tsdata = TSDataset.from_pandas(df,
dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature1", "extra feature2"],
id_col="id")
x, y = tsdata.roll(lookback=1, horizon=1, id_sensitive=False).to_numpy()
assert x.shape == (2, 1, 3) and y.shape == (2, 1, 1)
assert np.array_equal(x, np.array([[[1.9, 1, 2]], [[2.3, 0, 9]]], dtype=np.float32))
assert np.array_equal(y, np.array([[[2.4]], [[2.6]]], dtype=np.float32))
x, y = tsdata.roll(lookback=1, horizon=1, id_sensitive=True).to_numpy()
assert x.shape == (1, 1, 6) and y.shape == (1, 1, 2)
assert np.array_equal(x, np.array([[[1.9, 2.3, 1, 2, 0, 9]]], dtype=np.float32))
assert np.array_equal(y, np.array([[[2.4, 2.6]]], dtype=np.float32))
def test_tsdata_roll_int_target(self):
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
df = get_int_target_df()
tsdata = TSDataset.from_pandas(df, dt_col='datetime', target_col='value',
extra_feature_col=['extra feature'], id_col="id")
x, y = tsdata.roll(lookback=lookback, horizon=horizon).to_numpy()
assert x.dtype == np.float32
assert y.dtype == np.float32
tsdata._check_basic_invariants()
def test_tsdataset_to_torch_loader_roll(self):
df_single_id = get_ts_df()
df_multi_id = get_multi_id_ts_df()
for df in [df_single_id, df_multi_id]:
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
batch_size = random.randint(16, 32)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
# train
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon)
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, horizon, 1)
break
# test
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=0)
for x_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
break
# specify feature_col
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon,
feature_col=[])
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 1)
assert tuple(y_batch.size()) == (batch_size, horizon, 1)
break
            # target_col must be a subset of the columns the TSDataset was built with.
with pytest.raises(ValueError):
tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon,
target_col=['value', 'extra feature'])
# specify horizon_list
horizon_list = [1, 3, 5]
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon_list)
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, len(horizon_list), 1)
break
# multi target_col
tsdata = TSDataset.from_pandas(df, dt_col="datetime",
target_col=["value", "extra feature"], id_col="id")
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon)
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, horizon, 2)
break
def test_tsdataset_to_torch_loader(self):
df = get_ts_df()
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
batch_size = random.randint(16, 32)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.to_torch_data_loader()
tsdata.roll(lookback=lookback, horizon=horizon)
loader = tsdata.to_torch_data_loader(batch_size=batch_size,
lookback=lookback,
horizon=horizon)
for x_batch, y_batch in loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, horizon, 1)
break
def test_tsdata_multi_unscale_numpy_torch_load(self):
lookback = random.randint(1, 10)
horizon = random.randint(1, 20)
batch_size = random.randint(16, 32)
df = get_multi_id_ts_df()
df_test = get_multi_id_ts_df()
tsdata_train = TSDataset.from_pandas(df,
target_col='value',
dt_col='datetime',
extra_feature_col='extra feature',
id_col='id')
tsdata_test = TSDataset.from_pandas(df_test,
target_col='value',
dt_col='datetime',
extra_feature_col='extra feature',
id_col='id')
# roll is True.
from sklearn.preprocessing import StandardScaler
stand = StandardScaler()
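        # Fit the scaler on the training set only; the test set reuses the
        # fitted statistics via fit=False.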
for tsdata in [tsdata_train, tsdata_test]:
tsdata.scale(stand, fit=tsdata is tsdata_train)
test_loader = tsdata_test.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon)
import torch
from torch.utils.data.dataloader import DataLoader
test_loader = DataLoader(test_loader.dataset, batch_size=batch_size, shuffle=False)
batch_load_list = []
for _, y_batch in test_loader:
batch_load_list.append(y_batch)
y_test = torch.cat(batch_load_list, dim=0)
pred = np.copy(y_test.numpy()) # sanity check
unscaled_pred = tsdata_train.unscale_numpy(pred)
unscaled_y_test = tsdata_train.unscale_numpy(y_test.numpy())
_, unscaled_y_test_reproduce = tsdata_test.unscale()\
.roll(lookback=lookback, horizon=horizon)\
.to_numpy()
assert_array_almost_equal(unscaled_pred, unscaled_y_test_reproduce)
assert_array_almost_equal(unscaled_y_test, unscaled_y_test_reproduce)
tsdata._check_basic_invariants()
def test_tsdataset_imputation(self):
for val in ["last", "const", "linear"]:
df = get_ugly_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="e",
extra_feature_col=["a", "b", "c", "d"], id_col="id")
tsdata.impute(mode=val)
assert tsdata.to_pandas().isna().sum().sum() == 0
assert len(tsdata.to_pandas()) == 100
tsdata._check_basic_invariants()
def test_tsdataset_deduplicate(self):
df = get_ugly_ts_df()
for _ in range(20):
df.loc[len(df)] = df.loc[np.random.randint(0, 99)]
assert len(df) == 120
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="e",
extra_feature_col=["a", "b", "c", "d"], id_col="id")
tsdata.deduplicate()
assert len(tsdata.to_pandas()) == 100
tsdata._check_basic_invariants()
def test_tsdataset_datetime_feature(self):
df = get_ts_df()
# interval = day
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature()
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
# interval = day, one_hot = ["WEEKDAY"]
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature(one_hot_features=["WEEKDAY"])
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
def test_tsdataset_datetime_feature_multiple(self):
df = get_multi_id_ts_df()
# interval = day
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature()
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
# interval = day, one_hot = ["WEEKDAY"]
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature(one_hot_features=["WEEKDAY"])
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'YEAR',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
def test_tsdataset_scale_unscale(self):
df = get_ts_df()
df_test = get_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata_test = TSDataset.from_pandas(df_test, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
from sklearn.preprocessing import StandardScaler, MaxAbsScaler, MinMaxScaler, RobustScaler
scalers = [StandardScaler(), MaxAbsScaler(),
MinMaxScaler(), RobustScaler()]
for scaler in scalers:
tsdata.scale(scaler)
tsdata_test.scale(scaler, fit=False)
with pytest.raises(AssertionError):
assert_frame_equal(tsdata.to_pandas(), df)
with pytest.raises(AssertionError):
assert_frame_equal(tsdata_test.to_pandas(), df_test)
tsdata.unscale()
tsdata_test.unscale()
assert_frame_equal(tsdata.to_pandas(), df)
assert_frame_equal(tsdata_test.to_pandas(), df_test)
tsdata._check_basic_invariants()
def test_tsdataset_unscale_numpy(self):
df = get_multi_id_ts_df()
df_test = get_multi_id_ts_df()
from sklearn.preprocessing import StandardScaler, MaxAbsScaler, MinMaxScaler, RobustScaler
scalers = [StandardScaler(),
StandardScaler(with_mean=False),
StandardScaler(with_std=False),
MaxAbsScaler(),
MinMaxScaler(),
MinMaxScaler(feature_range=(1, 3)),
RobustScaler(),
RobustScaler(with_centering=False),
RobustScaler(with_scaling=False),
RobustScaler(quantile_range=(20, 80))]
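        # Each scaler is fitted on the train frame and applied (fit=False) to the
        # test frame; unscale_numpy must invert the transform for the target column.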
for scaler in scalers:
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata_test = TSDataset.from_pandas(df_test, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature()\
.scale(scaler)\
.roll(lookback=5, horizon=4, id_sensitive=True)
tsdata_test.gen_dt_feature()\
.scale(scaler, fit=False)\
.roll(lookback=5, horizon=4, id_sensitive=True)
_, _ = tsdata.to_numpy()
_, y_test = tsdata_test.to_numpy()
pred = np.copy(y_test) # sanity check
unscaled_pred = tsdata.unscale_numpy(pred)
unscaled_y_test = tsdata.unscale_numpy(y_test)
tsdata_test.unscale()\
.roll(lookback=5, horizon=4, id_sensitive=True)
_, unscaled_y_test_reproduce = tsdata_test.to_numpy()
assert_array_almost_equal(unscaled_pred, unscaled_y_test_reproduce)
assert_array_almost_equal(unscaled_y_test, unscaled_y_test_reproduce)
tsdata._check_basic_invariants()
def test_tsdataset_resample(self):
df = get_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.resample('2D', df["datetime"][0], df["datetime"][df.shape[0]-1])
assert len(tsdata.to_pandas()) == (df.shape[0] + 1) // 2
tsdata._check_basic_invariants()
        # target_col/extra_feature_col dtype is object (str).
sample_num = np.random.randint(100, 200)
df = pd.DataFrame({"datetime": pd.date_range('1/1/2019', periods=sample_num),
"value": np.array(['test_value']*sample_num),
"id": np.array(['00']*sample_num),
"extra feature": np.array(['test_extra_feature']*sample_num)})
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.resample('2S', df.datetime[0], df.datetime[df.shape[0]-1])
tsdata._check_basic_invariants()
        # target_col/extra_feature_col dtype is object (numeric).
df = get_ts_df()
        df.value = df.value.astype(object)
        df['extra feature'] = df['extra feature'].astype(object)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
before_sampling = tsdata.df.columns
tsdata.resample('2S', df.datetime[0], df.datetime[df.shape[0]-1])
assert set(before_sampling) == set(tsdata.df.columns)
tsdata._check_basic_invariants()
def test_tsdataset_resample_multiple(self):
df = get_multi_id_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.resample('2D', df["datetime"][0], df["datetime"][df.shape[0]-1])
assert len(tsdata.to_pandas()) == df.shape[0] // 2
tsdata._check_basic_invariants()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.resample('2D')
assert len(tsdata.to_pandas()) == 50
tsdata._check_basic_invariants()
        # target_col/extra_feature_col dtype is object (str).
df = pd.DataFrame({"value": np.array(['test_value']*100),
"id": np.array(['00']*50 + ['01']*50),
"extra feature": np.array(['test_extra_feature']*100)})
df["datetime"] = pd.date_range('1/1/2019', periods=100)
df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.resample('2S', df.datetime[0], df.datetime[df.shape[0]-1])
tsdata._check_basic_invariants()
# target_col/extra_feature_col dtype is object(numeric).
df = get_multi_id_ts_df()
        df.value = df.value.astype(object)
        df['extra feature'] = df['extra feature'].astype(object)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
before_sampling = tsdata.df.columns
tsdata.resample('2S', df.datetime[0], df.datetime[df.shape[0]-1])
assert set(before_sampling) == set(tsdata.df.columns)
tsdata._check_basic_invariants()
def test_tsdataset_split(self):
df = get_ts_df()
# only train and test
tsdata_train, tsdata_valid, tsdata_test =\
TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0, test_ratio=0.1)
# standard split with all three sets
tsdata_train, tsdata_valid, tsdata_test =\
TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0.1, test_ratio=0.1,
largest_look_back=5, largest_horizon=2)
assert set(np.unique(tsdata_train.to_pandas()["id"])) == {"00"}
assert set(np.unique(tsdata_valid.to_pandas()["id"])) == {"00"}
assert set(np.unique(tsdata_test.to_pandas()["id"])) == {"00"}
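        # The validation and test splits keep largest_look_back + largest_horizon - 1
        # extra leading rows so that rolling still produces complete windows.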
assert len(tsdata_train.to_pandas()) == df[:-(int(df.shape[0]*0.1)*2)].shape[0]
assert len(tsdata_valid.to_pandas()) == int(df.shape[0] * 0.1 + 5 + 2 - 1)
assert len(tsdata_test.to_pandas()) == int(df.shape[0] * 0.1 + 5 + 2 - 1)
tsdata_train.feature_col.append("new extra feature")
assert len(tsdata_train.feature_col) == 2
assert len(tsdata_valid.feature_col) == 1
assert len(tsdata_test.feature_col) == 1
tsdata_train.target_col[0] = "new value"
assert tsdata_train.target_col[0] == "new value"
assert tsdata_valid.target_col[0] != "new value"
assert tsdata_test.target_col[0] != "new value"
def test_tsdataset_split_multiple(self):
df = get_multi_id_ts_df()
tsdata_train, tsdata_valid, tsdata_test =\
TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0.1, test_ratio=0.1,
largest_look_back=5, largest_horizon=2)
assert set(np.unique(tsdata_train.to_pandas()["id"])) == {"00", "01"}
assert set(np.unique(tsdata_valid.to_pandas()["id"])) == {"00", "01"}
assert set(np.unique(tsdata_test.to_pandas()["id"])) == {"00", "01"}
assert len(tsdata_train.to_pandas()) == (50 * 0.8)*2
assert len(tsdata_valid.to_pandas()) == (50 * 0.1 + 5 + 2 - 1)*2
assert len(tsdata_test.to_pandas()) == (50 * 0.1 + 5 + 2 - 1)*2
assert tsdata_train.feature_col is not tsdata_valid.feature_col
assert tsdata_train.feature_col is not tsdata_test.feature_col
assert tsdata_train.target_col is not tsdata_valid.target_col
assert tsdata_train.target_col is not tsdata_test.target_col
tsdata_train.feature_col.append("new extra feature")
assert len(tsdata_train.feature_col) == 2
assert len(tsdata_valid.feature_col) == 1
assert len(tsdata_test.feature_col) == 1
tsdata_train.target_col[0] = "new value"
assert tsdata_train.target_col[0] == "new value"
assert tsdata_valid.target_col[0] != "new value"
assert tsdata_test.target_col[0] != "new value"
def test_tsdataset_global_feature(self):
for val in ["minimal"]:
df = get_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_global_feature(settings=val)
tsdata._check_basic_invariants()
def test_tsdataset_global_feature_multiple(self):
df = get_multi_id_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_global_feature(settings="minimal")
tsdata._check_basic_invariants()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_global_feature(settings="minimal", n_jobs=2)
tsdata._check_basic_invariants()
def test_tsdataset_rolling_feature_multiple(self):
df = get_multi_id_ts_df()
horizon = random.randint(2, 10)
lookback = random.randint(2, 20)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_rolling_feature(settings="minimal", window_size=lookback)
tsdata._check_basic_invariants()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_rolling_feature(settings="minimal", window_size=lookback, n_jobs=2)
tsdata._check_basic_invariants()
# roll train
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
feature_num = len(tsdata.feature_col) + len(tsdata.target_col)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, feature_num)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1), lookback, feature_num*2)
assert y.shape == ((50-lookback-horizon+1), horizon, 2)
tsdata._check_basic_invariants()
def test_check_scale_sequence(self):
df = get_multi_id_ts_df()
        # with_split=True.
td_train, td_valid, td_test = TSDataset.from_pandas(df, dt_col="datetime",
target_col="value",
extra_feature_col=[
"extra feature"],
id_col="id",
with_split=True,
val_ratio=0.1,
test_ratio=0.1)
from sklearn.preprocessing import StandardScaler
stand = StandardScaler()
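        # Calling scale(fit=False) before the scaler has ever been fitted must fail.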
with pytest.raises(AssertionError):
for tsdata in [td_train, td_valid, td_test]:
tsdata.scale(stand, fit=False)
tsdata._check_basic_invariants()
        # removed because of the potentially large cost on the test system:
# with pytest.raises(AssertionError):
# tsdata.gen_global_feature(settings="minimal")\
# .gen_rolling_feature(settings="minimal", window_size=5)
def test_non_pd_datetime(self):
df = get_non_dt()
tsdata = TSDataset.from_pandas(df, dt_col="datetime",
target_col="value",
extra_feature_col="extra feature",
id_col="id")
with pytest.raises(AssertionError):
tsdata.resample('2D')
with pytest.raises(AssertionError):
tsdata.gen_dt_feature()
with pytest.raises(AssertionError):
tsdata.gen_rolling_feature(settings="minimal", window_size=1000)
tsdata._check_basic_invariants()
def test_not_aligned(self):
df = get_not_aligned_df()
tsdata = TSDataset.from_pandas(df, target_col="value",
dt_col="datetime",
extra_feature_col="extra feature",
id_col="id")
with pytest.raises(AssertionError):
tsdata.roll(lookback=5, horizon=2, id_sensitive=True)
tsdata._check_basic_invariants()
def test_dt_sorted(self):
df = pd.DataFrame({"datetime": np.array(['20000101', '20000102', '20000102', '20000101']),
"value": np.array([1.9, 2.3, 2.4, 2.6]),
"id": np.array(['00', '01', '00', '01'])})
tsdata = TSDataset.from_pandas(df, target_col='value',
dt_col='datetime')
with pytest.raises(RuntimeError):
tsdata._check_basic_invariants(strict_check=True)
"""The tests for the image_processing component."""
from unittest.mock import patch, PropertyMock
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.setup import setup_component
from homeassistant.exceptions import HomeAssistantError
import homeassistant.components.http as http
import homeassistant.components.image_processing as ip
from tests.common import (
get_test_home_assistant, get_test_instance_port, assert_setup_component)
class TestSetupImageProcessing:
"""Test class for setup image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Set up demo platform on image_process component."""
config = {
ip.DOMAIN: {
'platform': 'demo'
}
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
def test_setup_component_with_service(self):
"""Set up demo platform on image_process component test service."""
config = {
ip.DOMAIN: {
'platform': 'demo'
}
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
assert self.hass.services.has_service(ip.DOMAIN, 'scan')
class TestImageProcessing:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
setup_component(
self.hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: get_test_instance_port()}})
config = {
ip.DOMAIN: {
'platform': 'test'
},
'camera': {
'platform': 'demo'
},
}
setup_component(self.hass, ip.DOMAIN, config)
state = self.hass.states.get('camera.demo_camera')
self.url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.camera.demo.DemoCamera.camera_image',
autospec=True, return_value=b'Test')
def test_get_image_from_camera(self, mock_camera):
"""Grab an image from camera entity."""
self.hass.start()
ip.scan(self.hass, entity_id='image_processing.test')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.test')
assert mock_camera.called
assert state.state == '1'
assert state.attributes['image'] == b'Test'
@patch('homeassistant.components.camera.async_get_image',
side_effect=HomeAssistantError())
def test_get_image_without_exists_camera(self, mock_image):
"""Try to get image without exists camera."""
self.hass.states.remove('camera.demo_camera')
ip.scan(self.hass, entity_id='image_processing.test')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.test')
assert mock_image.called
assert state.state == '0'
class TestImageProcessingAlpr:
"""Test class for alpr image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
config = {
ip.DOMAIN: {
'platform': 'demo'
},
'camera': {
'platform': 'demo'
},
}
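        # Disable polling so the demo entity only updates on explicit scan calls.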
with patch('homeassistant.components.image_processing.demo.'
'DemoImageProcessingAlpr.should_poll',
new_callable=PropertyMock(return_value=False)):
setup_component(self.hass, ip.DOMAIN, config)
state = self.hass.states.get('camera.demo_camera')
self.url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
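        # Collect image_processing.found_plate events fired by the scans below.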
self.alpr_events = []
@callback
def mock_alpr_event(event):
"""Mock event."""
self.alpr_events.append(event)
self.hass.bus.listen('image_processing.found_plate', mock_alpr_event)
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_alpr_event_single_call(self, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.get(self.url, content=b'image')
ip.scan(self.hass, entity_id='image_processing.demo_alpr')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.demo_alpr')
assert len(self.alpr_events) == 4
assert state.state == 'AC3829'
event_data = [event.data for event in self.alpr_events if
event.data.get('plate') == 'AC3829']
assert len(event_data) == 1
assert event_data[0]['plate'] == 'AC3829'
assert event_data[0]['confidence'] == 98.3
assert event_data[0]['entity_id'] == 'image_processing.demo_alpr'
def test_alpr_event_double_call(self, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.get(self.url, content=b'image')
ip.scan(self.hass, entity_id='image_processing.demo_alpr')
ip.scan(self.hass, entity_id='image_processing.demo_alpr')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.demo_alpr')
assert len(self.alpr_events) == 4
assert state.state == 'AC3829'
event_data = [event.data for event in self.alpr_events if
event.data.get('plate') == 'AC3829']
assert len(event_data) == 1
assert event_data[0]['plate'] == 'AC3829'
assert event_data[0]['confidence'] == 98.3
assert event_data[0]['entity_id'] == 'image_processing.demo_alpr'
@patch('homeassistant.components.image_processing.demo.'
'DemoImageProcessingAlpr.confidence',
new_callable=PropertyMock(return_value=95))
def test_alpr_event_single_call_confidence(self, confidence_mock,
aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.get(self.url, content=b'image')
ip.scan(self.hass, entity_id='image_processing.demo_alpr')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.demo_alpr')
assert len(self.alpr_events) == 2
assert state.state == 'AC3829'
event_data = [event.data for event in self.alpr_events if
event.data.get('plate') == 'AC3829']
assert len(event_data) == 1
assert event_data[0]['plate'] == 'AC3829'
assert event_data[0]['confidence'] == 98.3
assert event_data[0]['entity_id'] == 'image_processing.demo_alpr'
class TestImageProcessingFace:
"""Test class for face image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
config = {
ip.DOMAIN: {
'platform': 'demo'
},
'camera': {
'platform': 'demo'
},
}
with patch('homeassistant.components.image_processing.demo.'
'DemoImageProcessingFace.should_poll',
new_callable=PropertyMock(return_value=False)):
setup_component(self.hass, ip.DOMAIN, config)
state = self.hass.states.get('camera.demo_camera')
self.url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
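        # Collect image_processing.detect_face events fired by the scans below.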
self.face_events = []
@callback
def mock_face_event(event):
"""Mock event."""
self.face_events.append(event)
self.hass.bus.listen('image_processing.detect_face', mock_face_event)
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_face_event_call(self, aioclient_mock):
"""Set up and scan a picture and test faces from event."""
aioclient_mock.get(self.url, content=b'image')
ip.scan(self.hass, entity_id='image_processing.demo_face')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.demo_face')
assert len(self.face_events) == 2
assert state.state == 'Hans'
assert state.attributes['total_faces'] == 4
event_data = [event.data for event in self.face_events if
event.data.get('name') == 'Hans']
assert len(event_data) == 1
assert event_data[0]['name'] == 'Hans'
assert event_data[0]['confidence'] == 98.34
assert event_data[0]['gender'] == 'male'
assert event_data[0]['entity_id'] == \
'image_processing.demo_face'
@patch('homeassistant.components.image_processing.demo.'
'DemoImageProcessingFace.confidence',
new_callable=PropertyMock(return_value=None))
def test_face_event_call_no_confidence(self, mock_config, aioclient_mock):
"""Set up and scan a picture and test faces from event."""
aioclient_mock.get(self.url, content=b'image')
ip.scan(self.hass, entity_id='image_processing.demo_face')
self.hass.block_till_done()
state = self.hass.states.get('image_processing.demo_face')
assert len(self.face_events) == 3
assert state.state == '4'
assert state.attributes['total_faces'] == 4
event_data = [event.data for event in self.face_events if
event.data.get('name') == 'Hans']
assert len(event_data) == 1
assert event_data[0]['name'] == 'Hans'
assert event_data[0]['confidence'] == 98.34
assert event_data[0]['gender'] == 'male'
assert event_data[0]['entity_id'] == \
'image_processing.demo_face'