| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# -*- coding: utf-8 -*-
from gettext import gettext as _
EXP1 = [
_('Provinces'),
['lineasDepto'],
[],
['deptos']
]
EXP2 = [
_('Provincial capitals'),
['lineasDepto', 'capitales'],
[],
['capitales']
]
EXP3 = [
_('Cities'),
['lineasDepto', 'capitales', 'ciudades'],
[],
['capitales', 'ciudades']
]
EXP4 = [
_('Waterways'),
['rios'],
[],
['rios']
]
EXP5 = [
_('Routes'),
['rutas', 'capitales'],
['capitales'],
['rutas']
]
EXPLORATIONS = [EXP1, EXP2, EXP3, EXP4, EXP5]
| AlanJAS/iknowAmerica | recursos/0panama/datos/explorations.py | Python | gpl-3.0 | 554 |
from setuptools import setup, find_packages
setup(name='BIOMD0000000269',
version=20140916,
description='BIOMD0000000269 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000269',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | biomodels/BIOMD0000000269 | setup.py | Python | cc0-1.0 | 377 |
from neb.plugins import Plugin
import urllib
class UrlPlugin(Plugin):
"""URL encode or decode text.
url encode <text>
url decode <text>
"""
name = "url"
def cmd_encode(self, event, *args):
"""URL encode text. 'url encode <text>'"""
# use the body directly so quotes are parsed correctly.
return urllib.quote(event["content"]["body"][12:])
def cmd_decode(self, event, *args):
"""URL decode text. 'url decode <url encoded text>'"""
# use the body directly so quotes are parsed correctly.
return urllib.unquote(event["content"]["body"][12:])
| Kegsay/Matrix-NEB | plugins/url.py | Python | apache-2.0 | 622 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
import numpy.random as nr
import random as r
from python_util.util import *
from python_util.data import *
from python_util.options import *
from python_util.gpumodel import *
import sys
import math as m
import layer as lay
from convdata import *
#TasksImageDataProvider, CIFARDataProvider, DummyConvNetLogRegDataProvider
from os import linesep as NL
import copy as cp
import os
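# Drivers encapsulate the per-batch behaviour of the model defined below: plain
# training, gradient checking, multi-view testing and test-feature writing all
# hook into on_start_batch()/on_finish_batch().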
class Driver(object):
def __init__(self, convnet):
self.convnet = convnet
def on_start_batch(self, batch_data, train):
pass
def on_finish_batch(self):
pass
class GradCheckDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.checkGradients(data)
class TrainingDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.startBatch(data, self.convnet.get_progress(), not train)
class MultiviewTestDriver(TrainingDriver):
def on_start_batch(self, batch_data, train):
self.write_output = False
if train:
TrainingDriver.on_start_batch(self, batch_data, train)
else:
data = batch_data[2]
num_views = self.convnet.test_data_provider.num_views
if self.convnet.test_out != "" and self.convnet.logreg_name != "":
self.write_output = True
self.test_file_name = os.path.join(self.convnet.test_out, 'test_preds_%d' % batch_data[1])
self.probs = n.zeros((data[0].shape[1]/num_views, self.convnet.test_data_provider.get_num_classes()), dtype=n.single)
self.convnet.libmodel.startMultiviewTest(data, num_views, self.probs, self.convnet.logreg_name)
else:
self.convnet.libmodel.startMultiviewTest(data, num_views)
def on_finish_batch(self):
if self.write_output:
if not os.path.exists(self.convnet.test_out):
os.makedirs(self.convnet.test_out)
pickle(self.test_file_name, {'data': self.probs,
'note': 'generated from %s' % self.convnet.save_file})
class FeatureWriterDriver(Driver):
def __init__(self, convnet):
Driver.__init__(self, convnet)
self.last_batch = convnet.test_batch_range[-1]
def on_start_batch(self, batch_data, train):
if train:
raise ModelStateException("FeatureWriter must be used in conjunction with --test-only=1. It writes test data features.")
self.batchnum, self.data = batch_data[1], batch_data[2]
if not os.path.exists(self.convnet.feature_path):
os.makedirs(self.convnet.feature_path)
self.num_ftrs = self.convnet.layers[self.convnet.write_features]['outputs']
self.ftrs = n.zeros((self.data[0].shape[1], self.num_ftrs), dtype=n.single)
self.convnet.libmodel.startFeatureWriter(self.data, [self.ftrs], [self.convnet.write_features])
def on_finish_batch(self):
path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
pickle(path_out, {'data': self.ftrs, 'labels': self.data[1]})
print "Wrote feature file %s" % path_out
if self.batchnum == self.last_batch:
pickle(os.path.join(self.convnet.feature_path, 'batches.meta'), {'source_model':self.convnet.load_file,
'num_vis':self.num_ftrs,
'batch_size': self.convnet.test_data_provider.batch_meta['batch_size']})
class ConvNet(IGPUModel):
def __init__(self, op, load_dic, dp_params={}):
filename_options = []
for v in ('color_noise', 'multiview_test', 'inner_size', 'scalar_mean', 'minibatch_size'):
dp_params[v] = op.get_value(v)
IGPUModel.__init__(self, "ConvNet", op, load_dic, filename_options, dp_params=dp_params)
def import_model(self):
lib_name = "cudaconvnet._ConvNet"
print "========================="
print "Importing %s C++ module" % lib_name
self.libmodel = __import__(lib_name,fromlist=['_ConvNet'])
def init_model_lib(self):
self.libmodel.initModel(self.layers,
self.device_ids,
self.minibatch_size,
self.conserve_mem)
def init_model_state(self):
ms = self.model_state
layers = ms['layers'] if self.loaded_from_checkpoint else {}
ms['layers'] = lay.LayerParser.parse_layers(os.path.join(self.layer_path, self.layer_def),
os.path.join(self.layer_path, self.layer_params), self, layers=layers)
self.do_decouple_conv()
self.do_unshare_weights()
self.op.set_value('conv_to_local', [], parse=False)
self.op.set_value('unshare_weights', [], parse=False)
self.set_driver()
def do_decouple_conv(self):
# Convert convolutional layers to local
if len(self.op.get_value('conv_to_local')) > 0:
for lname in self.op.get_value('conv_to_local'):
if self.model_state['layers'][lname]['type'] == 'conv':
lay.LocalLayerParser.conv_to_local(self.model_state['layers'], lname)
def do_unshare_weights(self):
# Decouple weight matrices
if len(self.op.get_value('unshare_weights')) > 0:
for name_str in self.op.get_value('unshare_weights'):
if name_str:
name = lay.WeightLayerParser.get_layer_name(name_str)
if name is not None:
name, idx = name[0], name[1]
if name not in self.model_state['layers']:
raise ModelStateException("Layer '%s' does not exist; unable to unshare" % name)
layer = self.model_state['layers'][name]
lay.WeightLayerParser.unshare_weights(layer, self.model_state['layers'], matrix_idx=idx)
else:
raise ModelStateException("Invalid layer name '%s'; unable to unshare." % name_str)
def set_driver(self):
if self.op.get_value('check_grads'):
self.driver = GradCheckDriver(self)
elif self.op.get_value('multiview_test'):
self.driver = MultiviewTestDriver(self)
elif self.op.get_value('write_features'):
self.driver = FeatureWriterDriver(self)
else:
self.driver = TrainingDriver(self)
def fill_excused_options(self):
if self.op.get_value('check_grads'):
self.op.set_value('save_path', '')
self.op.set_value('train_batch_range', '0')
self.op.set_value('test_batch_range', '0')
self.op.set_value('data_path', '')
# Make sure the data provider returned data in proper format
def parse_batch_data(self, batch_data, train=True):
if max(d.dtype != n.single for d in batch_data[2]):
raise DataProviderException("All matrices returned by data provider must consist of single-precision floats.")
return batch_data
def start_batch(self, batch_data, train=True):
self.driver.on_start_batch(batch_data, train)
def finish_batch(self):
ret = IGPUModel.finish_batch(self)
self.driver.on_finish_batch()
return ret
def print_iteration(self):
print "%d.%d (%.2f%%)..." % (self.epoch, self.batchnum, 100 * self.get_progress()),
def print_train_time(self, compute_time_py):
print "(%.3f sec)" % (compute_time_py)
def print_costs(self, cost_outputs):
costs, num_cases = cost_outputs[0], cost_outputs[1]
children = set()
#print "costs", costs
#print "num_cases", num_cases
for errname in costs:
#print "errname ", errname
if sum(errname in self.layers[z]['children'] for z in costs) == 0:
# print self.layers[errname]['children']
for child in set(self.layers[errname]['children']) & set(costs.keys()):
costs[errname] = [v + u for v, u in zip(costs[errname], costs[child])]
children.add(child)
filtered_costs = eval(self.layers[errname]['outputFilter'])(costs[errname], num_cases)
print "%s: " % errname,
if 'outputFilterFormatter' not in self.layers[errname]:
print ", ".join("%.6f" % v for v in filtered_costs),
else:
print eval(self.layers[errname]['outputFilterFormatter'])(self,filtered_costs),
if len(filtered_costs) != 0:
if m.isnan(filtered_costs[0]) or m.isinf(filtered_costs[0]):
print "<- error nan or inf!"
sys.exit(1)
for c in children:
del costs[c]
def print_train_results(self):
self.print_costs(self.train_outputs[-1])
def print_test_status(self):
pass
def print_test_results(self):
print NL + "======================Test output======================"
self.print_costs(self.test_outputs[-1])
if not self.test_only:
print NL + "----------------------Averages-------------------------"
self.print_costs(self.aggregate_test_outputs(self.test_outputs[-len(self.test_batch_range):]))
print NL + "-------------------------------------------------------",
for name,val in sorted(self.layers.items(), key=lambda x: x[1]['id']): # This is kind of hacky but will do for now.
l = self.layers[name]
if 'weights' in l:
wscales = [(l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi))) for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc']))]
print ""
print NL.join("Layer '%s' weights[%d]: %e [%e] [%e]" % (s[0], s[1], s[2], s[3], s[3]/s[2] if s[2] > 0 else 0) for s in wscales),
print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc']))),
print ""
def conditional_save(self):
self.save_state()
def aggregate_test_outputs(self, test_outputs):
test_outputs = cp.deepcopy(test_outputs)
num_cases = sum(t[1] for t in test_outputs)
for i in xrange(1, len(test_outputs)):
for k,v in test_outputs[i][0].items():
for j in xrange(len(v)):
test_outputs[0][0][k][j] += test_outputs[i][0][k][j]
return (test_outputs[0][0], num_cases)
@classmethod
def get_options_parser(cls):
op = IGPUModel.get_options_parser()
op.add_option("mini", "minibatch_size", IntegerOptionParser, "Minibatch size", default=128)
op.add_option("layer-def", "layer_def", StringOptionParser, "Layer definition file", set_once=False)
op.add_option("layer-params", "layer_params", StringOptionParser, "Layer parameter file")
op.add_option("layer-path", "layer_path", StringOptionParser, "Layer file path prefix", default="")
op.add_option("check-grads", "check_grads", BooleanOptionParser, "Check gradients and quit?", default=0, excuses=['data_path','save_path', 'save_file_override', 'train_batch_range','test_batch_range'])
op.add_option("multiview-test", "multiview_test", BooleanOptionParser, "Cropped DP: test on multiple patches?", default=0)
op.add_option("inner-size", "inner_size", IntegerOptionParser, "Cropped DP: crop size (0 = don't crop)", default=0, set_once=True)
op.add_option("conv-to-local", "conv_to_local", ListOptionParser(StringOptionParser), "Convert given conv layers to unshared local", default=[])
op.add_option("unshare-weights", "unshare_weights", ListOptionParser(StringOptionParser), "Unshare weight matrices in given layers", default=[])
op.add_option("conserve-mem", "conserve_mem", BooleanOptionParser, "Conserve GPU memory (slower)?", default=0)
op.add_option("color-noise", "color_noise", FloatOptionParser, "Add PCA noise to color channels with given scale", default=0.0)
op.add_option("test-out", "test_out", StringOptionParser, "Output test case predictions to given path", default="", requires=['logreg_name', 'multiview_test'])
op.add_option("logreg-name", "logreg_name", StringOptionParser, "Logreg cost layer name (for --test-out)", default="")
op.add_option("scalar-mean", "scalar_mean", FloatOptionParser, "Subtract this scalar from image (-1 = don't)", default=-1)
op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
op.delete_option('max_test_err')
op.options["testing_freq"].default = 57
op.options["num_epochs"].default = 50000
op.options['dp_type'].default = None
DataProvider.register_data_provider('dummy-lr-n', 'Dummy ConvNet logistic regression', DummyConvNetLogRegDataProvider)
DataProvider.register_data_provider('image', 'JPEG-encoded image data provider', ImageDataProvider)
DataProvider.register_data_provider('taskscotr', 'JPEG-encoded image data provider with Tasks', TasksCotrDataProvider)
DataProvider.register_data_provider('cifar', 'CIFAR-10 data provider', CIFARDataProvider)
DataProvider.register_data_provider('ocr', 'OCR data provider', OCRDataProvider)
return op
if __name__ == "__main__":
# nr.seed(6)
op = ConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ConvNet(op, load_dic)
model.start()
| s9xie/convnet2_misc | convnet.py | Python | apache-2.0 | 14,754 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Purchase orders - Force number",
'version': '0.1',
'category': 'Purchase Management',
'summary': "Force purchase orders numeration",
'description': """
This simple module allows specifying the number to use when creating purchase
orders. If the user does not change the default value ('/'), the standard
sequence is used.""",
'author': "Agile Business Group,Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": ['purchase'],
"data": [
'purchase_view.xml',
],
"demo": [],
"active": False,
"installable": False
}
| andrius-preimantas/purchase-workflow | purchase_order_force_number/__openerp__.py | Python | agpl-3.0 | 1,589 |
"""Convert CSV containing grid row/column IDs and category values to GeoJSON."""
import csv
import argparse
from geojson import Polygon, Feature, FeatureCollection, dump
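# Each (rid, cid) pair below names a 0.001-degree grid cell: cid/1000 is the
# cell's south-west longitude, rid/1000 its south-west latitude, and the
# polygon emitted is the corresponding 0.001 x 0.001 degree square.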
def main():
parser = argparse.ArgumentParser()
parser.add_argument("csv_in")
parser.add_argument("-columns", nargs="+", default=[])
args = parser.parse_args()
with open(args.csv_in, 'r') as fin:
csvreader = csv.reader(fin)
header = next(csvreader)
rid_idx = header.index('rid')
cid_idx = header.index('cid')
empath_indices = {}
if not args.columns:
for i in range(0, len(header)):
if header[i] == 'rid' or header[i] == 'cid':
continue
empath_indices[header[i]] = i
else:
for cat in args.columns:
empath_indices[cat] = header.index(cat)
features = []
for line in csvreader:
cid = line[cid_idx]
rid = line[rid_idx]
properties = {'rid':rid, 'cid':cid}
for cat in empath_indices:
properties[cat] = float(line[empath_indices[cat]])
bottomleftcorner = (float(cid) / 10**3, float(rid) / 10**3)
coords = [bottomleftcorner]
for i in [(0.001, 0), (0.001, 0.001), (0, 0.001), (0,0)]:
coords.append((bottomleftcorner[0] + i[1], bottomleftcorner[1] + i[0]))
features.append(Feature(geometry=Polygon([coords]), properties=properties))
with open(args.csv_in.replace(".csv", ".geojson"), 'w') as fout:
dump(FeatureCollection(features), fout)
if __name__ == "__main__":
main()
| joh12041/route-externalities | utils/grid_csv_to_geojson.py | Python | mit | 1,678 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Static evaluation of EdgeQL IR."""
from __future__ import annotations
from typing import *
import dataclasses
import decimal
import functools
from edb import errors
from edb.common import typeutils
from edb.common import uuidgen
from edb.edgeql import ast as qlast
from edb.edgeql import compiler as qlcompiler
from edb.edgeql import qltypes
from edb.ir import ast as irast
from edb.ir import typeutils as irtyputils
from edb.ir import statypes as statypes
from edb.ir import utils as irutils
from edb.schema import objects as s_obj
from edb.schema import objtypes as s_objtypes
from edb.schema import types as s_types
from edb.schema import scalars as s_scalars
from edb.schema import schema as s_schema
from edb.schema import constraints as s_constr
from edb.server import config
class StaticEvaluationError(errors.QueryError):
pass
class UnsupportedExpressionError(errors.QueryError):
pass
EvaluationResult = Union[irast.TypeCast, irast.ConstExpr]
def evaluate_to_python_val(
ir: irast.Base,
schema: s_schema.Schema,
) -> Any:
const: EvaluationResult
if isinstance(ir, irast.Set) and isinstance(ir.expr, irast.TypeCast):
# Special case for type casts.
# We cannot fold them, but can eval to Python
const = ir.expr
else:
const = evaluate(ir, schema=schema)
return const_to_python(const, schema=schema)
@functools.singledispatch
def evaluate(
ir: irast.Base,
schema: s_schema.Schema) -> EvaluationResult:
raise UnsupportedExpressionError(
f'no static IR evaluation handler for {ir.__class__}')
@evaluate.register(irast.SelectStmt)
def evaluate_SelectStmt(
ir_stmt: irast.SelectStmt,
schema: s_schema.Schema) -> EvaluationResult:
if irutils.is_trivial_select(ir_stmt) and not ir_stmt.result.is_binding:
return evaluate(ir_stmt.result, schema)
else:
raise UnsupportedExpressionError(
'expression is not constant', context=ir_stmt.context)
@evaluate.register(irast.TypeCast)
def evaluate_TypeCast(
ir_cast: irast.TypeCast,
schema: s_schema.Schema) -> EvaluationResult:
schema, from_type = irtyputils.ir_typeref_to_type(
schema, ir_cast.from_type)
schema, to_type = irtyputils.ir_typeref_to_type(
schema, ir_cast.to_type)
schema_type_to_python_type(from_type, schema)
schema_type_to_python_type(to_type, schema)
evaluate(ir_cast.expr, schema)
return ir_cast
@evaluate.register(irast.EmptySet)
def evaluate_EmptySet(
ir_set: irast.EmptySet,
schema: s_schema.Schema) -> EvaluationResult:
return ir_set
@evaluate.register(irast.Set)
def evaluate_Set(
ir_set: irast.Set,
schema: s_schema.Schema) -> EvaluationResult:
if ir_set.expr is not None:
return evaluate(ir_set.expr, schema=schema)
else:
raise UnsupportedExpressionError(
'expression is not constant', context=ir_set.context)
@evaluate.register(irast.ConstExpr)
def evaluate_BaseConstant(
ir_const: irast.ConstExpr,
schema: s_schema.Schema) -> EvaluationResult:
return ir_const
op_table = {
# Concatenation
('Infix', 'std::++'): lambda a, b: a + b,
}
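# evaluate_OperatorCall() only folds string concatenation ('std::++'); UNION is
# handled separately by _evaluate_union(), and any other operator raises
# UnsupportedExpressionError.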
@evaluate.register(irast.OperatorCall)
def evaluate_OperatorCall(
opcall: irast.OperatorCall,
schema: s_schema.Schema) -> irast.ConstExpr:
if irutils.is_union_expr(opcall):
return _evaluate_union(opcall, schema)
eval_func = op_table.get(
(opcall.operator_kind, str(opcall.func_shortname)),
)
if eval_func is None:
raise UnsupportedExpressionError(
f'unsupported operator: {opcall.func_shortname}',
context=opcall.context)
args = []
for arg in opcall.args:
arg_val = evaluate_to_python_val(arg.expr, schema=schema)
if isinstance(arg_val, tuple):
raise UnsupportedExpressionError(
f'non-singleton operations are not supported',
context=opcall.context)
if arg_val is None:
raise UnsupportedExpressionError(
f'empty operations are not supported',
context=opcall.context)
args.append(arg_val)
value = eval_func(*args)
# Since we only perform string concatenations here, the constant
# in question is always a StringConstant.
qlconst = qlast.StringConstant.from_python(value)
result = qlcompiler.compile_constant_tree_to_ir(
qlconst, styperef=opcall.typeref, schema=schema)
assert isinstance(result, irast.ConstExpr), 'expected ConstExpr'
return result
def _evaluate_union(
opcall: irast.OperatorCall,
schema: s_schema.Schema) -> irast.ConstExpr:
elements: List[irast.BaseConstant] = []
for arg in opcall.args:
val = evaluate(arg.expr, schema=schema)
if isinstance(val, irast.ConstantSet):
elements.extend(val.elements)
elif isinstance(val, irast.EmptySet):
empty_set = val
elif isinstance(val, irast.BaseConstant):
elements.append(val)
else:
raise UnsupportedExpressionError(
f'{val!r} not supported in UNION',
context=opcall.context)
if elements:
return irast.ConstantSet(
elements=tuple(elements),
typeref=next(iter(elements)).typeref,
)
else:
# We get an empty set if the UNION was exclusively empty set
# literals. If that happens, grab one of the empty sets
# that we saw and return it.
return empty_set
@functools.singledispatch
def const_to_python(
ir: irast.ConstExpr | irast.TypeCast,
schema: s_schema.Schema) -> Any:
raise UnsupportedExpressionError(
f'cannot convert {ir!r} to Python value')
@const_to_python.register(irast.EmptySet)
def empty_set_to_python(
ir: irast.EmptySet,
schema: s_schema.Schema,
) -> None:
return None
@const_to_python.register(irast.ConstantSet)
def const_set_to_python(
ir: irast.ConstantSet,
schema: s_schema.Schema) -> Tuple[Any, ...]:
return tuple(const_to_python(v, schema) for v in ir.elements)
@const_to_python.register(irast.IntegerConstant)
def int_const_to_python(
ir: irast.IntegerConstant,
schema: s_schema.Schema) -> Any:
stype = schema.get_by_id(ir.typeref.id)
assert isinstance(stype, s_types.Type)
bigint = schema.get('std::bigint', type=s_obj.SubclassableObject)
if stype.issubclass(schema, bigint):
return decimal.Decimal(ir.value)
else:
return int(ir.value)
@const_to_python.register(irast.FloatConstant)
def float_const_to_python(
ir: irast.FloatConstant,
schema: s_schema.Schema) -> Any:
stype = schema.get_by_id(ir.typeref.id)
assert isinstance(stype, s_types.Type)
bigint = schema.get('std::bigint', type=s_obj.SubclassableObject)
if stype.issubclass(schema, bigint):
return decimal.Decimal(ir.value)
else:
return float(ir.value)
@const_to_python.register(irast.StringConstant)
def str_const_to_python(
ir: irast.StringConstant,
schema: s_schema.Schema) -> Any:
return ir.value
@const_to_python.register(irast.BooleanConstant)
def bool_const_to_python(
ir: irast.BooleanConstant,
schema: s_schema.Schema) -> Any:
return ir.value == 'true'
@const_to_python.register(irast.TypeCast)
def cast_const_to_python(
ir: irast.TypeCast,
schema: s_schema.Schema) -> Any:
schema, stype = irtyputils.ir_typeref_to_type(schema, ir.to_type)
pytype = scalar_type_to_python_type(stype, schema)
sval = evaluate_to_python_val(ir.expr, schema=schema)
if sval is None:
return None
elif isinstance(sval, tuple):
return tuple(pytype(elem) for elem in sval)
else:
return pytype(sval)
def schema_type_to_python_type(
stype: s_types.Type,
schema: s_schema.Schema) -> type:
if isinstance(stype, s_scalars.ScalarType):
return scalar_type_to_python_type(stype, schema)
elif isinstance(stype, s_objtypes.ObjectType):
return object_type_to_python_type(stype, schema)
else:
raise UnsupportedExpressionError(
f'{stype.get_displayname(schema)} is not representable in Python')
typemap = {
'std::str': str,
'std::anyint': int,
'std::anyfloat': float,
'std::decimal': decimal.Decimal,
'std::bigint': decimal.Decimal,
'std::bool': bool,
'std::json': str,
'std::uuid': uuidgen.UUID,
'std::duration': statypes.Duration,
'cfg::memory': statypes.ConfigMemory,
}
def scalar_type_to_python_type(
stype: s_types.Type,
schema: s_schema.Schema,
) -> type:
for basetype_name, pytype in typemap.items():
basetype = schema.get(basetype_name, type=s_obj.InheritingObject)
if stype.issubclass(schema, basetype):
return pytype
if stype.is_enum(schema):
return str
raise UnsupportedExpressionError(
f'{stype.get_displayname(schema)} is not representable in Python')
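# object_type_to_python_type() mirrors an object type as a frozen dataclass:
# one field per pointer (skipping 'id' and '__type__'), multi pointers become
# FrozenSet[...] fields, and a field takes part in equality/hashing only when
# it is backed by a std::exclusive constraint on a non-object target.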
def object_type_to_python_type(
objtype: s_objtypes.ObjectType,
schema: s_schema.Schema,
*,
base_class: Optional[type] = None,
_memo: Optional[Dict[s_types.Type, type]] = None,
) -> type:
if _memo is None:
_memo = {}
default: Any
fields = []
subclasses = []
for pn, p in objtype.get_pointers(schema).items(schema):
str_pn = str(pn)
if str_pn in ('id', '__type__'):
continue
ptype = p.get_target(schema)
assert ptype is not None
if isinstance(ptype, s_objtypes.ObjectType):
pytype = _memo.get(ptype)
if pytype is None:
pytype = object_type_to_python_type(
ptype, schema, base_class=base_class, _memo=_memo)
_memo[ptype] = pytype
for subtype in ptype.children(schema):
subclasses.append(
object_type_to_python_type(
subtype, schema,
base_class=pytype, _memo=_memo))
else:
pytype = scalar_type_to_python_type(ptype, schema)
ptr_card = p.get_cardinality(schema)
is_multi = ptr_card.is_multi()
if is_multi:
pytype = FrozenSet[pytype] # type: ignore
default = p.get_default(schema)
if default is None:
if p.get_required(schema):
default = dataclasses.MISSING
else:
default = qlcompiler.evaluate_to_python_val(
default.text, schema=schema)
if is_multi and not isinstance(default, frozenset):
default = frozenset((default,))
constraints = p.get_constraints(schema).objects(schema)
exclusive = schema.get('std::exclusive', type=s_constr.Constraint)
unique = (
not ptype.is_object_type()
and any(c.issubclass(schema, exclusive) for c in constraints)
)
field = dataclasses.field(
compare=unique,
hash=unique,
repr=True,
default=default,
)
fields.append((str_pn, pytype, field))
bases: Tuple[type, ...]
if base_class is not None:
bases = (base_class,)
else:
bases = ()
ptype_dataclass = dataclasses.make_dataclass(
objtype.get_name(schema).name,
fields=fields,
bases=bases,
frozen=True,
namespace={'_subclasses': subclasses},
)
assert isinstance(ptype_dataclass, type)
return ptype_dataclass
@functools.singledispatch
def evaluate_to_config_op(
ir: irast.Base,
schema: s_schema.Schema) -> Any:
raise UnsupportedExpressionError(
f'no config op evaluation handler for {ir.__class__}')
@evaluate_to_config_op.register
def evaluate_config_set(
ir: irast.ConfigSet,
schema: s_schema.Schema) -> Any:
value = evaluate_to_python_val(ir.expr, schema)
if ir.cardinality is qltypes.SchemaCardinality.Many:
if value is None:
value = []
elif not typeutils.is_container(value):
value = [value]
return config.Operation(
opcode=config.OpCode.CONFIG_SET,
scope=ir.scope,
setting_name=ir.name,
value=value,
)
@evaluate_to_config_op.register
def evaluate_config_reset(
ir: irast.ConfigReset,
schema: s_schema.Schema) -> Any:
if ir.selector is not None:
raise UnsupportedExpressionError(
'filtered CONFIGURE RESET is not supported by static eval'
)
return config.Operation(
opcode=config.OpCode.CONFIG_RESET,
scope=ir.scope,
setting_name=ir.name,
value=None,
)
| edgedb/edgedb | edb/ir/staeval.py | Python | apache-2.0 | 13,571 |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift.common.splice`'''
import os
import errno
import ctypes
import logging
import tempfile
import unittest
import contextlib
import re
import mock
import six
from swift.common.splice import splice, tee
LOGGER = logging.getLogger(__name__)
def NamedTemporaryFile():
'''Wrapper to tempfile.NamedTemporaryFile() disabling buffering.
The wrapper is used to support Python 2 and Python 3 in the same
code base.
'''
if six.PY3:
return tempfile.NamedTemporaryFile(buffering=0)
else:
return tempfile.NamedTemporaryFile(bufsize=0)
def safe_close(fd):
'''Close a file descriptor, ignoring any exceptions'''
try:
os.close(fd)
except Exception:
LOGGER.exception('Error while closing FD')
@contextlib.contextmanager
def pipe():
'''Context-manager providing 2 ends of a pipe, closing them at exit'''
fds = os.pipe()
try:
yield fds
finally:
safe_close(fds[0])
safe_close(fds[1])
class TestSplice(unittest.TestCase):
'''Tests for `splice`'''
def setUp(self):
if not splice.available:
raise unittest.SkipTest('splice not available')
def test_flags(self):
'''Test flag attribute availability'''
self.assertTrue(hasattr(splice, 'SPLICE_F_MOVE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_NONBLOCK'))
self.assertTrue(hasattr(splice, 'SPLICE_F_MORE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_GIFT'))
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(splice.available)
def test_splice_pipe_to_pipe(self):
'''Test `splice` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = splice(p1a, None, p2b, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 3), b'def')
def test_splice_file_to_pipe(self):
'''Test `splice` from a file to a pipe'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
fd.write(b'abcdef')
fd.seek(0, os.SEEK_SET)
res = splice(fd, None, pb, None, 3, 0)
self.assertEqual(res, (3, None, None))
# `fd.tell()` isn't updated...
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 3)
fd.seek(0, os.SEEK_SET)
res = splice(fd, 3, pb, None, 3, 0)
self.assertEqual(res, (3, 6, None))
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 0)
self.assertEqual(os.read(pa, 6), b'abcdef')
def test_splice_pipe_to_file(self):
'''Test `splice` from a pipe to a file'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
os.write(pb, b'abcdef')
res = splice(pa, None, fd, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(fd.tell(), 3)
fd.seek(0, os.SEEK_SET)
res = splice(pa, None, fd, 3, 3, 0)
self.assertEqual(res, (3, None, 6))
self.assertEqual(fd.tell(), 0)
self.assertEqual(fd.read(6), b'abcdef')
@mock.patch.object(splice, '_c_splice')
def test_fileno(self, mock_splice):
'''Test handling of file-descriptors'''
splice(1, None, 2, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
mock_splice.reset_mock()
with open('/dev/zero', 'r') as fd:
splice(fd, None, fd, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((fd.fileno(), None, fd.fileno(), None, 3, 0),
{}))
@mock.patch.object(splice, '_c_splice')
def test_flags_list(self, mock_splice):
'''Test handling of flag lists'''
splice(1, None, 2, None, 3,
[splice.SPLICE_F_MOVE, splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, flags), {}))
mock_splice.reset_mock()
splice(1, None, 2, None, 3, [])
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] splice: %s' % (err, os.strerror(err))
try:
splice(fd, None, fd, None, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, splice, 1, None, 2, None, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `splice` support'''
class LibC(object):
'''A fake `libc` object tracking `splice` attribute access'''
def __init__(self):
self.splice_retrieved = False
@property
def splice(self):
self.splice_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Splice` instance
# Something you're not supposed to do in actual code
new_splice = type(splice)()
self.assertFalse(new_splice.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.splice_retrieved)
class TestTee(unittest.TestCase):
'''Tests for `tee`'''
def setUp(self):
if not tee.available:
raise unittest.SkipTest('tee not available')
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(tee.available)
def test_tee_pipe_to_pipe(self):
'''Test `tee` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = tee(p1a, p2b, 3, 0)
self.assertEqual(res, 3)
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 6), b'abcdef')
@mock.patch.object(tee, '_c_tee')
def test_fileno(self, mock_tee):
'''Test handling of file-descriptors'''
with pipe() as (pa, pb):
tee(pa, pb, 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
mock_tee.reset_mock()
tee(os.fdopen(pa, 'r'), os.fdopen(pb, 'w'), 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
@mock.patch.object(tee, '_c_tee')
def test_flags_list(self, mock_tee):
'''Test handling of flag lists'''
tee(1, 2, 3, [splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_tee.call_args, ((1, 2, 3, flags), {}))
mock_tee.reset_mock()
tee(1, 2, 3, [])
self.assertEqual(mock_tee.call_args, ((1, 2, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] tee: %s' % (err, os.strerror(err))
try:
tee(fd, fd, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, tee, 1, 2, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `tee` support'''
class LibC(object):
'''A fake `libc` object tracking `tee` attribute access'''
def __init__(self):
self.tee_retrieved = False
@property
def tee(self):
self.tee_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Tee` instance
# Something you're not supposed to do in actual code
new_tee = type(tee)()
self.assertFalse(new_tee.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.tee_retrieved)
| nadeemsyed/swift | test/unit/common/test_splice.py | Python | apache-2.0 | 10,235 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('identities', '0001_initial'),
('finance', '0001_initial'),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=512)),
('asset', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='finance.Asset', null=True)),
],
options={
'ordering': ['name'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='ItemField',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=256)),
('label', models.CharField(max_length=256)),
('field_type', models.CharField(max_length=64, choices=[(b'text', b'Text'), (b'details', b'Details'), (b'url', b'URL'), (b'picture', b'Picture'), (b'date', b'Date')])),
('required', models.BooleanField(default=False)),
('allowed_values', models.TextField(null=True, blank=True)),
('details', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['name'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='ItemServicing',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=256)),
('start_date', models.DateField(null=True, blank=True)),
('expiry_date', models.DateField(null=True, blank=True)),
('details', models.TextField(blank=True)),
('items', models.ManyToManyField(to='infrastructure.Item', null=True, blank=True)),
('payments', models.ManyToManyField(to='finance.Transaction', null=True, blank=True)),
('supplier', models.ForeignKey(related_name='itemservice_supplied', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='identities.Contact', null=True)),
],
options={
'ordering': ['-expiry_date'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='ItemStatus',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=256)),
('details', models.TextField(null=True, blank=True)),
('active', models.BooleanField(default=True)),
('hidden', models.BooleanField(default=False)),
],
options={
'ordering': ('hidden', '-active', 'name'),
},
bases=('core.object',),
),
migrations.CreateModel(
name='ItemType',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=512)),
('details', models.TextField(null=True, blank=True)),
('fields', models.ManyToManyField(to='infrastructure.ItemField', null=True, blank=True)),
('parent', models.ForeignKey(related_name='child_set', blank=True, to='infrastructure.ItemType', null=True)),
],
options={
'ordering': ['name'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='ItemValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.TextField(blank=True)),
('field', models.ForeignKey(to='infrastructure.ItemField')),
('item', models.ForeignKey(to='infrastructure.Item')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='item',
name='item_type',
field=models.ForeignKey(to='infrastructure.ItemType'),
preserve_default=True,
),
migrations.AddField(
model_name='item',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='core.Location', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='item',
name='manufacturer',
field=models.ForeignKey(related_name='items_manufactured', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='identities.Contact', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='item',
name='owner',
field=models.ForeignKey(related_name='items_owned', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='identities.Contact', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='item',
name='parent',
field=models.ForeignKey(related_name='child_set', blank=True, to='infrastructure.Item', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='item',
name='status',
field=models.ForeignKey(to='infrastructure.ItemStatus'),
preserve_default=True,
),
migrations.AddField(
model_name='item',
name='supplier',
field=models.ForeignKey(related_name='items_supplied', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='identities.Contact', null=True),
preserve_default=True,
),
]
| thiagof/treeio | treeio/infrastructure/migrations/0001_initial.py | Python | mit | 6,480 |
c = float(input('Celsius: '))
f = 9*c/5 + 32
print ('%.2f Fahrenheit' %f)
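# Example: an input of 100 prints "212.00 Fahrenheit" (9*100/5 + 32 = 212).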
| SANDEISON/Python | Exercicios/01 Exercício de Programação/Feitos pelo Professor/Lista I exercício 7.py | Python | gpl-2.0 | 74 |
"""Testing of module table."""
# pylint: disable=no-self-use
import unittest
from hamcrest import assert_that, equal_to, contains_string
from spline.tools.table import calculate_columns, calculate_row_format, pprint
from spline.tools.stream import stdout_redirector
class TestTable(unittest.TestCase):
"""Testing of module table."""
def test_calulate_columns(self):
"""Testing of spline.tools.table.calculate_columns function."""
data = self.default_test_data()
columns = calculate_columns(data)
assert_that(len(columns), equal_to(3))
assert_that(columns['first name'], equal_to(10)) # column title
assert_that(columns['surname'], equal_to(8)) # column title and value
assert_that(columns['character'], equal_to(14)) # value
def test_calculate_row_format(self):
"""Testing of spline.tools.table.calculate_row_format function."""
data = self.default_test_data()
columns = calculate_columns(data)
row_format = calculate_row_format(columns, list(sorted(columns.keys())))
expected_row_format = '|%(character)-14s|%(first name)-10s|%(surname)-8s|'
assert_that(row_format, equal_to(expected_row_format))
row_format = calculate_row_format(columns)
assert_that(row_format, contains_string('|%(character)-14s'))
assert_that(row_format, contains_string('|%(first name)-10s'))
assert_that(row_format, contains_string('|%(surname)-8s'))
def test_pprint(self):
"""Testing pprint function."""
data = self.default_test_data()
with stdout_redirector() as stream:
pprint(data, list(sorted(data[0].keys())))
content = stream.getvalue()
lines = content.split('\n')
separator = '|--------------|----------|--------|'
assert_that(len(lines), equal_to(7))
assert_that(lines[0], equal_to(separator))
assert_that(lines[1], equal_to('|Character |First Name|Surname |'))
assert_that(lines[2], equal_to(separator))
assert_that(lines[3], equal_to('|Hercule Poirot|Agatha |Christie|'))
assert_that(lines[4], equal_to('|Nero Wolfe |Rex |Stout |'))
assert_that(lines[5], equal_to(separator))
@staticmethod
def default_test_data():
"""
Provide test data.
Returns:
list: each entry a dictionary representing the row data.
"""
return [
{'first name': 'Agatha', 'surname': 'Christie', 'character': 'Hercule Poirot'},
{'first name': 'Rex', 'surname': 'Stout', 'character': 'Nero Wolfe'},
]
| Nachtfeuer/pipeline | tests/tools/test_table.py | Python | mit | 2,688 |
from flask_wtf import Form
from wtforms import StringField, DateField, PasswordField
from wtforms.validators import Optional, AnyOf, DataRequired, \
Email
from datetime import datetime
from dmutils.formats import DATE_FORMAT
class ServiceUpdateAuditEventsForm(Form):
audit_date = DateField(
'Audit Date',
format=DATE_FORMAT,
validators=[Optional()])
acknowledged = StringField(
'acknowledged',
default="false",
validators=[
AnyOf(['all', 'true', 'false']),
Optional()]
)
def default_acknowledged(self):
if self.acknowledged.data:
return self.acknowledged.data
else:
return self.acknowledged.default
def format_date(self):
if self.audit_date.data:
return datetime.strftime(self.audit_date.data, DATE_FORMAT)
else:
return None
def format_date_for_display(self):
if self.audit_date.data:
return self.audit_date.data
else:
return datetime.utcnow()
class LoginForm(Form):
email_address = StringField('Email address', validators=[
DataRequired(message='Email cannot be empty'),
Email(message='Please enter a valid email address')
])
password = PasswordField('Password', validators=[
DataRequired(message='Please enter your password')
])
| mtekel/digitalmarketplace-admin-frontend | app/main/forms.py | Python | mit | 1,398 |
#!/usr/bin/env python3
import unittest
import os
import shutil
from mapping import waymaker
TMPDIR = '/tmp/test-waymaker'
class TestWaymaker(unittest.TestCase):
def test_waymaker(self):
self.maxDiff = None # to see full errors
if os.path.exists(TMPDIR):
shutil.rmtree(TMPDIR)
os.mkdir(TMPDIR)
inputfile = os.path.join(TMPDIR, "test-ways.txt")
with open(inputfile, "w") as fp:
print('''
10 Sherwood Blvd, White Rock, NM 87547
White Rock Library
1000 Central Ave
Los Alamos, NM 87544
Los Alamos County Building
and also Council Headquarters
''', file=fp)
entries = waymaker.read_description_file(inputfile)
self.assertEqual(entries, [
[35.825485, -106.21147, '10 Sherwood Blvd, White Rock, NM 87547\nWhite Rock Library'],
[35.88126, -106.29589, '1000 Central Ave\nLos Alamos, NM 87544\nLos Alamos County Building\nand also Council Headquarters']])
gpxfile = os.path.join(TMPDIR, "test-ways.gpx")
waymaker.write_gpx_file(entries, gpxfile, omit_time=True)
with open(gpxfile) as gpxfp:
written_gpx = gpxfp.read()
self.assertEqual(written_gpx, '''<?xml version="1.0" encoding="UTF-8"?>
<gpx
version="1.0"
creator="waymaker v. 0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.topografix.com/GPX/1/0"
xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd">
<bounds minlat="35.825485" minlon="-106.295890" maxlat="35.881260" maxlon="-106.211470"/>
<wpt lat="35.825485" lon="-106.211470">
<name>10 Sherwood Blvd, White Rock, NM 87547
White Rock Library</name>
</wpt>
<wpt lat="35.881260" lon="-106.295890">
<name>1000 Central Ave
Los Alamos, NM 87544
Los Alamos County Building
and also Council Headquarters</name>
</wpt>
</gpx>
''')
# Clean up
shutil.rmtree(TMPDIR)
if __name__ == '__main__':
unittest.main()
| akkana/scripts | test/test_waymaker.py | Python | gpl-2.0 | 1,966 |
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
#
#
import argparse
import cmd
import os
import sys
import struct
import json
from functools import wraps
import bmpy_utils as utils
from bm_runtime.standard import Standard
from bm_runtime.standard.ttypes import *
try:
from bm_runtime.simple_pre import SimplePre
except:
pass
try:
from bm_runtime.simple_pre_lag import SimplePreLAG
except:
pass
def enum(type_name, *sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
@staticmethod
def to_str(x):
return reverse[x]
enums['to_str'] = to_str
@staticmethod
def from_str(x):
return enums[x]
enums['from_str'] = from_str
return type(type_name, (), enums)
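# Example: with PreType below, PreType.SimplePre == 1, PreType.to_str(1) ==
# 'SimplePre' and PreType.from_str('SimplePre') == 1.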
PreType = enum('PreType', 'None', 'SimplePre', 'SimplePreLAG')
MeterType = enum('MeterType', 'packets', 'bytes')
TableType = enum('TableType', 'simple', 'indirect', 'indirect_ws')
def bytes_to_string(byte_array):
form = 'B' * len(byte_array)
return struct.pack(form, *byte_array)
def table_error_name(x):
return TableOperationErrorCode._VALUES_TO_NAMES[x]
def get_parser():
class ActionToPreType(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(ActionToPreType, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
assert(type(values) is str)
setattr(namespace, self.dest, PreType.from_str(values))
parser = argparse.ArgumentParser(description='BM runtime CLI')
# One port == one device !!!! This is not a multidevice CLI
parser.add_argument('--thrift-port', help='Thrift server port for table updates',
type=int, action="store", default=9090)
parser.add_argument('--thrift-ip', help='Thrift IP address for table updates',
type=str, action="store", default='localhost')
parser.add_argument('--json', help='JSON description of P4 program',
type=str, action="store", required=False)
parser.add_argument('--pre', help='Packet Replication Engine used by target',
type=str, choices=['None', 'SimplePre', 'SimplePreLAG'],
default=PreType.SimplePre, action=ActionToPreType)
return parser
TABLES = {}
ACTION_PROFS = {}
ACTIONS = {}
METER_ARRAYS = {}
COUNTER_ARRAYS = {}
REGISTER_ARRAYS = {}
CUSTOM_CRC_CALCS = {}
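# These registries are cleared by reset_config() and repopulated from the P4
# program's JSON description by load_json_str().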
class MatchType:
EXACT = 0
LPM = 1
TERNARY = 2
VALID = 3
RANGE = 4
@staticmethod
def to_str(x):
return {0: "exact", 1: "lpm", 2: "ternary", 3: "valid", 4: "range"}[x]
@staticmethod
def from_str(x):
return {"exact": 0, "lpm": 1, "ternary": 2, "valid": 3, "range": 4}[x]
class Table:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.match_type_ = None
self.actions = {}
self.key = []
self.default_action = None
self.type_ = None
self.support_timeout = False
self.action_prof = None
TABLES[name] = self
def num_key_fields(self):
return len(self.key)
def key_str(self):
return ",\t".join([name + "(" + MatchType.to_str(t) + ", " + str(bw) + ")" for name, t, bw in self.key])
def table_str(self):
ap_str = "implementation={}".format(
"None" if not self.action_prof else self.action_prof.name)
return "{0:30} [{1}, mk={2}]".format(self.name, ap_str, self.key_str())
class ActionProf:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.with_selection = False
self.actions = {}
self.ref_cnt = 0
ACTION_PROFS[name] = self
def action_prof_str(self):
return "{0:30} [{1}]".format(self.name, self.with_selection)
class Action:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.runtime_data = []
ACTIONS[name] = self
def num_params(self):
return len(self.runtime_data)
def runtime_data_str(self):
return ",\t".join([name + "(" + str(bw) + ")" for name, bw in self.runtime_data])
def action_str(self):
return "{0:30} [{1}]".format(self.name, self.runtime_data_str())
class MeterArray:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.type_ = None
self.is_direct = None
self.size = None
self.binding = None
self.rate_count = None
METER_ARRAYS[name] = self
def meter_str(self):
return "{0:30} [{1}, {2}]".format(self.name, self.size,
MeterType.to_str(self.type_))
class CounterArray:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.is_direct = None
self.size = None
self.binding = None
COUNTER_ARRAYS[name] = self
def counter_str(self):
return "{0:30} [{1}]".format(self.name, self.size)
class RegisterArray:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.width = None
self.size = None
REGISTER_ARRAYS[name] = self
def register_str(self):
return "{0:30} [{1}]".format(self.name, self.size)
def reset_config():
TABLES.clear()
ACTION_PROFS.clear()
ACTIONS.clear()
METER_ARRAYS.clear()
COUNTER_ARRAYS.clear()
REGISTER_ARRAYS.clear()
CUSTOM_CRC_CALCS.clear()
def load_json_str(json_str):
def get_header_type(header_name, j_headers):
for h in j_headers:
if h["name"] == header_name:
return h["header_type"]
assert(0)
def get_field_bitwidth(header_type, field_name, j_header_types):
for h in j_header_types:
if h["name"] != header_type: continue
for t in h["fields"]:
# t can have a third element (field signedness)
f, bw = t[0], t[1]
if f == field_name:
return bw
assert(0)
reset_config()
json_ = json.loads(json_str)
for j_action in json_["actions"]:
action = Action(j_action["name"], j_action["id"])
for j_param in j_action["runtime_data"]:
action.runtime_data += [(j_param["name"], j_param["bitwidth"])]
for j_pipeline in json_["pipelines"]:
if "action_profiles" in j_pipeline: # new JSON format
for j_aprof in j_pipeline["action_profiles"]:
action_prof = ActionProf(j_aprof["name"], j_aprof["id"])
action_prof.with_selection = "selector" in j_aprof
for j_table in j_pipeline["tables"]:
table = Table(j_table["name"], j_table["id"])
table.match_type = MatchType.from_str(j_table["match_type"])
table.type_ = TableType.from_str(j_table["type"])
table.support_timeout = j_table["support_timeout"]
for action in j_table["actions"]:
table.actions[action] = ACTIONS[action]
if table.type_ in {TableType.indirect, TableType.indirect_ws}:
if "action_profile" in j_table:
action_prof = ACTION_PROFS[j_table["action_profile"]]
else: # for backward compatibility
assert("act_prof_name" in j_table)
action_prof = ActionProf(j_table["act_prof_name"],
table.id_)
action_prof.with_selection = "selector" in j_table
action_prof.actions.update(table.actions)
action_prof.ref_cnt += 1
table.action_prof = action_prof
for j_key in j_table["key"]:
target = j_key["target"]
match_type = MatchType.from_str(j_key["match_type"])
if match_type == MatchType.VALID:
field_name = target + "_valid"
bitwidth = 1
elif target[1] == "$valid$":
field_name = target[0] + "_valid"
bitwidth = 1
else:
field_name = ".".join(target)
header_type = get_header_type(target[0],
json_["headers"])
bitwidth = get_field_bitwidth(header_type, target[1],
json_["header_types"])
table.key += [(field_name, match_type, bitwidth)]
if "meter_arrays" in json_:
for j_meter in json_["meter_arrays"]:
meter_array = MeterArray(j_meter["name"], j_meter["id"])
if "is_direct" in j_meter and j_meter["is_direct"]:
meter_array.is_direct = True
meter_array.binding = j_meter["binding"]
else:
meter_array.is_direct = False
meter_array.size = j_meter["size"]
meter_array.type_ = MeterType.from_str(j_meter["type"])
meter_array.rate_count = j_meter["rate_count"]
if "counter_arrays" in json_:
for j_counter in json_["counter_arrays"]:
counter_array = CounterArray(j_counter["name"], j_counter["id"])
counter_array.is_direct = j_counter["is_direct"]
if counter_array.is_direct:
counter_array.binding = j_counter["binding"]
else:
counter_array.size = j_counter["size"]
if "register_arrays" in json_:
for j_register in json_["register_arrays"]:
register_array = RegisterArray(j_register["name"],
j_register["id"])
register_array.size = j_register["size"]
register_array.width = j_register["bitwidth"]
if "calculations" in json_:
for j_calc in json_["calculations"]:
calc_name = j_calc["name"]
if j_calc["algo"] == "crc16_custom":
CUSTOM_CRC_CALCS[calc_name] = 16
elif j_calc["algo"] == "crc32_custom":
CUSTOM_CRC_CALCS[calc_name] = 32
class UIn_Error(Exception):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class UIn_ResourceError(UIn_Error):
def __init__(self, res_type, name):
self.res_type = res_type
self.name = name
def __str__(self):
return "Invalid %s name (%s)" % (self.res_type, self.name)
class UIn_MatchKeyError(UIn_Error):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class UIn_RuntimeDataError(UIn_Error):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class CLI_FormatExploreError(Exception):
def __init__(self):
pass
class UIn_BadParamError(UIn_Error):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class UIn_BadIPv4Error(UIn_Error):
def __init__(self):
pass
class UIn_BadIPv6Error(UIn_Error):
def __init__(self):
pass
class UIn_BadMacError(UIn_Error):
def __init__(self):
pass
def ipv4Addr_to_bytes(addr):
if not '.' in addr:
raise CLI_FormatExploreError()
s = addr.split('.')
if len(s) != 4:
raise UIn_BadIPv4Error()
try:
return [int(b) for b in s]
except:
raise UIn_BadIPv4Error()
def macAddr_to_bytes(addr):
if not ':' in addr:
raise CLI_FormatExploreError()
s = addr.split(':')
if len(s) != 6:
raise UIn_BadMacError()
try:
return [int(b, 16) for b in s]
except:
raise UIn_BadMacError()
def ipv6Addr_to_bytes(addr):
from ipaddr import IPv6Address
if not ':' in addr:
raise CLI_FormatExploreError()
try:
ip = IPv6Address(addr)
except:
raise UIn_BadIPv6Error()
try:
return [ord(b) for b in ip.packed]
except:
raise UIn_BadIPv6Error()
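# Converts a non-negative integer into a list of 'num' bytes, most significant
# byte first, e.g. int_to_bytes(0x1234, 4) -> [0, 0, 0x12, 0x34]; raises
# UIn_BadParamError when the value does not fit in 'num' bytes.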
def int_to_bytes(i, num):
byte_array = []
while i > 0:
byte_array.append(i % 256)
i = i / 256
num -= 1
if num < 0:
raise UIn_BadParamError("Parameter is too large")
while num > 0:
byte_array.append(0)
num -= 1
byte_array.reverse()
return byte_array
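# Parses one user-supplied value according to its bit width: 32/48/128-bit
# fields may be written in IPv4/MAC/IPv6 notation respectively, e.g.
# parse_param("10.0.0.1", 32) -> [10, 0, 0, 1]; anything else falls back to
# integer parsing (decimal or 0x-prefixed hex) padded to the field width.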
def parse_param(input_str, bitwidth):
if bitwidth == 32:
try:
return ipv4Addr_to_bytes(input_str)
except CLI_FormatExploreError:
pass
except UIn_BadIPv4Error:
raise UIn_BadParamError("Invalid IPv4 address")
elif bitwidth == 48:
try:
return macAddr_to_bytes(input_str)
except CLI_FormatExploreError:
pass
except UIn_BadMacError:
raise UIn_BadParamError("Invalid MAC address")
elif bitwidth == 128:
try:
return ipv6Addr_to_bytes(input_str)
except CLI_FormatExploreError:
pass
except UIn_BadIPv6Error:
raise UIn_BadParamError("Invalid IPv6 address")
try:
input_ = int(input_str, 0)
except:
raise UIn_BadParamError(
"Invalid input, could not cast to integer, try in hex with 0x prefix"
)
try:
return int_to_bytes(input_, (bitwidth + 7) / 8)
except UIn_BadParamError:
raise
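# Converts the action parameters typed on the command line into the packed
# byte strings expected by the Thrift API, using the bit widths declared in
# the action's runtime data.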
def parse_runtime_data(action, params):
def parse_param_(field, bw):
try:
return parse_param(field, bw)
except UIn_BadParamError as e:
raise UIn_RuntimeDataError(
"Error while parsing %s - %s" % (field, e)
)
    bitwidths = [bw for (_, bw) in action.runtime_data]
byte_array = []
for input_str, bitwidth in zip(params, bitwidths):
byte_array += [bytes_to_string(parse_param_(input_str, bitwidth))]
return byte_array
_match_types_mapping = {
MatchType.EXACT : BmMatchParamType.EXACT,
MatchType.LPM : BmMatchParamType.LPM,
MatchType.TERNARY : BmMatchParamType.TERNARY,
MatchType.VALID : BmMatchParamType.VALID,
MatchType.RANGE : BmMatchParamType.RANGE,
}
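# Builds the Thrift match key from the CLI fields; the expected syntax depends
# on the match type: EXACT takes a plain value, LPM "prefix/length", TERNARY
# "value&&&mask", RANGE "start->end" and VALID a 0/1 flag.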
def parse_match_key(table, key_fields):
def parse_param_(field, bw):
try:
return parse_param(field, bw)
except UIn_BadParamError as e:
raise UIn_MatchKeyError(
"Error while parsing %s - %s" % (field, e)
)
params = []
match_types = [t for (_, t, _) in table.key]
bitwidths = [bw for (_, _, bw) in table.key]
for idx, field in enumerate(key_fields):
param_type = _match_types_mapping[match_types[idx]]
bw = bitwidths[idx]
if param_type == BmMatchParamType.EXACT:
key = bytes_to_string(parse_param_(field, bw))
param = BmMatchParam(type = param_type,
exact = BmMatchParamExact(key))
elif param_type == BmMatchParamType.LPM:
prefix, length = field.split("/")
key = bytes_to_string(parse_param_(prefix, bw))
param = BmMatchParam(type = param_type,
lpm = BmMatchParamLPM(key, int(length)))
elif param_type == BmMatchParamType.TERNARY:
key, mask = field.split("&&&")
key = bytes_to_string(parse_param_(key, bw))
mask = bytes_to_string(parse_param_(mask, bw))
if len(mask) != len(key):
raise UIn_MatchKeyError(
"Key and mask have different lengths in expression %s" % field
)
param = BmMatchParam(type = param_type,
ternary = BmMatchParamTernary(key, mask))
elif param_type == BmMatchParamType.VALID:
key = bool(int(field))
param = BmMatchParam(type = param_type,
valid = BmMatchParamValid(key))
elif param_type == BmMatchParamType.RANGE:
start, end = field.split("->")
start = bytes_to_string(parse_param_(start, bw))
end = bytes_to_string(parse_param_(end, bw))
if len(start) != len(end):
raise UIn_MatchKeyError(
"start and end have different lengths in expression %s" % field
)
            if start > end:
                raise UIn_MatchKeyError(
                    "start is greater than end in expression %s" % field
                )
param = BmMatchParam(type = param_type,
range = BmMatchParamRange(start, end))
else:
assert(0)
params.append(param)
return params
def printable_byte_str(s):
return ":".join("{:02x}".format(ord(c)) for c in s)
def BmMatchParam_to_str(self):
return BmMatchParamType._VALUES_TO_NAMES[self.type] + "-" +\
(self.exact.to_str() if self.exact else "") +\
(self.lpm.to_str() if self.lpm else "") +\
(self.ternary.to_str() if self.ternary else "") +\
(self.valid.to_str() if self.valid else "") +\
(self.range.to_str() if self.range else "")
def BmMatchParamExact_to_str(self):
return printable_byte_str(self.key)
def BmMatchParamLPM_to_str(self):
return printable_byte_str(self.key) + "/" + str(self.prefix_length)
def BmMatchParamTernary_to_str(self):
return printable_byte_str(self.key) + " &&& " + printable_byte_str(self.mask)
def BmMatchParamValid_to_str(self):
return ""
def BmMatchParamRange_to_str(self):
return printable_byte_str(self.start) + " -> " + printable_byte_str(self.end_)
BmMatchParam.to_str = BmMatchParam_to_str
BmMatchParamExact.to_str = BmMatchParamExact_to_str
BmMatchParamLPM.to_str = BmMatchParamLPM_to_str
BmMatchParamTernary.to_str = BmMatchParamTernary_to_str
BmMatchParamValid.to_str = BmMatchParamValid_to_str
BmMatchParamRange.to_str = BmMatchParamRange_to_str
# services is [(service_name, client_class), ...]
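# Typical use (mirrors main() below):
#   standard_client, mc_client = thrift_connect(
#       args.thrift_ip, args.thrift_port,
#       RuntimeAPI.get_thrift_services(args.pre))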
def thrift_connect(thrift_ip, thrift_port, services):
return utils.thrift_connect(thrift_ip, thrift_port, services)
def handle_bad_input(f):
@wraps(f)
def handle(*args, **kwargs):
try:
return f(*args, **kwargs)
except UIn_MatchKeyError as e:
print "Invalid match key:", e
except UIn_RuntimeDataError as e:
print "Invalid runtime data:", e
except UIn_Error as e:
print "Error:", e
except InvalidTableOperation as e:
error = TableOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid table operation (%s)" % error
except InvalidCounterOperation as e:
error = CounterOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid counter operation (%s)" % error
except InvalidMeterOperation as e:
error = MeterOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid meter operation (%s)" % error
except InvalidRegisterOperation as e:
error = RegisterOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid register operation (%s)" % error
except InvalidLearnOperation as e:
error = LearnOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid learn operation (%s)" % error
except InvalidSwapOperation as e:
error = SwapOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid swap operation (%s)" % error
except InvalidDevMgrOperation as e:
error = DevMgrErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid device manager operation (%s)" % error
except InvalidCrcOperation as e:
error = CrcErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid crc operation (%s)" % error
return handle
def deprecated_act_prof(substitute, with_selection=False,
strictly_deprecated=True):
# need two levels here because our decorator takes arguments
def deprecated_act_prof_(f):
# not sure if this is the right place for it, if I want it to play nice
# with @wraps
if strictly_deprecated:
f.__doc__ = "[DEPRECATED!] " + f.__doc__
f.__doc__ += "\nUse '{}' instead".format(substitute)
@wraps(f)
def wrapper(obj, line):
substitute_fn = getattr(obj, "do_" + substitute)
args = line.split()
obj.at_least_n_args(args, 1)
table_name = args[0]
table = obj.get_res("table", table_name, TABLES)
if with_selection:
obj.check_indirect_ws(table)
else:
obj.check_indirect(table)
assert(table.action_prof is not None)
assert(table.action_prof.ref_cnt > 0)
if strictly_deprecated and table.action_prof.ref_cnt > 1:
raise UIn_Error(
"Legacy command does not work with shared action profiles")
args[0] = table.action_prof.name
if strictly_deprecated:
# writing to stderr in case someone is parsing stdout
sys.stderr.write(
"This is a deprecated command, use '{}' instead\n".format(
substitute))
return substitute_fn(" ".join(args))
        # we add the handle_bad_input decorator "programmatically"
return handle_bad_input(wrapper)
return deprecated_act_prof_
# thrift does not support unsigned integers
def hex_to_i16(h):
x = int(h, 0)
if (x > 0xFFFF):
raise UIn_Error("Integer cannot fit within 16 bits")
    if (x > 0x7FFF): x -= 0x10000
return x
def i16_to_hex(h):
x = int(h)
    if (x & 0x8000): x += 0x10000
return x
def hex_to_i32(h):
x = int(h, 0)
if (x > 0xFFFFFFFF):
raise UIn_Error("Integer cannot fit within 32 bits")
    if (x > 0x7FFFFFFF): x -= 0x100000000
return x
def i32_to_hex(h):
x = int(h)
    if (x & 0x80000000): x += 0x100000000
return x
def parse_bool(s):
if s == "true" or s == "True":
return True
if s == "false" or s == "False":
return False
try:
s = int(s, 0)
return bool(s)
except:
pass
raise UIn_Error("Invalid bool parameter")
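# Interactive shell built on cmd.Cmd: each do_<name> method implements the
# CLI command <name>, its docstring doubles as the help text, and the matching
# complete_<name> method provides tab-completion; @handle_bad_input converts
# user-input and Thrift errors into printed messages instead of tracebacks.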
class RuntimeAPI(cmd.Cmd):
prompt = 'RuntimeCmd: '
intro = "Control utility for runtime P4 table manipulation"
@staticmethod
def get_thrift_services(pre_type):
services = [("standard", Standard.Client)]
if pre_type == PreType.SimplePre:
services += [("simple_pre", SimplePre.Client)]
elif pre_type == PreType.SimplePreLAG:
services += [("simple_pre_lag", SimplePreLAG.Client)]
else:
services += [(None, None)]
return services
def __init__(self, pre_type, standard_client, mc_client=None):
cmd.Cmd.__init__(self)
self.client = standard_client
self.mc_client = mc_client
self.pre_type = pre_type
def do_greet(self, line):
print "hello"
def do_EOF(self, line):
print
return True
def do_shell(self, line):
"Run a shell command"
output = os.popen(line).read()
print output
def get_res(self, type_name, name, array):
if name not in array:
raise UIn_ResourceError(type_name, name)
return array[name]
def at_least_n_args(self, args, n):
if len(args) < n:
raise UIn_Error("Insufficient number of args")
def exactly_n_args(self, args, n):
if len(args) != n:
raise UIn_Error(
"Wrong number of args, expected %d but got %d" % (n, len(args))
)
def _complete_res(self, array, text):
res = sorted(array.keys())
if not text:
return res
return [r for r in res if r.startswith(text)]
@handle_bad_input
def do_show_tables(self, line):
"List tables defined in the P4 program: show_tables"
self.exactly_n_args(line.split(), 0)
for table_name in sorted(TABLES):
print TABLES[table_name].table_str()
@handle_bad_input
def do_show_actions(self, line):
"List actions defined in the P4 program: show_actions"
self.exactly_n_args(line.split(), 0)
for action_name in sorted(ACTIONS):
print ACTIONS[action_name].action_str()
def _complete_tables(self, text):
return self._complete_res(TABLES, text)
def _complete_act_profs(self, text):
return self._complete_res(ACTION_PROFS, text)
@handle_bad_input
def do_table_show_actions(self, line):
"List one table's actions as per the P4 program: table_show_actions <table_name>"
        args = line.split()
        self.exactly_n_args(args, 1)
        table = self.get_res("table", args[0], TABLES)
for action_name in sorted(table.actions):
print ACTIONS[action_name].action_str()
def complete_table_show_actions(self, text, line, start_index, end_index):
        return self._complete_tables(text)
@handle_bad_input
def do_table_info(self, line):
"Show info about a table: table_info <table_name>"
        args = line.split()
        self.exactly_n_args(args, 1)
        table = self.get_res("table", args[0], TABLES)
print table.table_str()
print "*" * 80
for action_name in sorted(table.actions):
print ACTIONS[action_name].action_str()
def complete_table_info(self, text, line, start_index, end_index):
return self._complete_tables(text)
# used for tables but also for action profiles
def _complete_actions(self, text, table_name = None, res = TABLES):
        if not table_name:
            actions = sorted(ACTIONS.keys())
        elif table_name not in res:
            return []
        else:
            actions = sorted(res[table_name].actions.keys())
if not text:
return actions
return [a for a in actions if a.startswith(text)]
def _complete_table_and_action(self, text, line):
tables = sorted(TABLES.keys())
args = line.split()
args_cnt = len(args)
if args_cnt == 1 and not text:
return self._complete_tables(text)
if args_cnt == 2 and text:
return self._complete_tables(text)
table_name = args[1]
if args_cnt == 2 and not text:
return self._complete_actions(text, table_name)
if args_cnt == 3 and text:
return self._complete_actions(text, table_name)
return []
def _complete_act_prof_and_action(self, text, line):
act_profs = sorted(ACTION_PROFS.keys())
args = line.split()
args_cnt = len(args)
if args_cnt == 1 and not text:
return self._complete_act_profs(text)
if args_cnt == 2 and text:
return self._complete_act_profs(text)
act_prof_name = args[1]
if args_cnt == 2 and not text:
return self._complete_actions(text, act_prof_name, ACTION_PROFS)
if args_cnt == 3 and text:
return self._complete_actions(text, act_prof_name, ACTION_PROFS)
return []
# for debugging
def print_set_default(self, table_name, action_name, runtime_data):
print "Setting default action of", table_name
print "{0:20} {1}".format("action:", action_name)
print "{0:20} {1}".format(
"runtime data:",
"\t".join(printable_byte_str(d) for d in runtime_data)
)
@handle_bad_input
def do_table_set_default(self, line):
"Set default action for a match table: table_set_default <table name> <action name> <action parameters>"
args = line.split()
self.at_least_n_args(args, 2)
table_name, action_name = args[0], args[1]
table = self.get_res("table", table_name, TABLES)
if action_name not in table.actions:
raise UIn_Error(
"Table %s has no action %s" % (table_name, action_name)
)
action = ACTIONS[action_name]
if len(args[2:]) != action.num_params():
raise UIn_Error(
"Action %s needs %d parameters" % (action_name, action.num_params())
)
runtime_data = parse_runtime_data(action, args[2:])
self.print_set_default(table_name, action_name, runtime_data)
self.client.bm_mt_set_default_action(0, table_name, action_name, runtime_data)
def complete_table_set_default(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
def parse_runtime_data(self, action, action_params):
if len(action_params) != action.num_params():
raise UIn_Error(
"Action %s needs %d parameters" % (action.name, action.num_params())
)
return parse_runtime_data(action, action_params)
# for debugging
def print_table_add(self, match_key, action_name, runtime_data):
print "{0:20} {1}".format(
"match key:",
"\t".join(d.to_str() for d in match_key)
)
print "{0:20} {1}".format("action:", action_name)
print "{0:20} {1}".format(
"runtime data:",
"\t".join(printable_byte_str(d) for d in runtime_data)
)
@handle_bad_input
def do_table_num_entries(self, line):
"Return the number of entries in a match table (direct or indirect): table_num_entries <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
print self.client.bm_mt_get_num_entries(0, table_name)
def complete_table_num_entries(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_add(self, line):
"Add entry to a match table: table_add <table name> <action name> <match fields> => <action parameters> [priority]"
args = line.split()
self.at_least_n_args(args, 3)
table_name, action_name = args[0], args[1]
table = self.get_res("table", table_name, TABLES)
if action_name not in table.actions:
raise UIn_Error(
"Table %s has no action %s" % (table_name, action_name)
)
if table.match_type in {MatchType.TERNARY, MatchType.RANGE}:
try:
priority = int(args.pop(-1))
except:
raise UIn_Error(
"Table is ternary, but could not extract a valid priority from args"
)
else:
priority = 0
# guaranteed to exist
action = ACTIONS[action_name]
for idx, input_ in enumerate(args[2:]):
if input_ == "=>": break
idx += 2
match_key = args[2:idx]
action_params = args[idx+1:]
if len(match_key) != table.num_key_fields():
raise UIn_Error(
"Table %s needs %d key fields" % (table_name, table.num_key_fields())
)
runtime_data = self.parse_runtime_data(action, action_params)
match_key = parse_match_key(table, match_key)
print "Adding entry to", MatchType.to_str(table.match_type), "match table", table_name
# disable, maybe a verbose CLI option?
self.print_table_add(match_key, action_name, runtime_data)
entry_handle = self.client.bm_mt_add_entry(
0, table_name, match_key, action_name, runtime_data,
BmAddEntryOptions(priority = priority)
)
print "Entry has been added with handle", entry_handle
def complete_table_add(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_table_set_timeout(self, line):
"Set a timeout in ms for a given entry; the table has to support timeouts: table_set_timeout <table_name> <entry handle> <timeout (ms)>"
args = line.split()
self.exactly_n_args(args, 3)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
if not table.support_timeout:
raise UIn_Error(
"Table {} does not support entry timeouts".format(table_name))
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
try:
timeout_ms = int(args[2])
except:
raise UIn_Error("Bad format for timeout")
print "Setting a", timeout_ms, "ms timeout for entry", entry_handle
self.client.bm_mt_set_entry_ttl(0, table_name, entry_handle, timeout_ms)
def complete_table_set_timeout(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
    def do_table_modify(self, line):
        "Modify entry in a match table: table_modify <table name> <action name> <entry handle> [action parameters]"
args = line.split()
self.at_least_n_args(args, 3)
table_name, action_name = args[0], args[1]
table = self.get_res("table", table_name, TABLES)
if action_name not in table.actions:
raise UIn_Error(
"Table %s has no action %s" % (table_name, action_name)
)
# guaranteed to exist
action = ACTIONS[action_name]
try:
entry_handle = int(args[2])
except:
raise UIn_Error("Bad format for entry handle")
        action_params = args[3:]
        if len(args) > 3 and args[3] == "=>":
            # be more tolerant
            action_params = args[4:]
runtime_data = self.parse_runtime_data(action, action_params)
print "Modifying entry", entry_handle, "for", MatchType.to_str(table.match_type), "match table", table_name
entry_handle = self.client.bm_mt_modify_entry(
0, table_name, entry_handle, action_name, runtime_data
)
def complete_table_modify(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_table_delete(self, line):
"Delete entry from a match table: table_delete <table name> <entry handle>"
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
print "Deleting entry", entry_handle, "from", table_name
self.client.bm_mt_delete_entry(0, table_name, entry_handle)
def complete_table_delete(self, text, line, start_index, end_index):
return self._complete_tables(text)
def check_indirect(self, table):
if table.type_ not in {TableType.indirect, TableType.indirect_ws}:
raise UIn_Error("Cannot run this command on non-indirect table")
def check_indirect_ws(self, table):
if table.type_ != TableType.indirect_ws:
raise UIn_Error(
"Cannot run this command on non-indirect table,"\
" or on indirect table with no selector")
def check_act_prof_ws(self, act_prof):
if not act_prof.with_selection:
raise UIn_Error(
"Cannot run this command on an action profile without selector")
@handle_bad_input
def do_act_prof_create_member(self, line):
"Add a member to an action profile: act_prof_create_member <action profile name> <action_name> [action parameters]"
args = line.split()
self.at_least_n_args(args, 2)
act_prof_name, action_name = args[0], args[1]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
if action_name not in act_prof.actions:
raise UIn_Error("Action profile '{}' has no action '{}'".format(
act_prof_name, action_name))
action = ACTIONS[action_name]
action_params = args[2:]
runtime_data = self.parse_runtime_data(action, action_params)
mbr_handle = self.client.bm_mt_act_prof_add_member(
0, act_prof_name, action_name, runtime_data)
print "Member has been created with handle", mbr_handle
def complete_act_prof_create_member(self, text, line, start_index, end_index):
return self._complete_act_prof_and_action(text, line)
@deprecated_act_prof("act_prof_create_member")
def do_table_indirect_create_member(self, line):
"Add a member to an indirect match table: table_indirect_create_member <table name> <action_name> [action parameters]"
pass
def complete_table_indirect_create_member(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_act_prof_delete_member(self, line):
"Delete a member in an action profile: act_prof_delete_member <action profile name> <member handle>"
args = line.split()
self.exactly_n_args(args, 2)
        act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
self.client.bm_mt_act_prof_delete_member(0, act_prof_name, mbr_handle)
def complete_act_prof_delete_member(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_delete_member")
def do_table_indirect_delete_member(self, line):
"Delete a member in an indirect match table: table_indirect_delete_member <table name> <member handle>"
pass
def complete_table_indirect_delete_member(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_modify_member(self, line):
"Modify member in an action profile: act_prof_modify_member <action profile name> <action_name> <member_handle> [action parameters]"
args = line.split()
self.at_least_n_args(args, 3)
act_prof_name, action_name = args[0], args[1]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
if action_name not in act_prof.actions:
raise UIn_Error("Action profile '{}' has no action '{}'".format(
act_prof_name, action_name))
action = ACTIONS[action_name]
try:
mbr_handle = int(args[2])
except:
raise UIn_Error("Bad format for member handle")
        action_params = args[3:]
        if len(args) > 3 and args[3] == "=>":
            # be more tolerant
            action_params = args[4:]
runtime_data = self.parse_runtime_data(action, action_params)
mbr_handle = self.client.bm_mt_act_prof_modify_member(
0, act_prof_name, mbr_handle, action_name, runtime_data)
def complete_act_prof_modify_member(self, text, line, start_index, end_index):
return self._complete_act_prof_and_action(text, line)
@deprecated_act_prof("act_prof_modify_member")
def do_table_indirect_modify_member(self, line):
"Modify member in an indirect match table: table_indirect_modify_member <table name> <action_name> <member_handle> [action parameters]"
pass
def complete_table_indirect_modify_member(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
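    # Common argument parsing for table_indirect_add (member handle) and
    # table_indirect_add_with_group (group handle): splits the match key from
    # the handle at "=>" and extracts the optional priority for ternary/range
    # tables.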
def indirect_add_common(self, line, ws=False):
args = line.split()
self.at_least_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
if ws:
self.check_indirect_ws(table)
else:
self.check_indirect(table)
if table.match_type in {MatchType.TERNARY, MatchType.RANGE}:
try:
priority = int(args.pop(-1))
except:
raise UIn_Error(
"Table is ternary, but could not extract a valid priority from args"
)
else:
priority = 0
for idx, input_ in enumerate(args[1:]):
if input_ == "=>": break
idx += 1
match_key = args[1:idx]
if len(args) != (idx + 2):
raise UIn_Error("Invalid arguments, could not find handle")
handle = args[idx+1]
try:
handle = int(handle)
except:
raise UIn_Error("Bad format for handle")
match_key = parse_match_key(table, match_key)
print "Adding entry to indirect match table", table_name
return table_name, match_key, handle, BmAddEntryOptions(priority = priority)
@handle_bad_input
def do_table_indirect_add(self, line):
"Add entry to an indirect match table: table_indirect_add <table name> <match fields> => <member handle> [priority]"
table_name, match_key, handle, options = self.indirect_add_common(line)
entry_handle = self.client.bm_mt_indirect_add_entry(
0, table_name, match_key, handle, options
)
print "Entry has been added with handle", entry_handle
def complete_table_indirect_add(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
    def do_table_indirect_add_with_group(self, line):
        "Add entry to an indirect match table: table_indirect_add_with_group <table name> <match fields> => <group handle> [priority]"
table_name, match_key, handle, options = self.indirect_add_common(line, ws=True)
entry_handle = self.client.bm_mt_indirect_ws_add_entry(
0, table_name, match_key, handle, options
)
print "Entry has been added with handle", entry_handle
def complete_table_indirect_add_with_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_indirect_delete(self, line):
"Delete entry from an indirect match table: table_indirect_delete <table name> <entry handle>"
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
self.check_indirect(table)
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
print "Deleting entry", entry_handle, "from", table_name
self.client.bm_mt_indirect_delete_entry(0, table_name, entry_handle)
def complete_table_indirect_delete(self, text, line, start_index, end_index):
return self._complete_tables(text)
def indirect_set_default_common(self, line, ws=False):
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
if ws:
self.check_indirect_ws(table)
else:
self.check_indirect(table)
try:
handle = int(args[1])
except:
raise UIn_Error("Bad format for handle")
return table_name, handle
@handle_bad_input
def do_table_indirect_set_default(self, line):
"Set default member for indirect match table: table_indirect_set_default <table name> <member handle>"
table_name, handle = self.indirect_set_default_common(line)
self.client.bm_mt_indirect_set_default_member(0, table_name, handle)
def complete_table_indirect_set_default(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
    def do_table_indirect_set_default_with_group(self, line):
        "Set default group for indirect match table: table_indirect_set_default_with_group <table name> <group handle>"
table_name, handle = self.indirect_set_default_common(line, ws=True)
self.client.bm_mt_indirect_ws_set_default_group(0, table_name, handle)
def complete_table_indirect_set_default_with_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
    def do_act_prof_create_group(self, line):
        "Add a group to an action profile: act_prof_create_group <action profile name>"
args = line.split()
self.exactly_n_args(args, 1)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
self.check_act_prof_ws(act_prof)
grp_handle = self.client.bm_mt_act_prof_create_group(0, act_prof_name)
print "Group has been created with handle", grp_handle
def complete_act_prof_create_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_create_group", with_selection=True)
def do_table_indirect_create_group(self, line):
"Add a group to an indirect match table: table_indirect_create_group <table name>"
pass
def complete_table_indirect_create_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_delete_group(self, line):
"Delete a group from an action profile: act_prof_delete_group <action profile name> <group handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
self.check_act_prof_ws(act_prof)
try:
grp_handle = int(args[1])
except:
raise UIn_Error("Bad format for group handle")
self.client.bm_mt_act_prof_delete_group(0, act_prof_name, grp_handle)
def complete_act_prof_delete_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_delete_group", with_selection=True)
def do_table_indirect_delete_group(self, line):
"Delete a group: table_indirect_delete_group <table name> <group handle>"
pass
def complete_table_indirect_delete_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_add_member_to_group(self, line):
"Add member to group in an action profile: act_prof_add_member_to_group <action profile name> <member handle> <group handle>"
args = line.split()
self.exactly_n_args(args, 3)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
self.check_act_prof_ws(act_prof)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
try:
grp_handle = int(args[2])
except:
raise UIn_Error("Bad format for group handle")
self.client.bm_mt_act_prof_add_member_to_group(
0, act_prof_name, mbr_handle, grp_handle)
def complete_act_prof_add_member_to_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_add_member_to_group", with_selection=True)
def do_table_indirect_add_member_to_group(self, line):
"Add member to group: table_indirect_add_member_to_group <table name> <member handle> <group handle>"
pass
def complete_table_indirect_add_member_to_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_remove_member_from_group(self, line):
"Remove member from group in action profile: act_prof_remove_member_from_group <action profile name> <member handle> <group handle>"
args = line.split()
self.exactly_n_args(args, 3)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
self.check_act_prof_ws(act_prof)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
try:
grp_handle = int(args[2])
except:
raise UIn_Error("Bad format for group handle")
self.client.bm_mt_act_prof_remove_member_from_group(
0, act_prof_name, mbr_handle, grp_handle)
def complete_act_prof_remove_member_from_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_remove_member_from_group", with_selection=True)
def do_table_indirect_remove_member_from_group(self, line):
"Remove member from group: table_indirect_remove_member_from_group <table name> <member handle> <group handle>"
pass
def complete_table_indirect_remove_member_from_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
def check_has_pre(self):
if self.pre_type == PreType.None:
raise UIn_Error(
"Cannot execute this command without packet replication engine"
)
def get_mgrp(self, s):
try:
return int(s)
except:
raise UIn_Error("Bad format for multicast group id")
@handle_bad_input
def do_mc_mgrp_create(self, line):
"Create multicast group: mc_mgrp_create <group id>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 1)
mgrp = self.get_mgrp(args[0])
print "Creating multicast group", mgrp
mgrp_hdl = self.mc_client.bm_mc_mgrp_create(0, mgrp)
assert(mgrp == mgrp_hdl)
@handle_bad_input
def do_mc_mgrp_destroy(self, line):
"Destroy multicast group: mc_mgrp_destroy <group id>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 1)
mgrp = self.get_mgrp(args[0])
print "Destroying multicast group", mgrp
self.mc_client.bm_mc_mgrp_destroy(0, mgrp)
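    # Encodes a list of port numbers as the bitmap string expected by the PRE:
    # bit i (counting from the right) is set when port i is in the list, e.g.
    # ports ["1", "3"] -> "1010".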
def ports_to_port_map_str(self, ports):
last_port_num = 0
port_map_str = ""
ports_int = []
for port_num_str in ports:
try:
port_num = int(port_num_str)
except:
raise UIn_Error("'%s' is not a valid port number" % port_num_str)
ports_int.append(port_num)
ports_int.sort()
for port_num in ports_int:
port_map_str += "0" * (port_num - last_port_num) + "1"
last_port_num = port_num + 1
return port_map_str[::-1]
def parse_ports_and_lags(self, args):
ports = []
i = 1
while (i < len(args) and args[i] != '|'):
ports.append(args[i])
i += 1
port_map_str = self.ports_to_port_map_str(ports)
if self.pre_type == PreType.SimplePreLAG:
i += 1
lags = [] if i == len(args) else args[i:]
lag_map_str = self.ports_to_port_map_str(lags)
else:
lag_map_str = None
return port_map_str, lag_map_str
@handle_bad_input
def do_mc_node_create(self, line):
"Create multicast node: mc_node_create <rid> <space-separated port list> [ | <space-separated lag list> ]"
self.check_has_pre()
args = line.split()
self.at_least_n_args(args, 1)
try:
rid = int(args[0])
except:
raise UIn_Error("Bad format for rid")
port_map_str, lag_map_str = self.parse_ports_and_lags(args)
if self.pre_type == PreType.SimplePre:
print "Creating node with rid", rid, "and with port map", port_map_str
l1_hdl = self.mc_client.bm_mc_node_create(0, rid, port_map_str)
else:
print "Creating node with rid", rid, ", port map", port_map_str, "and lag map", lag_map_str
l1_hdl = self.mc_client.bm_mc_node_create(0, rid, port_map_str, lag_map_str)
print "node was created with handle", l1_hdl
def get_node_handle(self, s):
try:
return int(s)
except:
raise UIn_Error("Bad format for node handle")
@handle_bad_input
def do_mc_node_update(self, line):
"Update multicast node: mc_node_update <node handle> <space-separated port list> [ | <space-separated lag list> ]"
self.check_has_pre()
args = line.split()
self.at_least_n_args(args, 2)
l1_hdl = self.get_node_handle(args[0])
port_map_str, lag_map_str = self.parse_ports_and_lags(args)
if self.pre_type == PreType.SimplePre:
print "Updating node", l1_hdl, "with port map", port_map_str
self.mc_client.bm_mc_node_update(0, l1_hdl, port_map_str)
else:
print "Updating node", l1_hdl, "with port map", port_map_str, "and lag map", lag_map_str
self.mc_client.bm_mc_node_update(0, l1_hdl, port_map_str, lag_map_str)
@handle_bad_input
def do_mc_node_associate(self, line):
"Associate node to multicast group: mc_node_associate <group handle> <node handle>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 2)
mgrp = self.get_mgrp(args[0])
l1_hdl = self.get_node_handle(args[1])
print "Associating node", l1_hdl, "to multicast group", mgrp
self.mc_client.bm_mc_node_associate(0, mgrp, l1_hdl)
@handle_bad_input
    def do_mc_node_dissociate(self, line):
        "Dissociate node from multicast group: mc_node_dissociate <group handle> <node handle>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 2)
mgrp = self.get_mgrp(args[0])
l1_hdl = self.get_node_handle(args[1])
print "Dissociating node", l1_hdl, "from multicast group", mgrp
self.mc_client.bm_mc_node_dissociate(0, mgrp, l1_hdl)
@handle_bad_input
def do_mc_node_destroy(self, line):
"Destroy multicast node: mc_node_destroy <node handle>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 1)
        l1_hdl = self.get_node_handle(args[0])
print "Destroying node", l1_hdl
self.mc_client.bm_mc_node_destroy(0, l1_hdl)
@handle_bad_input
def do_mc_set_lag_membership(self, line):
"Set lag membership of port list: mc_set_lag_membership <lag index> <space-separated port list>"
self.check_has_pre()
if self.pre_type != PreType.SimplePreLAG:
raise UIn_Error(
"Cannot execute this command with this type of PRE,"\
" SimplePreLAG is required"
)
args = line.split()
self.at_least_n_args(args, 2)
try:
lag_index = int(args[0])
except:
raise UIn_Error("Bad format for lag index")
port_map_str = self.ports_to_port_map_str(args[1:])
print "Setting lag membership:", lag_index, "<-", port_map_str
self.mc_client.bm_mc_set_lag_membership(0, lag_index, port_map_str)
@handle_bad_input
def do_mc_dump(self, line):
"Dump entries in multicast engine"
self.check_has_pre()
json_dump = self.mc_client.bm_mc_get_entries(0)
try:
mc_json = json.loads(json_dump)
except:
print "Exception when retrieving MC entries"
return
l1_handles = {}
for h in mc_json["l1_handles"]:
l1_handles[h["handle"]] = (h["rid"], h["l2_handle"])
l2_handles = {}
for h in mc_json["l2_handles"]:
l2_handles[h["handle"]] = (h["ports"], h["lags"])
print "=========="
print "MC ENTRIES"
for mgrp in mc_json["mgrps"]:
print "**********"
mgid = mgrp["id"]
print "mgrp({})".format(mgid)
for L1h in mgrp["l1_handles"]:
rid, L2h = l1_handles[L1h]
print " -> (L1h={}, rid={})".format(L1h, rid),
ports, lags = l2_handles[L2h]
print "-> (ports=[{}], lags=[{}])".format(
", ".join([str(p) for p in ports]),
", ".join([str(l) for l in lags]))
print "=========="
print "LAGS"
for lag in mc_json["lags"]:
print "lag({})".format(lag["id"]),
            print "-> ports=[{}]".format(", ".join([str(p) for p in lag["ports"]]))
print "=========="
@handle_bad_input
def do_load_new_config_file(self, line):
"Load new json config: load_new_config_file <path to .json file>"
args = line.split()
self.exactly_n_args(args, 1)
filename = args[0]
if not os.path.isfile(filename):
raise UIn_Error("Not a valid filename")
print "Loading new Json config"
with open(filename, 'r') as f:
json_str = f.read()
try:
json.loads(json_str)
except:
raise UIn_Error("Not a valid JSON file")
self.client.bm_load_new_config(json_str)
load_json_str(json_str)
@handle_bad_input
def do_swap_configs(self, line):
"Swap the 2 existing configs, need to have called load_new_config_file before"
print "Swapping configs"
self.client.bm_swap_configs()
@handle_bad_input
def do_meter_array_set_rates(self, line):
"Configure rates for an entire meter array: meter_array_set_rates <name> <rate_1>:<burst_1> <rate_2>:<burst_2> ..."
args = line.split()
self.at_least_n_args(args, 1)
meter_name = args[0]
meter = self.get_res("meter", meter_name, METER_ARRAYS)
rates = args[1:]
if len(rates) != meter.rate_count:
raise UIn_Error(
"Invalid number of rates, expected %d but got %d"\
% (meter.rate_count, len(rates))
)
new_rates = []
for rate in rates:
try:
r, b = rate.split(':')
r = float(r)
b = int(b)
new_rates.append(BmMeterRateConfig(r, b))
except:
raise UIn_Error("Error while parsing rates")
self.client.bm_meter_array_set_rates(0, meter_name, new_rates)
def complete_meter_array_set_rates(self, text, line, start_index, end_index):
return self._complete_meters(text)
@handle_bad_input
def do_meter_set_rates(self, line):
"Configure rates for a meter: meter_set_rates <name> <index> <rate_1>:<burst_1> <rate_2>:<burst_2> ..."
args = line.split()
self.at_least_n_args(args, 2)
meter_name = args[0]
meter = self.get_res("meter", meter_name, METER_ARRAYS)
try:
index = int(args[1])
except:
raise UIn_Error("Bad format for index")
rates = args[2:]
if len(rates) != meter.rate_count:
raise UIn_Error(
"Invalid number of rates, expected %d but got %d"\
% (meter.rate_count, len(rates))
)
new_rates = []
for rate in rates:
try:
r, b = rate.split(':')
r = float(r)
b = int(b)
new_rates.append(BmMeterRateConfig(r, b))
except:
raise UIn_Error("Error while parsing rates")
if meter.is_direct:
table_name = meter.binding
self.client.bm_mt_set_meter_rates(0, table_name, index, new_rates)
else:
self.client.bm_meter_set_rates(0, meter_name, index, new_rates)
def complete_meter_set_rates(self, text, line, start_index, end_index):
return self._complete_meters(text)
@handle_bad_input
def do_meter_get_rates(self, line):
"Retrieve rates for a meter: meter_get_rates <name> <index>"
args = line.split()
self.exactly_n_args(args, 2)
meter_name = args[0]
meter = self.get_res("meter", meter_name, METER_ARRAYS)
try:
index = int(args[1])
except:
raise UIn_Error("Bad format for index")
# meter.rate_count
if meter.is_direct:
table_name = meter.binding
rates = self.client.bm_mt_get_meter_rates(0, table_name, index)
else:
rates = self.client.bm_meter_get_rates(0, meter_name, index)
if len(rates) != meter.rate_count:
print "WARNING: expected", meter.rate_count, "rates",
print "but only received", len(rates)
for idx, rate in enumerate(rates):
print "{}: info rate = {}, burst size = {}".format(
idx, rate.units_per_micros, rate.burst_size)
def complete_meter_get_rates(self, text, line, start_index, end_index):
return self._complete_meters(text)
def _complete_meters(self, text):
return self._complete_res(METER_ARRAYS, text)
@handle_bad_input
def do_counter_read(self, line):
"Read counter value: counter_read <name> <index>"
args = line.split()
self.exactly_n_args(args, 2)
counter_name = args[0]
counter = self.get_res("counter", counter_name, COUNTER_ARRAYS)
index = args[1]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
if counter.is_direct:
table_name = counter.binding
print "this is the direct counter for table", table_name
# index = index & 0xffffffff
value = self.client.bm_mt_read_counter(0, table_name, index)
else:
value = self.client.bm_counter_read(0, counter_name, index)
print "%s[%d]= " % (counter_name, index), value
def complete_counter_read(self, text, line, start_index, end_index):
return self._complete_counters(text)
@handle_bad_input
def do_counter_reset(self, line):
"Reset counter: counter_reset <name>"
args = line.split()
self.exactly_n_args(args, 1)
counter_name = args[0]
counter = self.get_res("counter", counter_name, COUNTER_ARRAYS)
if counter.is_direct:
table_name = counter.binding
print "this is the direct counter for table", table_name
value = self.client.bm_mt_reset_counters(0, table_name)
else:
value = self.client.bm_counter_reset_all(0, counter_name)
def complete_counter_reset(self, text, line, start_index, end_index):
return self._complete_counters(text)
def _complete_counters(self, text):
return self._complete_res(COUNTER_ARRAYS, text)
@handle_bad_input
def do_register_read(self, line):
"Read register value: register_read <name> <index>"
args = line.split()
self.exactly_n_args(args, 2)
register_name = args[0]
register = self.get_res("register", register_name, REGISTER_ARRAYS)
index = args[1]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
value = self.client.bm_register_read(0, register_name, index)
print "%s[%d]= " % (register_name, index), value
def complete_register_read(self, text, line, start_index, end_index):
return self._complete_registers(text)
@handle_bad_input
def do_register_write(self, line):
"Write register value: register_write <name> <index> <value>"
args = line.split()
self.exactly_n_args(args, 3)
register_name = args[0]
register = self.get_res("register", register_name, REGISTER_ARRAYS)
index = args[1]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
value = args[2]
try:
value = int(value)
except:
raise UIn_Error("Bad format for value, must be an integer")
self.client.bm_register_write(0, register_name, index, value)
def complete_register_write(self, text, line, start_index, end_index):
return self._complete_registers(text)
@handle_bad_input
def do_register_reset(self, line):
"Reset all the cells in the register array to 0: register_reset <name>"
args = line.split()
self.exactly_n_args(args, 1)
register_name = args[0]
self.client.bm_register_reset(0, register_name)
def complete_register_reset(self, text, line, start_index, end_index):
return self._complete_registers(text)
def _complete_registers(self, text):
return self._complete_res(REGISTER_ARRAYS, text)
def dump_action_and_data(self, action_name, action_data):
def hexstr(v):
return "".join("{:02x}".format(ord(c)) for c in v)
print "Action entry: {} - {}".format(
action_name, ", ".join([hexstr(a) for a in action_data]))
def dump_action_entry(self, a_entry):
if a_entry.action_type == BmActionEntryType.NONE:
print "EMPTY"
elif a_entry.action_type == BmActionEntryType.ACTION_DATA:
self.dump_action_and_data(a_entry.action_name, a_entry.action_data)
elif a_entry.action_type == BmActionEntryType.MBR_HANDLE:
print "Index: member({})".format(a_entry.mbr_handle)
elif a_entry.action_type == BmActionEntryType.GRP_HANDLE:
print "Index: group({})".format(a_entry.grp_handle)
def dump_one_member(self, member):
print "Dumping member {}".format(member.mbr_handle)
self.dump_action_and_data(member.action_name, member.action_data)
def dump_members(self, members):
for m in members:
print "**********"
self.dump_one_member(m)
def dump_one_group(self, group):
print "Dumping group {}".format(group.grp_handle)
print "Members: [{}]".format(", ".join(
[str(h) for h in group.mbr_handles]))
def dump_groups(self, groups):
for g in groups:
print "**********"
self.dump_one_group(g)
def dump_one_entry(self, table, entry):
if table.key:
out_name_w = max(20, max([len(t[0]) for t in table.key]))
def hexstr(v):
return "".join("{:02x}".format(ord(c)) for c in v)
def dump_exact(p):
return hexstr(p.exact.key)
def dump_lpm(p):
return "{}/{}".format(hexstr(p.lpm.key), p.lpm.prefix_length)
def dump_ternary(p):
return "{} &&& {}".format(hexstr(p.ternary.key),
hexstr(p.ternary.mask))
def dump_range(p):
return "{} -> {}".format(hexstr(p.range.start),
hexstr(p.range.end_))
def dump_valid(p):
return "01" if p.valid.key else "00"
pdumpers = {"exact": dump_exact, "lpm": dump_lpm,
"ternary": dump_ternary, "valid": dump_valid,
"range": dump_range}
print "Dumping entry {}".format(hex(entry.entry_handle))
print "Match key:"
for p, k in zip(entry.match_key, table.key):
assert(k[1] == p.type)
pdumper = pdumpers[MatchType.to_str(p.type)]
print "* {0:{w}}: {1:10}{2}".format(
k[0], MatchType.to_str(p.type).upper(),
pdumper(p), w=out_name_w)
if entry.options.priority >= 0:
print "Priority: {}".format(entry.options.priority)
self.dump_action_entry(entry.action_entry)
if entry.life is not None:
print "Life: {}ms since hit, timeout is {}ms".format(
entry.life.time_since_hit_ms, entry.life.timeout_ms)
@handle_bad_input
def do_table_dump_entry(self, line):
"Display some information about a table entry: table_dump_entry <table name> <entry handle>"
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
entry = self.client.bm_mt_get_entry(0, table_name, entry_handle)
self.dump_one_entry(table, entry)
def complete_table_dump_entry(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_dump_member(self, line):
"Display some information about a member: act_prof_dump_member <action profile name> <member handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
member = self.client.bm_mt_act_prof_get_member(
0, act_prof_name, mbr_handle)
self.dump_one_member(member)
def complete_act_prof_dump_member(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
# notice the strictly_deprecated=False; I don't consider this command to be
# strictly deprecated because it can be convenient and does not modify the
# action profile so won't create problems
@deprecated_act_prof("act_prof_dump_member", with_selection=False,
strictly_deprecated=False)
def do_table_dump_member(self, line):
"Display some information about a member: table_dump_member <table name> <member handle>"
pass
def complete_table_dump_member(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
    def do_act_prof_dump_group(self, line):
        "Display some information about a group: act_prof_dump_group <action profile name> <group handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
try:
grp_handle = int(args[1])
except:
raise UIn_Error("Bad format for group handle")
group = self.client.bm_mt_act_prof_get_group(
0, act_prof_name, grp_handle)
self.dump_one_group(group)
def complete_act_prof_dump_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_dump_group", with_selection=False,
strictly_deprecated=False)
def do_table_dump_group(self, line):
"Display some information about a group: table_dump_group <table name> <group handle>"
pass
def complete_table_dump_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
def _dump_act_prof(self, act_prof):
act_prof_name = act_prof.name
members = self.client.bm_mt_act_prof_get_members(0, act_prof_name)
print "=========="
print "MEMBERS"
self.dump_members(members)
if act_prof.with_selection:
groups = self.client.bm_mt_act_prof_get_groups(0, act_prof_name)
print "=========="
print "GROUPS"
self.dump_groups(groups)
@handle_bad_input
def do_act_prof_dump(self, line):
"Display entries in an action profile: act_prof_dump <action profile name>"
args = line.split()
self.exactly_n_args(args, 1)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name, ACTION_PROFS)
self._dump_act_prof(act_prof)
def complete_act_prof_dump(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@handle_bad_input
def do_table_dump(self, line):
"Display entries in a match-table: table_dump <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
entries = self.client.bm_mt_get_entries(0, table_name)
print "=========="
print "TABLE ENTRIES"
for e in entries:
print "**********"
self.dump_one_entry(table, e)
if table.type_ == TableType.indirect or\
table.type_ == TableType.indirect_ws:
assert(table.action_prof is not None)
self._dump_act_prof(table.action_prof)
# default entry
default_entry = self.client.bm_mt_get_default_entry(0, table_name)
print "=========="
print "Dumping default entry"
self.dump_action_entry(default_entry)
print "=========="
def complete_table_dump(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_dump_entry_from_key(self, line):
"Display some information about a table entry: table_dump_entry_from_key <table name> <match fields> [priority]"
args = line.split()
self.at_least_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, TABLES)
if table.match_type in {MatchType.TERNARY, MatchType.RANGE}:
try:
priority = int(args.pop(-1))
except:
raise UIn_Error(
"Table is ternary, but could not extract a valid priority from args"
)
else:
priority = 0
match_key = args[1:]
if len(match_key) != table.num_key_fields():
raise UIn_Error(
"Table %s needs %d key fields" % (table_name, table.num_key_fields())
)
match_key = parse_match_key(table, match_key)
entry = self.client.bm_mt_get_entry_from_key(
0, table_name, match_key, BmAddEntryOptions(priority = priority))
self.dump_one_entry(table, entry)
def complete_table_dump_entry_from_key(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_port_add(self, line):
"Add a port to the switch (behavior depends on device manager used): port_add <iface_name> <port_num> [pcap_path]"
args = line.split()
self.at_least_n_args(args, 2)
iface_name = args[0]
try:
port_num = int(args[1])
except:
raise UIn_Error("Bad format for port_num, must be an integer")
pcap_path = ""
if len(args) > 2:
pcap_path = args[2]
self.client.bm_dev_mgr_add_port(iface_name, port_num, pcap_path)
@handle_bad_input
def do_port_remove(self, line):
"Removes a port from the switch (behavior depends on device manager used): port_remove <port_num>"
args = line.split()
self.exactly_n_args(args, 1)
try:
port_num = int(args[0])
except:
raise UIn_Error("Bad format for port_num, must be an integer")
self.client.bm_dev_mgr_remove_port(port_num)
@handle_bad_input
def do_show_ports(self, line):
"Shows the ports connected to the switch: show_ports"
self.exactly_n_args(line.split(), 0)
ports = self.client.bm_dev_mgr_show_ports()
print "{:^10}{:^20}{:^10}{}".format(
"port #", "iface name", "status", "extra info")
print "=" * 50
for port_info in ports:
status = "UP" if port_info.is_up else "DOWN"
extra_info = "; ".join(
[k + "=" + v for k, v in port_info.extra.items()])
print "{:^10}{:^20}{:^10}{}".format(
port_info.port_num, port_info.iface_name, status, extra_info)
@handle_bad_input
def do_switch_info(self, line):
"Show some basic info about the switch: switch_info"
self.exactly_n_args(line.split(), 0)
info = self.client.bm_mgmt_get_info()
attributes = [t[2] for t in info.thrift_spec[1:]]
out_attr_w = 5 + max(len(a) for a in attributes)
for a in attributes:
print "{:{w}}: {}".format(a, getattr(info, a), w=out_attr_w)
@handle_bad_input
def do_reset_state(self, line):
"Reset all state in the switch (table entries, registers, ...), but P4 config is preserved: reset_state"
self.exactly_n_args(line.split(), 0)
self.client.bm_reset_state()
@handle_bad_input
def do_write_config_to_file(self, line):
"Retrieves the JSON config currently used by the switch and dumps it to user-specified file"
args = line.split()
self.exactly_n_args(args, 1)
filename = args[0]
json_cfg = self.client.bm_get_config()
with open(filename, 'w') as f:
f.write(json_cfg)
@handle_bad_input
def do_serialize_state(self, line):
"Serialize the switch state and dumps it to user-specified file"
args = line.split()
self.exactly_n_args(args, 1)
filename = args[0]
state = self.client.bm_serialize_state()
with open(filename, 'w') as f:
f.write(state)
def set_crc_parameters_common(self, line, crc_width=16):
conversion_fn = {16: hex_to_i16, 32: hex_to_i32}[crc_width]
config_type = {16: BmCrc16Config, 32: BmCrc32Config}[crc_width]
thrift_fn = {16: self.client.bm_set_crc16_custom_parameters,
32: self.client.bm_set_crc32_custom_parameters}[crc_width]
args = line.split()
self.exactly_n_args(args, 6)
name = args[0]
if name not in CUSTOM_CRC_CALCS or CUSTOM_CRC_CALCS[name] != crc_width:
raise UIn_ResourceError("crc{}_custom".format(crc_width), name)
config_args = [conversion_fn(a) for a in args[1:4]]
config_args += [parse_bool(a) for a in args[4:6]]
crc_config = config_type(*config_args)
thrift_fn(0, name, crc_config)
def _complete_crc(self, text, crc_width=16):
crcs = sorted(
[c for c, w in CUSTOM_CRC_CALCS.items() if w == crc_width])
if not text:
return crcs
return [c for c in crcs if c.startswith(text)]
@handle_bad_input
def do_set_crc16_parameters(self, line):
"Change the parameters for a custom crc16 hash: set_crc16_parameters <name> <polynomial> <initial remainder> <final xor value> <reflect data?> <reflect remainder?>"
self.set_crc_parameters_common(line, 16)
def complete_set_crc16_parameters(self, text, line, start_index, end_index):
return self._complete_crc(text, 16)
@handle_bad_input
def do_set_crc32_parameters(self, line):
"Change the parameters for a custom crc32 hash: set_crc32_parameters <name> <polynomial> <initial remainder> <final xor value> <reflect data?> <reflect remainder?>"
self.set_crc_parameters_common(line, 32)
def complete_set_crc32_parameters(self, text, line, start_index, end_index):
return self._complete_crc(text, 32)
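# Entry point: parse the command-line options, connect to the switch over
# Thrift, load the P4 JSON config via load_json_config() and drop into the
# interactive RuntimeAPI loop.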
def load_json_config(standard_client=None, json_path=None):
load_json_str(utils.get_json_config(standard_client, json_path))
def main():
args = get_parser().parse_args()
standard_client, mc_client = thrift_connect(
args.thrift_ip, args.thrift_port,
RuntimeAPI.get_thrift_services(args.pre)
)
load_json_config(standard_client, args.json)
RuntimeAPI(args.pre, standard_client, mc_client).cmdloop()
if __name__ == '__main__':
main()
| Emil-501/block.p4 | env/runtime_CLI.py | Python | apache-2.0 | 80,370 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import psycopg2
from socorrolib.lib import (
MissingArgumentError,
BadArgumentError,
external_common,
)
from socorro.external.postgresql.base import PostgreSQLBase
class GraphicsDevices(PostgreSQLBase):
def get(self, **kwargs):
filters = [
("vendor_hex", None, ["list", "str"]),
("adapter_hex", None, ["list", "str"]),
]
params = external_common.parse_arguments(filters, kwargs)
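        # Both filters are required; the lists are converted to tuples below
        # so that psycopg2 can adapt them for the IN clauses of the SQL query.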
for key in ('vendor_hex', 'adapter_hex'):
param = params[key]
if not param:
raise MissingArgumentError(key)
params[key] = tuple(params[key])
sql_query = """
SELECT
vendor_hex, adapter_hex, vendor_name, adapter_name
FROM graphics_device
WHERE vendor_hex IN %(vendor_hex)s
AND adapter_hex IN %(adapter_hex)s
"""
results = self.query(sql_query, params)
hits = results.zipped()
return {
'hits': hits,
'total': len(hits)
}
def post(self, **kwargs):
try:
data = kwargs['data']
if data is None:
raise BadArgumentError('POST data sent was null')
        except (KeyError, AttributeError):
raise MissingArgumentError('No POST data sent')
except ValueError:
raise BadArgumentError('Posted data not valid JSON')
except TypeError:
# happens if kwargs['data'] is None
raise BadArgumentError('POST data sent was empty')
# make an upsert for each thing and rollback if any failed
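        # The WITH query below first tries to UPDATE the (vendor_hex,
        # adapter_hex) row; if nothing matched, the second CTE INSERTs it.
        # This is the usual PostgreSQL upsert idiom from before ON CONFLICT.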
upsert = """
WITH
update_graphics_device AS (
UPDATE graphics_device
SET
adapter_name = %(adapter_name)s,
vendor_name = %(vendor_name)s
WHERE
vendor_hex = %(vendor_hex)s
AND
adapter_hex = %(adapter_hex)s
RETURNING 1
),
insert_graphics_device AS (
INSERT INTO
graphics_device
(vendor_hex, adapter_hex, vendor_name, adapter_name)
SELECT
%(vendor_hex)s AS vendor_hex,
%(adapter_hex)s AS adapter_hex,
%(vendor_name)s AS vendor_name,
%(adapter_name)s AS adapter_name
WHERE NOT EXISTS (
SELECT * FROM graphics_device
WHERE
vendor_hex = %(vendor_hex)s
AND
adapter_hex = %(adapter_hex)s
LIMIT 1
)
RETURNING 2
)
SELECT * FROM update_graphics_device
UNION
ALL SELECT * FROM insert_graphics_device
"""
with self.get_connection() as connection:
try:
for row in data:
self.query(upsert, row, connection=connection)
connection.commit()
return True
except (psycopg2.Error, KeyError):
# KeyErrors happen if any of the rows don't have
# all the required keys
connection.rollback()
return False
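    # Illustrative payload for post() (keys inferred from the upsert statement
    # above; the hex strings and names below are made up):
    #
    #   graphics_devices.post(data=[{
    #       'vendor_hex': '0xabcd',
    #       'adapter_hex': '0x1234',
    #       'vendor_name': 'Some Vendor',
    #       'adapter_name': 'Some Adapter',
    #   }])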
| KaiRo-at/socorro | socorro/external/postgresql/graphics_devices.py | Python | mpl-2.0 | 3,440 |
input = """
a:-b.
b:-a.
a v b.
:- not a.
:- not b.
"""
output = """
{a, b}
"""
| Yarrick13/hwasp | tests/wasp1/AllAnswerSets/choice_14.test.py | Python | apache-2.0 | 81 |
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from GLUEInfoProvider import CommonUtils
def process(siteDefs, out=sys.stdout):
    if siteDefs.clusterHost and siteDefs.clusterHost != siteDefs.ceHost:
#The current node is not the cluster node
#Cannot process cluster and subclusters
return
clusterDN = 'GlueClusterUniqueID=%s,mds-vo-name=resource,o=grid' % siteDefs.clusterId
out.write('dn:%s\n' % clusterDN)
out.write('''objectClass: GlueClusterTop
objectClass: GlueCluster
objectClass: GlueInformationService
objectClass: GlueKey
objectClass: GlueSchemaVersion
''')
out.write('GlueClusterUniqueID: %s\n' % siteDefs.clusterId)
out.write('GlueClusterName: %s\n' % siteDefs.clusterName)
out.write('GlueForeignKey: GlueSiteUniqueID=%s\n' % siteDefs.clusterSite)
qList = siteDefs.ruleTable.getQueueList(siteDefs.ceHost)
for queue in qList:
glueceID = '%s:%d/cream-%s-%s' % (siteDefs.ceHost, siteDefs.cePort, siteDefs.jobmanager, queue)
out.write('GlueForeignKey: GlueCEUniqueID=%s\n' % glueceID)
for queue in qList:
glueceID = '%s:%d/cream-%s-%s' % (siteDefs.ceHost, siteDefs.cePort, siteDefs.jobmanager, queue)
out.write('GlueClusterService: %s\n' % glueceID)
out.write('GlueInformationServiceURL: ldap://%s:2170/mds-vo-name=resource,o=grid\n' % siteDefs.clusterHost)
out.write('GlueSchemaVersionMajor: 1\n')
out.write('GlueSchemaVersionMinor: 3\n')
out.write('\n')
for resData in siteDefs.resourceTable.values():
out.write('dn: GlueSubClusterUniqueID=%s,%s\n' % (resData.id, clusterDN))
out.write('''objectClass: GlueClusterTop
objectClass: GlueSubCluster
objectClass: GlueHostApplicationSoftware
objectClass: GlueHostArchitecture
objectClass: GlueHostBenchmark
objectClass: GlueHostMainMemory
objectClass: GlueHostNetworkAdapter
objectClass: GlueHostOperatingSystem
objectClass: GlueHostProcessor
objectClass: GlueInformationService
objectClass: GlueKey
objectClass: GlueSchemaVersion
''')
out.write('GlueSubClusterUniqueID: %s\n' % resData.id)
out.write('GlueChunkKey: GlueClusterUniqueID=%s\n' % siteDefs.clusterId)
out.write('GlueHostArchitecturePlatformType: %s\n' % resData.osArch)
out.write('GlueHostArchitectureSMPSize: %d\n' % resData.smpSize)
out.write('GlueHostBenchmarkSF00: %f\n' % resData.benchSF00)
out.write('GlueHostBenchmarkSI00: %f\n' % resData.benchSI00)
out.write('GlueHostMainMemoryRAMSize: %d\n' % resData.mainMemSize)
out.write('GlueHostMainMemoryVirtualSize: %d\n' % resData.mainVirtSize)
out.write('GlueHostNetworkAdapterInboundIP: %s\n' % str(resData.inBound).upper())
out.write('GlueHostNetworkAdapterOutboundIP: %s\n' % str(resData.outBound).upper())
out.write('GlueHostOperatingSystemName: %s\n' % resData.osName)
out.write('GlueHostOperatingSystemRelease: %s\n' % resData.osRelease)
out.write('GlueHostOperatingSystemVersion: %s\n' % resData.osVersion)
out.write('GlueHostProcessorClockSpeed: %d\n' % resData.procSpeed)
out.write('GlueHostProcessorModel: %s\n' % resData.procModel)
out.write('GlueHostProcessorVendor: %s\n' % resData.procVendor)
out.write('GlueHostProcessorOtherDescription: %s\n' % resData.procDescr)
for appItem in resData.runtimeEnv:
out.write("GlueHostApplicationSoftwareRunTimeEnvironment: %s\n" % appItem)
out.write('GlueSubClusterName: %s\n' % resData.name)
out.write('GlueSubClusterPhysicalCPUs: %d\n' % resData.phyCPU)
out.write('GlueSubClusterLogicalCPUs: %d\n' % resData.logCPU)
out.write('GlueSubClusterTmpDir: %s\n' % resData.tmpDir)
out.write('GlueSubClusterWNTmpDir: %s\n' % resData.WNDir)
out.write('GlueInformationServiceURL: ldap://%s:2170/mds-vo-name=resource,o=grid\n' % siteDefs.clusterHost)
out.write('GlueSchemaVersionMajor: 1\n')
out.write('GlueSchemaVersionMinor: 3\n')
out.write('\n')
#end of resources
| pandreetto/info-glue-provider | src/GLUEInfoProvider/GLUEClusterHandler.py | Python | apache-2.0 | 4,801 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging, os
from datetime import datetime
from inspect import getargspec
from traceback import format_exc
# anyjson
from anyjson import dumps as anyjson_dumps, loads
# Bunch
from bunch import bunchify
# lxml
from lxml import objectify
# requests
import requests
# Zato
from zato.common import BROKER, soap_data_path, soap_data_xpath, soap_fault_xpath, \
ZatoException, zato_data_path, zato_data_xpath, zato_details_xpath, \
ZATO_NOT_GIVEN, ZATO_OK, zato_result_xpath
from zato.common.log_message import CID_LENGTH
from zato.common.odb.model import Server
# Set max_cid_repr to CID_NO_CLIP if it's desired to return the whole of a CID
# in a response's __repr__ method.
CID_NO_CLIP = int(CID_LENGTH / 2)
DEFAULT_MAX_RESPONSE_REPR = 2500
DEFAULT_MAX_CID_REPR = 5
mod_logger = logging.getLogger(__name__)
# Work around https://bitbucket.org/runeh/anyjson/pull-request/4/
if getargspec(anyjson_dumps).keywords:
dumps = anyjson_dumps
else:
def dumps(data, *args, **kwargs):
return anyjson_dumps(data)
# ################################################################################################################################
# Version
# ################################################################################################################################
try:
curdir = os.path.dirname(os.path.abspath(__file__))
_version_py = os.path.normpath(os.path.join(curdir, '..', '..', '..', '..', '.version.py'))
_locals = {}
execfile(_version_py, _locals)
version = _locals['version']
except IOError:
version = '2.0.3.4'
# ################################################################################################################################
class _Response(object):
""" A base class for all specific response types client may return.
"""
def __init__(self, inner, to_bunch, max_response_repr, max_cid_repr, logger, output_repeated=False):
        self.inner = inner # Actual response from the requests module
self.to_bunch = to_bunch
self.max_response_repr = max_response_repr
self.max_cid_repr = max_cid_repr
self.logger = logger
self.sio_result = None
self.ok = False
self.has_data = False
self.output_repeated = output_repeated
self.data = [] if self.output_repeated else None
self.meta = {}
self.cid = self.inner.headers.get('x-zato-cid', '(None)')
self.details = None
self.init()
def __repr__(self):
if self.max_cid_repr >= CID_NO_CLIP:
cid = '[{}]'.format(self.cid)
else:
cid = '[{}..{}]'.format(self.cid[:self.max_cid_repr], self.cid[-self.max_cid_repr:])
return '<{} at {} ok:[{}] inner.status_code:[{}] cid:{}, inner.text:[{}]>'.format(
self.__class__.__name__, hex(id(self)), self.ok, self.inner.status_code,
cid, self.inner.text[:self.max_response_repr])
def __iter__(self):
return iter(self.data)
def init(self):
raise NotImplementedError('Must be defined by subclasses')
# ################################################################################################################################
class _StructuredResponse(_Response):
""" Any non-raw and non-SIO response.
"""
def init(self):
if self.set_data():
self.set_has_data()
self.set_ok()
def _set_data_details(self):
try:
self.data = self.load_func(self.inner.text.encode('utf-8'))
except Exception, e:
self.details = format_exc(e)
else:
return True
def load_func(self):
raise NotImplementedError('Must be defined by subclasses')
def set_data(self):
return self._set_data_details()
def set_has_data(self):
raise NotImplementedError('Must be defined by subclasses')
def set_ok(self):
self.ok = self.inner.ok
class JSONResponse(_StructuredResponse):
""" Stores responses from JSON services.
"""
def load_func(self, data):
return loads(data)
def set_has_data(self):
self.has_data = bool(self.data)
class XMLResponse(_StructuredResponse):
""" Stores responses from XML services.
"""
def load_func(self, data):
return objectify.fromstring(data)
def set_has_data(self):
self.has_data = self.data is not None
class SOAPResponse(XMLResponse):
""" Stores responses from SOAP services.
"""
path, xpath = soap_data_path, soap_data_xpath
def init(self):
if self.set_data():
self.set_has_data()
def set_data(self):
if self._set_data_details():
data = self.xpath(self.data)
if not data:
self.details = 'No {} in SOAP response'.format(self.path)
else:
if soap_fault_xpath(data[0]):
self.details = data[0]
else:
self.data = data[0]
self.ok = True
return True
# ################################################################################################################################
class JSONSIOResponse(_Response):
""" Stores responses from JSON SIO services.
"""
def init(self, _non_data=('zato_env', '_meta')):
try:
json = loads(self.inner.text)
except ValueError:
msg = 'inner.status_code `{}`, JSON parsing error `{}`'.format(self.inner.status_code, self.inner.text)
self.logger.error(msg)
raise ValueError(msg)
if 'zato_env' in json:
has_zato_env = True
self.details = json['zato_env']['details']
self.sio_result = json['zato_env']['result']
self.ok = self.sio_result == ZATO_OK
else:
has_zato_env = False
self.details = self.inner.text
self.ok = self.inner.ok
if self.ok:
if has_zato_env:
# There will be two keys, zato_env and the actual payload
for key, _value in json.items():
if key not in _non_data:
value = _value
break
else:
value = json
if self.set_data(value, has_zato_env):
self.has_data = True
if self.to_bunch:
self.data = bunchify(self.data)
def set_data(self, payload, _ignored):
self.data = payload
return True
class SOAPSIOResponse(_Response):
""" Stores responses from SOAP SIO services.
"""
def init(self):
response = objectify.fromstring(self.inner.text)
soap_fault = soap_fault_xpath(response)
if soap_fault:
self.details = soap_fault[0]
else:
zato_data = zato_data_xpath(response)
if not zato_data:
msg = 'Server did not send a business payload ({} element is missing), soap_response:[{}]'.format(
zato_data_path, self.inner.text)
self.details = msg
# We have a payload but hadn't there been any errors at the server's side?
zato_result = zato_result_xpath(response)
if zato_result[0] == ZATO_OK:
self.ok = True
self.data = zato_data[0]
self.has_data = True
else:
self.details = zato_details_xpath(response)[0]
class ServiceInvokeResponse(JSONSIOResponse):
""" Stores responses from SIO services invoked through the zato.service.invoke service.
"""
def __init__(self, *args, **kwargs):
self.inner_service_response = None
super(ServiceInvokeResponse, self).__init__(*args, **kwargs)
def set_data(self, payload, has_zato_env):
response = payload.get('response')
if response:
if has_zato_env:
self.inner_service_response = payload['response'].decode('base64')
try:
data = loads(self.inner_service_response)
except ValueError:
# Not a JSON response
self.data = self.inner_service_response
else:
if isinstance(data, dict):
self.meta = data.get('_meta')
data_keys = data.keys()
if len(data_keys) == 1:
data_key = data_keys[0]
if isinstance(data_key, basestring) and data_key.startswith('zato'):
self.data = data[data_key]
else:
self.data = data
else:
self.data = data
else:
self.data = data
else:
try:
data = loads(response)
except ValueError:
# Not a JSON response
self.data = response
else:
self.data = data
return True
# ################################################################################################################################
class RawDataResponse(_Response):
""" Stores responses from services that weren't invoked using any particular
data format
"""
def init(self):
self.ok = self.inner.ok
if self.set_data():
self.has_data = True
def set_data(self):
if self.ok:
self.data = self.inner.text
else:
self.details = self.inner.text
return self.data and len(self.data) > 0
# ################################################################################################################################
class _Client(object):
""" A base class of convenience clients for invoking Zato services from other Python applications.
"""
def __init__(self, address, path, auth=None, session=None, to_bunch=False,
max_response_repr=DEFAULT_MAX_RESPONSE_REPR, max_cid_repr=DEFAULT_MAX_CID_REPR, logger=None,
tls_verify=True):
self.address = address
self.service_address = '{}{}'.format(address, path)
self.session = session or requests.session()
self.to_bunch = to_bunch
self.max_response_repr = max_response_repr
self.max_cid_repr = max_cid_repr
self.logger = logger or mod_logger
self.tls_verify = tls_verify
if not self.session.auth:
self.session.auth = auth
def inner_invoke(self, request, response_class, async, headers, output_repeated=False):
""" Actually invokes a service through HTTP and returns its response.
"""
raw_response = self.session.post(self.service_address, request, headers=headers, verify=self.tls_verify)
response = response_class(
raw_response, self.to_bunch, self.max_response_repr,
self.max_cid_repr, self.logger, output_repeated)
if self.logger.isEnabledFor(logging.DEBUG):
msg = 'request:[%s]\nresponse_class:[%s]\nasync:[%s]\nheaders:[%s]\n text:[%s]\ndata:[%s]'
self.logger.debug(msg, request.decode('utf-8'), response_class, async, headers, raw_response.text, response.data)
return response
def invoke(self, request, response_class, async=False, headers=None, output_repeated=False):
""" Input parameters are like when invoking a service directly.
"""
headers = headers or {}
return self.inner_invoke(request, response_class, async, headers)
# ################################################################################################################################
class _JSONClient(_Client):
""" Base class for all JSON clients.
"""
response_class = None
def invoke(self, payload='', headers=None, to_json=True):
if to_json:
payload = dumps(payload)
return super(_JSONClient, self).invoke(payload, self.response_class, headers=headers)
class JSONClient(_JSONClient):
""" Client for services that accept JSON input.
"""
response_class = JSONResponse
# ################################################################################################################################
class JSONSIOClient(_JSONClient):
""" Client for services that accept Simple IO (SIO) in JSON.
"""
response_class = JSONSIOResponse
class SOAPSIOClient(_Client):
""" Client for services that accept Simple IO (SIO) in SOAP.
"""
def invoke(self, soap_action, payload=None, headers=None):
headers = headers or {}
headers['SOAPAction'] = soap_action
return super(SOAPSIOClient, self).invoke(payload, SOAPSIOResponse, headers=headers)
class AnyServiceInvoker(_Client):
""" Uses zato.service.invoke to invoke other services. The services being invoked
don't have to be available through any channels, it suffices for zato.service.invoke
to be exposed over HTTP.
"""
def json_default_handler(self, value):
if isinstance(value, datetime):
return value.isoformat()
raise TypeError('Cannot serialize [{}]'.format(value))
def _invoke(self, name=None, payload='', headers=None, channel='invoke', data_format='json',
transport=None, async=False, expiration=BROKER.DEFAULT_EXPIRATION, id=None,
to_json=True, output_repeated=ZATO_NOT_GIVEN, pid=None):
if not(name or id):
raise ZatoException(msg='Either name or id must be provided')
if name and output_repeated == ZATO_NOT_GIVEN:
output_repeated = name.lower().endswith('list')
if to_json:
payload = dumps(payload, default=self.json_default_handler)
id_, value = ('name', name) if name else ('id', id)
request = {
id_: value,
'payload': payload.encode('base64'),
'channel': channel,
'data_format': data_format,
'transport': transport,
'async': async,
'expiration':expiration,
'pid':pid
}
return super(AnyServiceInvoker, self).invoke(dumps(request), ServiceInvokeResponse, async, headers, output_repeated)
def invoke(self, *args, **kwargs):
return self._invoke(async=False, *args, **kwargs)
def invoke_async(self, *args, **kwargs):
return self._invoke(async=True, *args, **kwargs)
# ################################################################################################################################
class XMLClient(_Client):
def invoke(self, payload='', headers=None):
return super(XMLClient, self).invoke(payload, XMLResponse, headers=headers)
class SOAPClient(_Client):
def invoke(self, soap_action, payload='', headers=None):
headers = headers or {}
headers['SOAPAction'] = soap_action
return super(SOAPClient, self).invoke(payload, SOAPResponse, headers=headers)
# ################################################################################################################################
class RawDataClient(_Client):
""" Client which doesn't process requests before passing them into a service.
Likewise, no parsing of response is performed.
"""
def invoke(self, payload='', headers=None):
return super(RawDataClient, self).invoke(payload, RawDataResponse, headers=headers)
# ################################################################################################################################
def get_client_from_server_conf(server_dir, client_auth_func, get_config_func, server_url=None):
""" Returns a Zato client built out of data found in a given server's config files.
"""
# To avoid circular references
from zato.common.util import get_crypto_manager_from_server_config, get_odb_session_from_server_config
class ZatoClient(AnyServiceInvoker):
def __init__(self, *args, **kwargs):
super(ZatoClient, self).__init__(*args, **kwargs)
self.cluster_id = None
self.odb_session = None
repo_dir = os.path.join(os.path.abspath(os.path.join(server_dir)), 'config', 'repo')
config = get_config_func(repo_dir, 'server.conf')
server_url = server_url if server_url else config.main.gunicorn_bind
client = ZatoClient('http://{}'.format(server_url),
'/zato/admin/invoke', client_auth_func(config, repo_dir), max_response_repr=15000)
session = get_odb_session_from_server_config(
config, get_crypto_manager_from_server_config(config, repo_dir))
client.cluster_id = session.query(Server).\
filter(Server.token == config.main.token).\
one().cluster_id
client.odb_session = session
return client
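# A minimal usage sketch (the server path and the config/auth helper functions
# below are placeholders for whatever the caller already provides):
#
#   client = get_client_from_server_conf(
#       '/opt/zato/server1', get_client_auth, get_config)
#   response = client.invoke('zato.ping')
#   if response.ok:
#       print(response.data)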
# ################################################################################################################################
| alirizakeles/zato | code/zato-client/src/zato/client/__init__.py | Python | gpl-3.0 | 17,345 |
import astropy.units as u
import astropy.constants as con
import numpy as np
from radio_beam import Beam
'''
Recording the conversion from NHI to M, since I keep getting close to the
apparent correct numerical factor with slight differences everytime I try.
In the optically-thin limit, NHI = 1.82e18 * int T_B dv, where T_B is in K and
dv is in km/s. The factor of 1.82e18 must then have units of cm^-2 / K km s^-1.
To convert to mass, M = m_H * int NHI dA = m_H * NHI * D^2 * theta^2 * pi/4.
The relation in terms of the flux density is M = 2.36e5 * D^2 * int S_v dv
where 2.36e5 Msol / Mpc^2 * Jy * km s^-1.
There aren't many places where I've found the direct conversion of mass from
the line brightness (K km s^-1). Two editions of Tools of Radio Astro have
two different values: 5.30e2 and 0.39e2. Neither of these can be correct,
since the scaling from Jy to K is ~1e5.
This equation is M = E * D^2 theta^2 * int T_B dv, with theta in arcsec.
Here I recover the typically quoted 2.36e5 conversion factor, and using that,
get 0.34 Msol / Mpc^2 * K * km s^-1 * arcsec^2.
'''
nu = 1.420405 * u.GHz
C = 1.8224e18 * (u.cm**-2 / (u.K * u.km * u.s**-1))
D = C * con.m_p.to(u.Msun) * \
(Beam(1 * u.rad).jtok(nu) / u.Jy) * (1 * u.cm / (1 * u.cm).to(u.Mpc))**2
# Now the beam is assumed to NOT be given the FWHM parameters. So I think we
# need to convert out 1 factor of the FWHM on the area
fwhm_area_factor = np.pi / (4 * np.log(2))
D *= fwhm_area_factor
true_D = 2.36e5 * D.unit
frac_diff = 100 * np.abs(D.value - true_D.value) / true_D.value
print("Fraction difference: {}%".format(frac_diff))
# Off by ~0.5%! Success!
# Now what's the conversion factor if we don't go back to Jy and stay in K.
# To match the usual scaling, use a 1 arcsecond beam
E = D / ((Beam(1 * u.arcsec).jtok(nu) / u.Jy) * fwhm_area_factor) / u.arcsec**2
print("Using flux density: {0} {1}".format(D.value, D.unit.to_string()))
print("Using brightness temp: {0} {1}".format(E.value, E.unit.to_string()))
| e-koch/ewky_scripts | proposal_tools/hi_column_density_to_mass.py | Python | mit | 2,001 |
__all__ = (
'BaseTransition',
'Transition',
)
import collections
import itertools
from .errors import AnnotationError
from .expression import make_expression
from .iterable import Iterable
from .node import BaseTransition
from .token_list import TokenList
class Substitution(collections.OrderedDict):
pass
class Transition(BaseTransition):
def __init__(self, name, *, net=None, guard=None):
super().__init__(name=name, net=net)
self._guard = None
self.update(guard=guard)
@property
def guard(self):
return self._guard
def _add_input_arc(self, arc):
used_variables = set()
for input_arc in self.input_arcs():
used_variables.update(input_arc.annotation.variables())
invalid_variables = used_variables.intersection(arc.annotation.variables())
if invalid_variables:
raise AnnotationError("{!r}: cannot add {!r}: variable redefinition: {}".format(
self, arc, ', '.join(repr(variable) for variable in sorted(invalid_variables))))
super()._add_input_arc(arc)
def update(self, *, guard=None):
self._guard = make_expression(guard)
def iter_substitutions(self):
input_iterators = []
for arc in self.input_arcs():
annotation = arc.annotation
substitutions = arc.filter_substitutions()
if substitutions is not None:
                input_iterators.append(substitutions)
for dct_tuple in itertools.product(*input_iterators):
substitution = Substitution()
for dct in dct_tuple:
if dct is not None: # inhibitor arcs
substitution.update(dct)
print("Subst", substitution)
if self._guard is None or self._guard.evaluate(globals_d=self._net.globals_d, locals_d=substitution):
yield substitution
def substitutions(self):
return Iterable(self.iter_substitutions())
def first_substitution(self):
it = iter(self.iter_substitutions())
try:
return next(it)
except StopIteration:
return None
def fire(self, substitution=None):
#print("pre fire:", self._net.get_marking(), substitution)
if substitution is None:
substitution = self.first_substitution()
if substitution is None:
return False
# compute all output arc expressions:
lst = []
for arc in self.output_arcs():
result = arc.produce_token(substitution)
lst.append((arc, result))
# produce all output tokens:
for arc, result in lst:
print("add:", result, arc)
arc.add_token(result)
# remove all input tokens:
for arc in self.input_arcs():
arc.remove_substitution(substitution)
# notify firing:
self._net.notify_transition_fired(self)
#print("post fire:", substitution, self._net.get_marking())
#input("...")
return True
def label(self):
label = super().label()
if self._guard is not None:
label += '\n' + str(self._guard)
return label
| simone-campagna/petra | petra/transition.py | Python | apache-2.0 | 3,240 |
"""
Exports from this provider
"""
from .provider import AzureCloudProvider # noqa
from .provider import MockAzureCloudProvider # noqa
| ms-azure-cloudbroker/cloudbridge | cloudbridge/cloud/providers/azure/__init__.py | Python | mit | 138 |
import h5py
import numpy as np
import pyfits as pf
data = pf.open("wfc3_f160w_clean_central100.fits")[0].data[0:120,:]
F = h5py.File("data&mask.hdf5",'r')
FF = h5py.File("masked_data.hdf5",'r')
FD = h5py.File('sampler.hdf5','r')
GD = h5py.File('masked_sampler.hdf5','r')
r = np.array(F["data_mask"]["data"])
w= np.array(F["data_mask"]["mask"])
zz= np.array(FF["masked_data"]["0"])
kk = np.array(FD["sampler"]["0"])
print kk
#print w[1]
#print zz,zz.shape
| mjvakili/supermean | code/test_h5.py | Python | mit | 465 |
"""Tests for the views of the ``event_rsvp`` app."""
from django.test import TestCase
from django.utils import timezone
from django_libs.tests.factories import UserFactory
from django_libs.tests.mixins import ViewTestMixin
from event_rsvp.models import Event, Guest
from event_rsvp.tests.factories import EventFactory, GuestFactory, StaffFactory
class EventListViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventListView`` view."""
longMessage = True
def get_view_name(self):
return 'rsvp_event_list'
def test_view(self):
self.should_be_callable_when_anonymous()
self.user = UserFactory()
self.should_be_callable_when_authenticated(self.user)
class EventDetailViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventDetailView`` view."""
longMessage = True
def get_url(self, **kwargs):
return self.event.get_absolute_url()
def test_view(self):
self.event = EventFactory()
self.is_not_callable()
self.event.is_published = True
self.event.save()
self.should_be_callable_when_anonymous()
# Test with wrong url kwargs
resp = self.client.get(self.event.get_absolute_url().replace('2', '1'))
self.assertEqual(resp.status_code, 302)
class EventCreateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventCreateView`` view."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.staff = StaffFactory()
def get_view_name(self):
return 'rsvp_event_create'
def test_view(self):
# Staff rights required
self.is_not_callable(user=self.user)
self.should_be_callable_when_authenticated(self.staff)
data = {
'title': 'Foo',
'venue': 'Bar',
'start': timezone.now(),
'end': timezone.now() + timezone.timedelta(days=11),
'max_seats_per_guest': 1,
}
self.is_callable('POST', data=data)
class EventUpdateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventUpdateView`` view."""
longMessage = True
def get_url(self, *args, **kwargs):
return self.event.get_update_url()
def setUp(self):
self.event = EventFactory()
self.staff = StaffFactory()
def test_view(self):
data = {
'title': self.event.title,
'venue': self.event.venue,
'start': self.event.start,
'end': self.event.end,
'max_seats_per_guest': 20,
}
self.is_callable('POST', data=data, user=self.staff)
self.assertEqual(
Event.objects.get(pk=self.event.pk).max_seats_per_guest, 20)
class EventDeleteViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventDeleteView`` view."""
longMessage = True
def get_url(self, *args, **kwargs):
return self.event.get_delete_url()
def setUp(self):
self.event = EventFactory()
self.staff = StaffFactory()
def test_view(self):
self.is_callable('POST', data={'Foo': 'Bar'}, user=self.staff)
self.assertEqual(Event.objects.all().count(), 0)
class EventCreateFromTemplateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventCreateFromTemplateView`` view."""
longMessage = True
def get_url(self, *args, **kwargs):
return self.event.get_template_url()
def setUp(self):
self.event = EventFactory()
self.staff = StaffFactory()
def test_view(self):
# Only callable if event is a template
self.is_not_callable(user=self.staff)
self.event.template_name = 'Foo'
self.event.save()
self.is_callable(user=self.staff)
data = {
'title': self.event.title,
'venue': self.event.venue,
'start': self.event.start,
'end': self.event.end,
}
self.is_callable('POST', data=data, user=self.staff)
# The template remains and a new event has been created
self.assertEqual(Event.objects.all().count(), 2)
class StaffDashboardViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``StaffDashboardView`` view."""
longMessage = True
def get_view_name(self):
return 'rsvp_event_staff'
def test_view(self):
staff = StaffFactory()
self.is_callable(user=staff)
class GuestCreateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``GuestCreateView`` view."""
longMessage = True
def setUp(self):
self.event = EventFactory()
self.user = UserFactory()
def get_view_name(self):
return 'rsvp_guest_create'
def get_view_kwargs(self):
return {'event_slug': self.event.slug}
def test_view(self):
# Wrong event slug
self.is_not_callable(kwargs={'event_slug': 'bullshit'})
self.should_be_callable_when_anonymous()
self.is_callable('POST', data={}, user=self.user)
self.assertEqual(Guest.objects.all().count(), 1)
class GuestDeleteViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``GuestDeleteView`` view."""
longMessage = True
def setUp(self):
self.guest = GuestFactory()
self.staff = StaffFactory()
def get_view_name(self):
return 'rsvp_guest_delete'
def get_view_kwargs(self):
return {'pk': self.guest.pk, 'event_slug': self.guest.event.slug}
def test_view(self):
self.is_callable('POST', data={'Foo': 'Bar'}, user=self.staff)
self.assertEqual(Guest.objects.all().count(), 0)
class GuestDetailViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``GuestDetailView`` view."""
longMessage = True
def setUp(self):
self.guest = GuestFactory()
self.staff = StaffFactory()
self.event = EventFactory()
def get_view_name(self):
return 'rsvp_guest_detail'
def get_view_kwargs(self):
return {'pk': self.guest.pk, 'event_slug': self.guest.event.slug}
def test_view(self):
self.should_be_callable_when_authenticated(self.staff)
self.is_not_callable(kwargs={'pk': self.guest.pk,
'event_slug': self.event.slug})
class GuestUpdateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``GuestUpdateView`` view."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.guest = GuestFactory(user=self.user)
self.staff = StaffFactory()
def get_view_name(self):
return 'rsvp_guest_update'
def get_view_kwargs(self):
return {'pk': self.guest.pk, 'event_slug': self.guest.event.slug}
def test_view(self):
self.should_be_callable_when_authenticated(self.staff)
self.should_be_callable_when_authenticated(self.user)
self.is_not_callable(kwargs={'pk': self.guest.pk, 'event_slug': '500'})
self.guest.user = None
self.guest.save()
self.is_not_callable(user=self.user)
| bitmazk/django-event-rsvp | event_rsvp/tests/integration_tests/views_tests.py | Python | mit | 7,036 |
#!/usr/bin/env python
#
# Copyright (C) 2015 Joseph W. Metcalf
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby
# granted, provided that the above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys
import defs
import argparse
import string
import logging
import datetime
import time
import subprocess
def alert_start(JJJHHMM, format='%j%H%M'):
    """Convert EAS date string to datetime format"""
    import calendar
    utc_dt=datetime.datetime.strptime(JJJHHMM, format).replace(year=datetime.datetime.utcnow().year)
timestamp = calendar.timegm(utc_dt.timetuple())
return datetime.datetime.fromtimestamp(timestamp)
def fn_dt(dt, format='%I:%M %p'):
"""Return formated datetime"""
return dt.strftime(format)
# ZCZC-ORG-EEE-PSSCCC-PSSCCC+TTTT-JJJHHMM-LLLLLLLL-
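# An entirely made-up example in that shape:
#   ZCZC-WXR-RWT-020103-020209+0030-2771800-KABC/NWS-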
def format_error(info=''):
logging.warning(' '.join(['INVALID FORMAT', info]))
def time_str(x, type='hour'):
if x==1:
return ''.join([str(x),' ',type])
elif x>=2:
return ''.join([str(x),' ',type,'s'])
def get_length(TTTT):
hh,mm=TTTT[:2],TTTT[2:]
return ' '.join(filter(None, (time_str(int(hh)), time_str(int(mm), type='minute'))))
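# e.g. get_length('0130') -> '1 hour 30 minutes'; get_length('0045') -> '45 minutes'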
def county_decode(input, COUNTRY):
"""Convert SAME county/geographic code to text list"""
P, SS, CCC, SSCCC=input[:1], input[1:3], input[3:], input[1:]
if COUNTRY=='US':
if SSCCC in defs.SAME_CTYB:
SAME__LOC=defs.SAME_LOCB
else:
SAME__LOC=defs.SAME_LOCA
if CCC=='000':
county='ALL'
else:
county=defs.US_SAME_CODE[SSCCC]
return [' '.join(filter(None, (SAME__LOC[P], county))), defs.US_SAME_AREA[SS]]
else:
if CCC=='000':
county='ALL'
else:
county=defs.CA_SAME_CODE[SSCCC]
return [county, defs.CA_SAME_AREA[SS]]
def get_division(input, COUNTRY='US'):
if COUNTRY=='US':
try:
DIVISION=defs.FIPS_DIVN[input]
if not DIVISION:
DIVISION='areas'
except:
DIVISION='counties'
else:
DIVISION='areas'
return DIVISION
def get_event(input):
event=None
try:
event=defs.SAME__EEE[input]
except:
if input[2:] in 'WAESTM':
event=defs.SAME_UEEE[input[2:]]
return event
def printf(output=''):
output=output.lstrip(' ')
output=' '.join(output.split())
sys.stdout.write(''.join([output, '\n']))
def alert_end(JJJHHMM, TTTT):
alertstart = alert_start(JJJHHMM)
delta = datetime.timedelta(hours = int(TTTT[:2]), minutes=int(TTTT[2:]))
return alertstart + delta
def get_location(STATION=None, TYPE=None):
location=''
if TYPE=='NWS':
try:
location=defs.ICAO_LIST[STATION].title()
except:
pass
return location
def check_watch(watch_list, PSSCCC_list, event_list, EEE):
if not watch_list:
watch_list=PSSCCC_list
if not event_list:
event_list=[EEE]
w, p = [],[]
w += [item[1:] for item in watch_list]
p += [item[1:] for item in PSSCCC_list]
if (set(w) & set(p)) and EEE in event_list:
return True
else:
return False
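# For example, check_watch(['020103'], ['120103'], ['RWT'], 'RWT') is True: the
# leading "P" digit of each SAME code is ignored when comparing areas.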
def format_message(command, ORG='WXR', EEE='RWT',PSSCCC=[],TTTT='0030',JJJHHMM='0010000', STATION=None, TYPE=None, LLLLLLLL=None, COUNTRY='US', LANG='EN', MESSAGE=None,**kwargs):
return command.format(ORG=ORG, EEE=EEE, TTTT=TTTT, JJJHHMM=JJJHHMM, STATION=STATION, TYPE=TYPE, LLLLLLLL=LLLLLLLL, COUNTRY=COUNTRY, LANG=LANG, event=get_event(EEE), end=fn_dt(alert_end(JJJHHMM,TTTT)), start=fn_dt(alert_start(JJJHHMM)), organization=defs.SAME__ORG[ORG]['NAME'][COUNTRY], PSSCCC='-'.join(PSSCCC), location=get_location(STATION, TYPE), date=fn_dt(datetime.datetime.now(),'%c'), length=get_length(TTTT), MESSAGE=MESSAGE, **kwargs)
def readable_message(ORG='WXR',EEE='RWT',PSSCCC=[],TTTT='0030',JJJHHMM='0010000',STATION=None, TYPE=None, LLLLLLLL=None, COUNTRY='US', LANG='EN'):
import textwrap
printf()
location=get_location(STATION, TYPE)
MSG=[format_message(defs.MSG__TEXT[LANG]['MSG1'], ORG=ORG, EEE=EEE, TTTT=TTTT, JJJHHMM=JJJHHMM, STATION=STATION, TYPE=TYPE, COUNTRY=COUNTRY, LANG=LANG, article=defs.MSG__TEXT[LANG][defs.SAME__ORG[ORG]['ARTICLE'][COUNTRY]].title(), has=defs.MSG__TEXT[LANG]['HAS'] if not defs.SAME__ORG[ORG]['PLURAL'] else defs.MSG__TEXT[LANG]['HAVE'], preposition=defs.MSG__TEXT[LANG]['IN'] if location !='' else '')]
current_state=None
for idx, item in enumerate(PSSCCC):
county, state=county_decode(item, COUNTRY)
if current_state != state:
DIVISION=get_division(PSSCCC[idx][1:3], COUNTRY)
output=defs.MSG__TEXT[LANG]['MSG2'].format(conjunction='' if idx == 0 else defs.MSG__TEXT[LANG]['AND'], state=state, division=DIVISION)
MSG+=[''.join(output)]
current_state=state
MSG+=[defs.MSG__TEXT[LANG]['MSG3'].format(county=county if county != state else defs.MSG__TEXT[LANG]['ALL'].upper(),punc=',' if idx !=len(PSSCCC)-1 else '.')]
MSG+=[defs.MSG__TEXT[LANG]['MSG4']]
MSG+=[''.join(['(',LLLLLLLL,')'])]
output=textwrap.wrap(''.join(MSG), 78)
for item in output:
printf(item)
printf()
return ''.join(MSG)
def clean_msg(same):
valid_chars=''.join([string.ascii_uppercase, string.digits, '+-/*'])
same = same.upper() # Uppercase
msgidx=same.find('ZCZC')
if msgidx != -1:
same=same[msgidx:] # Left Offset
same = ''.join(same.split()) # Remove whitespace
same = ''.join(filter(lambda x: x in valid_chars, same)) # Valid ASCII codes only
slen= len(same)-1
if same[slen] !='-':
ridx=same.rfind('-')
offset = slen-ridx
if (offset <= 8):
same=''.join([same.ljust(slen+(8-offset)+1,'?'), '-']) # Add final dash and/or pad location field
return same
def same_decode(same, lang, same_watch=None, event_watch=None, text=True, call=None, command=None):
try:
same = clean_msg(same)
except:
return
msgidx=same.find('ZCZC')
if msgidx != -1:
logging.debug('-' * 30)
logging.debug(' '.join([' Identifer found >','ZCZC']))
S1, S2 = None, None
try:
S1,S2=same[msgidx:].split('+')
except:
format_error()
return
try:
ZCZC, ORG, EEE, PSSCCC=S1.split('-',3)
except:
format_error()
return
logging.debug(' '.join([' Originator found >',ORG]))
logging.debug(' '.join([' Event Code found >',EEE]))
try:
PSSCCC_list=PSSCCC.split('-')
except:
format_error()
try:
TTTT,JJJHHMM,LLLLLLLL,tail=S2.split('-')
except:
format_error()
return
logging.debug(' '.join([' Purge Time found >',TTTT]))
logging.debug(' '.join([' Date Code found >',JJJHHMM]))
logging.debug(' '.join(['Location Code found >',LLLLLLLL]))
try:
STATION, TYPE=LLLLLLLL.split('/',1)
except:
STATION, TYPE= None, None
format_error()
logging.debug(' '.join([' SAME Codes found >',str(len(PSSCCC_list))]))
US_bad_list=[]
CA_bad_list=[]
for code in PSSCCC_list:
try:
county=defs.US_SAME_CODE[code[1:]]
except KeyError:
US_bad_list.append(code)
try:
county=defs.CA_SAME_CODE[code[1:]]
except KeyError:
CA_bad_list.append(code)
if len(US_bad_list) < len(CA_bad_list):
COUNTRY='US'
if len(US_bad_list) > len(CA_bad_list):
COUNTRY='CA'
if len(US_bad_list) == len(CA_bad_list):
if type=='CA':
COUNTRY='CA'
else:
COUNTRY='US'
if COUNTRY=='CA':
bad_list=CA_bad_list
else:
bad_list=US_bad_list
logging.debug(' '.join(['Invalid Codes found >',str(len(bad_list))]))
logging.debug(' '.join([' Country >',COUNTRY]))
logging.debug('-' * 30)
for code in bad_list:
PSSCCC_list.remove(code)
PSSCCC_list.sort()
if check_watch(same_watch, PSSCCC_list, event_watch, EEE):
if text:
MESSAGE=readable_message(ORG, EEE, PSSCCC_list, TTTT, JJJHHMM, STATION, TYPE, LLLLLLLL, COUNTRY, lang)
if command:
if call:
l_cmd=[]
for cmd in command:
l_cmd.append(format_message(cmd, ORG, EEE, PSSCCC_list, TTTT, JJJHHMM, STATION, TYPE, LLLLLLLL, COUNTRY, lang, MESSAGE))
try:
subprocess.call([call] + l_cmd)
except Exception as detail:
logging.error(detail)
return
pass
else:
f_cmd=format_message(' '.join(command), ORG, EEE, PSSCCC_list, TTTT, JJJHHMM, STATION, TYPE, LLLLLLLL, COUNTRY, lang, MESSAGE)
printf(f_cmd)
else:
msgidx=same.find('NNNN')
if msgidx == -1:
logging.warning('Valid identifer not found.')
else:
logging.debug(' '.join(['End of Message found >','NNNN',str(msgidx)]))
def parse_arguments():
parser = argparse.ArgumentParser(description=defs.DESCRIPTION, prog=defs.PROGRAM, fromfile_prefix_chars='@')
parser.add_argument('--msg', help='message to decode')
parser.add_argument('--same', nargs='*', help='filter by SAME code')
parser.add_argument('--event', nargs='*', help='filter by event code')
parser.add_argument('--lang', default='EN', help='set language')
parser.add_argument('--loglevel', default=40, type=int, choices=[10, 20, 30, 40, 50], help='set log level')
parser.add_argument('--text', dest='text', action='store_true', help='output readable message')
parser.add_argument('--no-text', dest='text', action='store_false', help='disable readable message')
parser.add_argument('--version', action='version', version=' '.join([defs.PROGRAM, defs.VERSION]),help='show version infomation and exit')
parser.add_argument('--call', help='call external command')
parser.add_argument('--command', nargs='*', help='command message')
parser.add_argument('--source', help='source program')
parser.set_defaults(text=True)
args, unknown = parser.parse_known_args()
return args
def main():
args=parse_arguments()
logging.basicConfig(level=args.loglevel,format='%(levelname)s: %(message)s')
if args.msg:
same_decode(args.msg, args.lang, same_watch=args.same, event_watch=args.event, text=args.text, call=args.call, command=args.command)
elif args.source:
try:
source_process = subprocess.Popen(args.source, stdout=subprocess.PIPE)
except Exception as detail:
logging.error(detail)
return
while True:
line = source_process.stdout.readline()
if line:
logging.debug(line)
same_decode(line, args.lang, same_watch=args.same, event_watch=args.event, text=args.text, call=args.call, command=args.command)
else:
while True:
for line in sys.stdin:
logging.debug(line)
same_decode(line, args.lang, same_watch=args.same, event_watch=args.event, text=args.text, call=args.call, command=args.command)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass | sabas1080/dsame | dsame.py | Python | isc | 12,438 |
import os
# Django settings for wordlinks project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'c9', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
# 'USER': os.environ['C9_USER'],
# 'PASSWORD': '',
# 'HOST': os.environ['IP'], # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'PORT': '', # Set to empty string for default.
}
}
# oh wait ignore all that and do this instead:
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'staticfiles'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '78c(enih8c3nkxysbr43yt8zhold7^14@g%otq1(&e*2(c9xxa'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'wordlinks.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wordlinks.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'static'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'words',
'puzzles',
'taggit',
'learn',
'herokuapp',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
},
},
"loggers": {
"django": {
"handlers": ["console"],
}
}
} | gcusacrificialvictim/wordlinks | wordlinks/settings.py | Python | gpl-2.0 | 5,516 |
# Copyright 2012 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
import ldap
except ImportError:
# This module needs to be importable despite ldap not being a requirement
ldap = None
import time
from oslo.config import cfg
from nova import exception
from nova.network import dns_driver
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ldap_dns_opts = [
cfg.StrOpt('ldap_dns_url',
default='ldap://ldap.example.com:389',
help='URL for ldap server which will store dns entries'),
cfg.StrOpt('ldap_dns_user',
default='uid=admin,ou=people,dc=example,dc=org',
help='user for ldap DNS'),
cfg.StrOpt('ldap_dns_password',
default='password',
help='password for ldap DNS',
secret=True),
cfg.StrOpt('ldap_dns_soa_hostmaster',
default='hostmaster@example.org',
help='Hostmaster for ldap dns driver Statement of Authority'),
cfg.MultiStrOpt('ldap_dns_servers',
default=['dns.example.org'],
help='DNS Servers for ldap dns driver'),
cfg.StrOpt('ldap_dns_base_dn',
default='ou=hosts,dc=example,dc=org',
help='Base DN for DNS entries in ldap'),
cfg.StrOpt('ldap_dns_soa_refresh',
default='1800',
help='Refresh interval (in seconds) for ldap dns driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_retry',
default='3600',
help='Retry interval (in seconds) for ldap dns driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_expiry',
default='86400',
help='Expiry interval (in seconds) for ldap dns driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_minimum',
default='7200',
help='Minimum interval (in seconds) for ldap dns driver '
'Statement of Authority'),
]
CONF.register_opts(ldap_dns_opts)
# Importing ldap.modlist breaks the tests for some reason,
# so this is an abbreviated version of a function from
# there.
def create_modlist(newattrs):
modlist = []
for attrtype in newattrs.keys():
utf8_vals = []
for val in newattrs[attrtype]:
utf8_vals.append(utils.utf8(val))
newattrs[attrtype] = utf8_vals
modlist.append((attrtype, newattrs[attrtype]))
return modlist
class DNSEntry(object):
def __init__(self, ldap_object):
"""ldap_object is an instance of ldap.LDAPObject.
It should already be initialized and bound before
getting passed in here.
"""
self.lobj = ldap_object
self.ldap_tuple = None
self.qualified_domain = None
@classmethod
def _get_tuple_for_domain(cls, lobj, domain):
entry = lobj.search_s(CONF.ldap_dns_base_dn, ldap.SCOPE_SUBTREE,
'(associatedDomain=%s)' % utils.utf8(domain))
if not entry:
return None
if len(entry) > 1:
LOG.warn(_("Found multiple matches for domain "
"%(domain)s.\n%(entry)s") %
                     {'domain': domain, 'entry': entry})
return entry[0]
@classmethod
def _get_all_domains(cls, lobj):
entries = lobj.search_s(CONF.ldap_dns_base_dn,
ldap.SCOPE_SUBTREE, '(sOARecord=*)')
domains = []
for entry in entries:
domain = entry[1].get('associatedDomain')
if domain:
domains.append(domain[0])
return domains
def _set_tuple(self, tuple):
self.ldap_tuple = tuple
def _qualify(self, name):
return '%s.%s' % (name, self.qualified_domain)
def _dequalify(self, name):
z = ".%s" % self.qualified_domain
if name.endswith(z):
dequalified = name[0:name.rfind(z)]
else:
LOG.warn(_("Unable to dequalify. %(name)s is not in "
"%(domain)s.\n") %
{'name': name,
'domain': self.qualified_domain})
dequalified = None
return dequalified
def _dn(self):
return self.ldap_tuple[0]
dn = property(_dn)
def _rdn(self):
return self.dn.partition(',')[0]
rdn = property(_rdn)
class DomainEntry(DNSEntry):
@classmethod
def _soa(cls):
date = time.strftime('%Y%m%d%H%M%S')
soa = '%s %s %s %s %s %s %s' % (
CONF.ldap_dns_servers[0],
CONF.ldap_dns_soa_hostmaster,
date,
CONF.ldap_dns_soa_refresh,
CONF.ldap_dns_soa_retry,
CONF.ldap_dns_soa_expiry,
CONF.ldap_dns_soa_minimum)
return utils.utf8(soa)
@classmethod
def create_domain(cls, lobj, domain):
"""Create a new domain entry, and return an object that wraps it."""
entry = cls._get_tuple_for_domain(lobj, domain)
if entry:
raise exception.FloatingIpDNSExists(name=domain, domain='')
newdn = 'dc=%s,%s' % (domain, CONF.ldap_dns_base_dn)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'sOARecord': [cls._soa()],
'associatedDomain': [domain],
'dc': [domain]}
lobj.add_s(newdn, create_modlist(attrs))
return DomainEntry(lobj, domain)
def __init__(self, ldap_object, domain):
super(DomainEntry, self).__init__(ldap_object)
entry = self._get_tuple_for_domain(self.lobj, domain)
if not entry:
raise exception.NotFound()
self._set_tuple(entry)
assert(entry[1]['associatedDomain'][0] == domain)
self.qualified_domain = domain
def delete(self):
"""Delete the domain that this entry refers to."""
entries = self.lobj.search_s(self.dn,
ldap.SCOPE_SUBTREE,
'(aRecord=*)')
for entry in entries:
self.lobj.delete_s(entry[0])
self.lobj.delete_s(self.dn)
def update_soa(self):
mlist = [(ldap.MOD_REPLACE, 'sOARecord', self._soa())]
self.lobj.modify_s(self.dn, mlist)
def subentry_with_name(self, name):
entry = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
'(associatedDomain=%s.%s)' %
(utils.utf8(name),
utils.utf8(self.qualified_domain)))
if entry:
return HostEntry(self, entry[0])
else:
return None
def subentries_with_ip(self, ip):
entries = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
'(aRecord=%s)' % utils.utf8(ip))
objs = []
for entry in entries:
if 'associatedDomain' in entry[1]:
objs.append(HostEntry(self, entry))
return objs
def add_entry(self, name, address):
if self.subentry_with_name(name):
raise exception.FloatingIpDNSExists(name=name,
domain=self.qualified_domain)
entries = self.subentries_with_ip(address)
if entries:
# We already have an ldap entry for this IP, so we just
# need to add the new name.
existingdn = entries[0].dn
self.lobj.modify_s(existingdn, [(ldap.MOD_ADD,
'associatedDomain',
utils.utf8(self._qualify(name)))])
return self.subentry_with_name(name)
else:
# We need to create an entirely new entry.
newdn = 'dc=%s,%s' % (name, self.dn)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'aRecord': [address],
'associatedDomain': [self._qualify(name)],
'dc': [name]}
self.lobj.add_s(newdn, create_modlist(attrs))
return self.subentry_with_name(name)
def remove_entry(self, name):
entry = self.subentry_with_name(name)
if not entry:
raise exception.NotFound()
entry.remove_name(name)
self.update_soa()
class HostEntry(DNSEntry):
def __init__(self, parent, tuple):
super(HostEntry, self).__init__(parent.lobj)
self.parent_entry = parent
self._set_tuple(tuple)
self.qualified_domain = parent.qualified_domain
def remove_name(self, name):
names = self.ldap_tuple[1]['associatedDomain']
if not names:
raise exception.NotFound()
if len(names) > 1:
# We just have to remove the requested domain.
self.lobj.modify_s(self.dn, [(ldap.MOD_DELETE, 'associatedDomain',
self._qualify(utils.utf8(name)))])
            if (self.rdn.partition('=')[2] == name):
# We just removed the rdn, so we need to move this entry.
names.remove(self._qualify(name))
newrdn = 'dc=%s' % self._dequalify(names[0])
self.lobj.modrdn_s(self.dn, [newrdn])
else:
# We should delete the entire record.
self.lobj.delete_s(self.dn)
def modify_address(self, name, address):
names = self.ldap_tuple[1]['associatedDomain']
if not names:
raise exception.NotFound()
if len(names) == 1:
self.lobj.modify_s(self.dn, [(ldap.MOD_REPLACE, 'aRecord',
[utils.utf8(address)])])
else:
self.remove_name(name)
self.parent.add_entry(name, address)
def _names(self):
names = []
for domain in self.ldap_tuple[1]['associatedDomain']:
names.append(self._dequalify(domain))
return names
names = property(_names)
def _ip(self):
ip = self.ldap_tuple[1]['aRecord'][0]
return ip
ip = property(_ip)
def _parent(self):
return self.parent_entry
parent = property(_parent)
class LdapDNS(dns_driver.DNSDriver):
"""Driver for PowerDNS using ldap as a back end.
This driver assumes ldap-method=strict, with all domains
in the top-level, aRecords only.
"""
def __init__(self):
if not ldap:
raise ImportError(_('ldap not installed'))
self.lobj = ldap.initialize(CONF.ldap_dns_url)
self.lobj.simple_bind_s(CONF.ldap_dns_user,
CONF.ldap_dns_password)
def get_domains(self):
return DomainEntry._get_all_domains(self.lobj)
def create_entry(self, name, address, type, domain):
if type.lower() != 'a':
raise exception.InvalidInput(_("This driver only supports "
"type 'a' entries."))
dEntry = DomainEntry(self.lobj, domain)
dEntry.add_entry(name, address)
def delete_entry(self, name, domain):
dEntry = DomainEntry(self.lobj, domain)
dEntry.remove_entry(name)
def get_entries_by_address(self, address, domain):
try:
dEntry = DomainEntry(self.lobj, domain)
except exception.NotFound:
return []
entries = dEntry.subentries_with_ip(address)
names = []
for entry in entries:
names.extend(entry.names)
return names
def get_entries_by_name(self, name, domain):
try:
dEntry = DomainEntry(self.lobj, domain)
except exception.NotFound:
return []
nEntry = dEntry.subentry_with_name(name)
if nEntry:
return [nEntry.ip]
def modify_address(self, name, address, domain):
dEntry = DomainEntry(self.lobj, domain)
nEntry = dEntry.subentry_with_name(name)
nEntry.modify_address(name, address)
def create_domain(self, domain):
DomainEntry.create_domain(self.lobj, domain)
def delete_domain(self, domain):
dEntry = DomainEntry(self.lobj, domain)
dEntry.delete()
def delete_dns_file(self):
LOG.warn(_("This shouldn't be getting called except during testing."))
pass
| sacharya/nova | nova/network/ldapdns.py | Python | apache-2.0 | 13,197 |
from flask import redirect, request, url_for, Blueprint, jsonify, session, Response
from flask.ext.login import login_user, login_required, logout_user, abort
from project import utils, database_wrapper
from project.services.auth import Auth
from flask.ext.api import FlaskAPI, exceptions
from flask.ext.api.status import *
import json
from models import user, invite
from flask_oauth import OAuth
from bson.objectid import ObjectId
import sys
blueprint = Blueprint(
'invites', __name__
)
@blueprint.route('/invites', methods=['GET'])
@Auth.require(Auth.USER)
def get_invites():
'''
Gets all the invites for the currently logged on user
'''
user_id = user.getUserID('me')
entries = invite.find_multiple_invites({'producerObjectId': user_id})
invite_attributes_list = [invite.get_invite_attributes(entry) for entry in entries]
return jsonify(error=None, invites=invite_attributes_list)
@blueprint.route('/invites/<invite_id>', methods=['PUT'])
@Auth.require(Auth.USER)
def put_invite(invite_id):
invite.put_invite(invite_id, request.get_json())
return jsonify(error=None)
# This endpoint needs to be removed before release
@blueprint.route('/invites/create', methods=['POST'])
@Auth.require(Auth.ADMIN)
def create_invites():
'''
Create <number> of invites for a target user
(or by default the currently logged in user).
'''
try:
user_id = user.getUserID('me')
if request.get_json().get('user'):
user_id = ObjectId(request.get_json().get('user'))
number = int(request.get_json()['number'])
output_invites = []
for i in range(number):
output_invite = invite.create_invite(user_id)
output_invites.append(invite.get_invite_attributes(output_invite))
return jsonify(error=None, invites=output_invites)
except Exception as e:
return jsonify(error=str(e))
@blueprint.route('/invites/validate/<invite_code>', methods=['GET'])
def validate_invite(invite_code):
if invite.is_valid(invite_code):
return jsonify(error=None, status=True)
else:
return jsonify(error='Invalid Code', status=False), HTTP_400_BAD_REQUEST
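# --- Illustrative request sketch (not part of the original API) ----------------
# Assuming the blueprint is registered at the application root, an admin could
# mint invites for a specific user with a JSON POST such as:
#
#     POST /invites/create
#     {"user": "<existing user ObjectId>", "number": 5}
#
# Each returned invite code can later be checked with
# GET /invites/validate/<invite_code>, which answers
# {"error": null, "status": true} for a valid code.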
| AustinStoneProjects/Founderati-Server | project/api/invites.py | Python | apache-2.0 | 2,204 |
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The MIDI tool widget.
"""
from __future__ import unicode_literals
from PyQt4.QtCore import Qt, QTimer, QSettings
from PyQt4.QtGui import (
QWidget, QComboBox, QToolButton, QSlider, QGridLayout, QSizePolicy, QLabel)
import app
import css
import qutil
import midihub
import widgets.drag
from . import midifiles
from . import output
from . import player
class Widget(QWidget):
def __init__(self, dockwidget):
super(Widget, self).__init__(dockwidget)
self._document = None
self._fileSelector = QComboBox(editable=True, insertPolicy=QComboBox.NoInsert)
widgets.drag.ComboDrag(self._fileSelector).role = Qt.UserRole
self._fileSelector.lineEdit().setReadOnly(True)
self._fileSelector.lineEdit().setFocusPolicy(Qt.NoFocus)
self._stopButton = QToolButton()
self._playButton = QToolButton()
self._timeSlider = QSlider(Qt.Horizontal, tracking=False,
singleStep=500, pageStep=5000, invertedControls=True)
self._display = Display()
self._tempoFactor = QSlider(Qt.Vertical, minimum=-50, maximum=50,
singleStep=1, pageStep=5)
grid = QGridLayout(spacing=0)
self.setLayout(grid)
grid.addWidget(self._fileSelector, 0, 0, 1, 3)
grid.addWidget(self._stopButton, 1, 0)
grid.addWidget(self._playButton, 1, 1)
grid.addWidget(self._timeSlider, 1, 2)
grid.addWidget(self._display, 2, 0, 1, 3)
grid.addWidget(self._tempoFactor, 0, 3, 3, 1)
# size policy of combo
p = self._fileSelector.sizePolicy()
p.setHorizontalPolicy(QSizePolicy.Ignored)
self._fileSelector.setSizePolicy(p)
# size policy of combo popup
p = self._fileSelector.view().sizePolicy()
p.setHorizontalPolicy(QSizePolicy.MinimumExpanding)
self._fileSelector.view().setSizePolicy(p)
self._player = player.Player()
self._outputCloseTimer = QTimer(interval=60000, singleShot=True,
timeout=self.closeOutput)
self._timeSliderTicker = QTimer(interval=200, timeout=self.updateTimeSlider)
self._fileSelector.activated[int].connect(self.slotFileSelected)
self._tempoFactor.valueChanged.connect(self.slotTempoChanged)
self._timeSlider.valueChanged.connect(self.slotTimeSliderChanged)
self._timeSlider.sliderMoved.connect(self.slotTimeSliderMoved)
self._player.beat.connect(self.updateDisplayBeat)
self._player.time.connect(self.updateDisplayTime)
self._player.stateChanged.connect(self.slotPlayerStateChanged)
self.slotPlayerStateChanged(False)
dockwidget.mainwindow().currentDocumentChanged.connect(self.loadResults)
app.documentLoaded.connect(self.slotUpdatedFiles)
app.jobFinished.connect(self.slotUpdatedFiles)
app.aboutToQuit.connect(self.stop)
midihub.aboutToRestart.connect(self.slotAboutToRestart)
midihub.settingsChanged.connect(self.clearMidiSettings, -100)
midihub.settingsChanged.connect(self.readMidiSettings)
app.documentClosed.connect(self.slotDocumentClosed)
app.translateUI(self)
self.readMidiSettings()
d = dockwidget.mainwindow().currentDocument()
if d:
self.loadResults(d)
def translateUI(self):
self._tempoFactor.setToolTip(_("Tempo"))
def slotAboutToRestart(self):
self.stop()
self._player.set_output(None)
def clearMidiSettings(self):
"""Called first when settings are changed."""
self.stop()
self._outputCloseTimer.stop()
self._player.set_output(None)
def readMidiSettings(self):
"""Called after clearMidiSettings(), and on first init."""
pass
def openOutput(self):
"""Called when playing starts. Ensures an output port is opened."""
self._outputCloseTimer.stop()
if not self._player.output():
p = QSettings().value("midi/player/output_port", midihub.default_output(), type(""))
o = midihub.output_by_name(p)
if o:
self._player.set_output(output.Output(o))
def closeOutput(self):
"""Called when the output close timer fires. Closes the output."""
self._player.set_output(None)
def slotPlayerStateChanged(self, playing):
ac = self.parentWidget().actionCollection
# setDefaultAction also adds the action
for b in self._stopButton, self._playButton:
while b.actions():
b.removeAction(b.actions()[0])
if playing:
self._timeSliderTicker.start()
self._stopButton.setDefaultAction(ac.midi_stop)
self._playButton.setDefaultAction(ac.midi_pause)
else:
self._timeSliderTicker.stop()
self.updateTimeSlider()
self._stopButton.setDefaultAction(ac.midi_restart)
self._playButton.setDefaultAction(ac.midi_play)
# close the output if the preference is set
if QSettings().value("midi/close_outputs", False, bool):
self._outputCloseTimer.start()
def play(self):
"""Starts the MIDI player, opening an output if necessary."""
if not self._player.is_playing() and not self._player.has_events():
self.restart()
self.openOutput()
if not self._player.output():
self._display.statusMessage(_("No output found!"))
self._player.start()
def stop(self):
"""Stops the MIDI player."""
self._player.stop()
def restart(self):
"""Restarts the MIDI player.
If another file is in the file selector, or the file was updated,
the new file is loaded.
"""
self._player.seek(0)
self.updateTimeSlider()
self._display.reset()
if self._document:
files = midifiles.MidiFiles.instance(self._document)
index = self._fileSelector.currentIndex()
if files and (files.song(index) is not self._player.song()):
self.loadSong(index)
def slotTempoChanged(self, value):
"""Called when the user drags the tempo."""
# convert -50 to 50 to 0.5 to 2.0
factor = 2 ** (value / 50.0)
self._player.set_tempo_factor(factor)
self._display.setTempo("{0}%".format(int(factor * 100)))
def slotTimeSliderChanged(self, value):
self._player.seek(value)
self._display.setTime(value)
if self._player.song():
self._display.setBeat(*self._player.song().beat(value)[1:])
def slotTimeSliderMoved(self, value):
self._display.setTime(value)
if self._player.song():
self._display.setBeat(*self._player.song().beat(value)[1:])
def updateTimeSlider(self):
if not self._timeSlider.isSliderDown():
with qutil.signalsBlocked(self._timeSlider):
self._timeSlider.setMaximum(self._player.total_time())
self._timeSlider.setValue(self._player.current_time())
def updateDisplayBeat(self, measnum, beat, num, den):
if not self._timeSlider.isSliderDown():
self._display.setBeat(measnum, beat, num, den)
def updateDisplayTime(self, time):
if not self._timeSlider.isSliderDown():
self._display.setTime(time)
def slotUpdatedFiles(self, document):
"""Called when there are new MIDI files."""
if document == self.parentWidget().mainwindow().currentDocument():
self.loadResults(document)
def loadResults(self, document):
self._document = document
files = midifiles.MidiFiles.instance(document)
self._fileSelector.setModel(files.model())
if files:
self._fileSelector.setCurrentIndex(files.current)
if not self._player.is_playing():
self.loadSong(files.current)
def loadSong(self, index):
files = midifiles.MidiFiles.instance(self._document)
self._player.set_song(files.song(index))
m, s = divmod(self._player.total_time() // 1000, 60)
name = self._fileSelector.currentText()
self.updateTimeSlider()
self._display.reset()
self._display.statusMessage(
_("midi lcd screen", "LOADED"), name,
_("midi lcd screen", "TOTAL"), "{0}:{1:02}".format(m, s))
def slotFileSelected(self, index):
if self._document:
self._player.stop()
files = midifiles.MidiFiles.instance(self._document)
if files:
files.current = index
self.restart()
def slotDocumentClosed(self, document):
if document == self._document:
self._document = None
self._fileSelector.clear()
self._player.stop()
self._player.clear()
self.updateTimeSlider()
self._display.reset()
class Display(QLabel):
"""Maintains values in the LCD display."""
def __init__(self):
QLabel.__init__(self, wordWrap=True)
self.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred))
self.setStyleSheet(css.lcd_screen)
self._tempoTimer = QTimer(interval=1500, singleShot=True,
timeout=self.setTempo)
self._statusTimer = QTimer(interval=2000, singleShot=True,
timeout=self.statusMessage)
self._tempo = None
self._status = None
self.reset()
app.translateUI(self)
def reset(self):
"""Sets everything to 0."""
self._time = 0
self._beat = 0, 0, 0, 0
self.updateDisplay()
def translateUI(self):
self.updateDisplay()
def setTime(self, time):
self._time = time
self.updateDisplay()
def setBeat(self, measnum, beat, num, den):
self._beat = measnum, beat, num, den
self.updateDisplay()
def setTempo(self, text=None):
self._tempo = text
if text:
self._tempoTimer.start()
self.updateDisplay()
def statusMessage(self, *msg):
"""Status message can be multiple arguments (1 to 4)."""
self._status = msg
if msg:
self._statusTimer.start()
self.updateDisplay()
def updateDisplay(self):
minutes, seconds = divmod(self._time // 1000, 60)
time_spec = "{0}:{1:02}".format(minutes, seconds)
if self._status:
items = self._status
if len(items) == 1:
self.setText(_lcd_status_one.format(" ", items[0]))
elif len(items) == 2:
self.setText(_lcd_status_one.format(*items))
elif len(items) == 3:
self.setText(_lcd_status_two.format(
items[0], items[1], " ", items[2]))
elif len(items) == 4:
self.setText(_lcd_status_two.format(*items))
elif self._tempo:
self.setText(_lcd_text.format(
_("midi lcd screen", "TIME"),
_("midi lcd screen", "TEMPO"),
time_spec,
self._tempo,
))
else:
measnum, beat, num, den = self._beat
beat_spec = "{0}.{1:2}".format(measnum, beat)
time_sig = " {0}/{1}".format(num, 2 ** den) if num else ""
self.setText(_lcd_text.format(
_("midi lcd screen", "TIME"),
_("midi lcd screen", "BEAT") + time_sig,
time_spec,
beat_spec,
))
_lcd_text = """\
<table width=100% border=0 cellspacing=0>
<tr>
<td width=50% align=right style="font-size:8px;">{0}</td>
<td width=50% align=right style="font-size:8px;">{1}</td>
</tr>
<tr>
<td width=50% align=right><h2>{2}</h2></td>
<td width=50% align=right><h2>{3}</h2></td>
</tr>
</table>"""
_lcd_status_one = """\
<table width=100% border=0 cellspacing=0>
<tr>
<td align=left style="font-size:8px;">{0}</td>
</tr>
<tr>
<td align=left><h2>{1}</h2></td>
</tr>
</table>"""
_lcd_status_two = """\
<table width=100% border=0 cellspacing=0>
<tr>
<td align=left style="font-size:8px;">{0}</td>
<td align=right style="font-size:8px;">{2}</td>
</tr>
<tr>
<td align=left><h2>{1}</h2></td>
<td align=right><h2>{3}</h2></td>
</tr>
</table>"""
| shimpe/frescobaldi | frescobaldi_app/miditool/widget.py | Python | gpl-2.0 | 13,401 |
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
"""
QUEUES :
    A queue is a collection of objects that are inserted and removed according to the first-in,
    first-out (FIFO) principle. Elements can be inserted at any time, but only the element
    that has been in the queue the longest can be removed next.
THE QUEUE ABSTRACT DATA TYPE :
    The queue abstract data type (ADT) supports the following two fundamental methods for a queue Q :
        Q.enqueue(e) : Add element e to the back of queue Q
        Q.dequeue()  : Remove and return the first element from queue Q;
                       an error occurs if the queue is empty
    The queue ADT also supports the following methods :
        Q.first()    : Return (but do not remove) the element at the front of queue Q;
                       an error occurs if the queue is empty
        len(Q)       : Return the number of elements in queue Q; in Python, we implement
                       this with the special method __len__
        Q.is_empty() : Return True if queue Q does not contain any elements.
"""
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
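# --- Illustrative usage sketch (not part of the original tutorial) -------------
# The ADT described above behaves as follows with the ArrayQueue defined below
# (ignoring the diagnostic print statements inside enqueue/dequeue):
#
#     Q = ArrayQueue()
#     Q.enqueue('a')
#     Q.enqueue('b')
#     Q.first()       # -> 'a'  (returned but not removed)
#     Q.dequeue()     # -> 'a'
#     len(Q)          # -> 1
#     Q.is_empty()    # -> False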
class Empty(Exception):
    """ Error raised when attempting to access an element in an empty queue """
    pass
class ArrayQueue:
""" FIFO queue implementation using a Python list as underlying storage """
DEFAULT_CAPACITY = 10 # moderate capacity for all new queues
def __init__(self):
""" Create an empty queue """
self._data = [None] * ArrayQueue.DEFAULT_CAPACITY
self._size = 0
self._front = 0
def is_empty(self):
""" Return True if the queue is empty """
return self._size == 0
def __len__(self):
""" Return the number of element in the queue """
return self._size
def first(self):
""" Return (but do not remove) the element at the front of the queue.
Raise Empty exception if the queue is empty
"""
if self.is_empty():
raise Empty('Queue is empty')
return self._data[self._front]
def dequeue(self):
""" Remove and return the first element of the queue
Raise an exception if the queue is empty
"""
if self.is_empty():
raise Empty('Queue is empty')
answer = self._data[self._front]
self._data[self._front] = None # garbage collection
self._front = (self._front + 1) % len(self._data)
        self._size -= 1
print('Dequeue')
print('front :',self._front,'\t size : ',self._size,'\t length : ',len(self._data))
return answer
def enqueue(self, e):
""" Add element to the back of queue """
if self._size == len(self._data):
self._resize(2 * len(self._data)) # double the array size
avail = (self._front + self._size) % len(self._data)
print('enqueue')
print('avail : ',avail,'\t front : ',self._front,'\t size : ',self._size,'\t length : ',len(self._data))
self._data[avail] = e
self._size += 1
def _resize(self, cap):
""" resize to a new list of capacity >= len(self) """
old = self._data
self._data = [None] * cap
walk = self._front
for k in range(self._size):
self._data[k] = old[walk]
walk = (1 + walk) % len(old)
self._front = 0
    def print_Queue(self):
        """ Print the contents of the underlying storage, including empty slots """
        print("Queue --> ")
        for i in range(len(self._data)):
            print(self._data[i])
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
"""
ADDING AND REMOVING ELEMENTS
    The goal of the enqueue method is to add a new element to the back of the queue. We need to
    determine the proper index at which to place the new element. We compute the location of the
    next opening based on the formula :
        avail = (self._front + self._size) % len(self._data)
    When the dequeue method is called, the current value of self._front designates the index of
    the value that is to be removed and returned.
    * Keep a local reference to the element that will be returned, setting
          answer = self._data[self._front]
      just prior to removing the reference to that object from the list, with the assignment
          self._data[self._front] = None
    * The second responsibility of the dequeue method is to update the value of _front to reflect
      the removal of the element and the presumed promotion of the second element to become
      the new first.
"""
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
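# --- Illustrative sketch (not part of the original tutorial) -------------------
# A worked example of the circular-index arithmetic described above, assuming the
# default capacity of 10: with _front = 8 and _size = 3, the next open slot wraps
# around to index (8 + 3) % 10 = 1.
def _demo_next_open_slot(front, size, capacity):
    """Index at which enqueue would place the next element."""
    return (front + size) % capacity
assert _demo_next_open_slot(8, 3, 10) == 1
assert _demo_next_open_slot(0, 0, 10) == 0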
if __name__ == '__main__':
queue = ArrayQueue()
for i in range(30):
queue.enqueue(i)
queue.dequeue()
queue.dequeue()
queue.dequeue()
queue.dequeue()
for j in range(5):
queue.enqueue(j)
#queue.print_Queue()
for k in range(30):
queue.dequeue()
queue.enqueue(10)
#-----------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------
"""
OUTPUT :
enqueue
avail : 0 front : 0 size : 0 length : 10
enqueue
avail : 1 front : 0 size : 1 length : 10
enqueue
avail : 2 front : 0 size : 2 length : 10
enqueue
avail : 3 front : 0 size : 3 length : 10
enqueue
avail : 4 front : 0 size : 4 length : 10
enqueue
avail : 5 front : 0 size : 5 length : 10
enqueue
avail : 6 front : 0 size : 6 length : 10
enqueue
avail : 7 front : 0 size : 7 length : 10
enqueue
avail : 8 front : 0 size : 8 length : 10
enqueue
avail : 9 front : 0 size : 9 length : 10
enqueue
avail : 10 front : 0 size : 10 length : 20
enqueue
avail : 11 front : 0 size : 11 length : 20
enqueue
avail : 12 front : 0 size : 12 length : 20
enqueue
avail : 13 front : 0 size : 13 length : 20
enqueue
avail : 14 front : 0 size : 14 length : 20
enqueue
avail : 15 front : 0 size : 15 length : 20
enqueue
avail : 16 front : 0 size : 16 length : 20
enqueue
avail : 17 front : 0 size : 17 length : 20
enqueue
avail : 18 front : 0 size : 18 length : 20
enqueue
avail : 19 front : 0 size : 19 length : 20
enqueue
avail : 20 front : 0 size : 20 length : 40
enqueue
avail : 21 front : 0 size : 21 length : 40
enqueue
avail : 22 front : 0 size : 22 length : 40
enqueue
avail : 23 front : 0 size : 23 length : 40
enqueue
avail : 24 front : 0 size : 24 length : 40
enqueue
avail : 25 front : 0 size : 25 length : 40
enqueue
avail : 26 front : 0 size : 26 length : 40
enqueue
avail : 27 front : 0 size : 27 length : 40
enqueue
avail : 28 front : 0 size : 28 length : 40
enqueue
avail : 29 front : 0 size : 29 length : 40
Dequeue
front : 1 size : 29 length : 40
Dequeue
front : 2 size : 28 length : 40
Dequeue
front : 3 size : 27 length : 40
Dequeue
front : 4 size : 26 length : 40
enqueue
avail : 30 front : 4 size : 26 length : 40
enqueue
avail : 31 front : 4 size : 27 length : 40
enqueue
avail : 32 front : 4 size : 28 length : 40
enqueue
avail : 33 front : 4 size : 29 length : 40
enqueue
avail : 34 front : 4 size : 30 length : 40
Dequeue
front : 5 size : 30 length : 40
Dequeue
front : 6 size : 29 length : 40
Dequeue
front : 7 size : 28 length : 40
Dequeue
front : 8 size : 27 length : 40
Dequeue
front : 9 size : 26 length : 40
Dequeue
front : 10 size : 25 length : 40
Dequeue
front : 11 size : 24 length : 40
Dequeue
front : 12 size : 23 length : 40
Dequeue
front : 13 size : 22 length : 40
Dequeue
front : 14 size : 21 length : 40
Dequeue
front : 15 size : 20 length : 40
Dequeue
front : 16 size : 19 length : 40
Dequeue
front : 17 size : 18 length : 40
Dequeue
front : 18 size : 17 length : 40
Dequeue
front : 19 size : 16 length : 40
Dequeue
front : 20 size : 15 length : 40
Dequeue
front : 21 size : 14 length : 40
Dequeue
front : 22 size : 13 length : 40
Dequeue
front : 23 size : 12 length : 40
Dequeue
front : 24 size : 11 length : 40
Dequeue
front : 25 size : 10 length : 40
Dequeue
front : 26 size : 9 length : 40
Dequeue
front : 27 size : 8 length : 40
Dequeue
front : 28 size : 7 length : 40
Dequeue
front : 29 size : 6 length : 40
Dequeue
front : 30 size : 5 length : 40
Dequeue
front : 31 size : 4 length : 40
Dequeue
front : 32 size : 3 length : 40
Dequeue
front : 33 size : 2 length : 40
Dequeue
front : 34 size : 1 length : 40
enqueue
avail : 35 front : 34 size : 1 length : 40
"""
| MithileshCParab/Algorithms | Python/Data Structure/Queue/Queues.py | Python | apache-2.0 | 8,845 |
"""Generated file that augments the standard schema L{datatype
definitions<pyxb.binding.datatypes>} with their respective
U{constraining facets<http://www.w3.org/TR/xmlschema-2/index.html#rf-facets>}. At
one time, the C{maintainer/xsdfacet.py} script could be used to
generate this. No idea if that's still true.
"""
import facets
from datatypes import *
gDay._CF_pattern = facets.CF_pattern()
gDay._CF_enumeration = facets.CF_enumeration(value_datatype=gDay)
gDay._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gDay._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gDay._CF_minInclusive = facets.CF_minInclusive(value_datatype=gDay)
gDay._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gDay._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gDay)
gDay._InitializeFacetMap(gDay._CF_pattern,
gDay._CF_enumeration,
gDay._CF_minExclusive,
gDay._CF_whiteSpace,
gDay._CF_minInclusive,
gDay._CF_maxExclusive,
gDay._CF_maxInclusive)
gMonthDay._CF_pattern = facets.CF_pattern()
gMonthDay._CF_enumeration = facets.CF_enumeration(value_datatype=gMonthDay)
gMonthDay._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gMonthDay._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gMonthDay._CF_minInclusive = facets.CF_minInclusive(value_datatype=gMonthDay)
gMonthDay._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gMonthDay._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gMonthDay)
gMonthDay._InitializeFacetMap(gMonthDay._CF_pattern,
gMonthDay._CF_enumeration,
gMonthDay._CF_minExclusive,
gMonthDay._CF_whiteSpace,
gMonthDay._CF_minInclusive,
gMonthDay._CF_maxExclusive,
gMonthDay._CF_maxInclusive)
gYearMonth._CF_pattern = facets.CF_pattern()
gYearMonth._CF_enumeration = facets.CF_enumeration(value_datatype=gYearMonth)
gYearMonth._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gYearMonth._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gYearMonth._CF_minInclusive = facets.CF_minInclusive(value_datatype=gYearMonth)
gYearMonth._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gYearMonth._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gYearMonth)
gYearMonth._InitializeFacetMap(gYearMonth._CF_pattern,
gYearMonth._CF_enumeration,
gYearMonth._CF_minExclusive,
gYearMonth._CF_whiteSpace,
gYearMonth._CF_minInclusive,
gYearMonth._CF_maxExclusive,
gYearMonth._CF_maxInclusive)
ENTITIES._CF_minLength = facets.CF_minLength(value=nonNegativeInteger(1))
ENTITIES._CF_maxLength = facets.CF_maxLength()
ENTITIES._CF_whiteSpace = facets.CF_whiteSpace()
ENTITIES._CF_length = facets.CF_length()
ENTITIES._CF_enumeration = facets.CF_enumeration(value_datatype=ENTITIES)
ENTITIES._CF_pattern = facets.CF_pattern()
ENTITIES._InitializeFacetMap(ENTITIES._CF_minLength,
ENTITIES._CF_maxLength,
ENTITIES._CF_whiteSpace,
ENTITIES._CF_length,
ENTITIES._CF_enumeration,
ENTITIES._CF_pattern)
IDREFS._CF_minLength = facets.CF_minLength(value=nonNegativeInteger(1))
IDREFS._CF_maxLength = facets.CF_maxLength()
IDREFS._CF_whiteSpace = facets.CF_whiteSpace()
IDREFS._CF_length = facets.CF_length()
IDREFS._CF_enumeration = facets.CF_enumeration(value_datatype=IDREFS)
IDREFS._CF_pattern = facets.CF_pattern()
IDREFS._InitializeFacetMap(IDREFS._CF_minLength,
IDREFS._CF_maxLength,
IDREFS._CF_whiteSpace,
IDREFS._CF_length,
IDREFS._CF_enumeration,
IDREFS._CF_pattern)
time._CF_pattern = facets.CF_pattern()
time._CF_enumeration = facets.CF_enumeration(value_datatype=time)
time._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
time._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
time._CF_minInclusive = facets.CF_minInclusive(value_datatype=time)
time._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
time._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=time)
time._InitializeFacetMap(time._CF_pattern,
time._CF_enumeration,
time._CF_minExclusive,
time._CF_whiteSpace,
time._CF_minInclusive,
time._CF_maxExclusive,
time._CF_maxInclusive)
date._CF_pattern = facets.CF_pattern()
date._CF_enumeration = facets.CF_enumeration(value_datatype=date)
date._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
date._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
date._CF_minInclusive = facets.CF_minInclusive(value_datatype=date)
date._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
date._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=date)
date._InitializeFacetMap(date._CF_pattern,
date._CF_enumeration,
date._CF_minExclusive,
date._CF_whiteSpace,
date._CF_minInclusive,
date._CF_maxExclusive,
date._CF_maxInclusive)
NMTOKENS._CF_minLength = facets.CF_minLength(value=nonNegativeInteger(1))
NMTOKENS._CF_maxLength = facets.CF_maxLength()
NMTOKENS._CF_whiteSpace = facets.CF_whiteSpace()
NMTOKENS._CF_length = facets.CF_length()
NMTOKENS._CF_enumeration = facets.CF_enumeration(value_datatype=NMTOKENS)
NMTOKENS._CF_pattern = facets.CF_pattern()
NMTOKENS._InitializeFacetMap(NMTOKENS._CF_minLength,
NMTOKENS._CF_maxLength,
NMTOKENS._CF_whiteSpace,
NMTOKENS._CF_length,
NMTOKENS._CF_enumeration,
NMTOKENS._CF_pattern)
duration._CF_pattern = facets.CF_pattern()
duration._CF_enumeration = facets.CF_enumeration(value_datatype=duration)
duration._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
duration._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
duration._CF_minInclusive = facets.CF_minInclusive(value_datatype=duration)
duration._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
duration._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=duration)
duration._InitializeFacetMap(duration._CF_pattern,
duration._CF_enumeration,
duration._CF_minExclusive,
duration._CF_whiteSpace,
duration._CF_minInclusive,
duration._CF_maxExclusive,
duration._CF_maxInclusive)
gMonth._CF_pattern = facets.CF_pattern()
gMonth._CF_enumeration = facets.CF_enumeration(value_datatype=gMonth)
gMonth._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gMonth._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gMonth._CF_minInclusive = facets.CF_minInclusive(value_datatype=gMonth)
gMonth._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gMonth._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gMonth)
gMonth._InitializeFacetMap(gMonth._CF_pattern,
gMonth._CF_enumeration,
gMonth._CF_minExclusive,
gMonth._CF_whiteSpace,
gMonth._CF_minInclusive,
gMonth._CF_maxExclusive,
gMonth._CF_maxInclusive)
hexBinary._CF_minLength = facets.CF_minLength()
hexBinary._CF_maxLength = facets.CF_maxLength()
hexBinary._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
hexBinary._CF_length = facets.CF_length()
hexBinary._CF_enumeration = facets.CF_enumeration(value_datatype=hexBinary)
hexBinary._CF_pattern = facets.CF_pattern()
hexBinary._InitializeFacetMap(hexBinary._CF_minLength,
hexBinary._CF_maxLength,
hexBinary._CF_whiteSpace,
hexBinary._CF_length,
hexBinary._CF_enumeration,
hexBinary._CF_pattern)
double._CF_pattern = facets.CF_pattern()
double._CF_enumeration = facets.CF_enumeration(value_datatype=double)
double._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
double._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
double._CF_minInclusive = facets.CF_minInclusive(value_datatype=double)
double._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
double._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=double)
double._InitializeFacetMap(double._CF_pattern,
double._CF_enumeration,
double._CF_minExclusive,
double._CF_whiteSpace,
double._CF_minInclusive,
double._CF_maxExclusive,
double._CF_maxInclusive)
QName._CF_minLength = facets.CF_minLength()
QName._CF_maxLength = facets.CF_maxLength()
QName._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
QName._CF_length = facets.CF_length()
QName._CF_enumeration = facets.CF_enumeration(value_datatype=QName)
QName._CF_pattern = facets.CF_pattern()
QName._InitializeFacetMap(QName._CF_minLength,
QName._CF_maxLength,
QName._CF_whiteSpace,
QName._CF_length,
QName._CF_enumeration,
QName._CF_pattern)
NOTATION._CF_minLength = facets.CF_minLength()
NOTATION._CF_maxLength = facets.CF_maxLength()
NOTATION._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
NOTATION._CF_length = facets.CF_length()
NOTATION._CF_enumeration = facets.CF_enumeration(value_datatype=NOTATION)
NOTATION._CF_pattern = facets.CF_pattern()
NOTATION._InitializeFacetMap(NOTATION._CF_minLength,
NOTATION._CF_maxLength,
NOTATION._CF_whiteSpace,
NOTATION._CF_length,
NOTATION._CF_enumeration,
NOTATION._CF_pattern)
decimal._CF_fractionDigits = facets.CF_fractionDigits()
decimal._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=decimal)
decimal._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
decimal._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
decimal._CF_totalDigits = facets.CF_totalDigits()
decimal._CF_enumeration = facets.CF_enumeration(value_datatype=decimal)
decimal._CF_minInclusive = facets.CF_minInclusive(value_datatype=decimal)
decimal._CF_pattern = facets.CF_pattern()
decimal._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
decimal._InitializeFacetMap(decimal._CF_fractionDigits,
decimal._CF_maxInclusive,
decimal._CF_minExclusive,
decimal._CF_whiteSpace,
decimal._CF_totalDigits,
decimal._CF_enumeration,
decimal._CF_minInclusive,
decimal._CF_pattern,
decimal._CF_maxExclusive)
gYear._CF_pattern = facets.CF_pattern()
gYear._CF_enumeration = facets.CF_enumeration(value_datatype=gYear)
gYear._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gYear._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gYear._CF_minInclusive = facets.CF_minInclusive(value_datatype=gYear)
gYear._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gYear._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gYear)
gYear._InitializeFacetMap(gYear._CF_pattern,
gYear._CF_enumeration,
gYear._CF_minExclusive,
gYear._CF_whiteSpace,
gYear._CF_minInclusive,
gYear._CF_maxExclusive,
gYear._CF_maxInclusive)
boolean._CF_pattern = facets.CF_pattern()
boolean._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
boolean._InitializeFacetMap(boolean._CF_pattern,
boolean._CF_whiteSpace)
base64Binary._CF_minLength = facets.CF_minLength()
base64Binary._CF_maxLength = facets.CF_maxLength()
base64Binary._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
base64Binary._CF_length = facets.CF_length()
base64Binary._CF_enumeration = facets.CF_enumeration(value_datatype=base64Binary)
base64Binary._CF_pattern = facets.CF_pattern()
base64Binary._InitializeFacetMap(base64Binary._CF_minLength,
base64Binary._CF_maxLength,
base64Binary._CF_whiteSpace,
base64Binary._CF_length,
base64Binary._CF_enumeration,
base64Binary._CF_pattern)
float._CF_pattern = facets.CF_pattern()
float._CF_enumeration = facets.CF_enumeration(value_datatype=float)
float._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
float._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
float._CF_minInclusive = facets.CF_minInclusive(value_datatype=float)
float._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
float._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=float)
float._InitializeFacetMap(float._CF_pattern,
float._CF_enumeration,
float._CF_minExclusive,
float._CF_whiteSpace,
float._CF_minInclusive,
float._CF_maxExclusive,
float._CF_maxInclusive)
anyURI._CF_minLength = facets.CF_minLength()
anyURI._CF_maxLength = facets.CF_maxLength()
anyURI._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
anyURI._CF_length = facets.CF_length()
anyURI._CF_enumeration = facets.CF_enumeration(value_datatype=anyURI)
anyURI._CF_pattern = facets.CF_pattern()
anyURI._InitializeFacetMap(anyURI._CF_minLength,
anyURI._CF_maxLength,
anyURI._CF_whiteSpace,
anyURI._CF_length,
anyURI._CF_enumeration,
anyURI._CF_pattern)
string._CF_minLength = facets.CF_minLength()
string._CF_maxLength = facets.CF_maxLength()
string._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.preserve)
string._CF_length = facets.CF_length()
string._CF_enumeration = facets.CF_enumeration(value_datatype=string)
string._CF_pattern = facets.CF_pattern()
string._InitializeFacetMap(string._CF_minLength,
string._CF_maxLength,
string._CF_whiteSpace,
string._CF_length,
string._CF_enumeration,
string._CF_pattern)
dateTime._CF_pattern = facets.CF_pattern()
dateTime._CF_enumeration = facets.CF_enumeration(value_datatype=dateTime)
dateTime._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
dateTime._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
dateTime._CF_minInclusive = facets.CF_minInclusive(value_datatype=dateTime)
dateTime._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
dateTime._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=dateTime)
dateTime._InitializeFacetMap(dateTime._CF_pattern,
dateTime._CF_enumeration,
dateTime._CF_minExclusive,
dateTime._CF_whiteSpace,
dateTime._CF_minInclusive,
dateTime._CF_maxExclusive,
dateTime._CF_maxInclusive)
normalizedString._CF_whiteSpace = facets.CF_whiteSpace(super_facet=string._CF_whiteSpace, value=facets._WhiteSpace_enum.replace)
normalizedString._InitializeFacetMap(normalizedString._CF_whiteSpace)
integer._CF_fractionDigits = facets.CF_fractionDigits(value=nonNegativeInteger(0))
integer._CF_pattern = facets.CF_pattern()
integer._CF_pattern.addPattern(pattern=u'[\\-+]?[0-9]+')
integer._InitializeFacetMap(integer._CF_fractionDigits,
integer._CF_pattern)
nonPositiveInteger._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=nonPositiveInteger, value=nonPositiveInteger(0))
nonPositiveInteger._InitializeFacetMap(nonPositiveInteger._CF_maxInclusive)
token._CF_whiteSpace = facets.CF_whiteSpace(super_facet=normalizedString._CF_whiteSpace, value=facets._WhiteSpace_enum.collapse)
token._InitializeFacetMap(token._CF_whiteSpace)
nonNegativeInteger._CF_minInclusive = facets.CF_minInclusive(value_datatype=nonNegativeInteger, value=nonNegativeInteger(0))
nonNegativeInteger._InitializeFacetMap(nonNegativeInteger._CF_minInclusive)
long._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=long, value=long(9223372036854775807))
long._CF_minInclusive = facets.CF_minInclusive(value_datatype=long, value=long(-9223372036854775808))
long._InitializeFacetMap(long._CF_maxInclusive,
long._CF_minInclusive)
negativeInteger._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=negativeInteger, super_facet=nonPositiveInteger._CF_maxInclusive, value=negativeInteger(-1))
negativeInteger._InitializeFacetMap(negativeInteger._CF_maxInclusive)
Name._CF_pattern = facets.CF_pattern()
Name._CF_pattern.addPattern(pattern=u'\\i\\c*')
Name._InitializeFacetMap(Name._CF_pattern)
NMTOKEN._CF_pattern = facets.CF_pattern()
NMTOKEN._CF_pattern.addPattern(pattern=u'\\c+')
NMTOKEN._InitializeFacetMap(NMTOKEN._CF_pattern)
positiveInteger._CF_minInclusive = facets.CF_minInclusive(value_datatype=positiveInteger, super_facet=nonNegativeInteger._CF_minInclusive, value=positiveInteger(1))
positiveInteger._InitializeFacetMap(positiveInteger._CF_minInclusive)
language._CF_pattern = facets.CF_pattern()
language._CF_pattern.addPattern(pattern=u'[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*')
language._InitializeFacetMap(language._CF_pattern)
unsignedLong._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=unsignedLong, value=unsignedLong(18446744073709551615))
unsignedLong._InitializeFacetMap(unsignedLong._CF_maxInclusive)
int._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=int, super_facet=long._CF_maxInclusive, value=int(2147483647))
int._CF_minInclusive = facets.CF_minInclusive(value_datatype=int, super_facet=long._CF_minInclusive, value=int(-2147483648))
int._InitializeFacetMap(int._CF_maxInclusive,
int._CF_minInclusive)
NCName._CF_pattern = facets.CF_pattern(super_facet=Name._CF_pattern)
NCName._CF_pattern.addPattern(pattern=u'[\\i-[:]][\\c-[:]]*')
NCName._InitializeFacetMap(NCName._CF_pattern)
unsignedInt._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=unsignedInt, super_facet=unsignedLong._CF_maxInclusive, value=unsignedInt(4294967295))
unsignedInt._InitializeFacetMap(unsignedInt._CF_maxInclusive)
short._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=short, super_facet=int._CF_maxInclusive, value=short(32767))
short._CF_minInclusive = facets.CF_minInclusive(value_datatype=short, super_facet=int._CF_minInclusive, value=short(-32768))
short._InitializeFacetMap(short._CF_maxInclusive,
short._CF_minInclusive)
ENTITY._InitializeFacetMap()
IDREF._InitializeFacetMap()
ID._InitializeFacetMap()
unsignedShort._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=unsignedShort, super_facet=unsignedInt._CF_maxInclusive, value=unsignedShort(65535))
unsignedShort._InitializeFacetMap(unsignedShort._CF_maxInclusive)
byte._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=byte, super_facet=short._CF_maxInclusive, value=byte(127))
byte._CF_minInclusive = facets.CF_minInclusive(value_datatype=byte, super_facet=short._CF_minInclusive, value=byte(-128))
byte._InitializeFacetMap(byte._CF_maxInclusive,
byte._CF_minInclusive)
unsignedByte._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=unsignedByte, super_facet=unsignedShort._CF_maxInclusive, value=unsignedByte(255))
unsignedByte._InitializeFacetMap(unsignedByte._CF_maxInclusive)
| jonfoster/pyxb1 | pyxb/binding/datatypes_facets.py | Python | apache-2.0 | 18,254 |
#!/usr/bin/env python
from nose.tools import assert_equal
import networkx as nx
from networkx.algorithms import bipartite
from networkx.testing import assert_edges_equal, assert_nodes_equal
class TestBipartiteProject:
def test_path_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.projected_graph(G, [1, 3])
assert_nodes_equal(list(P), [1, 3])
assert_edges_equal(list(P.edges()), [(1, 3)])
P=bipartite.projected_graph(G, [0, 2])
assert_nodes_equal(list(P), [0, 2])
assert_edges_equal(list(P.edges()), [(0, 2)])
def test_path_projected_properties_graph(self):
G=nx.path_graph(4)
G.add_node(1,name='one')
G.add_node(2,name='two')
P=bipartite.projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
assert_equal(P.node[1]['name'],G.node[1]['name'])
P=bipartite.projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
assert_equal(P.node[2]['name'],G.node[2]['name'])
def test_path_collaboration_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_directed_path_collaboration_projected_graph(self):
G=nx.DiGraph()
G.add_path(list(range(4)))
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_directed_graph(self):
G=nx.DiGraph()
G.add_path(list(range(4)))
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_star_projected_graph(self):
G=nx.star_graph(3)
P=bipartite.projected_graph(G,[1,2,3])
assert_nodes_equal(list(P),[1,2,3])
assert_edges_equal(list(P.edges()),[(1,2),(1,3),(2,3)])
P=bipartite.weighted_projected_graph(G,[1,2,3])
assert_nodes_equal(list(P),[1,2,3])
assert_edges_equal(list(P.edges()),[(1,2),(1,3),(2,3)])
P=bipartite.projected_graph(G,[0])
assert_nodes_equal(list(P),[0])
assert_edges_equal(list(P.edges()),[])
def test_project_multigraph(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('a',2)
G.add_edge('b',2)
P=bipartite.projected_graph(G,'ab')
assert_edges_equal(list(P.edges()),[('a','b')])
P=bipartite.weighted_projected_graph(G,'ab')
assert_edges_equal(list(P.edges()),[('a','b')])
P=bipartite.projected_graph(G,'ab',multigraph=True)
assert_edges_equal(list(P.edges()),[('a','b'),('a','b')])
def test_project_collaboration(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('b',2)
G.add_edge('c',2)
G.add_edge('c',3)
G.add_edge('c',4)
G.add_edge('b',4)
P=bipartite.collaboration_weighted_projected_graph(G,'abc')
assert_equal(P['a']['b']['weight'],1)
assert_equal(P['b']['c']['weight'],2)
def test_directed_projection(self):
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge('B',2)
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
assert_equal(P['A']['B']['weight'],1)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(list(P.edges()),[('A','B')])
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge(2,'B')
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
assert_equal(P['A']['B']['weight'],2)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(list(P.edges()),[('A','B'),('A','B')])
class TestBipartiteWeightedProjection:
def setUp(self):
# Tore Opsahl's example
# http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
self.G=nx.Graph()
self.G.add_edge('A',1)
self.G.add_edge('A',2)
self.G.add_edge('B',1)
self.G.add_edge('B',2)
self.G.add_edge('B',3)
self.G.add_edge('B',4)
self.G.add_edge('B',5)
self.G.add_edge('C',1)
self.G.add_edge('D',3)
self.G.add_edge('E',4)
self.G.add_edge('E',5)
self.G.add_edge('E',6)
self.G.add_edge('F',6)
# Graph based on figure 6 from Newman (2001)
self.N=nx.Graph()
self.N.add_edge('A',1)
self.N.add_edge('A',2)
self.N.add_edge('A',3)
self.N.add_edge('B',1)
self.N.add_edge('B',2)
self.N.add_edge('B',3)
self.N.add_edge('C',1)
self.N.add_edge('D',1)
self.N.add_edge('E',3)
def test_project_weighted_shared(self):
edges=[('A','B',2),
('A','C',1),
('B','C',1),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3),
('A','E',1),
('A','C',1),
('A','D',1),
('B','E',1),
('B','C',1),
('B','D',1),
('C','D',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_newman(self):
edges=[('A','B',1.5),
('A','C',0.5),
('B','C',0.5),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',11/6.0),
('A','E',1/2.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/2.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_ratio(self):
edges=[('A','B',2/6.0),
('A','C',1/6.0),
('B','C',1/6.0),
('B','D',1/6.0),
('B','E',2/6.0),
('E','F',1/6.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G, 'ABCDEF', ratio=True)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N, 'ABCDE', ratio=True)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_overlap(self):
edges=[('A','B',2/2.0),
('A','C',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('B','E',2/3.0),
('E','F',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF', jaccard=False)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/1.0),
('A','C',1/1.0),
('A','D',1/1.0),
('B','E',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE', jaccard=False)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_jaccard(self):
edges=[('A','B',2/5.0),
('A','C',1/2.0),
('B','C',1/5.0),
('B','D',1/5.0),
('B','E',2/6.0),
('E','F',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_generic_weighted_projected_graph_simple(self):
def shared(G, u, v):
return len(set(G[u]) & set(G[v]))
B = nx.path_graph(5)
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4], weight_function=shared)
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(list(G.edges(data=True))),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(list(G.edges(data=True))),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
B = nx.DiGraph()
B.add_path(list(range(5)))
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(G.edges(data=True)),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
def test_generic_weighted_projected_graph_custom(self):
def jaccard(G, u, v):
unbrs = set(G[u])
vnbrs = set(G[v])
return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
def my_weight(G, u, v, weight='weight'):
w = 0
for nbr in set(G[u]) & set(G[v]):
w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1)
return w
B = nx.bipartite.complete_bipartite_graph(2, 2)
for i,(u,v) in enumerate(B.edges()):
B.edge[u][v]['weight'] = i + 1
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=jaccard)
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 1.0})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=my_weight)
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 10})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1])
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 2})])
| jcurbelo/networkx | networkx/algorithms/bipartite/tests/test_project.py | Python | bsd-3-clause | 14,106 |
# -*- coding: utf-8 -*-
import json
import os
import re
import sublime
def import_dir(name, fromlist=()):
PACKAGE_EXT = '.sublime-package'
dirname = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
if dirname.endswith(PACKAGE_EXT):
dirname = dirname[:-len(PACKAGE_EXT)]
return __import__('{0}.{1}'.format(dirname, name), fromlist=fromlist)
try:
get_flat_css = import_dir('css_dict_driver', ('get_flat_css',)).get_flat_css
except ImportError:
from css_dict_driver import get_flat_css
try:
imp = import_dir('probe', ('hayaku_extract', 'sub_string'))
hayaku_extract, sub_string = imp.hayaku_extract, imp.sub_string
except ImportError:
from probe import hayaku_extract, sub_string
COLOR_REGEX = re.compile(r'#([0-9a-fA-F]{3,6})')
COMPLEX_COLOR_REGEX = re.compile(r'^\s*(#?([a-fA-F\d]{3}|[a-fA-F\d]{6})|(rgb|hsl)a?\([^\)]+\))\s*$')
IMAGE_REGEX = re.compile(r'^\s*([^\s]+\.(jpg|jpeg|gif|png))\s*$')
CAPTURING_GROUPS = re.compile(r'(?<!\\)\((?!\?[^<])')
CAPTURES = re.compile(r'(\(\?|\$)(\d+)|^(\d)')
def align_prefix(property_name, prefix_list, no_unprefixed_property, aligned_prefixes, use_only):
"""Если есть префиксы, сделать шаблон с правильными отступами"""
# if no_unprefixed_property:
# prefix_list = ('-{0}-{1}'.format(prefix_list[0], property_name),)
# skip if `use_only` is empty
if use_only:
prefix_list = [p for p in prefix_list if p in use_only]
if prefix_list:
prefix_list = ['-{0}-{1}'.format(p, property_name) for p in prefix_list]
if not no_unprefixed_property:
prefix_list.append(property_name)
if not aligned_prefixes:
return prefix_list
max_length = max(len(p) for p in prefix_list)
        # TODO: sort by the length of the values in prefix_list
return tuple((' '*(max_length-len(p))) + p for p in prefix_list)
return (property_name,)
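# --- Illustrative sketch (not part of the original plugin) ---------------------
# With aligned_prefixes enabled, align_prefix right-pads the shorter property
# names so the values line up in a column, e.g. (assuming the webkit and moz
# prefixes are allowed and an unprefixed property is wanted):
#
#     align_prefix('transform', ('webkit', 'moz'), False, True, [])
#     # -> ('-webkit-transform', '   -moz-transform', '        transform')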
def hex_to_coloralpha(hex):
if len(hex) == 1:
hex = hex*2
return round(float(int(hex, 16)) / 255, 2)
def color_expand(color,alpha):
if not color:
return '#'
if len(color) == 1:
if color == '#':
color = ''
else:
color = color * 3
elif len(color) == 2:
if color[0] == '#':
color = color[1] * 3
else:
color = color * 3
elif len(color) == 3:
if color[0] == '#':
color = color[1:] * 3
else:
color = color
elif len(color) == 4:
if color[0] != '#' and alpha == 1:
alpha = hex_to_coloralpha(color[3])
color = color[:3]
else:
return color
elif len(color) == 5:
if color[0] != '#':
alpha = hex_to_coloralpha(color[3:5])
color = color[:3]
else:
alpha = hex_to_coloralpha(color[4]*2)
color = color[1:4]
elif len(color) == 6:
if color[0] != '#':
pass
else:
alpha = hex_to_coloralpha(color[4:5])
color = color[1:4]
elif len(color) == 7:
color = color[1:]
else:
return color
# Convert color to rgba if there is some alpha
if alpha == '.' or float(alpha) < 1:
if alpha == '.':
alpha = '.${1:5}' # adding caret for entering alpha value
if alpha == '.0' or alpha == 0:
alpha = '0'
if len(color) == 3:
color = color[0] * 2 + color[1] * 2 + color[2] * 2
return "rgba({0},{1},{2},{3})".format(int(color[:2],16), int(color[2:4],16), int(color[4:],16), alpha)
return '#{0}'.format(color)
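# --- Illustrative sketch (not part of the original plugin) ---------------------
# color_expand grows shorthand color fragments into full CSS colors, switching to
# rgba() when an alpha below 1 is involved, e.g.:
#
#     color_expand('f', 1)      # -> '#fff'
#     color_expand('abc', 0.5)  # -> 'rgba(170,187,204,0.5)'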
def length_expand(name, value, unit, options=None):
if options is None:
options = {}
if unit and 'percents'.startswith(unit):
unit = '%'
if isinstance(value, float):
full_unit = options.get('CSS_default_unit_decimal', 'em')
else:
full_unit = options.get('CSS_default_unit', 'px')
if '<number>' in [val for prop, val in get_flat_css() if prop == name] and not options.get('CSS_units_for_unitless_numbers'):
full_unit = ''
if value == 0:
return '0'
if value == '':
return ''
if unit:
units = (val[1:] for key, val in get_flat_css() if key == name and val.startswith('.'))
req_units = [u for u in units if sub_string(u, unit)]
PRIORITY = ("em", "ex", "vw", "vh", "vmin", "vmax" "vm", "ch", "rem",
"px", "cm", "mm", "in", "pt", "pc")
full_unit = hayaku_extract(unit, req_units, PRIORITY)
if not full_unit:
return
return '{0}{1}'.format(value, full_unit)
def expand_value(args, options=None):
if 'keyword-value' in args:
return args['keyword-value']
if args['property-name'] in set(p for p, v in get_flat_css() if v == '<color_values>'):
if 'color' in args and not args['color']:
return '#'
return color_expand(args.get('color', ''),args.get('color_alpha', 1))
elif args['property-name'] in set(p for p, v in get_flat_css() if v.startswith('.')) and 'keyword-value' not in args:
ret = length_expand(args['property-name'], args.get('type-value', ''), args.get('type-name', ''), options)
return ret
elif 'type-value' in args:
return str(args['type-value'])
return args.get('keyword-value', '')
def split_for_snippet(values, offset=0):
split_lefts = [[]]
split_rights = [[]]
parts = 0
new_offset = offset
for value in (v for v in values if len(v) > 1):
for i in range(1, len(value)):
if value[:i] not in [item for sublist in split_lefts for item in sublist] + values:
if len(split_lefts[parts]) > 98:
parts += 1
split_lefts.append([])
split_rights.append([])
split_lefts[parts].append(value[:i])
split_rights[parts].append(value[i:])
new_offset += 1
for index in range(0, parts + 1):
split_lefts[index] = ''.join('({0}$)?'.format(re.escape(i)) for i in split_lefts[index])
split_rights[index] = ''.join('(?{0}:{1})'.format(i+1+offset,re.escape(f)) for i,f in enumerate(split_rights[index]))
return (split_lefts, split_rights, new_offset)
def convert_to_parts(parts):
matches = []
inserts = []
parts_count = 1
# Function for offsetting the captured groups in inserts
def offset_captures(match):
if match.group(3):
return '()' + match.group(3)
else:
number = int(match.group(2))
return match.group(1) + str(number + parts_count)
for part in parts:
matches.append(''.join([
'(?=(',
part['match'],
')?)',
]))
inserts.append(''.join([
'(?',
str(parts_count),
':',
CAPTURES.sub(offset_captures, part['insert']),
')',
]))
# Incrementing the counter, adding the number of internal capturing groups
parts_count += 1 + len(CAPTURING_GROUPS.findall(part['match'] ))
return { "matches": matches, "inserts": inserts }
def generate_snippet(data):
value = data.get('value')
before = ''.join([
'_PROPERTY_',
data.get('colon'),
data.get('space'),
])
after = ''
importance = ''
if data.get('important'):
importance = ' !important'
if value:
after = importance + data.get('semicolon')
else:
if not importance:
importance_splitted = split_for_snippet(["!important"])
importance = ''.join([
'${1/.*?',
importance_splitted[0][0],
'$/',
importance_splitted[1][0],
'/}',
])
befores = convert_to_parts(data["before"])
before = ''.join([
'${1/^',
''.join(befores["matches"]),
'.+$|.*/',
before,
''.join(befores["inserts"]),
'/m}',
])
if data.get('semicolon') == '':
data['semicolon'] = ' '
afters = convert_to_parts(data["after"])
after = ''.join([
'${1/^',
''.join(afters["matches"]),
'.+$|.*/',
''.join(afters["inserts"]),
'/m}',
data.get('autovalues'),
importance,
data.get('semicolon'),
])
value = ''.join([
'${1:',
data.get('default'),
'}',
])
return (before + value + after).replace('{','{{').replace('}','}}').replace('_PROPERTY_','{0}')
def make_template(args, options):
whitespace = options.get('CSS_whitespace_after_colon', '')
disable_semicolon = options.get('CSS_syntax_no_semicolons', False)
disable_colon = options.get('CSS_syntax_no_colons', False)
disable_prefixes = options.get('CSS_prefixes_disable', False)
clipboard = sublime.get_clipboard()
if not whitespace and disable_colon:
whitespace = ' '
value = expand_value(args, options)
if value is None:
return
if value.startswith('[') and value.endswith(']'):
value = False
semicolon = ';'
colon = ':'
if disable_semicolon:
semicolon = ''
if disable_colon:
colon = ''
snippet_parts = {
'colon': colon,
'semicolon': semicolon,
'space': whitespace,
'default': args.get('default-value',''),
'important': args.get('important'),
'before': [],
'after': [],
'autovalues': '',
}
# Handling prefixes
property_ = (args['property-name'],)
if not disable_prefixes:
property_ = align_prefix(
args['property-name'],
args.get('prefixes', []),
args.get('no-unprefixed-property', False) or options.get('CSS_prefixes_no_unprefixed', False),
options.get('CSS_prefixes_align', True),
options.get('CSS_prefixes_only', []),
)
# Replace the parens with a tabstop snippet
# TODO: Move the inside snippets to the corresponding snippets dict
if value and '()' in value:
if value.replace('()', '') in ['rotate','rotateX','rotateY','rotateZ','skew','skewX','skewY']:
value = value.replace('()', '($1${1/^((?!0$)-?(\d*.)?\d+)?.*$/(?1:deg)/m})')
else:
value = value.replace('()', '($1)')
# Do things when there is no value expanded
if not value or value == "#":
if not options.get('CSS_disable_postexpand', False):
auto_values = [val for prop, val in get_flat_css() if prop == args['property-name']]
if auto_values:
units = []
values = []
for p_value in (v for v in auto_values if len(v) > 1):
if p_value.startswith('.'):
units.append(p_value[1:])
elif not p_value.startswith('<'):
values.append(p_value)
values_splitted = split_for_snippet(values)
snippet_values = ''
for index in range(0,len(values_splitted[0])):
snippet_values += ''.join([
'${1/^\s*',
values_splitted[0][index],
'.*/',
values_splitted[1][index],
'/m}',
])
snippet_parts['autovalues'] += snippet_values
snippet_units = ''
# TODO: find out when to use units or colors
# TODO: Rewrite using after
if units and value != "#":
units_splitted = split_for_snippet(units, 4)
snippet_parts['before'].append({
"match": "%$",
"insert": "100"
})
# If there can be `number` in value, don't add `em` automatically
optional_unit_for_snippet = '(?2:(?3::0)em:px)'
if '<number>' in auto_values and not options.get('CSS_units_for_unitless_numbers'):
optional_unit_for_snippet = '(?2:(?3::0):)'
snippet_units = ''.join([
'${1/^\s*((?!0$)(?=.)[\d\-]*(\.)?(\d+)?((?=.)',
units_splitted[0][0],
')?$)?.*/(?4:',
units_splitted[1][0],
':(?1:' + optional_unit_for_snippet + '))/m}',
])
snippet_parts['autovalues'] += snippet_units
# Adding snippets for colors
if value == "#":
value = ''
# Insert hash and doubling letters
snippet_parts['before'].append({
"match": "([0-9a-fA-F]{1,6}|[0-9a-fA-F]{3,6}\s*(!\w*\s*)?)$",
"insert": "#"
})
snippet_parts['after'].append({
"match": "#?([0-9a-fA-F]{1,2})$",
"insert": "(?1:$1$1)"
})
# Insert `rgba` thingies
snippet_parts['before'].append({
"match": "(\d{1,3}%?),(\.)?.*$",
"insert": "rgba\((?2:$1,$1,)"
})
snippet_parts['after'].append({
"match": "(\d{1,3}%?),(\.)?(.+)?$",
"insert": "(?2:(?3::5):(?3::$1,$1,1))\)"
})
# Getting the value from the clipboard
# TODO: Move to the whole clipboard2default function
check_clipboard_for_color = COMPLEX_COLOR_REGEX.match(clipboard)
if check_clipboard_for_color and 'colors' in options.get('CSS_clipboard_defaults'):
snippet_parts['default'] = check_clipboard_for_color.group(1)
# TODO: move this out of `if not value`,
# so we could use it for found `url()` values
if '<url>' in auto_values:
snippet_parts['before'].append({
"match": "[^\s]+\.(jpg|jpeg|gif|png)$",
"insert": "url\("
})
snippet_parts['after'].append({
"match": "[^\s]+\.(jpg|jpeg|gif|png)$",
"insert": "\)"
})
check_clipboard_for_image = IMAGE_REGEX.match(clipboard)
if check_clipboard_for_image and 'images' in options.get('CSS_clipboard_defaults'):
quote_symbol = ''
if options.get('CSS_syntax_url_quotes'):
quote_symbol = options.get('CSS_syntax_quote_symbol')
snippet_parts['default'] = 'url(' + quote_symbol + check_clipboard_for_image.group(1) + quote_symbol + ')'
snippet_parts['value'] = value or ''
snippet = generate_snippet(snippet_parts)
# Apply settings to the colors in the values
def restyle_colors(match):
color = match.group(1)
# Change case of the colors in the value
        if options.get('CSS_colors_case').lower() in ('uppercase', 'upper'):
color = color.upper()
        elif options.get('CSS_colors_case').lower() in ('lowercase', 'lower'):
color = color.lower()
# Make colors short or longhand
        if options.get('CSS_colors_length').lower() in ('short', 'shorthand') and len(color) == 6:
if color[0] == color[1] and color[2] == color[3] and color[4] == color[5]:
color = color[0] + color[2] + color[4]
        elif options.get('CSS_colors_length').lower() in ('long', 'longhand') and len(color) == 3:
color = color[0] * 2 + color[1] * 2 + color[2] * 2
return '#' + color
snippet = COLOR_REGEX.sub(restyle_colors, snippet)
    # Apply the setting of the preferred quote symbol
if options.get('CSS_syntax_quote_symbol') == "'" and '"' in snippet:
snippet = snippet.replace('"',"'")
if options.get('CSS_syntax_quote_symbol') == '"' and "'" in snippet:
snippet = snippet.replace("'",'"')
newline_ending = ''
if options.get('CSS_newline_after_expand'):
newline_ending = '\n'
return '\n'.join(snippet.format(prop) for prop in property_) + newline_ending
# TODO
# display: -moz-inline-box;
# display: inline-block;
# background-image: -webkit-linear-gradient(top,rgba(255,255,255,0.6),rgba(255,255,255,0));
# background-image: -moz-linear-gradient(top,rgba(255,255,255,0.6),rgba(255,255,255,0));
# background-image: -o-linear-gradient(top,rgba(255,255,255,0.6),rgba(255,255,255,0));
# background-image: linear-gradient(top,rgba(255,255,255,0.6),rgba(255,255,255,0));
| Iristyle/ChocolateyPackages | EthanBrown.SublimeText2.WebPackages/tools/PackageCache/Hayaku - tools for writing CSS faster/templates.py | Python | mit | 17,208 |
# -*- coding: utf-8 -*-
"""
Generate centreline and write it out as .vtk legacy format.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Import path for the CentrelineGenerator script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../util'))
if importPath not in sys.path:
sys.path.insert(1, importPath)
del importPath
import CentrelineGenerator
# A centreline for a mesh with 4080 cores.
CentrelineGenerator.segmentList = [8.84,[(8.84,70),None,None],[(8.84,150),None,None]]
CentrelineGenerator.radiusBase = 1.2732395447351628
CentrelineGenerator.outputFileName = "c4080Centreline_80.vtk"
CentrelineGenerator.sphereRadius = None
def main():
# CentrelineGenerator.GenerateCentreline(CentrelineGenerator.BuildDecreasingRadiiScalars)
CentrelineGenerator.GenerateCentreline()
if __name__ == '__main__':
print "Starting", os.path.basename(__file__)
main()
print "Exiting", os.path.basename(__file__)
| BlueFern/DBiharMesher | meshes/c4080ang/Generate4080Centreline_80.py | Python | gpl-2.0 | 1,015 |
def steps(number):
if number < 1:
raise ValueError("The given value must be a positive integer")
step = 0
while True:
if number == 1:
return step
if number % 2 == 0:
            number = number // 2
else:
number = 3 * number + 1
step += 1
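

# Illustrative check (editor's sketch, not part of the exercise's own tests):
# steps(16) == 4 because 16 -> 8 -> 4 -> 2 -> 1, and
# steps(12) == 9 via 12 -> 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
if __name__ == "__main__":
    assert steps(16) == 4
    assert steps(12) == 9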
| TGITS/programming-workouts | exercism/python/collatz-conjecture/collatz_conjecture.py | Python | mit | 318 |
"""
Test functions for multivariate normal distributions.
"""
from __future__ import division, print_function, absolute_import
import pickle
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_array_less, assert_)
import pytest
from pytest import raises as assert_raises
from .test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import _PSD, _lnB, _cho_inv_batch
from scipy.stats import multivariate_normal
from scipy.stats import matrix_normal
from scipy.stats import special_ortho_group, ortho_group
from scipy.stats import random_correlation
from scipy.stats import unitary_group
from scipy.stats import dirichlet, beta
from scipy.stats import wishart, multinomial, invwishart, chi2, invgamma
from scipy.stats import norm, uniform
from scipy.stats import ks_2samp, kstest
from scipy.stats import binom
from scipy.integrate import romb
from scipy.special import multigammaln
from .common_tests import check_random_state_property
class TestMultivariateNormal(object):
def test_input_shape(self):
mu = np.arange(3)
cov = np.identity(2)
assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov)
assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov)
def test_scalar_values(self):
np.random.seed(1234)
# When evaluated on scalar data, the pdf should return a scalar
x, mean, cov = 1.5, 1.7, 2.5
pdf = multivariate_normal.pdf(x, mean, cov)
assert_equal(pdf.ndim, 0)
# When evaluated on a single vector, the pdf should return a scalar
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix
pdf = multivariate_normal.pdf(x, mean, cov)
assert_equal(pdf.ndim, 0)
# When evaluated on scalar data, the cdf should return a scalar
x, mean, cov = 1.5, 1.7, 2.5
cdf = multivariate_normal.cdf(x, mean, cov)
assert_equal(cdf.ndim, 0)
# When evaluated on a single vector, the cdf should return a scalar
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix
cdf = multivariate_normal.cdf(x, mean, cov)
assert_equal(cdf.ndim, 0)
def test_logpdf(self):
# Check that the log of the pdf is in fact the logpdf
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
d1 = multivariate_normal.logpdf(x, mean, cov)
d2 = multivariate_normal.pdf(x, mean, cov)
assert_allclose(d1, np.log(d2))
def test_logpdf_default_values(self):
# Check that the log of the pdf is in fact the logpdf
        # with default parameters mean=None and cov=1
np.random.seed(1234)
x = np.random.randn(5)
d1 = multivariate_normal.logpdf(x)
d2 = multivariate_normal.pdf(x)
# check whether default values are being used
d3 = multivariate_normal.logpdf(x, None, 1)
d4 = multivariate_normal.pdf(x, None, 1)
assert_allclose(d1, np.log(d2))
assert_allclose(d3, np.log(d4))
def test_logcdf(self):
# Check that the log of the cdf is in fact the logcdf
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
d1 = multivariate_normal.logcdf(x, mean, cov)
d2 = multivariate_normal.cdf(x, mean, cov)
assert_allclose(d1, np.log(d2))
def test_logcdf_default_values(self):
# Check that the log of the cdf is in fact the logcdf
        # with default parameters mean=None and cov=1
np.random.seed(1234)
x = np.random.randn(5)
d1 = multivariate_normal.logcdf(x)
d2 = multivariate_normal.cdf(x)
# check whether default values are being used
d3 = multivariate_normal.logcdf(x, None, 1)
d4 = multivariate_normal.cdf(x, None, 1)
assert_allclose(d1, np.log(d2))
assert_allclose(d3, np.log(d4))
def test_rank(self):
# Check that the rank is detected correctly.
np.random.seed(1234)
n = 4
mean = np.random.randn(n)
for expected_rank in range(1, n + 1):
s = np.random.randn(n, expected_rank)
cov = np.dot(s, s.T)
distn = multivariate_normal(mean, cov, allow_singular=True)
assert_equal(distn.cov_info.rank, expected_rank)
def test_degenerate_distributions(self):
def _sample_orthonormal_matrix(n):
M = np.random.randn(n, n)
u, s, v = scipy.linalg.svd(M)
return u
for n in range(1, 5):
x = np.random.randn(n)
for k in range(1, n + 1):
# Sample a small covariance matrix.
s = np.random.randn(k, k)
cov_kk = np.dot(s, s.T)
# Embed the small covariance matrix into a larger low rank matrix.
cov_nn = np.zeros((n, n))
cov_nn[:k, :k] = cov_kk
# Define a rotation of the larger low rank matrix.
u = _sample_orthonormal_matrix(n)
cov_rr = np.dot(u, np.dot(cov_nn, u.T))
y = np.dot(u, x)
# Check some identities.
distn_kk = multivariate_normal(np.zeros(k), cov_kk,
allow_singular=True)
distn_nn = multivariate_normal(np.zeros(n), cov_nn,
allow_singular=True)
distn_rr = multivariate_normal(np.zeros(n), cov_rr,
allow_singular=True)
assert_equal(distn_kk.cov_info.rank, k)
assert_equal(distn_nn.cov_info.rank, k)
assert_equal(distn_rr.cov_info.rank, k)
pdf_kk = distn_kk.pdf(x[:k])
pdf_nn = distn_nn.pdf(x)
pdf_rr = distn_rr.pdf(y)
assert_allclose(pdf_kk, pdf_nn)
assert_allclose(pdf_kk, pdf_rr)
logpdf_kk = distn_kk.logpdf(x[:k])
logpdf_nn = distn_nn.logpdf(x)
logpdf_rr = distn_rr.logpdf(y)
assert_allclose(logpdf_kk, logpdf_nn)
assert_allclose(logpdf_kk, logpdf_rr)
def test_large_pseudo_determinant(self):
# Check that large pseudo-determinants are handled appropriately.
# Construct a singular diagonal covariance matrix
# whose pseudo determinant overflows double precision.
large_total_log = 1000.0
npos = 100
nzero = 2
large_entry = np.exp(large_total_log / npos)
n = npos + nzero
cov = np.zeros((n, n), dtype=float)
np.fill_diagonal(cov, large_entry)
cov[-nzero:, -nzero:] = 0
# Check some determinants.
assert_equal(scipy.linalg.det(cov), 0)
assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
(1, large_total_log))
# Check the pseudo-determinant.
psd = _PSD(cov)
assert_allclose(psd.log_pdet, large_total_log)
def test_broadcasting(self):
np.random.seed(1234)
n = 4
# Construct a random covariance matrix.
data = np.random.randn(n, n)
cov = np.dot(data, data.T)
mean = np.random.randn(n)
# Construct an ndarray which can be interpreted as
# a 2x3 array whose elements are random data vectors.
X = np.random.randn(2, 3, n)
# Check that multiple data points can be evaluated at once.
desired_pdf = multivariate_normal.pdf(X, mean, cov)
desired_cdf = multivariate_normal.cdf(X, mean, cov)
for i in range(2):
for j in range(3):
actual = multivariate_normal.pdf(X[i, j], mean, cov)
assert_allclose(actual, desired_pdf[i,j])
# Repeat for cdf
actual = multivariate_normal.cdf(X[i, j], mean, cov)
assert_allclose(actual, desired_cdf[i,j], rtol=1e-3)
def test_normal_1D(self):
# The probability density function for a 1D normal variable should
# agree with the standard normal distribution in scipy.stats.distributions
x = np.linspace(0, 2, 10)
mean, cov = 1.2, 0.9
scale = cov**0.5
d1 = norm.pdf(x, mean, scale)
d2 = multivariate_normal.pdf(x, mean, cov)
assert_allclose(d1, d2)
# The same should hold for the cumulative distribution function
d1 = norm.cdf(x, mean, scale)
d2 = multivariate_normal.cdf(x, mean, cov)
assert_allclose(d1, d2)
def test_marginalization(self):
# Integrating out one of the variables of a 2D Gaussian should
# yield a 1D Gaussian
mean = np.array([2.5, 3.5])
cov = np.array([[.5, 0.2], [0.2, .6]])
n = 2 ** 8 + 1 # Number of samples
delta = 6 / (n - 1) # Grid spacing
v = np.linspace(0, 6, n)
xv, yv = np.meshgrid(v, v)
pos = np.empty((n, n, 2))
pos[:, :, 0] = xv
pos[:, :, 1] = yv
pdf = multivariate_normal.pdf(pos, mean, cov)
# Marginalize over x and y axis
margin_x = romb(pdf, delta, axis=0)
margin_y = romb(pdf, delta, axis=1)
# Compare with standard normal distribution
gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
def test_frozen(self):
# The frozen distribution should agree with the regular one
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
norm_frozen = multivariate_normal(mean, cov)
assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
assert_allclose(norm_frozen.logpdf(x),
multivariate_normal.logpdf(x, mean, cov))
assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov))
assert_allclose(norm_frozen.logcdf(x),
multivariate_normal.logcdf(x, mean, cov))
def test_pseudodet_pinv(self):
# Make sure that pseudo-inverse and pseudo-det agree on cutoff
# Assemble random covariance matrix with large and small eigenvalues
np.random.seed(1234)
n = 7
x = np.random.randn(n, n)
cov = np.dot(x, x.T)
s, u = scipy.linalg.eigh(cov)
s = np.full(n, 0.5)
s[0] = 1.0
s[-1] = 1e-7
cov = np.dot(u, np.dot(np.diag(s), u.T))
# Set cond so that the lowest eigenvalue is below the cutoff
cond = 1e-5
psd = _PSD(cov, cond=cond)
psd_pinv = _PSD(psd.pinv, cond=cond)
# Check that the log pseudo-determinant agrees with the sum
# of the logs of all but the smallest eigenvalue
assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
# Check that the pseudo-determinant of the pseudo-inverse
# agrees with 1 / pseudo-determinant
assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
def test_exception_nonsquare_cov(self):
cov = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, _PSD, cov)
def test_exception_nonfinite_cov(self):
cov_nan = [[1, 0], [0, np.nan]]
assert_raises(ValueError, _PSD, cov_nan)
cov_inf = [[1, 0], [0, np.inf]]
assert_raises(ValueError, _PSD, cov_inf)
def test_exception_non_psd_cov(self):
cov = [[1, 0], [0, -1]]
assert_raises(ValueError, _PSD, cov)
def test_exception_singular_cov(self):
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.ones((5, 5))
e = np.linalg.LinAlgError
assert_raises(e, multivariate_normal, mean, cov)
assert_raises(e, multivariate_normal.pdf, x, mean, cov)
assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
assert_raises(e, multivariate_normal.cdf, x, mean, cov)
assert_raises(e, multivariate_normal.logcdf, x, mean, cov)
def test_R_values(self):
# Compare the multivariate pdf with some values precomputed
# in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
# The values below were generated by the following R-script:
# > library(mnormt)
# > x <- seq(0, 2, length=5)
# > y <- 3*x - 2
# > z <- x + cos(y)
# > mu <- c(1, 3, 2)
# > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
# > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
0.0103803050, 0.0140250800])
x = np.linspace(0, 2, 5)
y = 3 * x - 2
z = x + np.cos(y)
r = np.array([x, y, z]).T
mean = np.array([1, 3, 2], 'd')
cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
pdf = multivariate_normal.pdf(r, mean, cov)
assert_allclose(pdf, r_pdf, atol=1e-10)
# Compare the multivariate cdf with some values precomputed
# in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
# The values below were generated by the following R-script:
# > library(mnormt)
# > x <- seq(0, 2, length=5)
# > y <- 3*x - 2
# > z <- x + cos(y)
# > mu <- c(1, 3, 2)
# > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
# > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma)
r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761,
0.1063242573, 0.2501068509])
cdf = multivariate_normal.cdf(r, mean, cov)
assert_allclose(cdf, r_cdf, atol=1e-5)
# Also test bivariate cdf with some values precomputed
# in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
# The values below were generated by the following R-script:
# > library(mnormt)
# > x <- seq(0, 2, length=5)
# > y <- 3*x - 2
# > mu <- c(1, 3)
# > Sigma <- matrix(c(1,2,2,5), 2, 2)
# > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma)
r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571,
0.40696599, 0.66470577])
r2 = np.array([x, y]).T
mean2 = np.array([1, 3], 'd')
cov2 = np.array([[1, 2], [2, 5]], 'd')
cdf2 = multivariate_normal.cdf(r2, mean2, cov2)
assert_allclose(cdf2, r_cdf2, atol=1e-5)
def test_multivariate_normal_rvs_zero_covariance(self):
mean = np.zeros(2)
covariance = np.zeros((2, 2))
model = multivariate_normal(mean, covariance, allow_singular=True)
sample = model.rvs()
assert_equal(sample, [0, 0])
def test_rvs_shape(self):
# Check that rvs parses the mean and covariance correctly, and returns
# an array of the right shape
N = 300
d = 4
sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
assert_equal(sample.shape, (N, d))
sample = multivariate_normal.rvs(mean=None,
cov=np.array([[2, .1], [.1, 1]]),
size=N)
assert_equal(sample.shape, (N, 2))
u = multivariate_normal(mean=0, cov=1)
sample = u.rvs(N)
assert_equal(sample.shape, (N, ))
def test_large_sample(self):
# Generate large sample and compare sample mean and sample covariance
# with mean and covariance matrix.
np.random.seed(2846)
n = 3
mean = np.random.randn(n)
M = np.random.randn(n, n)
cov = np.dot(M, M.T)
size = 5000
sample = multivariate_normal.rvs(mean, cov, size)
assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
assert_allclose(sample.mean(0), mean, rtol=1e-1)
def test_entropy(self):
np.random.seed(2846)
n = 3
mean = np.random.randn(n)
M = np.random.randn(n, n)
cov = np.dot(M, M.T)
rv = multivariate_normal(mean, cov)
# Check that frozen distribution agrees with entropy function
assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
# Compare entropy with manually computed expression involving
# the sum of the logs of the eigenvalues of the covariance matrix
eigs = np.linalg.eig(cov)[0]
desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
assert_almost_equal(desired, rv.entropy())
def test_lnB(self):
alpha = np.array([1, 1, 1])
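        # The multivariate beta function is B(alpha) = prod(Gamma(alpha_i)) /
        # Gamma(sum(alpha_i)), so for alpha = [1, 1, 1] it equals 1/Gamma(3) = 1/2.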
desired = .5 # e^lnB = 1/2 for [1, 1, 1]
assert_almost_equal(np.exp(_lnB(alpha)), desired)
class TestMatrixNormal(object):
def test_bad_input(self):
# Check that bad inputs raise errors
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
# Incorrect dimensions
assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
assert_raises(ValueError, matrix_normal, M, U, U)
assert_raises(ValueError, matrix_normal, M, V, V)
assert_raises(ValueError, matrix_normal, M.T, U, V)
# Singular covariance
e = np.linalg.LinAlgError
assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
def test_default_inputs(self):
# Check that default argument handling works
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
Z = np.zeros((num_rows, num_cols))
Zr = np.zeros((num_rows, 1))
Zc = np.zeros((1, num_cols))
Ir = np.identity(num_rows)
Ic = np.identity(num_cols)
I1 = np.identity(1)
assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(mean=M).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(rowcov=U).shape,
(num_rows, 1))
assert_equal(matrix_normal.rvs(colcov=V).shape,
(1, num_cols))
assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
(num_rows, num_cols))
assert_equal(matrix_normal(mean=M).rowcov, Ir)
assert_equal(matrix_normal(mean=M).colcov, Ic)
assert_equal(matrix_normal(rowcov=U).mean, Zr)
assert_equal(matrix_normal(rowcov=U).colcov, I1)
assert_equal(matrix_normal(colcov=V).mean, Zc)
assert_equal(matrix_normal(colcov=V).rowcov, I1)
assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
def test_covariance_expansion(self):
# Check that covariance can be specified with scalar or vector
num_rows = 4
num_cols = 3
M = np.full((num_rows, num_cols), 0.3)
Uv = np.full(num_rows, 0.2)
Us = 0.2
Vv = np.full(num_cols, 0.1)
Vs = 0.1
Ir = np.identity(num_rows)
Ic = np.identity(num_cols)
assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
0.2*Ir)
assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
0.1*Ic)
assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
0.2*Ir)
assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
0.1*Ic)
def test_frozen_matrix_normal(self):
for i in range(1,5):
for j in range(1,5):
M = np.full((i,j), 0.3)
U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
rvs1 = frozen.rvs(random_state=1234)
rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
random_state=1234)
assert_equal(rvs1, rvs2)
X = frozen.rvs(random_state=1234)
pdf1 = frozen.pdf(X)
pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
assert_equal(pdf1, pdf2)
logpdf1 = frozen.logpdf(X)
logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
assert_equal(logpdf1, logpdf2)
def test_matches_multivariate(self):
# Check that the pdfs match those obtained by vectorising and
# treating as a multivariate normal.
for i in range(1,5):
for j in range(1,5):
M = np.full((i,j), 0.3)
U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
X = frozen.rvs(random_state=1234)
pdf1 = frozen.pdf(X)
logpdf1 = frozen.logpdf(X)
vecX = X.T.flatten()
vecM = M.T.flatten()
cov = np.kron(V,U)
pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
assert_allclose(pdf1, pdf2, rtol=1E-10)
assert_allclose(logpdf1, logpdf2, rtol=1E-10)
def test_array_input(self):
# Check array of inputs has the same output as the separate entries.
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
N = 10
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
X1 = frozen.rvs(size=N, random_state=1234)
X2 = frozen.rvs(size=N, random_state=4321)
X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
assert_equal(X.shape, (2, N, num_rows, num_cols))
array_logpdf = frozen.logpdf(X)
assert_equal(array_logpdf.shape, (2, N))
for i in range(2):
for j in range(N):
separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
rowcov=U, colcov=V)
assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
def test_moments(self):
# Check that the sample moments match the parameters
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
N = 1000
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
X = frozen.rvs(size=N, random_state=1234)
sample_mean = np.mean(X,axis=0)
assert_allclose(sample_mean, M, atol=0.1)
sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
assert_allclose(sample_colcov, V, atol=0.1)
sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
N*num_cols,num_rows).T)
assert_allclose(sample_rowcov, U, atol=0.1)
class TestDirichlet(object):
def test_frozen_dirichlet(self):
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
assert_equal(d.var(), dirichlet.var(alpha))
assert_equal(d.mean(), dirichlet.mean(alpha))
assert_equal(d.entropy(), dirichlet.entropy(alpha))
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
def test_numpy_rvs_shape_compatibility(self):
np.random.seed(2846)
alpha = np.array([1.0, 2.0, 3.0])
x = np.random.dirichlet(alpha, size=7)
assert_equal(x.shape, (7, 3))
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
dirichlet.pdf(x.T, alpha)
dirichlet.pdf(x.T[:-1], alpha)
dirichlet.logpdf(x.T, alpha)
dirichlet.logpdf(x.T[:-1], alpha)
def test_alpha_with_zeros(self):
np.random.seed(2846)
alpha = [1.0, 0.0, 3.0]
# don't pass invalid alpha to np.random.dirichlet
x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_with_negative_entries(self):
np.random.seed(2846)
alpha = [1.0, -2.0, 3.0]
# don't pass invalid alpha to np.random.dirichlet
x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_zeros(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, 0.0, 0.2, 0.7])
dirichlet.pdf(x, alpha)
dirichlet.logpdf(x, alpha)
alpha = np.array([1.0, 1.0, 1.0, 1.0])
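        # With every concentration parameter equal to 1 the Dirichlet is uniform
        # on the simplex, so its density is Gamma(sum(alpha)) = 3! = 6 everywhere.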
assert_almost_equal(dirichlet.pdf(x, alpha), 6)
assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6))
def test_data_with_zeros_and_small_alpha(self):
alpha = np.array([1.0, 0.5, 3.0, 4.0])
x = np.array([0.1, 0.0, 0.2, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_negative_entries(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, -0.1, 0.3, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_too_large_entries(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, 1.1, 0.3, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_too_deep_c(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.full((2, 7, 7), 1 / 14)
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_too_deep(self):
alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
x = np.full((2, 2, 7), 1 / 4)
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_correct_depth(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.full((3, 7), 1 / 3)
dirichlet.pdf(x, alpha)
dirichlet.logpdf(x, alpha)
def test_non_simplex_data(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.full((3, 7), 1 / 2)
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_vector_too_short(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.full((2, 7), 1 / 2)
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_vector_too_long(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.full((5, 7), 1 / 5)
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_mean_and_var(self):
alpha = np.array([1., 0.8, 0.2])
d = dirichlet(alpha)
expected_var = [1. / 12., 0.08, 0.03]
expected_mean = [0.5, 0.4, 0.1]
assert_array_almost_equal(d.var(), expected_var)
assert_array_almost_equal(d.mean(), expected_mean)
def test_scalar_values(self):
alpha = np.array([0.2])
d = dirichlet(alpha)
# For alpha of length 1, mean and var should be scalar instead of array
assert_equal(d.mean().ndim, 0)
assert_equal(d.var().ndim, 0)
assert_equal(d.pdf([1.]).ndim, 0)
assert_equal(d.logpdf([1.]).ndim, 0)
def test_K_and_K_minus_1_calls_equal(self):
# Test that calls with K and K-1 entries yield the same results.
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
def test_multiple_entry_calls(self):
# Test that calls with multiple x vectors as matrix work
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
num_tests = 10
num_multiple = 5
xm = None
for i in range(num_tests):
for m in range(num_multiple):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
if xm is not None:
xm = np.vstack((xm, x))
else:
xm = x
rm = d.pdf(xm.T)
rs = None
for xs in xm:
r = d.pdf(xs)
if rs is not None:
rs = np.append(rs, r)
else:
rs = r
assert_array_almost_equal(rm, rs)
def test_2D_dirichlet_is_beta(self):
np.random.seed(2846)
alpha = np.random.uniform(10e-10, 100, 2)
d = dirichlet(alpha)
b = beta(alpha[0], alpha[1])
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, 2)
x /= np.sum(x)
assert_almost_equal(b.pdf(x), d.pdf([x]))
assert_almost_equal(b.mean(), d.mean()[0])
assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
# Regression test for GH #3493. Check that setting up a PDF with a mean of
# length M and a covariance matrix of size (N, N), where M != N, raises a
# ValueError with an informative error message.
mu = np.array([0.0, 0.0])
sigma = np.array([[1.0]])
assert_raises(ValueError, multivariate_normal, mu, sigma)
# A simple check that the right error message was passed along. Checking
# that the entire message is there, word for word, would be somewhat
# fragile, so we just check for the leading part.
try:
multivariate_normal(mu, sigma)
except ValueError as e:
msg = "Dimension mismatch"
assert_equal(str(e)[:len(msg)], msg)
class TestWishart(object):
def test_scale_dimensions(self):
# Test that we can call the Wishart with various scale dimensions
# Test case: dim=1, scale=1
true_scale = np.array(1, ndmin=2)
scales = [
1, # scalar
[1], # iterable
np.array(1), # 0-dim
np.r_[1], # 1-dim
np.array(1, ndmin=2) # 2-dim
]
for scale in scales:
w = wishart(1, scale)
assert_equal(w.scale, true_scale)
assert_equal(w.scale.shape, true_scale.shape)
# Test case: dim=2, scale=[[1,0]
# [0,2]
true_scale = np.array([[1,0],
[0,2]])
scales = [
[1,2], # iterable
np.r_[1,2], # 1-dim
np.array([[1,0], # 2-dim
[0,2]])
]
for scale in scales:
w = wishart(2, scale)
assert_equal(w.scale, true_scale)
assert_equal(w.scale.shape, true_scale.shape)
# We cannot call with a df < dim
assert_raises(ValueError, wishart, 1, np.eye(2))
# We cannot call with a 3-dimension array
scale = np.array(1, ndmin=3)
assert_raises(ValueError, wishart, 1, scale)
def test_quantile_dimensions(self):
# Test that we can call the Wishart rvs with various quantile dimensions
# If dim == 1, consider x.shape = [1,1,1]
X = [
1, # scalar
[1], # iterable
np.array(1), # 0-dim
np.r_[1], # 1-dim
np.array(1, ndmin=2), # 2-dim
np.array([1], ndmin=3) # 3-dim
]
w = wishart(1,1)
density = w.pdf(np.array(1, ndmin=3))
for x in X:
assert_equal(w.pdf(x), density)
# If dim == 1, consider x.shape = [1,1,*]
X = [
[1,2,3], # iterable
np.r_[1,2,3], # 1-dim
np.array([1,2,3], ndmin=3) # 3-dim
]
w = wishart(1,1)
density = w.pdf(np.array([1,2,3], ndmin=3))
for x in X:
assert_equal(w.pdf(x), density)
# If dim == 2, consider x.shape = [2,2,1]
# where x[:,:,*] = np.eye(1)*2
X = [
2, # scalar
[2,2], # iterable
np.array(2), # 0-dim
np.r_[2,2], # 1-dim
np.array([[2,0],
[0,2]]), # 2-dim
np.array([[2,0],
[0,2]])[:,:,np.newaxis] # 3-dim
]
w = wishart(2,np.eye(2))
density = w.pdf(np.array([[2,0],
[0,2]])[:,:,np.newaxis])
for x in X:
assert_equal(w.pdf(x), density)
def test_frozen(self):
# Test that the frozen and non-frozen Wishart gives the same answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
w = wishart(df, scale)
assert_equal(w.var(), wishart.var(df, scale))
assert_equal(w.mean(), wishart.mean(df, scale))
assert_equal(w.mode(), wishart.mode(df, scale))
assert_equal(w.entropy(), wishart.entropy(df, scale))
assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
def test_1D_is_chisquared(self):
# The 1-dimensional Wishart with an identity scale matrix is just a
# chi-squared distribution.
# Test variance, mean, entropy, pdf
        # Kolmogorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(1, 10, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
w = wishart(df, scale)
c = chi2(df)
# Statistics
assert_allclose(w.var(), c.var())
assert_allclose(w.mean(), c.mean())
assert_allclose(w.entropy(), c.entropy())
# PDF
assert_allclose(w.pdf(X), c.pdf(X))
# rvs
rvs = w.rvs(size=sn)
args = (df,)
alpha = 0.01
check_distribution_rvs('chi2', args, alpha, rvs)
def test_is_scaled_chisquared(self):
# The 2-dimensional Wishart with an arbitrary scale matrix can be
# transformed to a scaled chi-squared distribution.
# For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
# :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
np.random.seed(482974)
sn = 500
df = 10
dim = 4
# Construct an arbitrary positive definite matrix
scale = np.diag(np.arange(4)+1)
scale[np.tril_indices(4, k=-1)] = np.arange(6)
scale = np.dot(scale.T, scale)
# Use :math:`\lambda = [1, \dots, 1]'`
lamda = np.ones((dim,1))
sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
w = wishart(df, sigma_lamda)
c = chi2(df, scale=sigma_lamda)
# Statistics
assert_allclose(w.var(), c.var())
assert_allclose(w.mean(), c.mean())
assert_allclose(w.entropy(), c.entropy())
# PDF
X = np.linspace(0.1,10,num=10)
assert_allclose(w.pdf(X), c.pdf(X))
# rvs
rvs = w.rvs(size=sn)
args = (df,0,sigma_lamda)
alpha = 0.01
check_distribution_rvs('chi2', args, alpha, rvs)
class TestMultinomial(object):
def test_logpmf(self):
vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
assert_allclose(vals1, -1.483270127243324, rtol=1e-8)
vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
assert_allclose(vals2, np.NAN, rtol=1e-8)
vals3 = multinomial.logpmf([3, 4], 0, [-2, 3])
assert_allclose(vals3, np.NAN, rtol=1e-8)
def test_reduces_binomial(self):
# test that the multinomial pmf reduces to the binomial pmf in the 2d
# case
val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
val2 = binom.logpmf(3, 7, 0.3)
assert_allclose(val1, val2, rtol=1e-8)
val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
val2 = binom.pmf(6, 14, 0.1)
assert_allclose(val1, val2, rtol=1e-8)
def test_R(self):
# test against the values produced by this R code
# (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
# X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
# X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
# X
# apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
n, p = 3, [1./8, 2./8, 5./8]
r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
(2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
(0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
(2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
(1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
for x in r_vals:
assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)
def test_rvs_np(self):
# test that .rvs agrees w/numpy
sc_rvs = multinomial.rvs(3, [1/4.]*3, size=7, random_state=123)
rndm = np.random.RandomState(123)
np_rvs = rndm.multinomial(3, [1/4.]*3, size=7)
assert_equal(sc_rvs, np_rvs)
def test_pmf(self):
vals0 = multinomial.pmf((5,), 5, (1,))
assert_allclose(vals0, 1, rtol=1e-8)
vals1 = multinomial.pmf((3,4), 7, (.3, .7))
assert_allclose(vals1, .22689449999999994, rtol=1e-8)
vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8,
(.1, .9))
assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8)
x = np.empty((0,2), dtype=np.float64)
vals3 = multinomial.pmf(x, 4, (.3, .7))
assert_equal(vals3, np.empty([], dtype=np.float64))
vals4 = multinomial.pmf([1,2], 4, (.3, .7))
assert_allclose(vals4, 0, rtol=1e-8)
vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0])
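        # Worked value: the multinomial coefficient 6!/(3!*3!*0!) = 20 times
        # (2/3)**3 * (1/3)**3 = 8/729 gives 160/729 ~= 0.219478738; the
        # zero-probability, zero-count category contributes a factor of 1.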
assert_allclose(vals5, 0.219478737997, rtol=1e-8)
def test_pmf_broadcasting(self):
vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]])
assert_allclose(vals0, [.243, .384], rtol=1e-8)
vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9])
assert_allclose(vals1, [.243, 0], rtol=1e-8)
vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9])
assert_allclose(vals2, [[.243, 0]], rtol=1e-8)
vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9])
assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8)
vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9])
assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8)
def test_cov(self):
cov1 = multinomial.cov(5, (.2, .3, .5))
cov2 = [[5*.2*.8, -5*.2*.3, -5*.2*.5],
[-5*.3*.2, 5*.3*.7, -5*.3*.5],
[-5*.5*.2, -5*.5*.3, 5*.5*.5]]
assert_allclose(cov1, cov2, rtol=1e-8)
def test_cov_broadcasting(self):
cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]])
cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]]
assert_allclose(cov1, cov2, rtol=1e-8)
cov3 = multinomial.cov([4, 5], [.1, .9])
cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]]
assert_allclose(cov3, cov4, rtol=1e-8)
cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]],
[[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]]
assert_allclose(cov5, cov6, rtol=1e-8)
def test_entropy(self):
# this is equivalent to a binomial distribution with n=2, so the
# entropy .77899774929 is easily computed "by hand"
ent0 = multinomial.entropy(2, [.2, .8])
assert_allclose(ent0, binom.entropy(2, .2), rtol=1e-8)
def test_entropy_broadcasting(self):
ent0 = multinomial.entropy([2, 3], [.2, .3])
assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)],
rtol=1e-8)
ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]])
assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)],
rtol=1e-8)
ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]])
assert_allclose(ent2,
[[binom.entropy(7, .3), binom.entropy(7, .4)],
[binom.entropy(8, .3), binom.entropy(8, .4)]],
rtol=1e-8)
def test_mean(self):
mean1 = multinomial.mean(5, [.2, .8])
assert_allclose(mean1, [5*.2, 5*.8], rtol=1e-8)
def test_mean_broadcasting(self):
mean1 = multinomial.mean([5, 6], [.2, .8])
assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8)
def test_frozen(self):
# The frozen distribution should agree with the regular one
np.random.seed(1234)
n = 12
pvals = (.1, .2, .3, .4)
x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]]
x = np.asarray(x, dtype=np.float64)
mn_frozen = multinomial(n, pvals)
assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals))
assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals))
assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals))
class TestInvwishart(object):
def test_frozen(self):
# Test that the frozen and non-frozen inverse Wishart gives the same
# answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)//2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)//2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
iw = invwishart(df, scale)
assert_equal(iw.var(), invwishart.var(df, scale))
assert_equal(iw.mean(), invwishart.mean(df, scale))
assert_equal(iw.mode(), invwishart.mode(df, scale))
assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
def test_1D_is_invgamma(self):
# The 1-dimensional inverse Wishart with an identity scale matrix is
# just an inverse gamma distribution.
# Test variance, mean, pdf
        # Kolmogorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(5, 20, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
iw = invwishart(df, scale)
ig = invgamma(df/2, scale=1./2)
# Statistics
assert_allclose(iw.var(), ig.var())
assert_allclose(iw.mean(), ig.mean())
# PDF
assert_allclose(iw.pdf(X), ig.pdf(X))
# rvs
rvs = iw.rvs(size=sn)
args = (df/2, 0, 1./2)
alpha = 0.01
check_distribution_rvs('invgamma', args, alpha, rvs)
def test_wishart_invwishart_2D_rvs(self):
dim = 3
df = 10
# Construct a simple non-diagonal positive definite matrix
scale = np.eye(dim)
scale[0,1] = 0.5
scale[1,0] = 0.5
# Construct frozen Wishart and inverse Wishart random variables
w = wishart(df, scale)
iw = invwishart(df, scale)
# Get the generated random variables from a known seed
np.random.seed(248042)
w_rvs = wishart.rvs(df, scale)
np.random.seed(248042)
frozen_w_rvs = w.rvs()
np.random.seed(248042)
iw_rvs = invwishart.rvs(df, scale)
np.random.seed(248042)
frozen_iw_rvs = iw.rvs()
# Manually calculate what it should be, based on the Bartlett (1933)
# decomposition of a Wishart into D A A' D', where D is the Cholesky
# factorization of the scale matrix and A is the lower triangular matrix
# with the square root of chi^2 variates on the diagonal and N(0,1)
# variates in the lower triangle.
np.random.seed(248042)
covariances = np.random.normal(size=3)
variances = np.r_[
np.random.chisquare(df),
np.random.chisquare(df-1),
np.random.chisquare(df-2),
]**0.5
# Construct the lower-triangular A matrix
A = np.diag(variances)
A[np.tril_indices(dim, k=-1)] = covariances
# Wishart random variate
D = np.linalg.cholesky(scale)
DA = D.dot(A)
manual_w_rvs = np.dot(DA, DA.T)
# inverse Wishart random variate
# Supposing that the inverse wishart has scale matrix `scale`, then the
# random variate is the inverse of a random variate drawn from a Wishart
# distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
iD = np.linalg.cholesky(np.linalg.inv(scale))
iDA = iD.dot(A)
manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
# Test for equality
assert_allclose(w_rvs, manual_w_rvs)
assert_allclose(frozen_w_rvs, manual_w_rvs)
assert_allclose(iw_rvs, manual_iw_rvs)
assert_allclose(frozen_iw_rvs, manual_iw_rvs)
def test_cho_inv_batch(self):
"""Regression test for gh-8844."""
a0 = np.array([[2, 1, 0, 0.5],
[1, 2, 0.5, 0.5],
[0, 0.5, 3, 1],
[0.5, 0.5, 1, 2]])
a1 = np.array([[2, -1, 0, 0.5],
[-1, 2, 0.5, 0.5],
[0, 0.5, 3, 1],
[0.5, 0.5, 1, 4]])
a = np.array([a0, a1])
ainv = a.copy()
_cho_inv_batch(ainv)
ident = np.eye(4)
assert_allclose(a[0].dot(ainv[0]), ident, atol=1e-15)
assert_allclose(a[1].dot(ainv[1]), ident, atol=1e-15)
def test_logpdf_4x4(self):
"""Regression test for gh-8844."""
X = np.array([[2, 1, 0, 0.5],
[1, 2, 0.5, 0.5],
[0, 0.5, 3, 1],
[0.5, 0.5, 1, 2]])
Psi = np.array([[9, 7, 3, 1],
[7, 9, 5, 1],
[3, 5, 8, 2],
[1, 1, 2, 9]])
nu = 6
prob = invwishart.logpdf(X, nu, Psi)
# Explicit calculation from the formula on wikipedia.
p = X.shape[0]
sig, logdetX = np.linalg.slogdet(X)
sig, logdetPsi = np.linalg.slogdet(Psi)
M = np.linalg.solve(X, Psi)
expected = ((nu/2)*logdetPsi
- (nu*p/2)*np.log(2)
- multigammaln(nu/2, p)
- (nu + p + 1)/2*logdetX
- 0.5*M.trace())
assert_allclose(prob, expected)
class TestSpecialOrthoGroup(object):
def test_reproducibility(self):
np.random.seed(514)
x = special_ortho_group.rvs(3)
expected = np.array([[-0.99394515, -0.04527879, 0.10011432],
[0.04821555, -0.99846897, 0.02711042],
[0.09873351, 0.03177334, 0.99460653]])
assert_array_almost_equal(x, expected)
random_state = np.random.RandomState(seed=514)
x = special_ortho_group.rvs(3, random_state=random_state)
assert_array_almost_equal(x, expected)
def test_invalid_dim(self):
assert_raises(ValueError, special_ortho_group.rvs, None)
assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
assert_raises(ValueError, special_ortho_group.rvs, 1)
assert_raises(ValueError, special_ortho_group.rvs, 2.5)
def test_frozen_matrix(self):
dim = 7
frozen = special_ortho_group(dim)
rvs1 = frozen.rvs(random_state=1234)
rvs2 = special_ortho_group.rvs(dim, random_state=1234)
assert_equal(rvs1, rvs2)
def test_det_and_ortho(self):
xs = [special_ortho_group.rvs(dim)
for dim in range(2,12)
for i in range(3)]
# Test that determinants are always +1
dets = [np.linalg.det(x) for x in xs]
assert_allclose(dets, [1.]*30, rtol=1e-13)
# Test that these are orthogonal matrices
for x in xs:
assert_array_almost_equal(np.dot(x, x.T),
np.eye(x.shape[0]))
def test_haar(self):
# Test that the distribution is constant under rotation
# Every column should have the same distribution
# Additionally, the distribution should be invariant under another rotation
# Generate samples
dim = 5
samples = 1000 # Not too many, or the test takes too long
ks_prob = .05
np.random.seed(514)
xs = special_ortho_group.rvs(dim, size=samples)
# Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
# effectively picking off entries in the matrices of xs.
        # These projections should all have the same distribution,
# establishing rotational invariance. We use the two-sided
# KS test to confirm this.
# We could instead test that angles between random vectors
# are uniformly distributed, but the below is sufficient.
# It is not feasible to consider all pairs, so pick a few.
els = ((0,0), (0,2), (1,4), (2,3))
#proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
assert_array_less([ks_prob]*len(pairs), ks_tests)
class TestOrthoGroup(object):
def test_reproducibility(self):
np.random.seed(515)
x = ortho_group.rvs(3)
x2 = ortho_group.rvs(3, random_state=515)
# Note this matrix has det -1, distinguishing O(N) from SO(N)
assert_almost_equal(np.linalg.det(x), -1)
expected = np.array([[0.94449759, -0.21678569, -0.24683651],
[-0.13147569, -0.93800245, 0.3207266],
[0.30106219, 0.27047251, 0.9144431]])
assert_array_almost_equal(x, expected)
assert_array_almost_equal(x2, expected)
def test_invalid_dim(self):
assert_raises(ValueError, ortho_group.rvs, None)
assert_raises(ValueError, ortho_group.rvs, (2, 2))
assert_raises(ValueError, ortho_group.rvs, 1)
assert_raises(ValueError, ortho_group.rvs, 2.5)
def test_det_and_ortho(self):
xs = [[ortho_group.rvs(dim)
for i in range(10)]
for dim in range(2,12)]
# Test that abs determinants are always +1
dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs])
assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13)
# Test that we get both positive and negative determinants
# Check that we have at least one and less than 10 negative dets in a sample of 10. The rest are positive by the previous test.
# Test each dimension separately
assert_array_less([0]*10, [np.nonzero(d < 0)[0].shape[0] for d in dets])
assert_array_less([np.nonzero(d < 0)[0].shape[0] for d in dets], [10]*10)
# Test that these are orthogonal matrices
for xx in xs:
for x in xx:
assert_array_almost_equal(np.dot(x, x.T),
np.eye(x.shape[0]))
def test_haar(self):
# Test that the distribution is constant under rotation
# Every column should have the same distribution
# Additionally, the distribution should be invariant under another rotation
# Generate samples
dim = 5
samples = 1000 # Not too many, or the test takes too long
ks_prob = .05
np.random.seed(518) # Note that the test is sensitive to seed too
xs = ortho_group.rvs(dim, size=samples)
# Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
# effectively picking off entries in the matrices of xs.
        # These projections should all have the same distribution,
# establishing rotational invariance. We use the two-sided
# KS test to confirm this.
# We could instead test that angles between random vectors
# are uniformly distributed, but the below is sufficient.
# It is not feasible to consider all pairs, so pick a few.
els = ((0,0), (0,2), (1,4), (2,3))
#proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
assert_array_less([ks_prob]*len(pairs), ks_tests)
@pytest.mark.slow
def test_pairwise_distances(self):
# Test that the distribution of pairwise distances is close to correct.
np.random.seed(514)
def random_ortho(dim):
u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim)))
return np.dot(u, v)
for dim in range(2, 6):
def generate_test_statistics(rvs, N=1000, eps=1e-10):
stats = np.array([
np.sum((rvs(dim=dim) - rvs(dim=dim))**2)
for _ in range(N)
])
# Add a bit of noise to account for numeric accuracy.
stats += np.random.uniform(-eps, eps, size=stats.shape)
return stats
expected = generate_test_statistics(random_ortho)
actual = generate_test_statistics(scipy.stats.ortho_group.rvs)
_D, p = scipy.stats.ks_2samp(expected, actual)
assert_array_less(.05, p)
class TestRandomCorrelation(object):
def test_reproducibility(self):
np.random.seed(514)
eigs = (.5, .8, 1.2, 1.5)
x = random_correlation.rvs((.5, .8, 1.2, 1.5))
x2 = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=514)
expected = np.array([[1., -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1., -0.24351129, 0.06703474],
[0.18366501, -0.24351129, 1., 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1.]])
assert_array_almost_equal(x, expected)
assert_array_almost_equal(x2, expected)
def test_invalid_eigs(self):
assert_raises(ValueError, random_correlation.rvs, None)
assert_raises(ValueError, random_correlation.rvs, 'test')
assert_raises(ValueError, random_correlation.rvs, 2.5)
assert_raises(ValueError, random_correlation.rvs, [2.5])
assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]])
assert_raises(ValueError, random_correlation.rvs, [2.5, -.5])
assert_raises(ValueError, random_correlation.rvs, [1, 2, .1])
def test_definition(self):
# Test the definition of a correlation matrix in several dimensions:
#
# 1. Det is product of eigenvalues (and positive by construction
# in examples)
# 2. 1's on diagonal
# 3. Matrix is symmetric
def norm(i, e):
return i*e/sum(e)
np.random.seed(123)
eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)]
eigs.append([4,0,0,0])
ones = [[1.]*len(e) for e in eigs]
xs = [random_correlation.rvs(e) for e in eigs]
# Test that determinants are products of eigenvalues
# These are positive by construction
# Could also test that the eigenvalues themselves are correct,
# but this seems sufficient.
dets = [np.fabs(np.linalg.det(x)) for x in xs]
dets_known = [np.prod(e) for e in eigs]
assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13)
# Test for 1's on the diagonal
diags = [np.diag(x) for x in xs]
for a, b in zip(diags, ones):
assert_allclose(a, b, rtol=1e-13)
# Correlation matrices are symmetric
for x in xs:
assert_allclose(x, x.T, rtol=1e-13)
def test_to_corr(self):
# Check some corner cases in to_corr
# ajj == 1
m = np.array([[0.1, 0], [0, 1]], dtype=float)
m = random_correlation._to_corr(m)
assert_allclose(m, np.array([[1, 0], [0, 0.1]]))
# Floating point overflow; fails to compute the correct
# rotation, but should still produce some valid rotation
# rather than infs/nans
with np.errstate(over='ignore'):
g = np.array([[0, 1], [-1, 0]])
m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m, g.T.dot(m0).dot(g))
m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m, g.T.dot(m0).dot(g))
# Zero discriminant; should set the first diag entry to 1
m0 = np.array([[2, 1], [1, 2]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m[0,0], 1)
# Slightly negative discriminant; should be approx correct still
m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m[0,0], 1)
class TestUnitaryGroup(object):
def test_reproducibility(self):
np.random.seed(514)
x = unitary_group.rvs(3)
x2 = unitary_group.rvs(3, random_state=514)
expected = np.array([[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j],
[0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j],
[-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]])
assert_array_almost_equal(x, expected)
assert_array_almost_equal(x2, expected)
def test_invalid_dim(self):
assert_raises(ValueError, unitary_group.rvs, None)
assert_raises(ValueError, unitary_group.rvs, (2, 2))
assert_raises(ValueError, unitary_group.rvs, 1)
assert_raises(ValueError, unitary_group.rvs, 2.5)
def test_unitarity(self):
xs = [unitary_group.rvs(dim)
for dim in range(2,12)
for i in range(3)]
# Test that these are unitary matrices
for x in xs:
assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15)
def test_haar(self):
# Test that the eigenvalues, which lie on the unit circle in
# the complex plane, are uncorrelated.
# Generate samples
dim = 5
samples = 1000 # Not too many, or the test takes too long
np.random.seed(514) # Note that the test is sensitive to seed too
xs = unitary_group.rvs(dim, size=samples)
# The angles "x" of the eigenvalues should be uniformly distributed
# Overall this seems to be a necessary but weak test of the distribution.
eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
x = np.arctan2(eigs.imag, eigs.real)
res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
assert_(res.pvalue > 0.05)
def check_pickling(distfn, args):
# check that a distribution instance pickles and unpickles
# pay special attention to the random_state property
# save the random_state (restore later)
rndm = distfn.random_state
distfn.random_state = 1234
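    # draw once so the generator state advances past the bare seed before pickling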
distfn.rvs(*args, size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(*args, size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(*args, size=8)
assert_equal(r0, r1)
# restore the random_state
distfn.random_state = rndm
def test_random_state_property():
scale = np.eye(3)
scale[0, 1] = 0.5
scale[1, 0] = 0.5
dists = [
[multivariate_normal, ()],
[dirichlet, (np.array([1.]), )],
[wishart, (10, scale)],
[invwishart, (10, scale)],
[multinomial, (5, [0.5, 0.4, 0.1])],
[ortho_group, (2,)],
[special_ortho_group, (2,)]
]
for distfn, args in dists:
check_random_state_property(distfn, args)
check_pickling(distfn, args)
| lhilt/scipy | scipy/stats/tests/test_multivariate.py | Python | bsd-3-clause | 64,227 |
#! /usr/bin/env python
import sys, os
import os.path
output_dir = sys.argv[1]
task_id = sys.argv[2]
if not os.path.exists(output_dir):
os.mkdir(output_dir)
new_gene_list = []
refGene = output_dir+"/"+task_id+".txt"
fh = open(refGene, "r")
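# Each refGene line is expected (inferred from the parsing below) to be
# chrom<TAB>start<TAB>end<TAB>gene1(tx);gene2(tx);... with a "chr" prefix on chrom.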
for line in fh:
line = line.rstrip('\n')
F = line.split('\t')
chr = F[0]
chr = chr.replace("chr", "")
start = F[1]
end = F[2]
genes = F[3]
gene_list = genes.split(";")
for gene_info in gene_list:
gene = gene_info.split("(")[0]
new_gene_list.append(chr +"\t"+ start +"\t"+ end +"\t"+ gene)
fh.close()
new_gene_list = sorted(set(new_gene_list))
hOUT = open(output_dir + "/refGene_tmp."+task_id+".bed", 'w')
for v in new_gene_list:
print >> hOUT, v
hOUT.close()
| ken0-1n/GenomonHotspotDatabase | scripts/modify_refGene_coding.py | Python | gpl-3.0 | 763 |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import time
from threading import current_thread, Lock, Timer
class Timeout(object):
def __init__(self, timeout, error):
self._runner_thread_id = current_thread().ident
self._timer = Timer(timeout, self._timed_out)
self._error = error
self._timeout_occurred = False
self._finished = False
self._lock = Lock()
def execute(self, runnable):
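        # Run `runnable` with the timer armed; if the timeout fires, an async
        # exception is injected into the runner thread and re-raised below.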
try:
self._start_timer()
try:
result = runnable()
finally:
self._cancel_timer()
self._wait_for_raised_timeout()
return result
finally:
if self._timeout_occurred:
raise self._error
def _start_timer(self):
self._timer.start()
def _cancel_timer(self):
with self._lock:
self._finished = True
self._timer.cancel()
def _wait_for_raised_timeout(self):
if self._timeout_occurred:
while True:
time.sleep(0)
def _timed_out(self):
with self._lock:
if self._finished:
return
self._timeout_occurred = True
self._raise_timeout()
def _raise_timeout(self):
# See, for example, http://tomerfiliba.com/recipes/Thread2/
# for more information about using PyThreadState_SetAsyncExc
tid = ctypes.c_long(self._runner_thread_id)
error = ctypes.py_object(type(self._error))
while ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, error) > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
time.sleep(0) # give time for other threads
| yasharmaster/scancode-toolkit | src/scancode/timeouts/windows.py | Python | apache-2.0 | 2,328 |
import factory
from adhocracy4.test import factories as a4_factories
from meinberlin.apps.livequestions import models
from meinberlin.test.factories import CategoryFactory
class LiveQuestionFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.LiveQuestion
text = factory.Faker('text', max_nb_chars=50)
category = factory.SubFactory(CategoryFactory)
module = factory.SubFactory(a4_factories.ModuleFactory)
| liqd/a4-meinberlin | meinberlin/test/factories/livequestions.py | Python | agpl-3.0 | 452 |
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
from azurelinuxagent.common.utils.fileutil import read_file
from azurelinuxagent.common.osutil.nsbsd import NSBSDOSUtil
from tests.tools import AgentTestCase, patch
from os import path
import unittest
class TestNSBSDOSUtil(AgentTestCase):
dhclient_pid_file = "/var/run/dhclient.pid"
def setUp(self):
AgentTestCase.setUp(self)
def tearDown(self):
AgentTestCase.tearDown(self)
def test_get_dhcp_pid_should_return_a_list_of_pids(self):
with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver
original_isfile = path.isfile
def mock_isfile(path):
return True if path == self.dhclient_pid_file else original_isfile(path)
original_read_file = read_file
def mock_read_file(file, *args, **kwargs):
return "123" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs)
with patch("os.path.isfile", mock_isfile):
with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file):
pid_list = NSBSDOSUtil().get_dhcp_pid()
self.assertEquals(pid_list, [123])
def test_get_dhcp_pid_should_return_an_empty_list_when_the_dhcp_client_is_not_running(self):
with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver
#
# PID file does not exist
#
original_isfile = path.isfile
def mock_isfile(path):
return False if path == self.dhclient_pid_file else original_isfile(path)
with patch("os.path.isfile", mock_isfile):
pid_list = NSBSDOSUtil().get_dhcp_pid()
self.assertEquals(pid_list, [])
#
# PID file is empty
#
original_isfile = path.isfile
def mock_isfile(path):
return True if path == self.dhclient_pid_file else original_isfile(path)
original_read_file = read_file
def mock_read_file(file, *args, **kwargs):
return "" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs)
with patch("os.path.isfile", mock_isfile):
with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file):
pid_list = NSBSDOSUtil().get_dhcp_pid()
self.assertEquals(pid_list, [])
if __name__ == '__main__':
unittest.main()
| rjschwei/WALinuxAgent | tests/common/osutil/test_nsbsd.py | Python | apache-2.0 | 3,180 |
"""
Tests of the core auth models (Role, Membership, Collection, FacilityUser, DeviceOwner, etc).
"""
from __future__ import absolute_import, print_function, unicode_literals
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.test import TestCase
from ..constants import role_kinds, collection_kinds
from ..models import FacilityUser, Facility, Classroom, LearnerGroup, Role, Membership, Collection, DeviceOwner
from ..errors import UserDoesNotHaveRoleError, UserHasRoleOnlyIndirectlyThroughHierarchyError, UserIsNotFacilityUser, \
UserIsMemberOnlyIndirectlyThroughHierarchyError, InvalidRoleKind, UserIsNotMemberError
class CollectionRoleMembershipDeletionTestCase(TestCase):
"""
Tests that removing users from a Collection deletes the corresponding Role, and that deleting a Collection
or FacilityUser deletes all associated Roles and Memberships.
"""
def setUp(self):
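        # Build a Facility -> Classroom -> LearnerGroup hierarchy with one learner,
        # one classroom coach, and one facility admin.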
self.facility = Facility.objects.create()
learner, classroom_coach, facility_admin = self.learner, self.classroom_coach, self.facility_admin = (
FacilityUser.objects.create(username='foo', facility=self.facility),
FacilityUser.objects.create(username='bar', facility=self.facility),
FacilityUser.objects.create(username='baz', facility=self.facility),
)
self.facility.add_admin(facility_admin)
self.cr = Classroom.objects.create(parent=self.facility)
self.cr.add_coach(classroom_coach)
self.lg = LearnerGroup.objects.create(parent=self.cr)
self.lg.add_learner(learner)
self.device_owner = DeviceOwner.objects.create(username="blah", password="*")
def test_remove_learner(self):
self.assertTrue(self.learner.is_member_of(self.lg))
self.assertTrue(self.learner.is_member_of(self.cr))
self.assertTrue(self.learner.is_member_of(self.facility))
self.assertEqual(Membership.objects.filter(user=self.learner, collection=self.lg).count(), 1)
self.lg.remove_learner(self.learner)
self.assertFalse(self.learner.is_member_of(self.lg))
self.assertFalse(self.learner.is_member_of(self.cr))
self.assertTrue(self.learner.is_member_of(self.facility)) # always a member of one's own facility
self.assertEqual(Membership.objects.filter(user=self.learner, collection=self.lg).count(), 0)
with self.assertRaises(UserIsNotMemberError):
self.lg.remove_learner(self.learner)
def test_remove_coach(self):
self.assertTrue(self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.lg))
self.assertTrue(self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.cr))
self.assertFalse(self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.facility))
self.assertFalse(self.classroom_coach.has_role_for_collection(role_kinds.ADMIN, self.lg))
self.assertTrue(self.classroom_coach.has_role_for_user(role_kinds.COACH, self.learner))
self.assertFalse(self.classroom_coach.has_role_for_user(role_kinds.COACH, self.facility_admin))
self.assertFalse(self.classroom_coach.has_role_for_user(role_kinds.ADMIN, self.learner))
self.assertEqual(Role.objects.filter(user=self.classroom_coach, kind=role_kinds.COACH, collection=self.cr).count(), 1)
self.cr.remove_coach(self.classroom_coach)
self.assertFalse(self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.lg))
self.assertFalse(self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.cr))
self.assertFalse(self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.facility))
self.assertFalse(self.classroom_coach.has_role_for_collection(role_kinds.ADMIN, self.lg))
self.assertFalse(self.classroom_coach.has_role_for_user(role_kinds.COACH, self.learner))
self.assertFalse(self.classroom_coach.has_role_for_user(role_kinds.COACH, self.facility_admin))
self.assertFalse(self.classroom_coach.has_role_for_user(role_kinds.ADMIN, self.learner))
self.assertEqual(Role.objects.filter(user=self.classroom_coach, kind=role_kinds.COACH, collection=self.cr).count(), 0)
with self.assertRaises(UserDoesNotHaveRoleError):
self.cr.remove_coach(self.classroom_coach)
def test_remove_admin(self):
self.assertTrue(self.facility_admin.has_role_for_collection(role_kinds.ADMIN, self.lg))
self.assertTrue(self.facility_admin.has_role_for_collection(role_kinds.ADMIN, self.cr))
self.assertTrue(self.facility_admin.has_role_for_collection(role_kinds.ADMIN, self.facility))
self.assertFalse(self.facility_admin.has_role_for_collection(role_kinds.COACH, self.lg))
self.assertTrue(self.facility_admin.has_role_for_user(role_kinds.ADMIN, self.learner))
self.assertTrue(self.facility_admin.has_role_for_user(role_kinds.ADMIN, self.facility_admin))
self.assertTrue(self.facility_admin.has_role_for_user(role_kinds.ADMIN, self.classroom_coach))
self.assertFalse(self.facility_admin.has_role_for_user(role_kinds.COACH, self.learner))
self.assertEqual(Role.objects.filter(user=self.facility_admin, kind=role_kinds.ADMIN, collection=self.facility).count(), 1)
self.facility.remove_admin(self.facility_admin)
self.assertEqual(Role.objects.filter(user=self.facility_admin, kind=role_kinds.ADMIN, collection=self.facility).count(), 0)
with self.assertRaises(UserDoesNotHaveRoleError):
self.facility.remove_admin(self.facility_admin)
def test_remove_nonexistent_role(self):
with self.assertRaises(UserDoesNotHaveRoleError):
self.facility.remove_admin(self.learner)
with self.assertRaises(UserDoesNotHaveRoleError):
self.cr.remove_coach(self.learner)
def test_remove_indirect_admin_role(self):
""" Trying to remove the admin role for a a Facility admin from a descendant classroom doesn't actually remove anything. """
with self.assertRaises(UserHasRoleOnlyIndirectlyThroughHierarchyError):
self.cr.remove_admin(self.facility_admin)
def test_remove_indirect_membership(self):
""" Trying to remove a learner's membership from a classroom doesn't actually remove anything. """
with self.assertRaises(UserIsMemberOnlyIndirectlyThroughHierarchyError):
self.cr.remove_member(self.learner)
def test_delete_learner_group(self):
""" Deleting a LearnerGroup should delete its associated Memberships as well """
self.assertEqual(Membership.objects.filter(collection=self.lg.id).count(), 1)
self.lg.delete()
self.assertEqual(Membership.objects.filter(collection=self.lg.id).count(), 0)
def test_delete_classroom_pt1(self):
""" Deleting a Classroom should delete its associated Roles as well """
self.assertEqual(Role.objects.filter(collection=self.cr.id).count(), 1)
self.cr.delete()
self.assertEqual(Role.objects.filter(collection=self.cr.id).count(), 0)
def test_delete_classroom_pt2(self):
""" Deleting a Classroom should delete its associated LearnerGroups """
self.assertEqual(LearnerGroup.objects.count(), 1)
self.cr.delete()
self.assertEqual(LearnerGroup.objects.count(), 0)
def test_delete_facility_pt1(self):
""" Deleting a Facility should delete associated Roles as well """
self.assertEqual(Role.objects.filter(collection=self.facility.id).count(), 1)
self.facility.delete()
self.assertEqual(Role.objects.filter(collection=self.facility.id).count(), 0)
def test_delete_facility_pt2(self):
""" Deleting a Facility should delete Classrooms under it. """
self.assertEqual(Classroom.objects.count(), 1)
self.facility.delete()
self.assertEqual(Classroom.objects.count(), 0)
def test_delete_facility_pt3(self):
""" Deleting a Facility should delete *every* Collection under it and associated Roles """
self.facility.delete()
self.assertEqual(Collection.objects.count(), 0)
self.assertEqual(Role.objects.count(), 0)
def test_delete_facility_user(self):
""" Deleting a FacilityUser should delete associated Memberships """
membership = Membership.objects.get(user=self.learner)
self.learner.delete()
self.assertEqual(Membership.objects.filter(id=membership.id).count(), 0)
class CollectionRelatedObjectTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
users = self.users = [FacilityUser.objects.create(
username="foo%s" % i,
facility=self.facility,
) for i in range(10)]
self.facility.add_admins(users[8:9])
self.cr = Classroom.objects.create(parent=self.facility)
self.cr.add_coaches(users[5:8])
self.lg = LearnerGroup.objects.create(parent=self.cr)
self.lg.add_learners(users[0:5])
def test_get_learner_groups(self):
self.assertSetEqual({self.lg.pk}, set(lg.pk for lg in self.cr.get_learner_groups()))
def test_get_classrooms(self):
self.assertSetEqual({self.cr.pk}, set(cr.pk for cr in self.facility.get_classrooms()))
def test_get_classroom(self):
self.assertEqual(self.cr.pk, self.lg.get_classroom().pk)
class CollectionsTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.classroom = Classroom.objects.create(parent=self.facility)
def test_add_and_remove_admin(self):
user = FacilityUser.objects.create(username='foo', facility=self.facility)
self.classroom.add_admin(user)
self.facility.add_admin(user)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.ADMIN, collection=self.classroom).count(), 1)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.ADMIN, collection=self.facility).count(), 1)
self.classroom.remove_admin(user)
self.facility.remove_admin(user)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.ADMIN, collection=self.classroom).count(), 0)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.ADMIN, collection=self.facility).count(), 0)
def test_add_and_remove_coach(self):
user = FacilityUser.objects.create(username='foo', facility=self.facility)
self.classroom.add_coach(user)
self.facility.add_coach(user)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.COACH, collection=self.classroom).count(), 1)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.COACH, collection=self.facility).count(), 1)
self.classroom.remove_coach(user)
self.facility.remove_coach(user)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.COACH, collection=self.classroom).count(), 0)
self.assertEqual(Role.objects.filter(user=user, kind=role_kinds.COACH, collection=self.facility).count(), 0)
def test_add_coaches(self):
user1 = FacilityUser.objects.create(username='foo1', facility=self.facility)
user2 = FacilityUser.objects.create(username='foo2', facility=self.facility)
self.classroom.add_coaches([user1, user2])
self.facility.add_coaches([user1, user2])
self.assertEqual(Role.objects.filter(kind=role_kinds.COACH, collection=self.classroom).count(), 2)
self.assertEqual(Role.objects.filter(kind=role_kinds.COACH, collection=self.facility).count(), 2)
def test_add_admins(self):
user1 = FacilityUser.objects.create(username='foo1', facility=self.facility)
user2 = FacilityUser.objects.create(username='foo2', facility=self.facility)
self.classroom.add_admins([user1, user2])
self.facility.add_admins([user1, user2])
self.assertEqual(Role.objects.filter(kind=role_kinds.ADMIN, collection=self.classroom).count(), 2)
self.assertEqual(Role.objects.filter(kind=role_kinds.ADMIN, collection=self.facility).count(), 2)
def test_add_classroom(self):
classroom = Classroom.objects.create(parent=self.facility)
self.assertEqual(Classroom.objects.count(), 2)
self.assertEqual(classroom.get_facility(), self.facility)
def test_add_learner_group(self):
classroom = Classroom.objects.create(name="blah", parent=self.facility)
classroom.full_clean()
LearnerGroup.objects.create(parent=classroom)
self.assertEqual(LearnerGroup.objects.count(), 1)
def test_learner(self):
user = FacilityUser.objects.create(username='foo', facility=self.facility)
classroom = Classroom.objects.create(parent=self.facility)
learner_group = LearnerGroup.objects.create(name="blah", parent=classroom)
learner_group.full_clean()
learner_group.add_learner(user)
self.assertEqual(Membership.objects.filter(user=user, collection=learner_group).count(), 1)
def test_parentless_classroom(self):
classroom = Classroom(name="myclass")
# shouldn't be valid, because no parent was specified, and Classrooms can't be the root of the collection tree
with self.assertRaises(ValidationError):
classroom.full_clean()
with self.assertRaises(IntegrityError):
classroom.save()
def test_parentless_learnergroup(self):
group = LearnerGroup(name="mygroup")
# shouldn't be valid, because no parent was specified, and LearnerGroups can't be the root of the collection tree
with self.assertRaises(ValidationError):
group.full_clean()
with self.assertRaises(IntegrityError):
group.save()
def test_facility_with_parent_facility(self):
with self.assertRaises(IntegrityError):
Facility.objects.create(name="blah", parent=self.facility)
def test_create_bare_collection_without_kind(self):
with self.assertRaises(ValidationError):
Collection(name="qqq", parent=self.facility).full_clean()
class RoleErrorTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.classroom = Classroom.objects.create(parent=self.facility)
self.learner_group = LearnerGroup.objects.create(parent=self.classroom)
self.facility_user = FacilityUser.objects.create(username="blah", password="#", facility=self.facility)
self.device_owner = DeviceOwner.objects.create(username="blooh", password="#")
def test_invalid_role_kind(self):
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(self.facility_user, "blahblahnonexistentroletype")
with self.assertRaises(InvalidRoleKind):
self.learner_group.remove_role(self.facility_user, "blahblahnonexistentroletype")
class DeviceOwnerRoleMembershipTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.classroom = Classroom.objects.create(parent=self.facility)
self.learner_group = LearnerGroup.objects.create(parent=self.classroom)
self.facility_user = FacilityUser.objects.create(username="blah", password="#", facility=self.facility)
self.device_owner = DeviceOwner.objects.create(username="blooh", password="#")
self.device_owner2 = DeviceOwner.objects.create(username="bleeh", password="#")
def test_deviceowner_is_not_member_of_any_collection(self):
self.assertFalse(self.device_owner.is_member_of(self.classroom))
self.assertFalse(self.device_owner.is_member_of(self.facility))
self.assertFalse(self.device_owner.is_member_of(self.learner_group))
def test_deviceowner_is_admin_for_everything(self):
self.assertSetEqual(self.device_owner.get_roles_for_collection(self.classroom), set([role_kinds.ADMIN]))
self.assertSetEqual(self.device_owner.get_roles_for_collection(self.facility), set([role_kinds.ADMIN]))
self.assertSetEqual(self.device_owner.get_roles_for_user(self.facility_user), set([role_kinds.ADMIN]))
self.assertSetEqual(self.device_owner.get_roles_for_user(self.device_owner), set([role_kinds.ADMIN]))
self.assertSetEqual(self.device_owner.get_roles_for_user(self.device_owner2), set([role_kinds.ADMIN]))
self.assertTrue(self.device_owner.has_role_for_user([role_kinds.ADMIN], self.facility_user))
self.assertTrue(self.device_owner.has_role_for_collection([role_kinds.ADMIN], self.facility))
def test_device_owners_cannot_be_assigned_or_removed_from_roles(self):
with self.assertRaises(UserIsNotFacilityUser):
self.classroom.add_admin(self.device_owner)
with self.assertRaises(UserIsNotFacilityUser):
self.classroom.remove_admin(self.device_owner)
def test_device_owners_cannot_be_members(self):
with self.assertRaises(UserIsNotFacilityUser):
self.classroom.add_member(self.device_owner)
with self.assertRaises(UserIsNotFacilityUser):
self.classroom.remove_member(self.device_owner)
class DeviceOwnerSuperuserTestCase(TestCase):
def test_device_owner_is_superuser(self):
device_owner = DeviceOwner.objects.create(username="test", password="##")
self.assertTrue(device_owner.is_superuser)
def test_device_owner_manager_supports_superuser_creation(self):
superusername = "boss"
DeviceOwner.objects.create_superuser(superusername, "password")
self.assertEqual(DeviceOwner.objects.get().username, superusername)
def test_device_owner_manager_superuser_creation_fails_with_empty_username(self):
superusername = ""
with self.assertRaises(ValueError):
DeviceOwner.objects.create_superuser(superusername, "password")
self.assertEqual(DeviceOwner.objects.count(), 0)
def test_device_owner_has_all_django_perms_for_django_admin(self):
device_owner = DeviceOwner.objects.create(username="test", password="##")
self.assertTrue(device_owner.has_perm("someperm", object()))
self.assertTrue(device_owner.has_perms(["someperm"], object()))
self.assertTrue(device_owner.has_module_perms("module.someapp"))
class StringMethodTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create(name="Arkham")
learner, classroom_coach, facility_admin = self.learner, self.classroom_coach, self.facility_admin = (
FacilityUser.objects.create(username='foo', facility=self.facility),
FacilityUser.objects.create(username='bar', facility=self.facility),
FacilityUser.objects.create(username='baz', facility=self.facility),
)
self.facility.add_admin(facility_admin)
self.cr = Classroom.objects.create(name="Classroom X", parent=self.facility)
self.cr.add_coach(classroom_coach)
self.lg = LearnerGroup.objects.create(name="Oodles of Fun", parent=self.cr)
self.lg.add_learner(learner)
self.device_owner = DeviceOwner.objects.create(username="blah", password="*")
def test_facility_user_str_method(self):
self.assertEqual(str(self.learner), '"foo"@"Arkham"')
def test_device_owner_str_method(self):
self.assertEqual(str(self.device_owner), "blah")
def test_collection_str_method(self):
self.assertEqual(str(Collection.objects.filter(kind=collection_kinds.FACILITY)[0]), '"Arkham" (facility)')
def test_membership_str_method(self):
self.assertEqual(str(self.learner.membership_set.all()[0]), '"foo"@"Arkham"\'s membership in "Oodles of Fun" (learnergroup)')
def test_role_str_method(self):
self.assertEqual(str(self.classroom_coach.role_set.all()[0]), '"bar"@"Arkham"\'s coach role for "Classroom X" (classroom)')
def test_facility_str_method(self):
self.assertEqual(str(self.facility), "Arkham")
def test_classroom_str_method(self):
self.assertEqual(str(self.cr), "Classroom X")
def test_learner_group_str_method(self):
self.assertEqual(str(self.lg), "Oodles of Fun")
| MCGallaspy/kolibri | kolibri/auth/test/test_models.py | Python | mit | 20,210 |
import os
import sys
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and
os.access(fn, mode) and not
os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to
# the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for folder in path:
normdir = os.path.normcase(folder)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(folder, thefile)
if _access_check(name, mode):
return name
return None
| fpietka/rds-pgbadger | package/which.py | Python | mit | 2,371 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-beyond-top-level
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
from util import logging as logutil, net as netutil
from .seq import Network as BaseNetwork
from .elements import conv, norm, act, pool, iden, deconv, upconv
logger = logutil.Logger(loggee="networks/convnet")
class Network(BaseNetwork):
def __init__(
self, depth0, depth, kernel, stride, norm_type=None,
act_type='relu', pool_type=None):
super().__init__()
norm_type = self.str2none(norm_type)
pool_type = self.str2none(pool_type)
min_n_ch = depth0
max_n_ch = depth
n_feat = netutil.gen_feat_n(min_n_ch, max_n_ch)
# Stacking the layers
prev_n = 0
self.is_contracting, self.spatsize_changes = [], []
# 1x1 conv to generate an original-res. feature map
self.layers.append(conv(1, n_feat[0], stride=1))
self.is_contracting.append(True)
self.spatsize_changes.append(1)
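        # Channel counts that keep growing mark the contracting path (strided conv
        # plus optional pooling); once they shrink, mirrored deconv/upconv layers
        # expand the feature map back toward the input resolution.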
for n in n_feat[:-1]:
# Contracting spatially
if n >= prev_n: # so 64 -> 64 is considered "contracting"
self.layers.append(
tf.keras.Sequential([
conv(kernel, n, stride=stride),
norm(norm_type), # could be identity
act(act_type),
conv(kernel, n, stride=1),
norm(norm_type),
act(act_type),
pool(pool_type), # could be identity
]))
self.is_contracting.append(True)
spatsize_change = 1 / stride
if pool_type is not None:
spatsize_change *= 1 / 2
self.spatsize_changes.append(spatsize_change)
# Expanding spatially
else:
self.layers.append(
tf.keras.Sequential([
iden() if pool_type is None else upconv(n),
deconv(kernel, n, stride=stride),
norm(norm_type),
act(act_type),
deconv(kernel, n, stride=1),
norm(norm_type),
act(act_type),
]))
self.is_contracting.append(False)
spatsize_change = stride
if pool_type is not None:
spatsize_change *= 2
self.spatsize_changes.append(spatsize_change)
prev_n = n
# Spatial res. should come back to the original by now; a final
# 1x1 conv to aggregate info and ensure desired # output channels
self.layers.append(conv(1, n_feat[-1], stride=1))
self.is_contracting.append(False)
self.spatsize_changes.append(1)
spatsizes = np.cumprod(self.spatsize_changes)
assert spatsizes[-1] == 1, \
"Resolution doesn't return to the original value"
| google/neural-light-transport | nlt/networks/convnet.py | Python | apache-2.0 | 3,625 |
# coding=utf-8
from datetime import datetime
import codecs
from psi.app import const
from tests import fixture
from tests.base_test_case import BaseTestCase
class TestImportStoreDataView(BaseTestCase):
def test_import(self):
from psi.app.models import SalesOrder, SalesOrderLine, Product, Supplier
from psi.app.utils import db_util
import os
fixture.login_as_admin(self.test_client)
file_name = os.path.dirname(os.path.realpath(__file__)) + "/../resources/store_data.csv"
content = codecs.open(file_name, "r", "utf-8").read()
from psi.app.models.user import User
from psi.app.models.role import Role
from psi.app.service import Info
user = Info.get_db().session.query(User).filter_by(login='super_admin').first()
role = Info.get_db().session.query(Role).filter_by(name='import_store_data').first()
user.roles.append(role)
from psi.app.service import Info
Info.get_db().session.add(user)
Info.get_db().session.commit()
rv = self.test_client.get('/admin/import_store_data/', follow_redirects=True)
self.assertEqual(200, rv.status_code)
self.assertIn('导入店铺运营数据'.encode('utf-8'), rv.data)
self.test_client.post('/admin/import_store_data/', data={
'file': (file_name, content),
}, follow_redirects=True)
self.assertIsNotNone(db_util.get_by_external_id(SalesOrder, '01201503090002', user=user))
self.assertIsNotNone(db_util.get_by_external_id(SalesOrderLine, '11', user=user))
self.assertIsNotNone(db_util.get_by_external_id(SalesOrderLine, '15', user=user))
self.assertIsNotNone(db_util.get_by_external_id(SalesOrderLine, '16', user=user))
self.assertIsNotNone(db_util.get_by_external_id(SalesOrderLine, '17', user=user))
self.assertIsNotNone(db_util.get_by_external_id(SalesOrderLine, '18', user=user))
self.assertIsNotNone(db_util.get_by_name(Product, '产品1', user=user))
self.assertIsNotNone(db_util.get_by_name(Product, '产品2', user=user))
self.assertIsNotNone(db_util.get_by_name(Product, '产品3', user=user))
self.assertIsNotNone(db_util.get_by_name(Product, '产品4', user=user))
self.assertIsNotNone(db_util.get_by_name(Product, '产品5', user=user))
self.assertEqual(3, SalesOrder.query.count())
self.assertEqual(5, SalesOrderLine.query.count())
self.assertEqual(5, Product.query.count())
self.assertEqual(3, Supplier.query.count())
sales_order = db_util.get_by_external_id(SalesOrder, '01201503130003', user=user)
""":type: SalesOrder"""
self.assertEqual(3, len(sales_order.lines))
self.assertEqual(const.DIRECT_SO_TYPE_KEY, sales_order.type.code)
sales_order = db_util.get_by_external_id(SalesOrder, '01201503130001', user=user)
self.assertEqual(const.DIRECT_SO_TYPE_KEY, sales_order.type.code)
self.assertEqual(user.organization_id, sales_order.organization_id)
self.assertEqual(1, len(sales_order.lines))
line = sales_order.lines[0]
""":type: SalesOrderLine"""
self.assertEqual('15', line.external_id)
self.assertEqual('产品2', line.product.name)
self.assertEquals(user.organization_id, line.product.organization_id)
self.assertEqual('000010', line.product.external_id)
self.assertEquals('000016', line.product.supplier.external_id)
self.assertEquals('供应商2', line.product.supplier.name)
self.assertEquals(user.organization_id, line.product.supplier.organization_id)
self.assertEquals(16.5000, line.product.purchase_price)
self.assertEquals(33, line.product.retail_price)
self.assertEqual(33, line.unit_price)
self.assertEquals(1, line.quantity)
self.assertEquals(datetime.strptime('2015-03-13 11:04:11.063', '%Y-%m-%d %H:%M:%S.%f'), line.sales_order.order_date)
self.assertEqual(0, line.sales_order.logistic_amount)
| betterlife/psi | tests/views/import_store_data_test.py | Python | mit | 4,050 |
#!/usr/bin/env python2
import argparse
import json
import sys
import socket
import time
from pkg_resources import parse_version
import tinctools
if parse_version(tinctools.__version__) >= parse_version('0.3') and \
parse_version(tinctools.__version__) < parse_version('0.4'):
from tinctools import connection, parse
from tinctools.connection import Request
else:
raise ImportWarning, "tinctools version: {} not supported".format(tinctools.__version__)
class TincVis:
def __init__(self, net, rundir):
self.net = net
self.nodes = {}
self.edges = {}
self.tincctl = connection.Control(net, rundir=rundir, reconn=True)
self.tincinfo = parse.TincInfo()
self.n2id = {}
self.id2n = {}
self.maxWeight = 1
def __computeHash(self, source, target):
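        # Order the two node ids so an undirected edge hashes identically
        # regardless of which endpoint is listed first.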
if (int(source['id']) <= int(target['id'])):
return "{}-{}".format(source['id'],
target['id'])
else:
return "{}-{}".format(target['id'],
source['id'])
def __parseAll(self):
connData = self.tincctl.communicate(Request.DUMP_CONNECTIONS)
subnetData = self.tincctl.communicate(Request.DUMP_SUBNETS)
nodeData = self.tincctl.communicate(Request.DUMP_NODES)
edgeData = self.tincctl.communicate(Request.DUMP_EDGES)
self.tincinfo.parse_connections(data=connData)
self.tincinfo.parse_networks(data=subnetData)
self.tincinfo.parse_nodes(data=nodeData)
self.tincinfo.parse_edges(data=edgeData)
def prepare(self):
self.__parseAll()
uniqueEdges = set()
try:
del(self.tincinfo.nodes['(broadcast)'])
except:
pass
cnt = 0
for n in self.tincinfo.nodes:
nd = self.tincinfo.nodes.get(n)
node = self.nodes.setdefault(n, {'id': None,
'networks': self.tincinfo.nodes[n].network,
'edges': 0,
'reachable': nd.peer_info['status_int']>>4 & 1,
'version': 0,
'name': n})
node['edges'] = 0
if not node['id']:
node['id'] = cnt+1
cnt += 1
for ed in self.tincinfo.edges:
try:
_hash = self.__computeHash(self.nodes[ed['from']], self.nodes[ed['to']])
except KeyError:
print('warning: empty edge found - ignoring...')
continue
reachable = 1 if self.nodes[ed['from']]['reachable'] == 1 and self.nodes[ed['to']]['reachable'] == 1 else 0
if _hash not in uniqueEdges:
e = self.edges.setdefault(ed['from'], [])
e.append({'source': self.nodes[ed['from']]['id'],
'target' : self.nodes[ed['to']]['id'],
'_hash': _hash,
'reachable': reachable,
'weight': ed['weight']})
self.nodes[ed['from']]['edges'] += 1
self.nodes[ed['to']]['edges'] += 1
uniqueEdges.add(_hash)
self.nodes[ed['to']]['version'] = ed['options']>>24
if reachable and self.maxWeight < int(ed['weight']):
self.maxWeight = int(ed['weight'])
def getFracWeight(self, cur):
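        # Normalize to 1 - cur/maxWeight: the heaviest reachable edge maps to ~0,
        # light edges map close to 1.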
return round(1-((cur*100.0)/self.maxWeight)/100, 4)
def writeJSON(self, outfile=None):
if not outfile: return
nodes = []
links = []
for n in self.nodes:
self.n2id[n] = self.nodes[n]['id']
self.id2n[self.nodes[n]['id']] = n
nodes.append({"name": n,
"index": self.nodes[n]['id'],
"id": self.nodes[n]['id'],
"edges": self.nodes[n]['edges'],
"reachable": self.nodes[n]['reachable'],
"version": self.nodes[n]['version'],
"nets": self.nodes[n]['networks'],
"group": 0 if self.nodes[n]['edges'] == 0 else 1})
for en in self.edges:
for e in self.edges[en]:
e['frac'] = self.getFracWeight(int(e['weight']))
e['sname'] = en
e['tname'] = self.id2n[e['target']]
links.append(e)
x= json.dumps({'nodes': nodes,
'links': links})
with open(outfile, "w") as ofp:
ofp.write(x)
ofp.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--net", required=True, help="network name")
parser.add_argument("-o", "--outfile", required=True, help="where to store json file")
parser.add_argument("-f", "--foreground", help="stay in foreground and dump data periodically", action="store_true", default=False)
parser.add_argument("-t", "--timeout", help="wait between dumps", default=30)
parser.add_argument("-r", "--rundir", help="location of pid- and socket-files", default="/var/run")
args = parser.parse_args()
while True:
try:
tv = TincVis(net=args.net, rundir=args.rundir)
tv.prepare()
tv.writeJSON(outfile=args.outfile)
except socket.error as e:
print(e)
if not args.foreground:
sys.exit()
else:
time.sleep(args.timeout)
| exioReed/tinc-vis | bin/gen-data.py | Python | bsd-3-clause | 5,622 |
#!/usr/bin/env python
'''Data Logging classes
'''
import abc
import os
import shutil
import numpy as np
import pandas as pd
class EpochLogger(object):
'''Logs data for each epoch
'''
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def log(self, data, epoch):
'''Logs the given data for the given epoch
'''
pass
class CSVEpochLogger(EpochLogger):
'''Logs data for each epoch using a csv file
'''
EPOCH_COLNAME = "epoch"
def __init__(self, file_fmt_str, link_file, column_names):
super(CSVEpochLogger, self).__init__()
self.__file_fmt = file_fmt_str
self.__link_file = os.path.join(link_file)
self.__column_names = np.concatenate(
([self.EPOCH_COLNAME], column_names))
def _get_filename(self, epoch):
return self.__file_fmt % epoch
def log(self, data, epoch):
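        # Epoch 1 starts a fresh CSV; later epochs copy the previous epoch's file,
        # append the new row, and repoint the "latest" symlink at the new file.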
data_frame = pd.DataFrame([np.concatenate(([epoch], data))],
columns=self.__column_names)
data_frame[[self.EPOCH_COLNAME]] = (
data_frame[[self.EPOCH_COLNAME]].astype(int))
filename = self._get_filename(epoch)
if epoch == 1:
# Create a new file
with open(filename, 'w') as file_desc:
data_frame.to_csv(file_desc, index=False)
else:
shutil.copy2(self._get_filename(epoch-1), filename)
with open(filename, 'a') as file_desc:
data_frame.to_csv(file_desc, index=False, header=False)
# Update the symlink
if (os.path.exists(self.__link_file) and
os.path.islink(self.__link_file)):
os.unlink(self.__link_file)
os.symlink(filename, self.__link_file)
| wclark3/machine-learning | final-project/code/data_logger.py | Python | mit | 1,787 |
"""
This is a working sample CloudBolt plug-in for you to start with. The run method is required,
but you can change all the code within it. See the "CloudBolt Plug-ins" section of the docs for
more info and the CloudBolt forge for more examples:
https://github.com/CloudBoltSoftware/cloudbolt-forge/tree/master/actions/cloudbolt_plugins
"""
import requests
import json
import time
from common.methods import set_progress
from infrastructure.models import CustomField, Environment, Server
from resourcehandlers.azure_arm.models import AzureARMHandler
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
"""
Todo - Pending feature
1. Create Job
2. Create Table
3. Create Notebook
API reference - https://docs.microsoft.com/en-gb/azure/databricks/dev-tools/api/latest/
"""
def get_or_create_custom_fields():
"""
    Get or create the custom fields used by this blueprint
"""
CustomField.objects.get_or_create(
name="dbs_cluster_name",
type="STR",
defaults={
'label': "Cluster Name",
'description': 'Used by the ARM Template blueprint.',
'required': False,
}
)
CustomField.objects.get_or_create(
name="dbs_runtime_version",
type="STR",
defaults={
'label': "Databricks runtime version",
'description': 'Used by the ARM Template blueprint.',
'required': True,
}
)
CustomField.objects.get_or_create(
name="dbs_worker_type",
type="STR",
defaults={
'label': "Worker Type",
'description': 'Used by the ARM Template blueprint.',
'required': True,
}
)
CustomField.objects.get_or_create(
name="dbs_num_workers",
type="INT",
defaults={
'label': "Number Workes",
'description': 'Used by the ARM Template blueprint.',
'required': True,
}
)
CustomField.objects.get_or_create(
name="autotermination_minutes",
type="INT",
defaults={
'label': "Terminate after",
'description': 'the cluster will terminate after the specified time interval of inactivity (i.e., no running commands or active job runs). This feature is best supported in the latest Spark versions',
'required': True,
}
)
def get_token(rs, client_id, client_secret, tenantId):
'''
    Generate an OAuth access token for the given resource (Databricks AD application or Azure management API)
'''
as_header = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = {
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
'resource': rs
}
# get token
resp = requests.get(f'https://login.microsoftonline.com/{tenantId}/oauth2/token', headers= as_header, data=data)
if resp.status_code != 200:
raise RuntimeError("Unable to get AD or Management access token")
return resp.json()['access_token']
def create_databricks_cluster(rh, resource_group, dbricks_workspace, dbricks_location, cluster_kwargs, count=0):
''' Create databricks workspace cluster'''
# Get a token for the global Databricks application. This value is fixed and never changes.
adbToken = get_token("2ff814a6-3304-4ab8-85cb-cd0e6f879c1d", rh.client_id, rh.secret, rh.azure_tenant_id)
# Get a token for the Azure management API
azToken = get_token("https://management.core.windows.net/", rh.client_id, rh.secret, rh.azure_tenant_id)
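    # Databricks REST calls against an Azure workspace need both tokens plus the
    # workspace resource id (see the X-Databricks-Azure-* headers below).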
dbricks_auth = {
"Authorization": f"Bearer {adbToken}",
"X-Databricks-Azure-SP-Management-Token": azToken,
"X-Databricks-Azure-Workspace-Resource-Id": (
f"/subscriptions/{rh.serviceaccount}"
f"/resourceGroups/{resource_group}"
f"/providers/Microsoft.Databricks"
f"/workspaces/{dbricks_workspace}")}
dbricks_api = f"https://{dbricks_location}/api/2.0"
# create databricks workspace cluster
ds_rsp = requests.post(f"{dbricks_api}/clusters/create", headers= dbricks_auth, data=json.dumps(cluster_kwargs))
result = ds_rsp.json()
if ds_rsp.status_code == 200:
return result
logger.info("Got this error when creating a cluster after creating a workspace : %s", result)
if count < 2:
logger.info("Databricks cluster params %s", cluster_kwargs)
# system got UnknownWorkerEnvironmentException when creating a cluster after creating a workspace
# https://github.com/databrickslabs/terraform-provider-databricks/issues/33
time.sleep(600)
logger.info("retry to create cluster after 60 seconds sleep")
# retry create databricks cluster
create_databricks_cluster(rh, resource_group, dbricks_workspace, dbricks_location, cluster_kwargs, count+1)
raise RuntimeError(result)
def generate_options_for_dbs_runtime_version(field, **kwargs):
"""
Return databricks runtime version
"""
return [ ('10.3.x-scala2.12', '10.3 Beta (Apache Spark 3.2.0, Scala 2.12)'),
('10.2.x-scala2.12', '10.2 (Apache Spark 3.2.0, Scala 2.12)'),
('10.1.x-scala2.12', '10.1 (Apache Spark 3.2.0, Scala 2.12)'),
('10.0.x-scala2.12', '10.0 (Apache Spark 3.2.0, Scala 2.12)'),
('9.1.x-scala2.12', '9.1 LTS (Apache Spark 3.1.2, Scala 2.12)'),
('9.0.x-scala2.12', '9.0 (Apache Spark 3.1.2, Scala 2.12)'),
('7.3.x-scala2.12', '7.3 LTS (Apache Spark 3.0.1, Scala 2.12)'),
('6.4.x-esr-scala2.11', '6.4 Extended Support (Apache Spark 2.4.5, Scala 2.11)')]
def generate_options_for_dbs_worker_type(field, **kwargs):
"""
Return node type
"""
return [('Standard_DS3_v2', 'Standard_DS3_v2 (14 GB Memory, 4 Cores)'),
('Standard_DS4_v2', 'Standard_DS4_v2 (28 GB Memory, 8 Cores)'),
('Standard_DS5_v2', 'Standard_DS5_v2 (56 GB Memory, 16 Cores)'),
('Standard_D3_v2', 'Standard_D3_v2 (14 GB Memory, 4 Cores)'),
('Standard_D4_v2', 'Standard_D4_v2 (28 GB Memory, 8 Cores)'),
('Standard_D5_v2', 'Standard_D5_v2 (56 GB Memory, 16 Cores)'),
('Standard_D12_v2', 'Standard_D12_v2 (28 GB Memory, 4 Cores)'),
('Standard_D13_v2', 'Standard_D13_v2 (56 GB Memory, 8 Cores)'),
('Standard_DS12_v2', 'Standard_DS12_v2 (28 GB Memory, 4 Cores)'),
('Standard_DS13_v2', 'Standard_DS13_v2 (56 GB Memory, 8 Cores)'),
('Standard_H8', 'Standard_H8 (56 GB Memory, 8 Cores)'),
('Standard_NC4as_T4_v3', 'Standard_NC4as_T4_v3 (28 GB Memory, 4 Cores)'),
('Standard_NC8as_T4_v3', 'Standard_NC8as_T4_v3 (56 GB Memory, 8 Cores)')]
def run(job, *args, **kwargs):
cluster_name = '{{dbs_cluster_name}}' # free text
if cluster_name.strip() == "":
return "", "", ""
set_progress("Starting Provision of the databricks workspace cluster...")
logger.info("Starting Provision of the databricks workspace cluster...")
resource = kwargs.get('resource')
# create custom fields if not exists
get_or_create_custom_fields()
create_cluster_params = {
'cluster_name': cluster_name,
'spark_version': '{{dbs_runtime_version}}', # drop down, show/hide based on cluster_name field value
'node_type_id': '{{dbs_worker_type}}', # drop down, show/hide based on cluster_name field value
'num_workers': '{{dbs_num_workers}}', # int, show/hide based on cluster_name field value, min=2
'autotermination_minutes': '{{autotermination_minutes}}', # int, show/hide based on cluster_name field value, min=10 and max=10000
"spark_conf": {
"spark.speculation": True
}
}
logger.info("Databricks worspace cluster params : %s", create_cluster_params)
# get resource handler object
rh = AzureARMHandler.objects.get(id=resource.azure_rh_id)
# deploy databricks workspace cluster
clust_resp = create_databricks_cluster(rh, resource.resource_group, resource.name, resource.azure_dbs_workspace_url, create_cluster_params)
# create cluster server
server = Server.objects.create(
hostname=cluster_name,
resource_handler_svr_id=clust_resp['cluster_id'],
environment=Environment.objects.get(resource_handler_id=resource.azure_rh_id, node_location=resource.azure_region),
resource_handler=rh,
group=resource.group,
owner=resource.owner,
)
resource.server_set.add(server)
return "SUCCESS", "Databricks workspace cluster deployed successfully", "" | CloudBoltSoftware/cloudbolt-forge | blueprints/azure_databricks/create_databricks_workspace_cluster.py | Python | apache-2.0 | 8,721 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from django.core.urlresolvers import reverse
from sentry.models import GroupSeen
from sentry.constants import MAX_JSON_RESULTS
from sentry.testutils import TestCase, fixture
class GroupDetailsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_simple(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert resp.context['group'] == self.group
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
# ensure we've marked the group as seen
assert GroupSeen.objects.filter(
group=self.group, user=self.user).exists()
class GroupListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-stream', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
class GroupEventListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-events', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
class GroupTagListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-tags', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/tag_list.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'tag_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
class GroupEventDetailsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-event', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
assert resp.context['event'] == self.event
class GroupEventListJsonTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-events-json', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
# HACK: force fixture creation
self.event
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert len(data) == 1
assert data[0]['id'] == str(self.event.event_id)
def test_does_not_allow_beyond_limit(self):
self.login()
resp = self.client.get(self.path, {'limit': MAX_JSON_RESULTS + 1})
assert resp.status_code == 400
class GroupEventJsonTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-event-json', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id_or_latest': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert data['id'] == self.event.event_id
| SilentCircle/sentry | tests/sentry/web/frontend/groups/tests.py | Python | bsd-3-clause | 5,628 |
# -*- coding: utf-8 -*-
###
# Copyright (c) 2010 by Elián Hanisch <lambdae2@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###
###
#
# This script adds word completion, like irssi's /completion
# (depends on WeeChat 0.3.1 or newer)
#
# Commands:
# * /completion: see /help completion
#
#
# Settings:
# * plugins.var.python.completion.replace_values:
# Completion list, it shouldn't be edited by hand.
#
#
# History:
# 2019-08-20
# version 0.3: Ben Harris (benharri)
# * port for python3
#
# 2010-05-08
# version 0.2:
# * complete any word behind the cursor, not just the last one in input line.
# * change script display name 'completion' to 'cmpl'.
#
# 2010-01-26
# version 0.1: release
#
###
try:
import weechat
WEECHAT_RC_OK = weechat.WEECHAT_RC_OK
import_ok = True
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
import_ok = False
SCRIPT_NAME = "completion"
SCRIPT_AUTHOR = "Elián Hanisch <lambdae2@gmail.com>"
SCRIPT_VERSION = "0.3"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Word completions for WeeChat"
SCRIPT_COMMAND = "completion"
### Config ###
settings = {
'replace_values':''
}
### Messages ###
def debug(s, prefix='', buffer=None):
"""Debug msg"""
#if not weechat.config_get_plugin('debug'): return
if buffer is None:
buffer_name = 'DEBUG_' + SCRIPT_NAME
buffer = weechat.buffer_search('python', buffer_name)
if not buffer:
buffer = weechat.buffer_new(buffer_name, '', '', '', '')
weechat.buffer_set(buffer, 'nicklist', '0')
weechat.buffer_set(buffer, 'time_for_each_line', '0')
weechat.buffer_set(buffer, 'localvar_set_no_log', '1')
weechat.prnt(buffer, '%s\t%s' %(prefix, s))
def error(s, prefix=None, buffer='', trace=''):
"""Error msg"""
prefix = prefix or script_nick
weechat.prnt(buffer, '%s%s %s' %(weechat.prefix('error'), prefix, s))
if weechat.config_get_plugin('debug'):
if not trace:
import traceback
            if traceback.sys.exc_info()[0]:
trace = traceback.format_exc()
not trace or weechat.prnt('', trace)
def say(s, prefix=None, buffer=''):
"""normal msg"""
prefix = prefix or script_nick
weechat.prnt(buffer, '%s\t%s' %(prefix, s))
print_replace = lambda k,v : say('%s %s=>%s %s' %(k, color_delimiter, color_reset, v))
### Config functions ###
def get_config_dict(config):
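    # Stored format (see save_replace_table): "word1=>text1;;word2=>text2"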
value = weechat.config_get_plugin(config)
if not value:
return {}
values = [s.split('=>') for s in value.split(';;')]
#debug(values)
return dict(values)
def load_replace_table():
global replace_table
replace_table = dict(get_config_dict('replace_values'))
def save_replace_table():
global replace_table
weechat.config_set_plugin('replace_values',
';;'.join(['%s=>%s' %(k, v) for k, v in replace_table.items()]))
### Commands ###
def cmd_completion(data, buffer, args):
global replace_table
if not args:
if replace_table:
for k, v in replace_table.items():
print_replace(k, v)
else:
say('No completions.')
return WEECHAT_RC_OK
cmd, space, args = args.partition(' ')
if cmd == 'add':
word, space, text = args.partition(' ')
k, v = word.strip(), text.strip()
replace_table[k] = v
save_replace_table()
say('added: %s %s=>%s %s' %(k, color_delimiter, color_reset, v))
elif cmd == 'del':
k = args.strip()
try:
del replace_table[k]
save_replace_table()
say("completion for '%s' deleted." %k)
save_replace_table()
except KeyError:
error("completion for '%s' not found." %k)
return WEECHAT_RC_OK
### Completion ###
def completion_replacer(data, completion_item, buffer, completion):
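    # Find the word immediately behind the cursor and, if it has a configured
    # replacement, substitute it in the input line and move the cursor past it.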
global replace_table
pos = weechat.buffer_get_integer(buffer, 'input_pos')
input = weechat.buffer_get_string(buffer, 'input')
#debug('%r %s %s' %(input, len(input), pos))
if pos > 0 and (pos == len(input) or input[pos] == ' '):
n = input.rfind(' ', 0, pos)
word = input[n+1:pos]
#debug(repr(word))
if word in replace_table:
replace = replace_table[word]
if pos >= len(input.strip()):
# cursor is in the end of line, append a space
replace += ' '
n = len(word)
input = '%s%s%s' %(input[:pos-n], replace, input[pos:])
weechat.buffer_set(buffer, 'input', input)
weechat.buffer_set(buffer, 'input_pos', str(pos - n + len(replace)))
return WEECHAT_RC_OK
def completion_keys(data, completion_item, buffer, completion):
global replace_table
for k in replace_table:
weechat.hook_completion_list_add(completion, k, 0, weechat.WEECHAT_LIST_POS_SORT)
return WEECHAT_RC_OK
### Main ###
if __name__ == '__main__' and import_ok and \
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, \
SCRIPT_DESC, '', ''):
# colors
color_delimiter = weechat.color('chat_delimiters')
color_script_nick = weechat.color('chat_nick')
color_reset = weechat.color('reset')
# pretty [SCRIPT_NAME]
script_nick = '%s[%s%s%s]%s' %(color_delimiter, color_script_nick, 'cmpl', color_delimiter,
color_reset)
version = weechat.info_get('version', '')
if version == '0.3.0':
error('WeeChat 0.3.1 or newer is required for this script.')
else:
# settings
for opt, val in settings.items():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, val)
load_replace_table()
completion_template = 'completion_script'
weechat.hook_completion(completion_template,
"Replaces last word in input by its configured value.", 'completion_replacer', '')
weechat.hook_completion('completion_keys', "Words in completion list.", 'completion_keys', '')
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC , "[add <word> <text>|del <word>]",
"""
add: adds a new completion, <word> => <text>.
del: deletes a completion.
Without arguments it displays current completions.
<word> will be replaced by <text> when pressing tab in input line,
where <word> is any word currently behind the cursor.
Setup:
For this script to work, you must add the template
%%(%(completion)s) to the default completion template, use:
/set weechat.completion.default_template "%%(nicks)|%%(irc_channels)|%%(%(completion)s)"
Examples:
/%(command)s add wee WeeChat (typing wee<tab> will replace 'wee' by 'WeeChat')
/%(command)s add weeurl http://www.weechat.org/
/%(command)s add test This is a test!
""" %dict(completion=completion_template, command=SCRIPT_COMMAND),
'add|del %(completion_keys)', 'cmd_completion', '')
# vim:set shiftwidth=4 tabstop=4 softtabstop=4 expandtab textwidth=100:
| qguv/config | weechat/plugins/python/completion.py | Python | gpl-3.0 | 7,678 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2015 Midokura Europe SARL, All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import uuid
from google.protobuf import descriptor
def proto_to_dict(obj, message_type_map=None):
"""Returns a PyDict that describes a Protobuf object
If message_type_map is specified, it will be used as a map between protobuf
types (using full name) and unary methods that return a desired description
for them. It works in mutual recursion with proto_describe_value
"""
out = {}
for field, value in obj.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
out[field.name] = [proto_describe_value(embedded_field, field,
message_type_map)
for embedded_field in value]
elif field.is_extension:
try:
extensions = out['extensions']
except KeyError:
extensions = out['extensions'] = {}
extensions[field.name] = proto_describe_value(value, field,
message_type_map)
else:
out[field.name] = proto_describe_value(value, field,
message_type_map)
return out
def proto_describe_value(obj, field, message_type_map):
"""Returns a Python structure for a Protobuf object
If message_type_map is specified, it will be used as a map between protobuf
types (using full name) and unary methods that return a desired description
for them.
"""
if field.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
if message_type_map is None:
#import ipdb; ipdb.set_trace()
value = proto_to_dict(obj, message_type_map=message_type_map)
else:
formatter = message_type_map.get(field.message_type.full_name)
if formatter is None:
value = proto_to_dict(obj, message_type_map=message_type_map)
else:
value = formatter(obj)
return value
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
value = field.enum_type.values_by_number.get(obj, None).name
if message_type_map is not None:
formatter = message_type_map.get(field.full_name)
if formatter is not None:
value = formatter(value)
return value
else:
return obj
def int_to_varint(number):
"""Returns a Protobuf varint that encodes the supplied number"""
data = []
current = number & 0x7f
remaining_length = number >> 7 # Move on to the next processing
while remaining_length:
byte = chr(0x80 | current) # It's not the final byte, '1' to msb
data.append(byte)
current = remaining_length & 0x7f
remaining_length >>= 7
# Handle the last byte, msb is already '0'
data.append(chr(current))
return ''.join(data)
def varint_to_int(byte_str):
"""Returns the number(int/long) that was encoded in the Protobuf varint"""
value = 0
size = 0
for current in byte_str:
value |= (ord(current) & 0x7F) << (size * 7)
size += 1
return value
def split_uuid(inp):
"""Splits a uuid.UUID into two 64bit integer halves"""
value = inp.int
mask = (2 ** 64) - 1
msb = (value & (mask << 64)) >> 64
lsb = value & mask
return msb, lsb
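# For example, uuid.UUID(int=(1 << 64) | 2) splits into msb=1, lsb=2;
# uuid_to_UUID() below reassembles the same UUID from those halves.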
def uuid_to_UUID(inp):
"""Returns a uuid.UUID object encoding a Protobuf UUID"""
return uuid.UUID(int=inp.msb << 64 | inp.lsb)
def _bytes_hex(inp):
"""Returns the hexadecimal string that encodes the input bytes"""
return ''.join('{:02x}'.format(ord(char)) for char in inp)
def _hex_bytes(inp):
"""Returns the bytes encoded in the hexadecimal string"""
args = [iter(inp)] * 2
bytes_list = [''.join(tup) for tup in itertools.izip(*args)]
return ''.join(chr(int(char, 16)) for char in bytes_list)
def encode_delimited(data):
"""Generate message where the data bytes are preceeded by length varint"""
length = int_to_varint(len(data))
return length + data
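# For example, encode_delimited('abc') yields '\x03abc' (length varint 3
# followed by the payload) and decode_delimited('\x03abc') returns 'abc'.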
def decode_delimited(data):
"""Return the bytes specified by the length varint"""
length = []
pos = 0
while not _varint_final_byte(data[pos]):
length.append(data[pos])
pos += 1
length.append(data[pos])
pos += 1
payload_length = varint_to_int(''.join(length))
if len(data) < (pos + payload_length):
raise ValueError('The supplied data is shorter than the codified '
'length')
return data[pos:pos + payload_length]
def get_answer(sock, timeout=5):
"""Return the payload of a protobuf message without the leading varint"""
sock.settimeout(timeout)
read = sock.recv(1)
if read == "":
return read # socket closed
data = [read]
while not _varint_final_byte(read):
read = sock.recv(1)
if read == "":
return read # socket closed
data.append(read)
length = varint_to_int(''.join(data))
data = []
remaining = length
while remaining > 0:
        read = sock.recv(remaining)
remaining -= len(read)
data.append(read)
return ''.join(data)
def _varint_final_byte(char):
"""Return True iff the char is the last of current varint"""
return not ord(char) & 0x80
| celebdor/python-midonetclient | src/midonetclient/protobuf/utils.py | Python | apache-2.0 | 5,989 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import tensorflow as tf
from decomposable import DecomposableNLIModel, mask_3d
class LSTMClassifier(DecomposableNLIModel):
"""
Improvements over the multi feed forward classifier. This is mostly
based on "Enhancing and Combining Sequential and Tree LSTM for
Natural Language Inference", by Chen et al. (2016), using LSTMs
instead of MLP networks.
"""
def __init__(self, *args, **kwars):
"""
Initialize the LSTM.
:param args: args passed to DecomposableNLIModel
:param kwars: kwargs passed to DecomposableNLIModel
"""
super(LSTMClassifier, self).__init__(*args, **kwars)
def _create_aggregate_input(self, v1, v2):
"""
Create and return the input to the aggregate step.
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: a tensor with shape (batch, num_aggregate_inputs)
"""
        # sum and max over time steps; each pooled result has shape (batch, num_units)
v1 = mask_3d(v1, self.sentence1_size, 0, 1)
v2 = mask_3d(v2, self.sentence2_size, 0, 1)
v1_sum = tf.reduce_sum(v1, [1])
v2_sum = tf.reduce_sum(v2, [1])
v1_max = tf.reduce_max(v1, [1])
v2_max = tf.reduce_max(v2, [1])
return tf.concat(axis=1, values=[v1_sum, v2_sum, v1_max, v2_max])
def _num_inputs_on_aggregate(self):
"""
Return the number of units used by the network when computing
the aggregated representation of the two sentences.
"""
        # 2 directions * 4 pooled vectors (v1/v2 sum and v1/v2 max)
return 8 * self.num_units
def _transformation_attend(self, sentence, num_units, length,
reuse_weights=False):
"""
Transform sentences using the RNN.
:param num_units: the number of units at each time step
:param length: the total number of items in a sentence
"""
expected_num = self.num_units if self.project_input \
else self.embedding_size
assert num_units == expected_num, \
'Expected sentences with dimension %d, got %d instead:' % \
(expected_num, num_units)
after_dropout = tf.nn.dropout(sentence, self.dropout_keep)
return self._apply_lstm(after_dropout, length, self.attend_scope,
reuse_weights)
@classmethod
def _init_from_load(cls, params, training):
return cls(params['num_units'], params['num_classes'],
params['vocab_size'], params['embedding_size'],
training=training, project_input=params['project_input'])
def _transformation_compare(self, sentence, num_units, length,
reuse_weights=False):
"""
Perform the sentence comparison using the RNN.
"""
return self._apply_lstm(sentence, length, self.compare_scope,
reuse_weights)
def _apply_lstm(self, inputs, length, scope=None, reuse_weights=False):
"""
Apply the given RNN cell to the given sentences, taking care of
weight reusing.
"""
scope_name = scope or 'lstm'
initializer = tf.contrib.layers.xavier_initializer()
lstm = tf.nn.rnn_cell.LSTMCell(self.num_units, initializer=initializer)
with tf.variable_scope(scope_name, reuse=reuse_weights) as lstm_scope:
outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm, lstm,
inputs,
dtype=tf.float32,
sequence_length=length,
scope=lstm_scope)
output_fw, output_bw = outputs
concat_outputs = tf.concat(axis=2, values=[output_fw, output_bw])
return concat_outputs
| erickrf/multiffn-nli | src/classifiers/lstm.py | Python | mit | 4,074 |
#!/usr/bin/python
import threading
import time
#from foam.sfa.util.sfalogging import logger
"""
Callids: a simple mechanism to remember the call ids served so far
memory-only for now - thread-safe
implemented as a (singleton) hash 'callid'->timestamp
"""
debug=False
class _call_ids_impl (dict):
_instance = None
# 5 minutes sounds amply enough
purge_timeout=5*60
# when trying to get a lock
retries=10
# in ms
wait_ms=100
def __init__(self):
self._lock=threading.Lock()
# the only primitive
# return True if the callid is unknown, False otherwise
def already_handled (self,call_id):
# if not provided in the call...
if not call_id: return False
has_lock=False
for attempt in range(_call_ids_impl.retries):
if debug: logger.debug("Waiting for lock (%d)"%attempt)
if self._lock.acquire(False):
has_lock=True
if debug: logger.debug("got lock (%d)"%attempt)
break
time.sleep(float(_call_ids_impl.wait_ms)/1000)
# in the unlikely event where we can't get the lock
if not has_lock:
logger.warning("_call_ids_impl.should_handle_call_id: could not acquire lock")
return False
# we're good to go
if self.has_key(call_id):
self._purge()
self._lock.release()
return True
self[call_id]=time.time()
self._purge()
self._lock.release()
if debug: logger.debug("released lock")
return False
def _purge(self):
now=time.time()
o_keys=[]
for (k,v) in self.iteritems():
if (now-v) >= _call_ids_impl.purge_timeout: o_keys.append(k)
for k in o_keys:
if debug: logger.debug("Purging call_id %r (%s)"%(k,time.strftime("%H:%M:%S",time.localtime(self[k]))))
del self[k]
if debug:
logger.debug("AFTER PURGE")
for (k,v) in self.iteritems(): logger.debug("%s -> %s"%(k,time.strftime("%H:%M:%S",time.localtime(v))))
def Callids ():
if not _call_ids_impl._instance:
_call_ids_impl._instance = _call_ids_impl()
return _call_ids_impl._instance
| dana-i2cat/felix | ofam/src/src/foam/sfa/util/callids.py | Python | apache-2.0 | 2,260 |
from collections import deque
import re
class Formula(object):
"""
Formula allows translation from a prefix-notation expression in a string to a complex number.
This is eventually to be replaced with a cython, c++, or openCL implementation as I'm sure
the performance of this class is pretty horrible.
"""
def __init__(self, formulaString='+ ** z 2 c'):
self.formulaString = formulaString
self.functions = None
"""
Compile:
This method takes in a the prefix statement and evaluates it for given values z and c.
"""
def compile(self, c, z):
form = deque(self.formulaString.split(' '))
return self.parse(form, c, z)
def parse(self, queuestring, c, z=0.0):
value = queuestring.popleft()
if (value == '+'):
return self.parse(queuestring, c, z) + self.parse(queuestring, c, z)
elif (value == '-'):
return self.parse(queuestring, c, z) - self.parse(queuestring, c, z)
elif (value == '*'):
return self.parse(queuestring, c, z) * self.parse(queuestring, c, z)
elif (value == '/'):
return self.parse(queuestring, c, z) / self.parse(queuestring, c, z)
elif (value == '^' or value == '**'):
return self.parse(queuestring, c, z) ** self.parse(queuestring, c, z)
elif (value == 'mod' or value == '%'):
return self.parse(queuestring, c, z) % self.parse(queuestring, c, z)
elif (value == 'rpart'):
return complex(self.parse(queuestring, c, z)).real
elif (value == 'ipart'):
return complex(self.parse(queuestring, c, z)).imag
elif (value == 'z'):
return z
elif (value == 'c'):
return c
elif (re.compile('^[\d\.]+').match(value)):
return float(value)
        elif (re.compile('^[\d\.]+[\+\-][\d\.]+i$').match(value)):
            real, sign, imag = re.split('([\+\-])', value)
            return complex(float(real), float(sign + imag[:-1]))
else:
            raise EOFError('unrecognized token: %r' % value)
"""Create mandate table
Revision ID: 32f5eec11778
Revises: 3a41443d8839
Create Date: 2015-06-29 22:49:29.941746
"""
# revision identifiers, used by Alembic.
revision = '32f5eec11778'
down_revision = '3a41443d8839'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'mandate',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('date_start', sa.Date, nullable=False),
sa.Column('date_end', sa.Date, nullable=False),
sa.Column('legislator_id', sa.Integer, nullable=False),
sa.Column('political_office_id', sa.Integer, nullable=False),
)
op.create_foreign_key(
'fk_political_office',
'mandate', 'political_office',
['political_office_id'], ['id']
)
op.create_foreign_key(
'fk_legislator',
'mandate', 'legislator',
['legislator_id'], ['id']
)
op.create_unique_constraint(
'uk_mandate',
'mandate',
['legislator_id', 'political_office_id', 'date_start', 'date_end']
)
def downgrade():
op.drop_constraint('fk_political_office', 'mandate', type_='foreignkey')
op.drop_constraint('fk_legislator', 'mandate', type_='foreignkey')
op.drop_constraint('uk_mandate', 'mandate', type_='unique')
op.drop_table('mandate')
| scorphus/politicos | politicos/migrations/versions/32f5eec11778_create_mandate_table.py | Python | agpl-3.0 | 1,317 |
import errno
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
import pytest
from tools.wpt import wpt
here = os.path.abspath(os.path.dirname(__file__))
def is_port_8000_in_use():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", 8000))
except socket.error as e:
if e.errno == errno.EADDRINUSE:
return True
else:
raise e
finally:
s.close()
return False
@pytest.fixture(scope="module")
def manifest_dir():
def update_manifest():
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["manifest", "--no-download", "--path", os.path.join(path, "MANIFEST.json")])
assert excinfo.value.code == 0
if os.environ.get('TRAVIS') == "true":
path = "~/meta"
update_manifest()
yield path
else:
try:
path = tempfile.mkdtemp()
old_path = os.path.join(wpt.localpaths.repo_root, "MANIFEST.json")
if os.path.exists(os.path.join(wpt.localpaths.repo_root, "MANIFEST.json")):
shutil.copyfile(old_path, os.path.join(path, "MANIFEST.json"))
update_manifest()
yield path
finally:
shutil.rmtree(path)
@pytest.fixture
def temp_test():
os.makedirs("../../.tools-tests")
test_count = {"value": 0}
def make_test(body):
test_count["value"] += 1
test_name = ".tools-tests/%s.html" % test_count["value"]
test_path = "../../%s" % test_name
with open(test_path, "w") as handle:
handle.write("""
<!DOCTYPE html>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>%s</script>
""" % body)
return test_name
yield make_test
shutil.rmtree("../../.tools-tests")
def test_missing():
with pytest.raises(SystemExit):
wpt.main(argv=["#missing-command"])
def test_help():
# TODO: It seems like there's a bug in argparse that makes this argument order required
# should try to work around that
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["--help"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests(manifest_dir):
"""The `--list-tests` option should not produce an error under normal
conditions."""
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--metadata", manifest_dir, "--list-tests",
"--yes", "chrome", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests_missing_manifest(manifest_dir):
"""The `--list-tests` option should not produce an error in the absence of
a test manifest file."""
os.remove(os.path.join(manifest_dir, "MANIFEST.json"))
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run",
# This test triggers the creation of a new manifest
# file which is not necessary to ensure successful
# process completion. Specifying the current directory
# as the tests source via the --tests` option
# drastically reduces the time to execute the test.
"--tests", here,
"--metadata", manifest_dir,
"--list-tests",
"--yes",
"firefox", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests_invalid_manifest(manifest_dir):
"""The `--list-tests` option should not produce an error in the presence of
a malformed test manifest file."""
manifest_filename = os.path.join(manifest_dir, "MANIFEST.json")
assert os.path.isfile(manifest_filename)
with open(manifest_filename, "a+") as handle:
handle.write("extra text which invalidates the file")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run",
# This test triggers the creation of a new manifest
# file which is not necessary to ensure successful
# process completion. Specifying the current directory
# as the tests source via the --tests` option
# drastically reduces the time to execute the test.
"--tests", here,
"--metadata", manifest_dir,
"--list-tests",
"--yes",
"firefox", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_firefox(manifest_dir):
# TODO: It seems like there's a bug in argparse that makes this argument order required
# should try to work around that
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
if sys.platform == "darwin":
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "Firefox Nightly.app")
else:
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "firefox")
if os.path.exists(fx_path):
shutil.rmtree(fx_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--no-pause", "--install-browser", "--yes",
# The use of `--binary-args` is intentional: it
# demonstrates that internally-managed command-line
# arguments are properly merged with those specified by
# the user. See
# https://github.com/web-platform-tests/wpt/pull/13154
"--binary-arg=-headless",
"--metadata", manifest_dir,
"firefox", "/dom/nodes/Element-tagName.html"])
assert os.path.exists(fx_path)
shutil.rmtree(fx_path)
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_chrome(manifest_dir):
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--metadata", manifest_dir,
"chrome", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_zero_tests():
"""A test execution describing zero tests should be reported as an error
even in the presence of the `--no-fail-on-unexpected` option."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"chrome", "/non-existent-dir/non-existent-file.html"])
assert excinfo.value.code != 0
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--no-fail-on-unexpected",
"chrome", "/non-existent-dir/non-existent-file.html"])
assert excinfo.value.code != 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_failing_test():
"""Failing tests should be reported with a non-zero exit status unless the
`--no-fail-on-unexpected` option has been specified."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
failing_test = "/infrastructure/expected-fail/failing-test.html"
assert os.path.isfile("../../%s" % failing_test)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"chrome", failing_test])
assert excinfo.value.code != 0
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--no-fail-on-unexpected",
"chrome", failing_test])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_verify_unstable(temp_test):
"""Unstable tests should be reported with a non-zero exit status. Stable
tests should be reported with a zero exit status."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
unstable_test = temp_test("""
test(function() {
if (localStorage.getItem('wpt-unstable-test-flag')) {
throw new Error();
}
localStorage.setItem('wpt-unstable-test-flag', 'x');
}, 'my test');
""")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--verify", "--binary-arg", "headless",
"chrome", unstable_test])
assert excinfo.value.code != 0
stable_test = temp_test("test(function() {}, 'my test');")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--verify", "--binary-arg", "headless",
"chrome", stable_test])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_install_chromedriver():
chromedriver_path = os.path.join(wpt.localpaths.repo_root, "_venv", "bin", "chromedriver")
if os.path.exists(chromedriver_path):
os.unlink(chromedriver_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["install", "chrome", "webdriver"])
assert excinfo.value.code == 0
assert os.path.exists(chromedriver_path)
os.unlink(chromedriver_path)
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_install_firefox():
if sys.platform == "darwin":
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "Firefox Nightly.app")
else:
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "firefox")
if os.path.exists(fx_path):
shutil.rmtree(fx_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["install", "firefox", "browser", "--channel=nightly"])
assert excinfo.value.code == 0
assert os.path.exists(fx_path)
shutil.rmtree(fx_path)
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_files_changed(capsys):
commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["files-changed", "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert out == """html/browsers/offline/appcache/workers/appcache-worker.html
html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-worker-data.py
html/browsers/offline/appcache/workers/resources/appcache-worker-import.py
html/browsers/offline/appcache/workers/resources/appcache-worker.manifest
html/browsers/offline/appcache/workers/resources/appcache-worker.py
"""
assert err == ""
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_files_changed_null(capsys):
commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["files-changed", "--null", "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert out == "\0".join(["html/browsers/offline/appcache/workers/appcache-worker.html",
"html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js",
"html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js",
"html/browsers/offline/appcache/workers/resources/appcache-worker-data.py",
"html/browsers/offline/appcache/workers/resources/appcache-worker-import.py",
"html/browsers/offline/appcache/workers/resources/appcache-worker.manifest",
"html/browsers/offline/appcache/workers/resources/appcache-worker.py",
""])
assert err == ""
def test_files_changed_ignore():
from tools.wpt.testfiles import exclude_ignored
files = ["resources/testharness.js", "resources/webidl2/index.js", "test/test.js"]
changed, ignored = exclude_ignored(files, ignore_rules=["resources/testharness*"])
assert changed == [os.path.join(wpt.wpt_root, item) for item in
["resources/webidl2/index.js", "test/test.js"]]
assert ignored == [os.path.join(wpt.wpt_root, item) for item in
["resources/testharness.js"]]
def test_files_changed_ignore_rules():
from tools.wpt.testfiles import compile_ignore_rule
assert compile_ignore_rule("foo*bar*/baz").pattern == "^foo\*bar[^/]*/baz$"
assert compile_ignore_rule("foo**bar**/baz").pattern == "^foo\*\*bar.*/baz$"
assert compile_ignore_rule("foobar/baz/*").pattern == "^foobar/baz/[^/]*$"
assert compile_ignore_rule("foobar/baz/**").pattern == "^foobar/baz/.*$"
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected(capsys, manifest_dir):
# This doesn't really work properly for random commits because we test the files in
# the current working directory for references to the changed files, not the ones at
# that specific commit. But we can at least test it returns something sensible.
# The test will fail if the file we assert is renamed, so we choose a stable one.
commit = "3a055e818218f548db240c316654f3cc1aeeb733"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "infrastructure/reftest-wait.html" in out
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_idlharness(capsys, manifest_dir):
commit = "47cea8c38b88c0ddd3854e4edec0c5b6f2697e62"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "webrtc/idlharness.https.window.js\n" == out
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_null(capsys, manifest_dir):
# This doesn't really work properly for random commits because we test the files in
# the current working directory for references to the changed files, not the ones at
# that specific commit. But we can at least test it returns something sensible.
# The test will fail if the file we assert is renamed, so we choose a stable one.
commit = "9bf1daa3d8b4425f2354c3ca92c4cf0398d329dd"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--null", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
tests = out.split("\0")
assert "dom/interfaces.html" in tests
assert "html/dom/interfaces.https.html" in tests
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_serve():
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
p = subprocess.Popen([os.path.join(wpt.localpaths.repo_root, "wpt"), "serve"],
preexec_fn=os.setsid)
start = time.time()
try:
while True:
if p.poll() is not None:
assert False, "server not running"
if time.time() - start > 60:
assert False, "server did not start responding within 60s"
try:
resp = urllib2.urlopen("http://web-platform.test:8000")
print(resp)
except urllib2.URLError:
print("URLError")
time.sleep(1)
else:
assert resp.code == 200
break
finally:
os.killpg(p.pid, 15)
# The following commands are slow running and used implicitly in other CI
# jobs, so we skip them here:
# wpt check-stability
# wpt manifest
# wpt lint
| danlrobertson/servo | tests/wpt/web-platform-tests/tools/wpt/tests/test_wpt.py | Python | mpl-2.0 | 18,674 |
from django.conf.urls import url
from kitsune.flagit import views
urlpatterns = [
url(r'^$', views.queue, name='flagit.queue'),
url(r'^/flag$', views.flag, name='flagit.flag'),
url(r'^/update/(?P<flagged_object_id>\d+)$', views.update,
name='flagit.update'),
]
| anushbmx/kitsune | kitsune/flagit/urls.py | Python | bsd-3-clause | 283 |
from Queue import Queue
from flask import Flask
from flask.ext.socketio import SocketIO
sio = SocketIO()
app = Flask(__name__)
app.debug = False
app.config['SECRET_KEY'] = 'eicu5jichab5aQuooshohrahghaekajaekahgayaequ0Aix7IHaigh3auphaeCh5'
app.command_queue = Queue()
#
sio.init_app(app)
curry_emit = lambda emitter: lambda key: lambda data: \
data is not None and emitter(key, {'data': data})
import events
from bobby.routes import mod; app.register_blueprint(mod)
| noah/bobby | bobby/__init__.py | Python | mit | 552 |
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
""""
Populate the blank entity and auth group fields for the user groups.
For the blank entity field, fill out with the default entity (id=1).
For the blank auth group field, get or create one by name if not exists.
Usage: ./manage.py populate_entity_and_auth_group_columns
"""
def handle(self, *args, **options):
from tendenci.apps.entities.models import Entity
from tendenci.apps.user_groups.models import Group
groups = Group.objects.all()
if groups:
first_entity = Entity.objects.first()
for ugroup in groups:
if not ugroup.entity:
ugroup.entity = first_entity
ugroup.save()
if not ugroup.group:
# the save method will take care of the auth group.
ugroup.save() | alirizakeles/tendenci | tendenci/apps/user_groups/management/commands/populate_entity_and_auth_group_columns.py | Python | gpl-3.0 | 960 |
"""Subclass of LogViewerBase, which is generated by wxFormBuilder."""
import wx
import mst_gui
import LogEntry
# Implementing LogViewerBase
class LogViewer( mst_gui.LogViewerBase ):
def __init__( self, parent, log_entry ):
mst_gui.LogViewerBase.__init__( self, parent )
self.mLogEntry = log_entry
self.mTimeNumText.SetValue("%s [%s]" % (log_entry.Time, str(log_entry.Num)))
self.mSentText.SetValue(log_entry.Sent)
self.mReceivedText.SetValue(log_entry.Received)
def OnClose(self, event):
self.EndModal(0)
def OnCloseClicked(self, event):
self.EndModal(0)
| zstars/weblabdeusto | tools/ExperimentServerTester/src/LogViewer.py | Python | bsd-2-clause | 605 |
"""
ConfigurationModel for the mobile_api djangoapp.
"""
from config_models.models import ConfigurationModel
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from . import utils
from .mobile_platform import PLATFORM_CLASSES
class MobileApiConfig(ConfigurationModel):
"""
    Configuration for the video profiles returned by the mobile API.
The order in which the comma-separated list of names of profiles are given
is in priority order.
.. no_pii:
"""
video_profiles = models.TextField(
blank=True,
help_text=u"A comma-separated list of names of profiles to include for videos returned from the mobile API."
)
class Meta(object):
app_label = "mobile_api"
@classmethod
def get_video_profiles(cls):
"""
Get the list of profiles in priority order when requesting from VAL
"""
return [profile.strip() for profile in cls.current().video_profiles.split(",") if profile]
@python_2_unicode_compatible
class AppVersionConfig(models.Model):
"""
Configuration for mobile app versions available.
.. no_pii:
"""
PLATFORM_CHOICES = tuple(
[(platform, platform) for platform in sorted(PLATFORM_CLASSES.keys())]
)
platform = models.CharField(max_length=50, choices=PLATFORM_CHOICES, blank=False)
version = models.CharField(
max_length=50,
blank=False,
help_text=u"Version should be in the format X.X.X.Y where X is a number and Y is alphanumeric"
)
major_version = models.IntegerField()
minor_version = models.IntegerField()
patch_version = models.IntegerField()
expire_at = models.DateTimeField(null=True, blank=True, verbose_name=u"Expiry date for platform version")
enabled = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
app_label = "mobile_api"
unique_together = ('platform', 'version',)
ordering = ['-major_version', '-minor_version', '-patch_version']
def __str__(self):
return "{}_{}".format(self.platform, self.version)
@classmethod
def latest_version(cls, platform):
""" Returns latest supported app version for a platform. """
latest_version_config = cls.objects.filter(platform=platform, enabled=True).first()
if latest_version_config:
return latest_version_config.version
@classmethod
def last_supported_date(cls, platform, version):
""" Returns date when app version will get expired for a platform """
parsed_version = utils.parsed_version(version)
active_configs = cls.objects.filter(platform=platform, enabled=True, expire_at__isnull=False).reverse()
for config in active_configs:
if utils.parsed_version(config.version) >= parsed_version:
return config.expire_at
def save(self, *args, **kwargs):
""" parses version into major, minor and patch versions before saving """
self.major_version, self.minor_version, self.patch_version = utils.parsed_version(self.version)
super(AppVersionConfig, self).save(*args, **kwargs)
class IgnoreMobileAvailableFlagConfig(ConfigurationModel): # pylint: disable=W5101
"""
Configuration for the mobile_available flag. Default is false.
Enabling this configuration will cause the mobile_available flag check in
access.py._is_descriptor_mobile_available to ignore the mobile_available
flag.
.. no_pii:
"""
class Meta(object):
app_label = "mobile_api"
| cpennington/edx-platform | lms/djangoapps/mobile_api/models.py | Python | agpl-3.0 | 3,642 |
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2017, Jean-Philippe Evrard <jean-philippe.evrard@rackspace.co.uk>
# Take a list of paths for a distro, returns which system packages
# (distro_packages, apt_packages, yum_packages) will be installed.
# Outputs a list.
# example:
# tasks:
# - debug:
# var: item
# with_packages_to_install:
# - from:
# - /etc/ansible/roles
# - ./
# for: trusty
# #pkg_blacklist: []
# #var_blacklist: []
import os
import re
import yaml
# import ansible things
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleLookupError
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
BUILT_IN_DISTRO_PACKAGE_VARS = [
'distro_packages',
'apt_packages',
'yum_packages'
]
# Convenience mappings
distro_specific_paths = {}
distro_specific_paths['trusty'] = [
'/vars/ubuntu-14.04.',
'/vars/ubuntu.',
'/vars/debian.',
'/vars/main.',
]
distro_specific_paths['xenial'] = [
'/vars/ubuntu-16.04.',
'/vars/ubuntu.',
'/vars/debian.',
'/vars/main.',
]
distro_specific_paths['ubuntu-14.04'] = [
'/vars/ubuntu-14.04.',
'/vars/ubuntu.',
'/vars/debian.',
'/vars/main.',
]
distro_specific_paths['ubuntu-16.04'] = [
'/vars/ubuntu-16.04.',
'/vars/ubuntu.',
'/vars/debian.',
'/vars/main.',
]
distro_specific_paths['redhat-7'] = [
'/vars/redhat-7.',
'/vars/redhat.',
'/vars/main.',
]
generic_paths = ['/defaults/', '/user_']
def filter_files(file_names, file_name_words):
"""Filter the files and return a sorted list.
:type file_names:
:type ext: ``str`` or ``tuple``
:returns: ``list``
"""
extensions = ('yaml', 'yml')
_file_names = list()
    # Uncomment this for debugging purposes
# print("Filtering according to words {}".format(file_name_words))
for file_name in file_names:
if file_name.endswith(extensions):
if any(i in file_name for i in file_name_words):
                # Uncomment this to list the matching files
# print("Filename {} is a match".format(file_name))
_file_names.append(file_name)
else:
return _file_names
def get_files(path):
paths = os.walk(os.path.abspath(path))
files = list()
for fpath, _, afiles in paths:
for afile in afiles:
files.append(os.path.join(fpath, afile))
else:
return files
def get_package_list(distro, path, var_blacklist, pkg_blacklist):
path_triggers = []
path_triggers.extend(distro_specific_paths[distro])
path_triggers.extend(generic_paths)
pkg_blklst_re = ""
if pkg_blacklist:
pkg_blklst_re = "(" + ")|(".join(pkg_blacklist) + ")"
packages_list = []
    # Uncomment this for debugging purposes
# print("Scanning path {} for files matching distro {}".format(path,distro))
for folder in path:
all_files = get_files(folder)
for filename in filter_files(all_files, path_triggers):
with open(filename, 'r') as f:
try:
loaded_config = yaml.safe_load(f.read())
except Exception as e:
# Ignore loading errors, the file may be empty
continue
try:
for key, values in loaded_config.items():
key = key.lower()
for type_of_package in BUILT_IN_DISTRO_PACKAGE_VARS:
if (key.endswith(type_of_package) and
key not in var_blacklist):
for value in values:
# If no blacklist or not in blacklist
# append value (package name)
if (not re.match(pkg_blklst_re, value) or
not pkg_blacklist):
# If package is formatted like packagename==version
if value.find('=') != -1:
packages_list.append(value.split('=')[0])
else:
packages_list.append(value)
except AttributeError as e:
continue
return packages_list
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
term_pkgs = []
for term in terms:
if isinstance(term, dict):
paths = term.get('from', [])
distro = term.get('for', "")
pkg_blacklist = term.get('pkg_blacklist', [])
var_blacklist = term.get('var_blacklist', [])
else:
raise AnsibleLookupError("Lookup item should be a dict")
if not distro:
raise AnsibleLookupError("Distro (for:) cannot be empty")
if not paths:
raise AnsibleLookupError("Locations (from:) cannot be empty")
term_pkgs.extend(get_package_list(distro, paths, var_blacklist, pkg_blacklist))
return term_pkgs
# For debug purposes
if __name__ == '__main__':
import sys
import json
call_term = {}
call_term['for'] = sys.argv[1]
call_term['from'] = sys.argv[2:]
call_terms = [call_term]
print(json.dumps(LookupModule().run(terms=call_terms), indent=4, sort_keys=True))
| cfarquhar/rpc-openstack | scripts/artifacts-building/apt/lookup/packages_to_install.py | Python | apache-2.0 | 6,059 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""rlu_rwrl dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.rl_unplugged.rlu_rwrl import rlu_rwrl
class RluRwrlTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for rlu_rwrl dataset."""
DATASET_CLASS = rlu_rwrl.RluRwrl
SPLITS = {
'train': 1, # Number of fake train example
}
SKIP_TF1_GRAPH_MODE = True
# The different configs are only different in the name of the files.
BUILDER_CONFIG_NAMES_TO_TEST = [
'cartpole_swingup_combined_challenge_none_1_percent'
]
@classmethod
def setUpClass(cls):
rlu_rwrl.RluRwrl._INPUT_FILE_PREFIX = cls.dummy_data
rlu_rwrl.RluRwrl._SHARDS = 1
super().setUpClass()
if __name__ == '__main__':
tfds.testing.test_main()
| tensorflow/datasets | tensorflow_datasets/rl_unplugged/rlu_rwrl/rlu_rwrl_test.py | Python | apache-2.0 | 1,352 |
from hashlib import sha1
from django.test import TestCase
from mooch.postfinance import PostFinanceMoocher
from testapp.models import Payment
def _messages(response):
return [m.message for m in response.context["messages"]]
class Request:
def build_absolute_uri(*arg):
return ""
class MoochTest(TestCase):
def test_postfinance_postsale(self):
payment = Payment.objects.create(amount=100)
ipn_data = {
"orderID": "%s-random" % payment.id.hex,
"currency": "CHF",
"amount": "100.00",
"PM": "Postfinance",
"ACCEPTANCE": "xxx",
"STATUS": "5", # Authorized
"CARDNO": "xxxxxxxxxxxx1111",
"PAYID": "123456789",
"NCERROR": "",
"BRAND": "VISA",
"SHASIGN": "this-value-is-invalid",
}
sha1_source = "".join(
(
ipn_data["orderID"],
"CHF",
"100.00",
ipn_data["PM"],
ipn_data["ACCEPTANCE"],
ipn_data["STATUS"],
ipn_data["CARDNO"],
ipn_data["PAYID"],
ipn_data["NCERROR"],
ipn_data["BRAND"],
"nothing",
)
)
ipn_data["SHASIGN"] = sha1(sha1_source.encode("utf-8")).hexdigest()
response = self.client.post("/postfinance_postsale/", ipn_data)
self.assertEqual(response.status_code, 200)
payment.refresh_from_db()
self.assertIsNotNone(payment.charged_at)
def test_postfinance_payment_method(self):
post_finance_moocher = PostFinanceMoocher(
pspid="fake",
live=False,
sha1_in="fake",
sha1_out="fake",
payment_methods=None,
)
payment = Payment.objects.create(amount=100, email="fake@fake.com")
request = Request()
response = post_finance_moocher.payment_form(request, payment)
self.assertTrue(
'<input type="hidden" name="PMLIST" '
'value="PostFinance Card;PostFinance e-finance">' in response
)
post_finance_moocher.payment_methods = ["PostFinance Card", "TWINT", "PAYPAL"]
response = post_finance_moocher.payment_form(request, payment)
self.assertTrue(
'<input type="hidden" name="PMLIST" value="PostFinance Card;TWINT;PAYPAL">'
in response
)
def test_banktransfer(self):
payment = Payment.objects.create(amount=50)
response = self.client.post("/banktransfer_confirm/", {"id": payment.id.hex})
self.assertRedirects(response, "/", fetch_redirect_response=False)
payment.refresh_from_db()
self.assertIsNotNone(payment.charged_at)
| matthiask/django-mooch | tests/testapp/test_mooch.py | Python | mit | 2,802 |
"""Acceptances tests using py.test fixtures.
All fixtures from ../conftest.py and :mod: `pytest_splinter.plugin` are
available.
The test case structure should follow the If-When-Then pattern.
"""
#########
# Tests #
#########
def test_user_want_to_explore_news(browser):
# import ipdb; ipdb.set_trace() # python interactive debugger
visit_page(browser, 'the-project')
input_in_search_box_and_press_enter(browser, 'Plenar Meeting')
is_on_page(browser, 'Search')
is_in_listing(browser, '2nd Plenary Meeting')
###########################
# Common helper functions #
###########################
def visit_page(browser, url):
browser.visit('http://policycompass.eu')
    browser.browser.click_link_by_partial_href(url)
def input_in_search_box_and_press_enter(browser, text):
button = browser.browser.find_by_id('s').first
button.fill(text + '\r')
def is_on_page(browser, partial_url):
assert partial_url in browser.browser.url
def is_in_listing(browser, heading):
assert browser.browser.is_text_present(heading)
| FabiApfelkern/policycompass | tests-acceptance-frontend/test_example_story.py | Python | agpl-3.0 | 1,073 |
#!/usr/bin/env python3
import re
COMPRESSION_CUTOFF = 3
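# Runs longer than COMPRESSION_CUTOFF characters are written as '<char>#<count>'
# (e.g. 'aaaaaaaa' -> 'a#8'); shorter runs are left verbatim.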
def compress(string_to_compress: str) -> str:
output = "" # The compressed string that will be outputted
character = "" # character the string_to_compress at blank
occurrences = 0 # the number of occurrences of this letter
for index, char in enumerate(string_to_compress): # goes through both the range and the characters at the same time
if char != character or index == len(string_to_compress)-1: # if we spot a different character,
# or are at the end of the string_to_compress
# go here
if index == len(string_to_compress)-1 and character == char: # If we are at the end of
# the string_to_compress
# but the last character is the same, add to the occurrences of the character
occurrences += 1
if occurrences > COMPRESSION_CUTOFF:
# If we have more than three occurrences, add the compress format to the output
output += character + '#' + str(occurrences)
else:
# the next line puts 'occurrences' number of 'character' in the output
output += character * occurrences
if index == len(string_to_compress)-1 and character != char:
# If we are at the end of the string_to_compress and the character
# is not the same as the last. Top it off
output += char
character = char # set char to character so we know the last character we looked at
occurrences = 1
else:
occurrences += 1
return output
def decompress(string_to_uncompress: str) -> str:
# Using regular expressions to parse the string_to_uncompress
matches = re.findall(r'(.#\d+)', string_to_uncompress) # Looking for [anything]#[at least one number]
decompressed = string_to_uncompress # decompressed is the new string that we will output
for match in matches: # Scan through the matches and uncompress each of them
# match the character so we know what character we need to expand
char = re.match(r'(.)#\d+', match)
# Determine the number of times it occurred
times = re.search(r'(\d+)', match)
replacement = char.group(1) * int(times.group(1)) # To get the matches specifically we need to access
# them at group 1
decompressed = decompressed.replace(match, replacement)
return decompressed
print(decompress(compress("aaaaaaaa")))
| mindm/2017Challenges | challenge_12/python/slandau3/compression.py | Python | mit | 2,546 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ArtifactParameterProperties(Model):
"""Properties of an artifact parameter.
:param name: The name of the artifact parameter.
:type name: str
:param value: The value of the artifact parameter.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, name=None, value=None):
super(ArtifactParameterProperties, self).__init__()
self.name = name
self.value = value
| lmazuel/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/artifact_parameter_properties.py | Python | mit | 1,047 |
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.interpolate as interp
from numpy.testing import assert_almost_equal
class TestRegression(object):
def test_spalde_scalar_input(self):
"""Ticket #629"""
x = np.linspace(0,10)
y = x**3
tck = interp.splrep(x, y, k=3, t=[5])
res = interp.spalde(np.float64(1), tck)
des = np.array([1., 3., 6., 6.])
assert_almost_equal(res, des)
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_regression.py | Python | mit | 484 |
'''
_ __ ___ ___ _ __ ___ _ __ ___ ___ _ __ _ _
| '__/ _ \/ __| '_ \ / _ \| '_ \/ __|/ _ \ | '_ \| | | |
| | | __/\__ \ |_) | (_) | | | \__ \ __/_| |_) | |_| |
|_| \___||___/ .__/ \___/|_| |_|___/\___(_) .__/ \__, |
|_| |_| |___/
'''
from contextlib import contextmanager
from threading import local
# We _really_ want _everything_ from `requests`.
from requests import *
__title__ = 'response'
__author__ = 'Genadi Samokovarov'
__copyright__ = 'Copyright 2012 Genadi Samokovarov'
class ResponseStack(local):
'''
Thread-local stack of responses.
'''
def __init__(self):
self.content = []
def top(self):
if self:
return self.content[-1]
def push(self, response):
self.content.append(response)
def pop(self):
if self:
try:
return self.top()
finally:
del self.content[-1]
__iter__ = lambda self: iter(self.content)
__len__ = lambda self: len(self.content)
__nonzero__ = lambda self: bool(self.content)
__bool__ = __nonzero__
class ResponseProxy(local):
'''
Thread-local response objects proxy.
'''
def __init__(self, response_factory):
self.response_factory = response_factory
@property
def response(self):
return self.response_factory()
@property
def __class__(self):
return self.response.__class__
@property
def __dict__(self):
return self.response.__dict__
__getattr__ = lambda self, name: getattr(self.response, name)
__repr__ = lambda self: repr(self.response)
__nonzero__ = lambda self: bool(self.response)
__bool__ = __nonzero__
def convert_to_context_manager(func):
'''
Converts a `requests` API function to a context manager.
'''
def wrapper(*args, **kw):
response = func(*args, **kw)
try:
yield responses.push(response)
finally:
responses.pop()
return contextmanager(wrapper)
get = convert_to_context_manager(get)
options = convert_to_context_manager(options)
head = convert_to_context_manager(head)
post = convert_to_context_manager(post)
put = convert_to_context_manager(put)
patch = convert_to_context_manager(patch)
delete = convert_to_context_manager(delete)
request = convert_to_context_manager(request)
responses = ResponseStack()
response = ResponseProxy(responses.top)
last = response
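# Illustrative client usage (hypothetical URL):
#   from response import get, response
#   with get('http://example.com'):
#       print(response.status_code)  # proxied from the top of the response stack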
| gsamokovarov/response.py | response.py | Python | mit | 2,531 |
"""
Django settings for testproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import os.path
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(BASE_DIR, '../src'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!m!0t)c4mge%=v)uk3v0r+j_rl+=vw=&uc-#(koa4i2pa=(%31'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testproject.testapp',
'roma',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
WSGI_APPLICATION = 'testproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| nathforge/django-roma | testproject/testproject/settings.py | Python | gpl-3.0 | 2,177 |
import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "omsim"
PACKAGES = find_packages(where="src")
META_PATH = os.path.join("src", "omsim", "__init__.py")
KEYWORDS = ["optical maps"]
CLASSIFIERS = [
"Development Status :: 0 - Unstable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: GPL2 License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["scipy"]
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
# author_email=find_meta("email"),
maintainer=find_meta("author"),
# maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.rst"),
packages=PACKAGES,
package_dir={"": "src"},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| biointec/omsim | omsim/setup.py | Python | gpl-2.0 | 2,077 |
# A list is used to manage the list of Particles.
from particle import Particle
from crazy_particle import CrazyParticle
class ParticleSystem(object):
def __init__(self, num, v):
self.particles = [] # Initialize the list.
self.origin = v.get() # Store the origin point.
for i in range(num):
# Add "num" amount of particles to the list.
self.particles.append(Particle(self.origin))
def run(self):
# Cycle through the list backwards, because we are deleting while
# iterating.
for i in reversed(range(len(self.particles))):
p = self.particles[i]
p.run()
if p.isDead():
del self.particles[i]
def addParticle(self):
p = None
# Add either a Particle or CrazyParticle to the system.
if int(random(0, 2)) == 0:
p = Particle(self.origin)
else:
p = CrazyParticle(self.origin)
self.particles.append(p)
# A method to test if the particle system still has particles.
def dead(self):
        return len(self.particles) == 0
| kantel/processingpy | sketches/modes/PythonMode/examples/Topics/Simulate/MultipleParticleSystems/particle_system.py | Python | mit | 1,129 |
# -*- coding: utf-8 -*-
"""Test Call class
"""
import vcfpy
from vcfpy import record
__author__ = "Manuel Holtgrewe <manuel.holtgrewe@bihealth.de>"
def build_rec(calls=None, format_extras=None):
calls = calls or []
format_extras = format_extras or []
alt1 = record.Substitution(vcfpy.SNV, "T")
alt2 = record.Substitution(vcfpy.SNV, "A")
return record.Record(
"2",
100,
[],
"C",
[alt1, alt2],
None,
[],
vcfpy.OrderedDict(),
["GT"] + format_extras,
calls,
)
# Call.is_phased() ------------------------------------------------------------
def test_is_phased_true():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|1")]))
assert call.is_phased is True
def test_is_phased_mixed():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0/1|2")]))
assert call.is_phased is True
def test_is_phased_false():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0/1")]))
assert call.is_phased is False
# Call.gt_phase_char() --------------------------------------------------------
def test_gt_phase_char_pipe():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|1")]))
assert call.gt_phase_char == "|"
def test_gt_phase_char_slash():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0/1")]))
assert call.gt_phase_char == "/"
# Call.gt_bases() -------------------------------------------------------------
def test_gt_bases_0_0():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|0")]))
build_rec([call])
assert call.gt_bases == ("C", "C")
def test_gt_bases_0_1():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|1")]))
build_rec([call])
assert call.gt_bases == ("C", "T")
def test_gt_bases_1_1():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1|1")]))
build_rec([call])
assert call.gt_bases == ("T", "T")
def test_gt_bases_0_2():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|2")]))
build_rec([call])
assert call.gt_bases == ("C", "A")
# Call.gt_type() --------------------------------------------------------------
def test_gt_type_het():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|1")]))
assert call.gt_type == vcfpy.HET
def test_gt_type_hom_ref():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0/0")]))
assert call.gt_type == vcfpy.HOM_REF
def test_gt_type_hom_alt():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1")]))
assert call.gt_type == vcfpy.HOM_ALT
# Call.is_het() ---------------------------------------------------------------
def test_is_het_het():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|1")]))
assert call.is_het
def test_is_het_hom_ref():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0/0")]))
assert not call.is_het
def test_is_het_hom_alt():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1")]))
assert not call.is_het
# Call.is_variant() -----------------------------------------------------------
def test_is_variant_het():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0|1")]))
assert call.is_variant
def test_is_variant_hom_ref():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "0/0")]))
assert not call.is_variant
def test_is_variant_hom_alt():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1")]))
assert call.is_variant
def test_is_variant_no_call():
call1 = record.Call("sample", vcfpy.OrderedDict([("GT", ".")]))
assert not call1.is_variant
call2 = record.Call("sample", vcfpy.OrderedDict([("GT", "./.")]))
assert not call2.is_variant
# Call.ploidy ----------------------------------------------------------------
def test_ploidy_nocall():
call = record.Call("sample", vcfpy.OrderedDict([("GT", ".")]))
assert call.ploidy == 1
def test_ploidy_one():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1")]))
assert call.ploidy == 1
def test_ploidy_two():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1")]))
assert call.ploidy == 2
# Call.is_filtered() ----------------------------------------------------------
def test_gt_type_filtered_no_ft():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1")]))
assert not call.is_filtered()
def test_gt_type_filtered_empty():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1"), ("FT", [])]))
assert not call.is_filtered()
def test_gt_type_filtered_pass():
call = record.Call("sample", vcfpy.OrderedDict([("GT", "1/1"), ("FT", ["PASS"])]))
assert not call.is_filtered()
| bihealth/vcfpy | tests/test_call.py | Python | mit | 4,742 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Jose-Ignacio Riaño Chico
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
import logging
import ntpath
import os
import random
import tempfile
import time
from contextlib import contextmanager
from functools import wraps
import luigi.format
from luigi.target import FileSystem, FileSystemTarget, AtomicLocalFile
logger = logging.getLogger('luigi-interface')
try:
import dropbox.dropbox
import dropbox.exceptions
import dropbox.files
except ImportError:
logger.warning("Loading Dropbox module without the python package dropbox (https://pypi.org/project/dropbox/). "
"Will crash at runtime if Dropbox functionality is used.")
def accept_trailing_slash_in_existing_dirpaths(func):
@wraps(func)
def wrapped(self, path, *args, **kwargs):
if path != '/' and path.endswith('/'):
logger.warning("Dropbox paths should NOT have trailing slashes. This causes additional API calls")
logger.warning("Consider modifying your calls to {}, so that they don't use paths than end with '/'".format(func.__name__))
if self._exists_and_is_dir(path[:-1]):
path = path[:-1]
return func(self, path, *args, **kwargs)
return wrapped
def accept_trailing_slash(func):
@wraps(func)
def wrapped(self, path, *args, **kwargs):
if path != '/' and path.endswith('/'):
path = path[:-1]
return func(self, path, *args, **kwargs)
return wrapped
class DropboxClient(FileSystem):
"""
Dropbox client for authentication, designed to be used by the :py:class:`DropboxTarget` class.
"""
def __init__(self, token, user_agent="Luigi"):
"""
:param str token: Dropbox Oauth2 Token. See :class:`DropboxTarget` for more information about generating a token
"""
if not token:
raise ValueError("The token parameter must contain a valid Dropbox Oauth2 Token")
try:
conn = dropbox.dropbox.Dropbox(oauth2_access_token=token, user_agent=user_agent)
except Exception as e:
raise Exception("Cannot connect to Dropbox. Check your Internet connection and the token. \n" + repr(e))
self.token = token
self.conn = conn
@accept_trailing_slash_in_existing_dirpaths
def exists(self, path):
if path == '/':
return True
if path.endswith('/'):
path = path[:-1]
return self._exists_and_is_dir(path)
try:
self.conn.files_get_metadata(path)
return True
except dropbox.exceptions.ApiError as e:
if isinstance(e.error.get_path(), dropbox.files.LookupError):
return False
else:
raise e
@accept_trailing_slash_in_existing_dirpaths
def remove(self, path, recursive=True, skip_trash=True):
if not self.exists(path):
return False
self.conn.files_delete_v2(path)
return True
@accept_trailing_slash
def mkdir(self, path, parents=True, raise_if_exists=False):
if self.exists(path):
if not self.isdir(path):
raise luigi.target.NotADirectory()
elif raise_if_exists:
raise luigi.target.FileAlreadyExists()
else:
return
self.conn.files_create_folder_v2(path)
@accept_trailing_slash_in_existing_dirpaths
def isdir(self, path):
if path == '/':
return True
try:
md = self.conn.files_get_metadata(path)
return isinstance(md, dropbox.files.FolderMetadata)
except dropbox.exceptions.ApiError as e:
if isinstance(e.error.get_path(), dropbox.files.LookupError):
return False
else:
raise e
@accept_trailing_slash_in_existing_dirpaths
def listdir(self, path, **kwargs):
dirs = []
lister = self.conn.files_list_folder(path, recursive=True, **kwargs)
dirs.extend(lister.entries)
while lister.has_more:
lister = self.conn.files_list_folder_continue(lister.cursor)
dirs.extend(lister.entries)
return [d.path_display for d in dirs]
@accept_trailing_slash_in_existing_dirpaths
def move(self, path, dest):
self.conn.files_move_v2(from_path=path, to_path=dest)
@accept_trailing_slash_in_existing_dirpaths
def copy(self, path, dest):
self.conn.files_copy_v2(from_path=path, to_path=dest)
def download_as_bytes(self, path):
metadata, response = self.conn.files_download(path)
return response.content
def upload(self, tmp_path, dest_path):
with open(tmp_path, 'rb') as f:
file_size = os.path.getsize(tmp_path)
CHUNK_SIZE = 4 * 1000 * 1000
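            # The file is sent through Dropbox's upload-session API in ~4 MB
            # chunks: the first read starts a session, intermediate chunks are
            # appended to it, and the final chunk is committed to dest_path by
            # files_upload_session_finish().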
upload_session_start_result = self.conn.files_upload_session_start(f.read(CHUNK_SIZE))
commit = dropbox.files.CommitInfo(path=dest_path)
cursor = dropbox.files.UploadSessionCursor(session_id=upload_session_start_result.session_id,
offset=f.tell())
if f.tell() >= file_size:
self.conn.files_upload_session_finish(f.read(CHUNK_SIZE), cursor, commit)
return
while f.tell() < file_size:
if (file_size - f.tell()) <= CHUNK_SIZE:
self.conn.files_upload_session_finish(f.read(CHUNK_SIZE), cursor, commit)
else:
self.conn.files_upload_session_append_v2(f.read(CHUNK_SIZE), cursor)
cursor.offset = f.tell()
def _exists_and_is_dir(self, path):
"""
Auxiliary method, used by the 'accept_trailing_slash' and 'accept_trailing_slash_in_existing_dirpaths' decorators
:param path: a Dropbox path that does NOT ends with a '/' (even if it is a directory)
"""
if path == '/':
return True
try:
md = self.conn.files_get_metadata(path)
is_dir = isinstance(md, dropbox.files.FolderMetadata)
return is_dir
except dropbox.exceptions.ApiError:
return False
class ReadableDropboxFile:
def __init__(self, path, client):
"""
Represents a file inside the Dropbox cloud which will be read
        :param str path: Dropbox path of the file to be read (always starting with /)
:param DropboxClient client: a DropboxClient object (initialized with a valid token)
"""
self.path = path
self.client = client
self.download_file_location = os.path.join(tempfile.mkdtemp(prefix=str(time.time())),
ntpath.basename(path))
self.fid = None
self.closed = False
def read(self):
return self.client.download_as_bytes(self.path)
def __enter__(self):
return self
def __exit__(self, exc_type, exc, traceback):
self.close()
def __del__(self):
self.close()
if os.path.exists(self.download_file_location):
os.remove(self.download_file_location)
def close(self):
self.closed = True
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
class AtomicWritableDropboxFile(AtomicLocalFile):
def __init__(self, path, client):
"""
Represents a file that will be created inside the Dropbox cloud
:param str path: Destination path inside Dropbox
:param DropboxClient client: a DropboxClient object (initialized with a valid token, for the desired account)
"""
super(AtomicWritableDropboxFile, self).__init__(path)
self.path = path
self.client = client
def move_to_final_destination(self):
"""
After editing the file locally, this function uploads it to the Dropbox cloud
"""
self.client.upload(self.tmp_path, self.path)
class DropboxTarget(FileSystemTarget):
"""
A Dropbox filesystem target.
"""
def __init__(self, path, token, format=None, user_agent="Luigi"):
"""
        Create a Dropbox Target for storing data in a dropbox.com account
**About the path parameter**
The path must start with '/' and should not end with '/' (even if it is a directory).
The path must not contain adjacent slashes ('/files//img.jpg' is an invalid path)
If the app has 'App folder' access, then / will refer to this app folder (which
        means that there is no need to prepend the name of the app to the path)
Otherwise, if the app has 'full access', then / will refer to the root of the Dropbox folder
**About the token parameter:**
The Dropbox target requires a valid OAuth2 token as a parameter (which means that a `Dropbox API app
<https://www.dropbox.com/developers/apps>`_ must be created. This app can have 'App folder' access
or 'Full Dropbox', as desired).
Information about generating the token can be read here:
- https://dropbox-sdk-python.readthedocs.io/en/latest/api/oauth.html#dropbox.oauth.DropboxOAuth2Flow
- https://blogs.dropbox.com/developers/2014/05/generate-an-access-token-for-your-own-account/
:param str path: Remote path in Dropbox (starting with '/').
:param str token: a valid OAuth2 Dropbox token.
:param luigi.Format format: the luigi format to use (e.g. `luigi.format.Nop`)
"""
super(DropboxTarget, self).__init__(path)
if not token:
raise ValueError("The token parameter must contain a valid Dropbox Oauth2 Token")
self.path = path
self.token = token
self.client = DropboxClient(token, user_agent)
self.format = format or luigi.format.get_default_format()
@property
def fs(self):
return self.client
@contextmanager
def temporary_path(self):
tmp_dir = tempfile.mkdtemp()
num = random.randrange(0, 1e10)
temp_path = '{}{}luigi-tmp-{:010}{}'.format(
tmp_dir, os.sep,
num, ntpath.basename(self.path))
yield temp_path
        # We won't reach here if there was a user exception.
self.fs.upload(temp_path, self.path)
def open(self, mode):
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
return self.format.pipe_reader(ReadableDropboxFile(self.path, self.client))
else:
return self.format.pipe_writer(AtomicWritableDropboxFile(self.path, self.client))
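# Usage sketch (illustrative only: the task name, path and token handling below
# are placeholders, not taken from luigi). A task could write its output straight
# to Dropbox by returning a DropboxTarget from output(), e.g.:
#
#   class UploadReport(luigi.Task):
#       token = luigi.Parameter()
#
#       def output(self):
#           return DropboxTarget('/reports/summary.txt', self.token)
#
#       def run(self):
#           with self.output().open('w') as f:
#               f.write('report contents')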
| riga/luigi | luigi/contrib/dropbox.py | Python | apache-2.0 | 11,273 |
# -*- encoding: utf-8 -*-
"""
Hymmnoserver support module: add_words
Purpose
=======
Endlessly prompts the user to provide word elements so that new Hymmnos may be
quickly added to the database.
Note that this script performs NO error-handling.
Legal
=====
All code, unless otherwise indicated, is original, and subject to the terms of
the GNU GPLv3 or, at your option, any later version of the GPL.
All content is derived from public domain, promotional, or otherwise-compatible
sources and published uniformly under the
Creative Commons Attribution-Share Alike 3.0 license.
See license.README for details.
(C) Neil Tallim, 2009
"""
import _db
import _romaji
db_con = _db.getConnection()
cursor = db_con.cursor()
try:
while True:
print "Word to be added:"
word = raw_input()
print "English translation:"
meaning = raw_input()
print "Japanese form:"
japanese = raw_input()
print "Kana form:"
kana = raw_input()
print "Syllables in lower-case, separated by spaces (; for E.V.):"
syllables = None
while True:
syllables = raw_input()
if syllables.replace(' ', '').lower() == word.lower():
break
elif syllables.endswith(";"):
syllables = syllables[:-1]
break
else:
print "Entry does not match word structure"
syllables = syllables.replace(' ', '/')
print "1) 中央正純律(共通語) | Central"
print "2) クルトシエール律(Ⅰ紀前古代語) | Cult Ciel"
print "3) クラスタ律(クラスタ地方語) | Cluster"
print "4) アルファ律(オリジンスペル) | Alpha"
print "5) 古メタファルス律(Ⅰ紀神聖語) | Metafalss"
print "6) 新約パスタリエ(パスタリア成語) | Pastalie"
print "7) アルファ律(オリジンスペル:EOLIA属) | Alpha (EOLIA)"
print "0) Unknown"
print "Dialect (add 50 to mark as unofficial):"
dialect = int(raw_input())
print "1) Emotion Verb"
print "2) verb"
print "3) adverb"
print "4) noun"
print "5) conjunction"
print "6) preposition"
print "7) Emotion Sound (II)"
print "8) adjective"
print "9) noun, verb"
print "10) adjective, noun"
print "11) adjective, verb"
print "12) particle"
print "13) Emotion Sound (III)"
print "14) Emotion Sound (I)"
print "15) pronoun"
print "16) interjection"
print "17) preposition, particle"
print "18) language construct"
print "19) adverb, noun"
print "20) adjective, adverb"
print "21) conjunction, preposition"
print "22) particle, verb"
print "23) adverb, particle"
print "24) noun, prepositon"
print "25) adverb, preposition"
print "Syntax class:"
syntax = int(raw_input())
print "Description:"
description = raw_input()
if not description.strip():
description = None
if kana == '?':
romaji = "?"
else:
romaji = _romaji.getRomaji(kana.decode('utf-8'))
cursor.execute(' '.join((
"INSERT INTO hymmnos",
"(word, meaning, japanese, dialect, kana, romaji, description, class, syllables)",
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
)), (word, meaning, japanese, dialect, kana, romaji, description, syntax, syllables,))
db_con.commit()
print
finally:
try:
cursor.close()
except:
pass
try:
db_con.close()
except:
pass
| Baughn/hymmnoserver | database/add_words.py | Python | gpl-3.0 | 3,279 |
import wx
import odict
from cuttlebug import util
import random, inspect
# APPLICATION SPECIFIC
# pub/sub topics
PROJECT_OPEN = 1
PROJECT_CLOSE = 2
TARGET_ATTACHED = 3
TARGET_DETACHED = 4
TARGET_RUNNING = 5
TARGET_HALTED = 6
BUILD_STARTED = 7
BUILD_FINISHED = 8
class MenuItemProxy(object):
def __init__(self, parent, label='', func=None, icon=None, kind=wx.ITEM_NORMAL, separator=False):
self.parent = parent
self._label = label
self._func = func
self.is_separator = bool(separator)
self.kind = kind
self.visible = True
self.enabled = True
self.icon = icon
def __str__(self):
return "<MenuItemProxy '%s'>" % self.label
def __repr__(self):
return str(self)
def __get_icon(self):
return self._icon
def __set_icon(self, icon):
if self.is_separator:
return
if isinstance(icon, str):
self._icon = util.get_icon(icon)
else:
self._icon = icon
self.update()
icon = property(__get_icon, __set_icon)
def __get_label(self):
return self._label
def __set_label(self, lbl):
self._label = str(lbl)
self.update()
label = property(__get_label, __set_label)
def show(self):
self.visible = True
self.update()
def hide(self):
self.visible = False
self.update()
def enable(self):
self.enabled = True
self.update()
def disable(self):
self.enabled = False
self.update()
def build(self, menu, window):
if self.is_separator:
return wx.MenuItem(menu, id=wx.ID_SEPARATOR)
else:
menuitem = wx.MenuItem(menu, id=-1, text=self._label, kind=self.kind)
if self._icon:
menuitem.SetBitmap(self._icon)
menuitem.SetDisabledBitmap(util.get_icon('blank.png'))
if self._func and window:
window.Bind(wx.EVT_MENU, self._func, id=menuitem.GetId())
return menuitem
def update(self):
self.parent.update()
class MenuProxy(object):
def __init__(self, manager, parent, label):
self.manager = manager
self.parent = parent
self.label = label
self.visible = True
self.enabled = True
self.is_separator = False
self._items = []
@property
def is_submenu(self):
if self.parent and isinstance(self.parent, MenuProxy):
return True
return False
@property
def is_popup_menu(self):
return not self.parent
def __repr__(self):
return "<MenuProxy '%s'>" % self.label
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self._items)
def build(self, window=None):
#items = []
retval = wx.Menu()
# Trim out everything that's invisible
visible_items = [item for item in self._items if item.visible]
trimmed_items = []
if visible_items:
for i in range(len(visible_items)-1):
item = visible_items[i]
next_item = visible_items[i+1]
if not (item.is_separator and next_item.is_separator):
trimmed_items.append(item)
trimmed_items.append(visible_items[-1])
if trimmed_items:
while trimmed_items[-1].is_separator:
trimmed_items.pop(-1)
for item in trimmed_items:
# menuitem = item.build(retval, window)
if isinstance(item, MenuItemProxy):
menuitem = item.build(retval, window)
retval.AppendItem(menuitem)
if item.enabled:
menuitem.Enable()
else:
menuitem.Enable(False)
elif isinstance(item, MenuProxy):
menuitem = item.build(window)
retval.AppendMenu(wx.ID_ANY, item.label, menuitem)
return retval
def item(self, label, func=None, icon=None, kind=wx.ITEM_NORMAL, disable=None, enable=None, show=None, hide=None):
item = MenuItemProxy(self, label=label, func=func, icon=icon, kind=kind)
self.manager.subscribe(item, enable=enable, disable=disable, show=show, hide=hide)
self._items.append(item)
self.update()
return item
def submenu(self, label, icon=None, disable=None, enable=None, show=None, hide=None):
# sm = SubMenuProxy(self, self.manager, self.parent, label, icon)
sm = self.manager.menu(label, enable, disable, show, hide)
sm.parent = self
sm.icon = icon
self._items.append(sm)
self.update(sm)
return sm
def show(self):
self.visible = True
self.update()
def hide(self):
self.visible = False
self.update()
def enable(self):
self.enabled = True
self.update()
def disable(self):
self.enabled = False
self.update()
def separator(self):
        item = MenuItemProxy(self, separator=True)
self._items.append(item)
return item
def update(self, child=None):
if self.parent:
self.parent.update(self)
class MenuBar(wx.MenuBar):
def __init__(self, manager, window=None):
self.window = window
self.manager = manager
wx.MenuBar.__init__(self)
self._menus = {}
def menu(self, label, enable=None, disable=None, show=None, hide=None):
retval = MenuProxy(self.manager, self, label)
self.Append(retval.build(self.window), label)
self._menus[retval] = len(self._menus)
# TODO Implement subscription for showing/hiding/enabling/disabling at the menu level
return retval
def update(self, menu):
self.Replace(self._menus[menu], menu.build(self.window), menu.label).Destroy()
class MenuManager(object):
def __init__(self):
self._subscriptions = {}
def menu_bar(self, window):
retval = MenuBar(self, window)
window.SetMenuBar(retval)
return retval
def pretty(self):
retval = ''
for topic, d in self._subscriptions.iteritems():
retval += "%s\n" % topic
for k, v in d.iteritems():
name = ""
for f in v:
if hasattr(f, '__func__'):
name += f.__func__.__name__ + ","
                    elif hasattr(f, '__name__'):
name += f.__name__ + ","
else:
name += str(f)
                name = name.strip(",")
retval += " %10s : %s\n" % (name, k)
return retval
def menu(self, label='', enable=None, disable=None, show=None, hide=None):
retval = MenuProxy(self, None, label=label)
self.subscribe(retval, enable=enable, disable=disable, show=show, hide=hide)
return retval
def subscribe(self, item, enable=None, disable=None, show=None, hide=None):
for topics, func in [(enable, item.enable), (disable, item.disable), (show, item.show), (hide, item.hide)]:
if topics != None:
if not (isinstance(topics, list) or isinstance(topics, tuple)):
topics = [topics]
for topic in topics:
if topic not in self._subscriptions:
self._subscriptions[topic] = {}
d = self._subscriptions[topic]
if item in d:
d[item].append(func)
else:
d[item] = [func]
def update(self, token):
if token not in self._subscriptions:
return
subscription = self._subscriptions[token]
# Call the appropriate functions for all the subscribed items
for item in subscription:
for func in subscription[item]:
func()
def publish(self, token):
self.update(token)
manager = MenuManager()
if __name__ == "__main__":
app = wx.PySimpleApp()
frame = wx.Frame(None)
menubar = manager.menu_bar(frame)
frame.SetMenuBar(menubar)
pmenu = manager.menu()
pmenu.item("Popup item 1")
pmenu.item("Popup item 2")
pmenu.separator()
pmenu.item("Popup item 3")
def on_context_menu(evt):
panel.PopupMenu(pmenu.build(panel))
# Stuff for popup menus
panel = wx.Panel(frame)
panel.SetBackgroundColour(wx.BLUE)
panel.Bind(wx.EVT_CONTEXT_MENU, on_context_menu)
file = menubar.menu('File')
edit = menubar.menu('Edit')
view = menubar.menu('View')
def save_func(evt):
manager.publish("SAVE")
def close_func(evt):
manager.publish("CLOSE")
new = file.item("New", hide="SAVE", show="CLOSE")
open = file.item("Open", disable="SAVE")
close = file.item("Close", func=close_func)
save = file.item("Save", icon="disk.png", func=save_func)
cut = edit.item("Cut")
copy = edit.item("Copy")
sub = edit.submenu("Paste")
sub.item("Subitem 1", disable="CLOSE")
sub.item("Subitem 2")
sub.update()
print manager.pretty()
frame.Show()
app.MainLoop()
| ryansturmer/cuttlebug | cuttlebug/ui/menu.py | Python | mit | 9,395 |
import director_unroll
class MyFoo(director_unroll.Foo):
def ping(self):
return "MyFoo::ping()"
a = MyFoo()
b = director_unroll.Bar()
b.set(a)
c = b.get()
if not (a.this == c.this):
print a, c
raise RuntimeError
| jrversteegh/softsailor | deps/swig-2.0.4/Examples/test-suite/python/director_unroll_runme.py | Python | gpl-3.0 | 225 |
#!/usr/bin/env python
#
# Copyright (C) 2012 Per Myren
#
# This file is part of Bryton-GPS-Linux
#
# Bryton-GPS-Linux is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bryton-GPS-Linux is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bryton-GPS-Linux. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import glob
import contextlib
import argparse
import warnings
import datetime
import os
import getpass
import time
from functools import partial
from itertools import chain
import rider40
import gpx
import tcx
import json_export
import strava
from common import print_msg
def find_device():
devices = glob.glob('/dev/disk/by-id/usb-BRYTON_MASS_STORAGE_*')
if len(devices) > 1:
raise RuntimeError('Multiple Devices Found')
elif not devices:
raise RuntimeError('Device Not Found')
device = devices[0]
return device
def get_device(dev):
data = dev.read_addr(6, 1, 0x10).tostring()
if not data.startswith('Hera Data'):
return None
dev_id = data[16:16 + 4]
if dev_id not in ['1504', '1510']:
warnings.warn('Unknown device model.', RuntimeWarning)
return rider40, rider40.Rider40(dev)
def open_device(dev_path):
import device_access
dev_access = device_access.DeviceAccess(dev_path)
dev_access.open()
return contextlib.closing(dev_access)
def get_tracks(history, track_ids):
tracks = []
for id in track_ids:
try:
tracks.append(history[int(id)])
except (IndexError, TypeError):
raise RuntimeError('Invalid track_id {0}'.format(id))
return tracks
def print_history(history, print_storage=False):
if not history:
print_msg("No tracks")
return
i = 0
for t in history:
if print_storage:
u = t.storage_usage
print_msg(format(i, '2d'), ':', t.name, ' - Trackpoints ', \
format_bytes(u['trackpoints']), ' - ', \
'Logpoints', format_bytes(u['logpoints']))
else:
print_msg(format(i, '2d'), ':', t.name)
i += 1
def print_summaries(tracks, print_storage=False):
for t in tracks:
print_summary(t.summary, t, print_storage)
def print_summary(s, track=None, print_storage=False):
ts = datetime.datetime.fromtimestamp
print_msg('===================================================')
print_msg(ts(s.start))
print_msg('{0} - {1} ({2})'.format(ts(s.start), ts(s.end),
datetime.timedelta(seconds=s.ride_time)))
print_msg(' Dist: {0:.2f}Km'.format(s.distance / 1000.0))
print_msg(' Cal: {0}'.format(s.calories))
print_msg(' Alt: {0}m / {1}m (gain/loss)'.format(s.altitude_gain,
s.altitude_loss))
print_msg(' Speed: {0}Kph / {1}Kph (avg/max)'.format(s.speed.avg,
s.speed.max))
if s.heartrate is not None and s.heartrate.max > 0:
print_msg(' Hr: {0}bpm / {1}bpm (avg/max)'.format(s.heartrate.avg,
s.heartrate.max))
if s.cadence is not None and s.cadence.max > 0:
print_msg(' Cad: {0}rpm / {1}rpm (avg/max)'.format(s.cadence.avg,
s.cadence.max))
if s.watts is not None and s.watts.max > 0:
print_msg(' Watts: {0}/{1} (avg/max)'.format(s.watts.avg,
s.watts.max))
if track is not None and track.lap_count > 0:
print_msg(' Laps: {0}'.format(len(track.lap_summaries)))
if print_storage:
u = track.storage_usage
print_msg('Storage: Trackpoints', \
format_bytes(u['trackpoints']), ' - ', \
'Logpoints', format_bytes(u['logpoints']))
def print_storage_usage(device):
print_msg('{:>12} | {:>10} | {:>16} | {:>10}'.format('Type', 'Total',
'Used', 'Left'))
print_msg('{}|{}|{}|{}'.format('-'*13, '-'*12, '-'*18, '-'*17))
u = device.read_storage_usage()
_print_storage_row(u, 'trackpoints', 'Trackpoints')
_print_storage_row(u, 'logpoints', 'Logpoints')
_print_storage_row(u, 'tracklist', 'Tracks')
_print_storage_row(u, 'laps', 'Laps')
def _print_storage_row(u, key, title):
print_msg('{:>12} | {:>10} | {:>10} ({:>2}%) | {:>10} ({:>2}%)'.format(
title,
format_bytes(u[key]['total']),
format_bytes(u[key]['total'] - u[key]['left']),
100 * (u[key]['total'] - u[key]['left']) / u[key]['total'],
format_bytes(u[key]['left']),
100 - 100 * (u[key]['total'] - u[key]['left']) / u[key]['total']))
def export_tracks(tracks, export_func, file_ext, args):
if args.out_name is not None and len(tracks) > 1:
raise RuntimeError('--out-name can only be used with a single track.')
for t in tracks:
out = export_func(t, pretty=args.no_whitespace)
if args.save_to is None and args.out_name is None:
print out
continue
if args.out_name:
path = args.out_name
else:
fname = t.name.replace('/', '').replace(':', '') \
.replace(' ', '-') + '.' + file_ext
path = os.path.join(args.save_to, fname)
with open(path, 'w') as f:
f.write(out)
def export_fake_garmin(tracks, args):
export_func = partial(tcx.track_to_tcx, fake_garmin_device=True,
no_laps=args.no_laps)
export_tracks(tracks, export_func, 'tcx', args)
def upload_strava(tracks, args, fake_garmin_device=False):
if args.strava_email is None:
print_msg('Missing email for strava.com')
return
password = args.strava_password
if password is None:
password = getpass.getpass('Strava.com password:')
uploader = strava.StravaUploader(fake_garmin_device=fake_garmin_device,
no_laps=args.no_laps)
try:
print_msg('Authenticating to strava.com')
uploader.authenticate(args.strava_email, password)
except strava.StravaError, e:
print_msg('StravaError:', e.reason)
return
for t in tracks:
try:
print_msg('Uploading track: {0}'.format(t.name))
upload = uploader.upload(t)
while not upload.finished:
time.sleep(3)
p = upload.check_progress()
print_msg('Uploaded OK')
except strava.StravaError, e:
print_msg('StravaError:', e.reason)
def options():
p = argparse.ArgumentParser(description='Bryton GPS Linux')
p.add_argument('--device', '-D',
help='Path to the device. If not specified'
' it will try to be autodetected.')
p.add_argument('--list-history', '-L', action='store_true',
help='List track history')
p.add_argument('--tracks', '-T', nargs='+',
help='Tracks ids to do actions upon. '
'Ids can be found using --list-history.')
p.add_argument('--summary', action='store_true',
help='Print summary of the selected tracks.')
p.add_argument('--gpx', action='store_true',
help='Generate plain GPX files of the selected tracks.')
p.add_argument('--gpxx', action='store_true',
help='Generate GPX files using Garmin TrackPointExtension '
'of the selected tracks.')
p.add_argument('--tcx', action='store_true',
help='Generate TCX files of the selected tracks.')
p.add_argument('--json', action='store_true',
help='Generate JSON files of the selected tracks.')
p.add_argument('--save-to', '-S',
help='Directory to store expored files.')
p.add_argument('--out-name', '-O',
help='Filename to export to. Only one track.')
p.add_argument('--no-whitespace', action='store_false',
help='No unnecessary whitespace in exported files.')
p.add_argument('--strava', action='store_true',
help='Upload tracks to strava.com')
p.add_argument('--strava-email', nargs='?',
help='strava.com email')
p.add_argument('--strava-password', nargs='?',
help='strava.com password')
p.add_argument('--fake-garmin', action='store_true',
help='This will add a created with Garmin Edge 800 element '
'to tcx files which will make strava.com trust the '
'elevation data. Useful if your device has an '
'altimeter. Used when exporting to tcx and when '
'uploading to strava.com')
p.add_argument('--fix-elevation', nargs='?', type=int, metavar='N',
help='Set the elevation of the first trackpoint to N. '
'The other trackpoints will be adjusted relative to '
'the first one. '
'This is useful if you forget to calibrate the '
'altimeter before the ride and you know the elevation '
'where you started. Only useful if you device has an '
'altimeter.')
p.add_argument('--strip-elevation', action='store_true',
help='Set the elevation to 0 on all trackpoints.')
p.add_argument('--use-elevation-db', action='store_true',
help='Use the SRTM Elevation Database v4.1 to set the '
'elevation. Requires the GDAL library.')
p.add_argument('--storage', action='store_true',
help='This will show the storage usage on the deviced. '
'When used together with --list-history or --summary '
'the storage space used by each track will be shown.')
p.add_argument('--no-laps', action='store_true',
help='When this is used the TCX files generated will not '
'use the laps that are recorded in a track. '
'This will only have effect when generating TCX files '
'and uploading to strava.com')
p.add_argument('--adj-time', nargs='?', type=int, metavar='N',
help='Adjust the timestamps of the tracks in +- hours.')
return p
def main():
opts = options()
args = opts.parse_args()
dev_path = args.device
if dev_path is None:
dev_path = find_device()
with open_device(dev_path) as dev_access:
module, device = get_device(dev_access)
if args.list_history or args.tracks:
history = list(reversed(module.read_history(device)))
if args.list_history:
print_history(history, args.storage)
elif args.tracks:
tracks = get_tracks(history, args.tracks)
if args.adj_time:
adjust_time(tracks, args.adj_time)
if args.summary:
print_summaries(tracks, args.storage)
if args.fix_elevation:
fix_elevation(tracks, args.fix_elevation)
if args.strip_elevation:
strip_elevation(tracks)
if args.use_elevation_db:
set_elevation_from_db(tracks)
if args.gpx:
export_tracks(tracks, gpx.track_to_plain_gpx, 'gpx', args)
if args.gpxx:
export_tracks(tracks, gpx.track_to_garmin_gpxx, 'gpx', args)
if args.json:
export_tracks(tracks, json_export.track_to_json, 'json', args)
if args.tcx:
if args.fake_garmin:
export_fake_garmin(tracks, args)
else:
export_tracks(tracks,
partial(tcx.track_to_tcx, no_laps=args.no_laps),
'tcx', args)
if args.strava:
upload_strava(tracks, args,
fake_garmin_device=args.fake_garmin)
elif args.storage:
print_storage_usage(device)
else:
opts.print_help()
return 0
def strip_elevation(tracks):
for t in tracks:
for seg in t.trackpoints:
for tp in seg:
tp.elevation = 0
def set_elevation_from_db(tracks):
import srtm
db = srtm.SrtmLayer()
for t in tracks:
for seg in t.trackpoints:
for tp in seg:
tp.elevation = round(
db.get_elevation(tp.latitude, tp.longitude), 1)
def fix_elevation(tracks, new_elevation):
for t in tracks:
fix_track_elevation(t, new_elevation)
def fix_track_elevation(track, new_elevation):
diff = None
for seg in track.trackpoints:
for tp in seg:
if diff is None:
diff = new_elevation - tp.elevation
tp.elevation += diff
return track
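# Worked example (illustrative numbers): if the first trackpoint was recorded at
# 120 m but the ride actually started at 100 m, fix_track_elevation(track, 100)
# computes diff = 100 - 120 = -20 from the first point and shifts every
# trackpoint down by 20 m, preserving the relative elevation profile.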
def format_bytes(num):
for x in ['B','KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def adjust_time(tracks, adjustment):
"""Fix track and trackpoint timestamps. adjustment=+-n hours"""
for t in tracks:
adjust_track_time(t, adjustment)
def adjust_track_time(track, adjustment):
adjustment = adjustment * 60 * 60
for sum in track.lap_summaries:
sum.start += adjustment
sum.end += adjustment
track.timestamp += adjustment
for seg in chain(track.trackpoints, track.logpoints):
seg.timestamp += adjustment
for pt in seg:
pt.timestamp += adjustment
return track
if __name__ == '__main__':
try:
sys.exit(main())
except RuntimeError, e:
print_msg('Error: ', e.message)
sys.exit(1)
| Pitmairen/bryton-gps-linux | code/brytongps.py | Python | gpl-3.0 | 14,536 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import quote
import sys
import unittest
verbose = False
# Wrapped versions of the functions that we're testing, so that during
# debugging we can more easily see what their inputs were.
def VerboseQuote(in_string, specials, *args, **kwargs):
if verbose:
sys.stdout.write('Invoking quote(%s, %s, %s)\n' %
(repr(in_string), repr(specials),
', '.join([repr(a) for a in args] +
[repr(k) + ':' + repr(v)
for k, v in kwargs])))
return quote.quote(in_string, specials, *args, **kwargs)
def VerboseUnquote(in_string, specials, *args, **kwargs):
if verbose:
sys.stdout.write('Invoking unquote(%s, %s, %s)\n' %
(repr(in_string), repr(specials),
', '.join([repr(a) for a in args] +
[repr(k) + ':' + repr(v)
                                 for k, v in kwargs.items()])))
return quote.unquote(in_string, specials, *args, **kwargs)
class TestQuote(unittest.TestCase):
# test utilities
def generic_test(self, fn, in_args, expected_out_obj):
    actual = fn(*in_args)
self.assertEqual(actual, expected_out_obj)
def check_invertible(self, in_string, specials, escape='\\'):
q = VerboseQuote(in_string, specials, escape)
qq = VerboseUnquote(q, specials, escape)
self.assertEqual(''.join(qq), in_string)
def run_test_tuples(self, test_tuples):
for func, in_args, expected in test_tuples:
self.generic_test(func, in_args, expected)
def testQuote(self):
test_tuples = [[VerboseQuote,
['foo, bar, baz, and quux too!', 'abc'],
'foo, \\b\\ar, \\b\\az, \\and quux too!'],
[VerboseQuote,
['when \\ appears in the input', 'a'],
'when \\\\ \\appe\\ars in the input']]
self.run_test_tuples(test_tuples)
def testUnquote(self):
test_tuples = [[VerboseUnquote,
['key\\:still_key:value\\:more_value', ':'],
['key:still_key', ':', 'value:more_value']],
[VerboseUnquote,
['about that sep\\ar\\ator in the beginning', 'ab'],
['', 'ab', 'out th', 'a', 't separator in the ',
'b', 'eginning']],
[VerboseUnquote,
['the rain in spain fall\\s ma\\i\\nly on the plains',
'ins'],
['the ra', 'in', ' ', 'in', ' ', 's', 'pa', 'in',
' falls mainly o', 'n', ' the pla', 'ins']],
]
self.run_test_tuples(test_tuples)
def testInvertible(self):
self.check_invertible('abcdefg', 'bc')
self.check_invertible('a\\bcdefg', 'bc')
self.check_invertible('ab\\cdefg', 'bc')
self.check_invertible('\\ab\\cdefg', 'abc')
self.check_invertible('abcde\\fg', 'efg')
self.check_invertible('a\\b', '')
# Invoke this file directly for simple manual testing. For running
# the unittests, use the -t flag. Any flags to be passed to the
# unittest module should be passed after the optparse processing,
# e.g., "quote_test.py -t -- -v" to pass the -v flag to the unittest
# module.
def main(argv):
global verbose
parser = optparse.OptionParser(
usage='Usage: %prog [options] word...')
parser.add_option('-s', '--special-chars', dest='special_chars', default=':',
help='Special characters to quote (default is ":")')
parser.add_option('-q', '--quote', dest='quote', default='\\',
help='Quote or escape character (default is "\")')
parser.add_option('-t', '--run-tests', dest='tests', action='store_true',
help='Run built-in tests\n')
parser.add_option('-u', '--unquote-input', dest='unquote_input',
action='store_true', help='Unquote command line argument')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='Verbose test output')
options, args = parser.parse_args(argv)
if options.verbose:
verbose = True
num_errors = 0
if options.tests:
sys.argv = [sys.argv[0]] + args
unittest.main()
else:
for word in args:
# NB: there are inputs x for which quote(unquote(x) != x, but
# there should be no input x for which unquote(quote(x)) != x.
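      # For example (illustrative, with specials=':'): unquote('a:b') yields
      # ['a', ':', 'b'], which joins back to 'a:b', but quoting that result
      # escapes the separator and gives 'a\:b', which differs from the input.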
if options.unquote_input:
qq = quote.unquote(word, options.special_chars, options.quote)
sys.stdout.write('unquote(%s) = %s\n'
% (word, ''.join(qq)))
# There is no expected output for unquote -- this is just for
# manual testing, so it is okay that we do not (and cannot)
# update num_errors here.
else:
q = quote.quote(word, options.special_chars, options.quote)
qq = quote.unquote(q, options.special_chars, options.quote)
sys.stdout.write('quote(%s) = %s, unquote(%s) = %s\n'
% (word, q, q, ''.join(qq)))
if word != ''.join(qq):
num_errors += 1
if num_errors > 0:
sys.stderr.write('[ FAILED ] %d test failures\n' % num_errors)
return num_errors
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| tierney/mustached-bear | tools/quote_test.py | Python | bsd-3-clause | 5,459 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnStringInfo(Model):
"""Database connection string information.
:param name: Name of connection string.
:type name: str
:param connection_string: Connection string value.
:type connection_string: str
:param type: Type of database. Possible values include: 'MySql',
'SQLServer', 'SQLAzure', 'Custom', 'NotificationHub', 'ServiceBus',
'EventHub', 'ApiHub', 'DocDb', 'RedisCache', 'PostgreSQL'
:type type: str or ~azure.mgmt.web.models.ConnectionStringType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'type': {'key': 'type', 'type': 'ConnectionStringType'},
}
def __init__(self, name=None, connection_string=None, type=None):
self.name = name
self.connection_string = connection_string
self.type = type
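# Usage sketch (illustrative values, not taken from the SDK): the model is a
# plain data container, so an entry could be built as
#
#   info = ConnStringInfo(name='MyDb',
#                         connection_string='Server=tcp:myserver;Database=mydb;',
#                         type='SQLAzure')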
| AutorestCI/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/conn_string_info.py | Python | mit | 1,412 |
from ..cache import cache_class, cache_provider, set_storage, do_updates
from ..backends.Local import LocalBackend
from ..backends import Cache
from nose.tools import with_setup
_provider_count = 0
class MockBackend(Cache):
def __init__(self, Backend):
super(MockBackend, self).__init__(Backend)
def get_storage(self):
return self.backend._storage
storage = MockBackend(LocalBackend())
set_storage(storage)
@cache_class(id_attribute='user_id')
class C(object):
def __init__(self, id_):
self.user_id = id_
@cache_provider
def _provider(self):
global _provider_count
_provider_count += 1
return {
'name': 'User name ' + str(self.user_id),
'id': self.user_id}
def setup():
global storage
storage = MockBackend(LocalBackend())
set_storage(storage)
def teardown():
global _provider_count
_provider_count = 0
@with_setup(setup, teardown)
def test_class():
"""Test basic usage, setting attributes, instantiating a class, etc."""
c1 = C(1)
assert c1.name == 'User name 1'
c1.name = 'Example user'
assert c1.name == 'Example user'
c1.new_field = 'new value'
assert c1.new_field == 'new value'
@with_setup(setup, teardown)
def test_classes():
"""Make sure multiple instances don't overwrite each others values"""
c1 = C(1)
c2 = C(2)
assert c1.name == 'User name 1'
assert c2.name == 'User name 2'
c2.name = 'Example user'
assert c1.name == 'User name 1'
assert c2.name == 'Example user'
c2.new_field = 'new value'
    assert not hasattr(c1, 'new_field')
assert c2.new_field == 'new value'
@with_setup(setup, teardown)
def test_provider():
"""Provider should not be called when class is instantiated"""
c1 = C(1)
assert _provider_count == 0, 'Instantiating a class should not retrieve data'
c2 = C(2)
assert _provider_count == 0, 'Instantiating a second class should not retrieve data'
c1.name # Access data, execute pipeline
assert _provider_count == 2, 'Two objects from pipeline should be loaded'
@with_setup(setup, teardown)
def test_write():
"""Make sure that cache gets updated after cachable attributes are set"""
import json
c1 = C(1)
c1.name = 'Testname'
do_updates()
s = storage.get_storage()['class:C1'][1]
s = json.loads(s)
assert s['name'] == 'Testname'
@with_setup(setup, teardown)
def test_has():
"""Check that hasattr works with auto-load attributes"""
c1 = C(1)
assert hasattr(c1, 'name'), 'This should be autoloaded when checking existence'
assert _provider_count == 1, 'Provider should have been called'
@with_setup(setup, teardown)
def test_multi_write():
"""Make sure that writes into multiple classes don't interfere with
each other"""
import json
c1 = C(1)
c2 = C(2)
c1.name = 'Testname one'
c2.name = 'Testname two'
do_updates()
s = storage.get_storage()['class:C1'][1]
s = json.loads(s)
assert s['name'] == 'Testname one', 'Wrong name stored for first class'
s = storage.get_storage()['class:C2'][1]
s = json.loads(s)
assert s['name'] == 'Testname two', 'Wrong name stored for second class'
@with_setup(setup, teardown)
def test_cache_read():
"""Make sure that provider isn't called if there's a cache hit, and the
changed value in cache is respected"""
c1 = C(1)
c1.name = 'Some name'
do_updates()
c_test = C(1)
assert c_test.name == 'Some name', 'Name is wrong in second instance'
assert _provider_count == 1, 'Provider should only have been called once (second instance should be a cache hit)'
| trb/Multicache | tests/test_class.py | Python | bsd-2-clause | 3,710 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
"""
g++/llvm detection.
"""
import os, sys
from waflib import Configure, Options, Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_avr_gxx(conf):
"""
Find the program g++, and if present, try to detect its version number
"""
cxx = conf.find_program(['avr-g++'], var='CXX')
# Shortcut...
cxx = "/Applications/Arduino.app//Contents/Resources/Java/hardware/tools/avr/bin/avr-g++"
cxx = conf.cmd_to_list(cxx)
conf.get_cc_version(cxx, gcc=True)
conf.env.CXX_NAME = 'gcc'
conf.env.CXX = cxx
@conf
def avr_gxx_common_flags(conf):
"""
Common flags for g++ on nearly all platforms
"""
v = conf.env
v['CXX_SRC_F'] = []
v['CXX_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
v['CXXLNK_SRC_F'] = []
v['CXXLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
v['STLIB_MARKER'] = '-Wl,-Bstatic'
# program
v['cxxprogram_PATTERN'] = '%s'
# shared library
v['CXXFLAGS_cxxshlib'] = ['-fPIC']
v['LINKFLAGS_cxxshlib'] = ['-shared']
v['cxxshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cxxstlib'] = ['-Wl,-Bstatic']
v['cxxstlib_PATTERN'] = 'lib%s.a'
# osx stuff
v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
v['CXXFLAGS_MACBUNDLE'] = ['-fPIC']
v['macbundle_PATTERN'] = '%s.bundle'
@conf
def avr_gxx_modifier_win32(conf):
"""Configuration flags for executing gcc on Windows"""
v = conf.env
v['cxxprogram_PATTERN'] = '%s.exe'
v['cxxshlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = 'lib%s.dll.a'
v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
v['CXXFLAGS_cxxshlib'] = []
v.append_value('CXXFLAGS_cxxshlib', ['-DDLL_EXPORT']) # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
# Auto-import is enabled by default even without this option,
# but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
# that the linker emits otherwise.
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def avr_gxx_modifier_cygwin(conf):
"""Configuration flags for executing g++ on Cygwin"""
avr_gxx_modifier_win32(conf)
v = conf.env
v['cxxshlib_PATTERN'] = 'cyg%s.dll'
v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base'])
v['CXXFLAGS_cxxshlib'] = []
@conf
def avr_gxx_modifier_darwin(conf):
"""Configuration flags for executing g++ on MacOS"""
v = conf.env
v['CXXFLAGS_cxxshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
v['LINKFLAGS_cxxshlib'] = ['-dynamiclib']
v['cxxshlib_PATTERN'] = 'lib%s.dylib'
v['FRAMEWORKPATH_ST'] = '-F%s'
v['FRAMEWORK_ST'] = ['-framework']
v['ARCH_ST'] = ['-arch']
v['LINKFLAGS_cxxstlib'] = []
v['SHLIB_MARKER'] = []
v['STLIB_MARKER'] = []
v['SONAME_ST'] = []
@conf
def avr_gxx_modifier_aix(conf):
"""Configuration flags for executing g++ on AIX"""
v = conf.env
v['LINKFLAGS_cxxprogram']= ['-Wl,-brtl']
v['LINKFLAGS_cxxshlib'] = ['-shared', '-Wl,-brtl,-bexpfull']
v['SHLIB_MARKER'] = []
@conf
def avr_gxx_modifier_hpux(conf):
v = conf.env
v['SHLIB_MARKER'] = []
v['CFLAGS_cxxshlib'] = ['-fPIC','-DPIC']
v['cxxshlib_PATTERN'] = 'lib%s.sl'
@conf
def avr_gxx_modifier_platform(conf):
"""Execute platform-specific functions based on *avr_gxx_modifier_+NAME*"""
# * set configurations specific for a platform.
# * the destination platform is detected automatically by looking at the macros the compiler predefines,
# and if it's not recognised, it fallbacks to sys.platform.
avr_gxx_modifier_func = getattr(conf, 'avr_gxx_modifier_' + conf.env.DEST_OS, None)
if avr_gxx_modifier_func:
avr_gxx_modifier_func()
def configure(conf):
"""
Configuration for g++
"""
conf.find_avr_gxx()
conf.find_ar()
conf.avr_gxx_common_flags()
conf.avr_gxx_modifier_platform()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
| sofian/qualia | examples/makefiletests/avr_gxx.py | Python | gpl-3.0 | 4,490 |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
class StreamcloudEu(XFSHoster):
__name__ = "StreamcloudEu"
__type__ = "hoster"
__version__ = "0.13"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?streamcloud\.eu/\w{12}'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Streamcloud.eu hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("seoester", "seoester@googlemail.com")]
PLUGIN_DOMAIN = "streamcloud.eu"
WAIT_PATTERN = r'var count = (\d+)'
def setup(self):
self.multiDL = True
self.chunk_limit = 1
self.resume_download = self.premium
getInfo = create_getInfo(StreamcloudEu)
| fzimmermann89/pyload | module/plugins/hoster/StreamcloudEu.py | Python | gpl-3.0 | 788 |
# -*- coding: utf-8 -*-
__author__ = 'Joel Bastos'
__email__ = 'kintoandar@gmail.com'
__version__ = '0.2.5'
| kintoandar/caixabreak | caixabreak/__init__.py | Python | mit | 109 |
from collections import Iterator, namedtuple
from operator import add, setitem
import pickle
from random import random
from toolz import identity, partial
import pytest
from dask.compatibility import PY2, PY3
from dask.delayed import delayed, to_task_dasks, compute, Delayed
def test_to_task_dasks():
a = delayed(1, name='a')
b = delayed(2, name='b')
task, dasks = to_task_dasks([a, b, 3])
assert task == (list, ['a', 'b', 3])
assert len(dasks) == 2
assert a.dask in dasks
assert b.dask in dasks
task, dasks = to_task_dasks({a: 1, b: 2})
assert (task == (dict, [['b', 2], ['a', 1]]) or
task == (dict, [['a', 1], ['b', 2]]))
assert len(dasks) == 2
assert a.dask in dasks
assert b.dask in dasks
f = namedtuple('f', ['x', 'y'])
x = f(1, 2)
task, dasks = to_task_dasks(x)
assert task == x
assert dasks == []
def test_delayed():
add2 = delayed(add)
assert add2(1, 2).compute() == 3
assert (add2(1, 2) + 3).compute() == 6
assert add2(add2(1, 2), 3).compute() == 6
a = delayed(1)
assert a.compute() == 1
assert 1 in a.dask.values()
b = add2(add2(a, 2), 3)
assert a.key in b.dask
def test_operators():
a = delayed([1, 2, 3])
assert a[0].compute() == 1
assert (a + a).compute() == [1, 2, 3, 1, 2, 3]
a = delayed(10)
assert (a + 1).compute() == 11
assert (1 + a).compute() == 11
assert (a >> 1).compute() == 5
assert (a > 2).compute()
assert (a ** 2).compute() == 100
def test_methods():
a = delayed("a b c d e")
assert a.split(' ').compute() == ['a', 'b', 'c', 'd', 'e']
assert a.upper().replace('B', 'A').split().count('A').compute() == 2
assert a.split(' ', pure=True).key == a.split(' ', pure=True).key
o = a.split(' ', dask_key_name='test')
assert o.key == 'test'
def test_attributes():
a = delayed(2 + 1j)
assert a.real.compute() == 2
assert a.imag.compute() == 1
assert (a.real + a.imag).compute() == 3
def test_method_getattr_optimize():
a = delayed([1, 2, 3])
o = a.index(1)
dsk = o._optimize(o.dask, o._keys())
# Don't getattr the method, then call in separate task
assert getattr not in set(v[0] for v in dsk.values())
def test_delayed_errors():
a = delayed([1, 2, 3])
# Immutable
pytest.raises(TypeError, lambda: setattr(a, 'foo', 1))
pytest.raises(TypeError, lambda: setitem(a, 1, 0))
# Can't iterate, or check if contains
pytest.raises(TypeError, lambda: 1 in a)
pytest.raises(TypeError, lambda: list(a))
# No dynamic generation of magic/hidden methods
pytest.raises(AttributeError, lambda: a._hidden())
# Truth of delayed forbidden
pytest.raises(TypeError, lambda: bool(a))
def test_compute():
a = delayed(1) + 5
b = a + 1
c = a + 2
assert compute(b, c) == (7, 8)
assert compute(b) == (7,)
assert compute([a, b], c) == ([6, 7], 8)
def test_common_subexpressions():
a = delayed([1, 2, 3])
res = a[0] + a[0]
assert a[0].key in res.dask
assert a.key in res.dask
assert len(res.dask) == 3
def test_lists():
a = delayed(1)
b = delayed(2)
c = delayed(sum)([a, b])
assert c.compute() == 3
def test_literates():
a = delayed(1)
b = a + 1
lit = (a, b, 3)
assert delayed(lit).compute() == (1, 2, 3)
lit = [a, b, 3]
assert delayed(lit).compute() == [1, 2, 3]
lit = set((a, b, 3))
assert delayed(lit).compute() == set((1, 2, 3))
lit = {a: 'a', b: 'b', 3: 'c'}
assert delayed(lit).compute() == {1: 'a', 2: 'b', 3: 'c'}
assert delayed(lit)[a].compute() == 'a'
lit = {'a': a, 'b': b, 'c': 3}
assert delayed(lit).compute() == {'a': 1, 'b': 2, 'c': 3}
assert delayed(lit)['a'].compute() == 1
def test_literates_keys():
a = delayed(1)
b = a + 1
lit = (a, b, 3)
assert delayed(lit).key != delayed(lit).key
assert delayed(lit, pure=True).key == delayed(lit, pure=True).key
def test_lists_are_concrete():
a = delayed(1)
b = delayed(2)
c = delayed(max)([[a, 10], [b, 20]], key=lambda x: x[0])[1]
assert c.compute() == 20
@pytest.mark.xfail
def test_iterators():
a = delayed(1)
b = delayed(2)
c = delayed(sum)(iter([a, b]))
assert c.compute() == 3
def f(seq):
assert isinstance(seq, Iterator)
return sum(seq)
c = delayed(f)(iter([a, b]))
assert c.compute() == 3
def test_pure():
v1 = delayed(add, pure=True)(1, 2)
v2 = delayed(add, pure=True)(1, 2)
assert v1.key == v2.key
myrand = delayed(random)
assert myrand().key != myrand().key
def test_nout():
func = delayed(lambda x: (x, -x), nout=2, pure=True)
x = func(1)
assert len(x) == 2
a, b = x
assert compute(a, b) == (1, -1)
assert a._length is None
assert b._length is None
pytest.raises(TypeError, lambda: len(a))
pytest.raises(TypeError, lambda: list(a))
pytest.raises(ValueError, lambda: delayed(add, nout=-1))
pytest.raises(ValueError, lambda: delayed(add, nout=True))
func = delayed(add, nout=1)
a = func(1)
assert a._length is None
pytest.raises(TypeError, lambda: list(a))
pytest.raises(TypeError, lambda: len(a))
def test_kwargs():
def mysum(a, b, c=(), **kwargs):
return a + b + sum(c) + sum(kwargs.values())
dmysum = delayed(mysum)
ten = dmysum(1, 2, c=[delayed(3), 0], four=dmysum(2, 2))
assert ten.compute() == 10
dmysum = delayed(mysum, pure=True)
c = [delayed(3), 0]
ten = dmysum(1, 2, c=c, four=dmysum(2, 2))
assert ten.compute() == 10
assert dmysum(1, 2, c=c, four=dmysum(2, 2)).key == ten.key
assert dmysum(1, 2, c=c, four=dmysum(2, 3)).key != ten.key
assert dmysum(1, 2, c=c, four=4).key != ten.key
assert dmysum(1, 2, c=c, four=4).key != dmysum(2, 2, c=c, four=4).key
def test_array_delayed():
np = pytest.importorskip('numpy')
da = pytest.importorskip('dask.array')
arr = np.arange(100).reshape((10, 10))
darr = da.from_array(arr, chunks=(5, 5))
val = delayed(sum)([arr, darr, 1])
assert isinstance(val, Delayed)
assert np.allclose(val.compute(), arr + arr + 1)
assert val.sum().compute() == (arr + arr + 1).sum()
assert val[0, 0].compute() == (arr + arr + 1)[0, 0]
task, dasks = to_task_dasks(darr)
assert len(dasks) == 1
orig = set(darr.dask)
final = set(dasks[0])
assert orig.issubset(final)
diff = final.difference(orig)
assert len(diff) == 1
def test_array_bag_delayed():
db = pytest.importorskip('dask.bag')
da = pytest.importorskip('dask.array')
np = pytest.importorskip('numpy')
arr1 = np.arange(100).reshape((10, 10))
arr2 = arr1.dot(arr1.T)
darr1 = da.from_array(arr1, chunks=(5, 5))
darr2 = da.from_array(arr2, chunks=(5, 5))
b = db.from_sequence([1, 2, 3])
seq = [arr1, arr2, darr1, darr2, b]
out = delayed(sum)([i.sum() for i in seq])
assert out.compute() == 2*arr1.sum() + 2*arr2.sum() + sum([1, 2, 3])
def test_delayed_picklable():
# Delayed
x = delayed(divmod, nout=2, pure=True)(1, 2)
y = pickle.loads(pickle.dumps(x))
assert x.dask == y.dask
assert x._key == y._key
assert x._length == y._length
# DelayedLeaf
x = delayed(1j + 2)
y = pickle.loads(pickle.dumps(x))
assert x.dask == y.dask
assert x._key == y._key
assert x._nout == y._nout
assert x._pure == y._pure
# DelayedAttr
x = x.real
y = pickle.loads(pickle.dumps(x))
assert x._obj._key == y._obj._key
assert x._obj.dask == y._obj.dask
assert x._attr == y._attr
assert x._key == y._key
def test_delayed_compute_forward_kwargs():
x = delayed(1) + 2
x.compute(bogus_keyword=10)
def test_delayed_method_descriptor():
delayed(bytes.decode)(b'') # does not err
def test_delayed_callable():
f = delayed(add, pure=True)
v = f(1, 2)
assert v.dask == {v.key: (add, 1, 2)}
assert f.dask == {f.key: add}
assert f.compute() == add
def test_delayed_name_on_call():
f = delayed(add, pure=True)
assert f(1, 2, dask_key_name='foo')._key == 'foo'
def test_callable_obj():
class Foo(object):
def __init__(self, a):
self.a = a
def __call__(self):
return 2
foo = Foo(1)
f = delayed(foo)
assert f.compute() is foo
assert f.a.compute() == 1
assert f().compute() == 2
def test_name_consistent_across_instances():
func = delayed(identity, pure=True)
data = {'x': 1, 'y': 25, 'z': [1, 2, 3]}
if PY2:
assert func(data)._key == 'identity-777036d61a8334229dc0eda4454830d7'
if PY3:
assert func(data)._key == 'identity-1de4057b4cfa0ba7faed76b9c383cc99'
data = {'x': 1, 1: 'x'}
assert func(data)._key == func(data)._key
if PY2:
assert func(1)._key == 'identity-d3eda9ebeead15c7e491960e89605b7f'
if PY3:
assert func(1)._key == 'identity-5390b9efe3ddb6ea0557139003eef253'
def test_sensitive_to_partials():
assert (delayed(partial(add, 10), pure=True)(2)._key !=
delayed(partial(add, 20), pure=True)(2)._key)
def test_delayed_name():
assert delayed(1)._key.startswith('int-')
assert delayed(1, pure=True)._key.startswith('int-')
assert delayed(1, name='X')._key == 'X'
def myfunc(x):
return x + 1
assert delayed(myfunc)(1).key.startswith('myfunc')
def test_finalize_name():
import dask.array as da
x = da.ones(10, chunks=5)
v = delayed([x])
assert set(x.dask).issubset(v.dask)
def key(s):
if isinstance(s, tuple):
s = s[0]
return s.split('-')[0]
assert all(key(k).isalpha() for k in v.dask)
| cowlicks/dask | dask/tests/test_delayed.py | Python | bsd-3-clause | 9,711 |
"""
WSGI config for jiuye project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jiuye.settings")
application = get_wsgi_application()
| liucode/liucode.github.io | jiuye/wsgi.py | Python | mit | 388 |
# a minimal tracking script - this will start all peer
# services and attach everything appropriately
# change parameters depending on your pan tilt, pins and
# Arduino details
# all commented code is not necessary but allows custom
# options
port = "COM15"
xServoPin = 13
yServoPin = 12
tracker = Runtime.createAndStart("tracker", "Tracking")
# set specifics on each Servo
servoX = tracker.getX()
servoX.setPin(xServoPin)
servoX.setMinMax(30, 150)
servoY = tracker.getY()
servoY.setPin(yServoPin)
servoY.setMinMax(30, 150)
# changing PID values change the
# speed and "jumpyness" of the Servos
xpid = tracker.getXPID()
ypid = tracker.getYPID()
# these are default setting
# adjust to make more smooth
# or faster
# xpid.setPID(5.0, 5.0, 0.1)
# ypid.setPID(5.0, 5.0, 0.1)
# optional filter settings
opencv = tracker.getOpenCV()
# setting camera index to 1 default is 0
opencv.setCameraIndex(1)
# connect to the Arduino
tracker.connect(port)
# Gray & PyramidDown make face tracking
# faster - if you don't like these filters - you
# may remove them before you select a tracking type with
# the following command
# tracker.clearPreFilters()
# simple face detection and tracking
tracker.faceDetect()
# scans for faces - tracks if found
# tracker.findFace()
| sstocker46/pyrobotlab | home/Alessandruino/Tracking.faceDetection.py | Python | apache-2.0 | 1,267 |
#!/usr/bin/env python3
from pathlib import Path
def main():
rootdir = Path('.')
def input_files():
for path in rootdir.glob('*.*'):
if path.suffix in ('.html', '.txt'):
yield path
text = ''
for path in input_files():
text += path.read_text(encoding='utf8')
for encoding in ('utf8', 'utf16', 'utf32'):
path = rootdir / f'combined.{encoding}'
print(f"Writing {path}")
path.write_text(text, encoding=encoding)
if __name__ == '__main__':
main()
| WojciechMula/toys | avx512-utf8-to-utf32/dataset/combine.py | Python | bsd-2-clause | 548 |
from __future__ import print_function
import argparse
import ast
import cProfile
import imp
import locale
import os
import pkgutil
import select
import sys
from collections import OrderedDict
from contextlib import contextmanager
from copy import deepcopy
from datetime import datetime, timedelta
from json import dumps, loads
from py3status import modules as sitepkg_modules
from re import findall
from signal import signal
from signal import SIGTERM, SIGUSR1
from subprocess import Popen
from subprocess import PIPE
from subprocess import call
from tempfile import NamedTemporaryFile
from threading import Event, Thread
from time import sleep, time
from syslog import syslog, LOG_ERR, LOG_INFO, LOG_WARNING
try:
from setproctitle import setproctitle
setproctitle('py3status')
except ImportError:
pass
# Used in development
enable_profiling = False
def profile(thread_run_fn):
if not enable_profiling:
return thread_run_fn
def wrapper_run(self):
"""Wrap the Thread.run() method
"""
profiler = cProfile.Profile()
try:
return profiler.runcall(thread_run_fn, self)
finally:
thread_id = getattr(self, 'ident', 'core')
profiler.dump_stats("py3status-%s.profile" % thread_id)
return wrapper_run
@contextmanager
def jsonify(string):
"""
Transform the given string to a JSON in a context manager fashion.
"""
prefix = ''
if string.startswith(','):
prefix, string = ',', string[1:]
yield (prefix, loads(string))
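# Illustrative sketch (hypothetical line, not part of the original module):
# how jsonify() is typically used on a raw i3bar status line.
#
#     line = ',[{"name": "time", "full_text": "00:00"}]'
#     with jsonify(line) as (prefix, json_list):
#         assert prefix == ','
#         assert json_list[0]['name'] == 'time'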
def print_line(line):
"""
Print given line to stdout (i3bar).
"""
sys.__stdout__.write('{}\n'.format(line))
sys.__stdout__.flush()
def print_stderr(line):
"""Print line to stderr
"""
print(line, file=sys.stderr)
class IOPoller:
"""
This class implements a predictive and timing-out I/O reader
using select and the poll() mechanism for greater compatibility.
"""
def __init__(self, io, eventmask=select.POLLIN):
"""
Our default is to read (POLLIN) the specified 'io' file descriptor.
"""
self.io = io
self.poller = select.poll()
self.poller.register(io, eventmask)
def readline(self, timeout=500):
"""
Try to read our I/O for 'timeout' milliseconds, return None otherwise.
        This makes calling and reading I/O non-blocking!
"""
poll_result = self.poller.poll(timeout)
if poll_result:
line = self.io.readline().strip()
if self.io == sys.stdin and line == '[':
# skip first event line wrt issue #19
line = self.io.readline().strip()
try:
# python3 compatibility code
line = line.decode()
except (AttributeError, UnicodeDecodeError):
pass
return line
else:
return None
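# Illustrative sketch (assumption, not original code): non-blocking reads from
# stdin, the way the Events thread uses IOPoller further below.
#
#     poller = IOPoller(sys.stdin)
#     line = poller.readline(timeout=500)  # None if nothing arrived in 500 ms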
class I3status(Thread):
"""
This class is responsible for spawning i3status and reading its output.
"""
def __init__(self, lock, i3status_config_path, standalone):
"""
Our output will be read asynchronously from 'last_output'.
"""
Thread.__init__(self)
self.error = None
self.i3status_module_names = [
'battery', 'cpu_temperature', 'cpu_usage', 'ddate', 'disk',
'ethernet', 'ipv6', 'load', 'path_exists', 'run_watch', 'time',
'tztime', 'volume', 'wireless'
]
self.json_list = None
self.json_list_ts = None
self.last_output = None
self.last_output_ts = None
self.last_prefix = None
self.lock = lock
self.ready = False
self.standalone = standalone
self.tmpfile_path = None
#
self.config = self.i3status_config_reader(i3status_config_path)
def valid_config_param(self, param_name, cleanup=False):
"""
Check if a given section name is a valid parameter for i3status.
"""
if cleanup:
valid_config_params = [
_
for _ in self.i3status_module_names
if _ not in ['cpu_usage', 'ddate', 'ipv6', 'load', 'time']
]
else:
valid_config_params = self.i3status_module_names + [
'general', 'order'
]
return param_name.split(' ')[0] in valid_config_params
@staticmethod
def eval_config_parameter(param):
"""
Try to evaluate the given parameter as a string or integer and return
it properly. This is used to parse i3status configuration parameters
such as 'disk "/home" {}' or worse like '"cpu_temperature" 0 {}'.
"""
params = param.split(' ')
result_list = list()
for p in params:
try:
e_value = eval(p)
if isinstance(e_value, str) or isinstance(e_value, int):
p = str(e_value)
else:
raise ValueError()
except (NameError, SyntaxError, ValueError):
pass
finally:
result_list.append(p)
return ' '.join(result_list)
@staticmethod
def eval_config_value(value):
"""
Try to evaluate the given parameter as a string or integer and return
it properly. This is used to parse i3status configuration parameters
such as 'disk "/home" {}' or worse like '"cpu_temperature" 0 {}'.
"""
if value.lower() in ('true', 'false'):
return eval(value.title())
try:
e_value = eval(value)
if isinstance(e_value, str):
if e_value.lower() in ('true', 'false'):
value = eval(e_value.title())
else:
value = e_value
elif isinstance(e_value, int):
value = e_value
else:
raise ValueError()
except (NameError, ValueError):
pass
finally:
return value
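    # Illustrative sketch (hypothetical inputs, not part of the original
    # module) of what eval_config_value() returns:
    #
    #     eval_config_value('"#FF0000"')  ->  '#FF0000'
    #     eval_config_value('true')       ->  True
    #     eval_config_value('5')          ->  5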
def i3status_config_reader(self, i3status_config_path):
"""
Parse i3status.conf so we can adapt our code to the i3status config.
"""
config = {
'general': {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00',
'color_separator': '#333333',
'colors': False,
'interval': 5,
'output_format': 'i3bar'
},
'i3s_modules': [],
'on_click': {},
'order': [],
'py3_modules': []
}
# some ugly parsing
in_section = False
section_name = ''
for line in open(i3status_config_path, 'r'):
line = line.strip(' \t\n\r')
if not line or line.startswith('#'):
continue
if line.startswith('order'):
in_section = True
section_name = 'order'
if not in_section:
section_name = line.split('{')[0].strip()
section_name = self.eval_config_parameter(section_name)
if not section_name:
continue
else:
in_section = True
if section_name not in config:
config[section_name] = {}
if '{' in line:
in_section = True
if section_name and '=' in line:
section_line = line
# one liner cases
if line.endswith('}'):
section_line = section_line.split('}', -1)[0].strip()
if line.startswith(section_name + ' {'):
section_line = section_line.split(section_name + ' {')[
1].strip()
key = section_line.split('=', 1)[0].strip()
key = self.eval_config_parameter(key)
value = section_line.split('=', 1)[1].strip()
value = self.eval_config_value(value)
if section_name == 'order':
config[section_name].append(value)
line = '}'
# create an empty config for this module
if value not in config:
config[value] = {}
# detect internal modules to be loaded dynamically
if not self.valid_config_param(value):
config['py3_modules'].append(value)
else:
config['i3s_modules'].append(value)
else:
if not key.startswith('on_click'):
config[section_name][key] = value
else:
# on_click special parameters
try:
button = int(key.split()[1])
if button not in range(1, 6):
raise ValueError('should be 1, 2, 3, 4 or 5')
except IndexError as e:
raise IndexError(
'missing "button id" for "on_click" '
'parameter in section {}'.format(section_name))
except ValueError as e:
raise ValueError('invalid "button id" '
'for "on_click" parameter '
'in section {} ({})'.format(
section_name, e))
on_c = config['on_click']
on_c[section_name] = on_c.get(section_name, {})
on_c[section_name][button] = value
if line.endswith('}'):
in_section = False
section_name = ''
# py3status only uses the i3bar protocol because it needs JSON output
if config['general']['output_format'] != 'i3bar':
raise RuntimeError('i3status output_format should be set' +
                               ' to "i3bar" on {}{}'.format(
i3status_config_path,
' or on your own {}/.i3status.conf'.format(
os.path.expanduser(
'~')) if i3status_config_path ==
'/etc/i3status.conf' else ''))
# cleanup unconfigured i3status modules that have no default
for module_name in deepcopy(config['order']):
if (self.valid_config_param(module_name,
cleanup=True) and
not config.get(module_name)):
config.pop(module_name)
config['i3s_modules'].remove(module_name)
config['order'].remove(module_name)
return config
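    # Illustrative sketch (hypothetical configuration, not part of the
    # original module): a minimal i3status.conf accepted by
    # i3status_config_reader()
    #
    #     general {
    #         colors = true
    #         interval = 5
    #     }
    #     order += "time"
    #     order += "weather_yahoo paris"
    #     time {
    #         format = "%H:%M"
    #     }
    #
    # would yield config['order'] == ['time', 'weather_yahoo paris'],
    # config['i3s_modules'] == ['time'] and
    # config['py3_modules'] == ['weather_yahoo paris'].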
def set_responses(self, json_list):
"""
Set the given i3status responses on their respective configuration.
"""
for index, item in enumerate(self.json_list):
conf_name = self.config['i3s_modules'][index]
self.config[conf_name]['response'] = item
def get_delta_from_format(self, i3s_time, time_format):
"""
Guess the time delta from %z time formats such as +0400.
When such a format is found, replace it in the string so we respect
i3status' output while being able to correctly adjust the time.
"""
try:
if '%z' in time_format:
res = findall('[\-+]{1}[\d]{4}', i3s_time)[0]
if res:
operator = res[0]
hours = int(res[1:3])
minutes = int(res[-2:])
return (time_format.replace('%z', res), timedelta(
hours=eval('{}{}'.format(operator, hours)),
minutes=eval('{}{}'.format(operator, minutes))))
except Exception:
err = sys.exc_info()[1]
syslog(
LOG_ERR,
'i3status get_delta_from_format failed "{}" "{}" ({})'.format(
i3s_time, time_format, err))
return (time_format, None)
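    # Illustrative sketch (hypothetical values): for i3s_time
    # '2015-01-01 12:00:00 +0400' and time_format '%Y-%m-%d %H:%M:%S %z',
    # get_delta_from_format() returns
    # ('%Y-%m-%d %H:%M:%S +0400', timedelta(hours=4, minutes=0)).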
def set_time_modules(self):
"""
This method is executed only once after the first i3status output.
We parse all the i3status time and tztime modules and generate
a datetime for each of them while preserving (or defaulting) their
configured time format.
We also calculate a timedelta for each of them representing their
        timezone offset. It is this delta that we'll be using from now on, as
any future time or tztime update from i3status will be overwritten
thanks to our pre-parsed date here.
"""
default_time_format = '%Y-%m-%d %H:%M:%S'
default_tztime_format = '%Y-%m-%d %H:%M:%S %Z'
utcnow = self.last_output_ts
#
for index, item in enumerate(self.json_list):
if item.get('name') in ['time', 'tztime']:
conf_name = self.config['i3s_modules'][index]
time_name = item.get('name')
# time and tztime have different defaults
if time_name == 'time':
time_format = self.config.get(
conf_name, {}).get('format', default_time_format)
else:
time_format = self.config.get(
conf_name, {}).get('format', default_tztime_format)
# handle format_time parameter
if 'format_time' in self.config.get(conf_name, {}):
time_format = time_format.replace(
'%time', self.config[conf_name]['format_time'])
# parse i3status date
i3s_time = item['full_text'].encode('UTF-8', 'replace')
try:
# python3 compatibility code
i3s_time = i3s_time.decode()
except:
pass
time_format, delta = self.get_delta_from_format(i3s_time,
time_format)
try:
if '%Z' in time_format:
raise ValueError('%Z directive is not supported')
                    # add mandatory items in i3status time format wrt issue #18
time_fmt = time_format
for fmt in ['%Y', '%m', '%d']:
if fmt not in time_format:
time_fmt = '{} {}'.format(time_fmt, fmt)
i3s_time = '{} {}'.format(
i3s_time, datetime.now().strftime(fmt))
# get a datetime from the parsed string date
date = datetime.strptime(i3s_time, time_fmt)
# calculate the delta if needed
if not delta:
delta = (
datetime(date.year, date.month, date.day,
date.hour, date.minute) - datetime(
utcnow.year, utcnow.month, utcnow.day,
utcnow.hour, utcnow.minute))
except ValueError:
date = i3s_time
except Exception:
err = sys.exc_info()[1]
syslog(LOG_ERR,
'i3status set_time_modules "{}" failed ({})'.format(
conf_name, err))
date = i3s_time
finally:
self.config[conf_name]['date'] = date
self.config[conf_name]['delta'] = delta
self.config[conf_name]['time_format'] = time_format
def tick_time_modules(self, json_list, force):
"""
Adjust the 'time' and 'tztime' objects from the given json_list so that
they are updated only at py3status interval seconds.
This method is used to overwrite any i3status time or tztime output
        with respect to their parsed date and timezone offset detected on start.
"""
utcnow = datetime.utcnow()
# every whole minute, resync our time from i3status'
# this ensures we will catch any daylight savings time change
if utcnow.second == 0:
self.set_time_modules()
#
for index, item in enumerate(json_list):
if item.get('name') in ['time', 'tztime']:
conf_name = self.config['i3s_modules'][index]
time_module = self.config[conf_name]
if not isinstance(time_module['date'], datetime):
# something went wrong in the datetime parsing
# output i3status' date string
item['full_text'] = time_module['date']
else:
if force:
date = utcnow + time_module['delta']
time_module['date'] = date
else:
date = time_module['date']
time_format = self.config[conf_name].get('time_format')
# set the full_text date on the json_list to be returned
item['full_text'] = date.strftime(time_format)
json_list[index] = item
# reset the full_text date on the config object for next
# iteration to be consistent with this one
time_module['response']['full_text'] = item['full_text']
return json_list
def update_json_list(self):
"""
Copy the last json list output from i3status so that any module
can modify it without altering the original output.
        This is done so that any module's alteration of an i3status output json
will not be overwritten when the next i3status output gets polled.
"""
self.json_list = deepcopy(self.last_output)
self.json_list_ts = deepcopy(self.last_output_ts)
def get_modules_output(self, json_list, py3_modules):
"""
Return the final json list to be displayed on the i3bar by taking
into account every py3status configured module and i3status'.
Simply put, this method honors the initial 'order' configured by
the user in his i3status.conf.
"""
ordered = []
for module_name in self.config['order']:
if module_name in py3_modules:
for method in py3_modules[module_name].methods.values():
ordered.append(method['last_output'])
else:
if self.config.get(module_name, {}).get('response'):
ordered.append(self.config[module_name]['response'])
return ordered
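    # Illustrative sketch (hypothetical ordering): with
    # config['order'] == ['time', 'weather_yahoo paris'], the bar is built from
    # i3status' cached response for 'time' followed by the 'last_output' of
    # every method of the user's 'weather_yahoo paris' py3status module.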
@staticmethod
def write_in_tmpfile(text, tmpfile):
"""
Write the given text in the given tmpfile in python2 and python3.
"""
try:
tmpfile.write(text)
except TypeError:
tmpfile.write(str.encode(text))
def write_tmp_i3status_config(self, tmpfile):
"""
Given a temporary file descriptor, write a valid i3status config file
based on the parsed one from 'i3status_config_path'.
"""
for section_name, conf in sorted(self.config.items()):
if section_name in ['i3s_modules', 'py3_modules']:
continue
elif section_name == 'order':
for module_name in conf:
if self.valid_config_param(module_name):
self.write_in_tmpfile('order += "%s"\n' % module_name,
tmpfile)
self.write_in_tmpfile('\n', tmpfile)
elif self.valid_config_param(section_name) and conf:
self.write_in_tmpfile('%s {\n' % section_name, tmpfile)
for key, value in conf.items():
if isinstance(value, bool):
value = '{}'.format(value).lower()
self.write_in_tmpfile(' %s = "%s"\n' % (key, value),
tmpfile)
self.write_in_tmpfile('}\n\n', tmpfile)
tmpfile.flush()
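    # Illustrative sketch (hypothetical content): for the configuration
    # sketched above, the temporary file handed to i3status would contain
    # roughly (besides the other 'general' defaults)
    #
    #     general {
    #         colors = "true"
    #         interval = "5"
    #     }
    #
    #     order += "time"
    #
    #     time {
    #         format = "%H:%M"
    #     }
    #
    # only sections i3status itself understands are written back; py3status
    # modules such as "weather_yahoo paris" are skipped.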
@profile
def run(self):
"""
Spawn i3status using a self generated config file and poll its output.
"""
try:
with NamedTemporaryFile(prefix='py3status_') as tmpfile:
self.write_tmp_i3status_config(tmpfile)
syslog(LOG_INFO,
'i3status spawned using config file {}'.format(
tmpfile.name))
i3status_pipe = Popen(
['i3status', '-c', tmpfile.name],
stdout=PIPE,
stderr=PIPE, )
self.poller_inp = IOPoller(i3status_pipe.stdout)
self.poller_err = IOPoller(i3status_pipe.stderr)
self.tmpfile_path = tmpfile.name
try:
# loop on i3status output
while self.lock.is_set():
line = self.poller_inp.readline()
if line:
if line.startswith('[{'):
print_line(line)
with jsonify(line) as (prefix, json_list):
self.last_output = json_list
self.last_output_ts = datetime.utcnow()
self.last_prefix = ','
self.update_json_list()
self.set_responses(json_list)
# on first i3status output, we parse
# the time and tztime modules
self.set_time_modules()
self.ready = True
elif not line.startswith(','):
if 'version' in line:
header = loads(line)
header.update({'click_events': True})
line = dumps(header)
print_line(line)
else:
with jsonify(line) as (prefix, json_list):
self.last_output = json_list
self.last_output_ts = datetime.utcnow()
self.last_prefix = prefix
self.update_json_list()
self.set_responses(json_list)
else:
err = self.poller_err.readline()
code = i3status_pipe.poll()
if code is not None:
msg = 'i3status died'
if err:
msg += ' and said: {}'.format(err)
else:
msg += ' with code {}'.format(code)
raise IOError(msg)
except IOError:
err = sys.exc_info()[1]
self.error = err
except OSError:
# we cleanup the tmpfile ourselves so when the delete will occur
# it will usually raise an OSError: No such file or directory
pass
def cleanup_tmpfile(self):
"""
Cleanup i3status tmp configuration file.
"""
if os.path.isfile(self.tmpfile_path):
os.remove(self.tmpfile_path)
def mock(self):
"""
Mock i3status behavior, used in standalone mode.
"""
# mock thread is_alive() method
self.is_alive = lambda: True
# mock i3status base output
init_output = ['{"click_events": true, "version": 1}', '[', '[]']
for line in init_output:
print_line(line)
# mock i3status output parsing
self.last_output = []
self.last_output_ts = datetime.utcnow()
self.last_prefix = ','
self.update_json_list()
class Events(Thread):
"""
This class is responsible for dispatching event JSONs sent by the i3bar.
"""
def __init__(self, lock, config, modules, i3s_config):
"""
We need to poll stdin to receive i3bar messages.
"""
Thread.__init__(self)
self.config = config
self.i3s_config = i3s_config
self.last_refresh_ts = time()
self.lock = lock
self.modules = modules
self.on_click = i3s_config['on_click']
self.poller_inp = IOPoller(sys.stdin)
def dispatch(self, module, obj, event):
"""
Dispatch the event or enforce the default clear cache action.
"""
module_name = '{} {}'.format(module.module_name,
module.module_inst).strip()
#
if module.click_events:
# module accepts click_events, use it
module.click_event(event)
if self.config['debug']:
syslog(LOG_INFO, 'dispatching event {}'.format(event))
else:
# default button 2 action is to clear this method's cache
if self.config['debug']:
syslog(LOG_INFO, 'dispatching default event {}'.format(event))
# to make the bar more responsive to users we ask for a refresh
# of the module or of i3status if the module is an i3status one
self.refresh(module_name)
def i3bar_click_events_module(self):
"""
Detect the presence of the special i3bar_click_events.py module.
When py3status detects a module named 'i3bar_click_events.py',
it will dispatch i3status click events to this module so you can catch
them and trigger any function call based on the event.
"""
for module in self.modules.values():
if not module.click_events:
continue
if module.module_name == 'i3bar_click_events.py':
return module
else:
return False
def refresh(self, module_name):
"""
Force a cache expiration for all the methods of the given module.
We rate limit the i3status refresh to 100ms.
"""
module = self.modules.get(module_name)
if module is not None:
if self.config['debug']:
syslog(LOG_INFO, 'refresh module {}'.format(module_name))
for obj in module.methods.values():
obj['cached_until'] = time()
else:
if time() > (self.last_refresh_ts + 0.1):
if self.config['debug']:
syslog(
LOG_INFO,
'refresh i3status for module {}'.format(module_name))
call(['killall', '-s', 'USR1', 'i3status'])
self.last_refresh_ts = time()
def refresh_all(self, module_name):
"""
Force a full refresh of py3status and i3status modules by sending
a SIGUSR1 signal to py3status.
        We rate limit this command to 100ms to prevent abusive behavior.
"""
if time() > (self.last_refresh_ts + 0.1):
call(['killall', '-s', 'USR1', 'py3status'])
self.last_refresh_ts = time()
def on_click_dispatcher(self, module_name, command):
"""
Dispatch on_click config parameters to either:
- Our own methods for special py3status commands (listed below)
- The i3-msg program which is part of i3wm
"""
py3_commands = ['refresh', 'refresh_all']
if command is None:
return
elif command in py3_commands:
# this is a py3status command handled by this class
method = getattr(self, command)
method(module_name)
else:
# this is a i3 message
self.i3_msg(module_name, command)
# to make the bar more responsive to users we ask for a refresh
# of the module or of i3status if the module is an i3status one
self.refresh(module_name)
@staticmethod
def i3_msg(module_name, command):
"""
Execute the given i3 message and log its output.
"""
i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE)
syslog(LOG_INFO, 'i3-msg module="{}" command="{}" stdout={}'.format(
module_name, command, i3_msg_pipe.stdout.read()))
def i3status_mod_guess(self, instance, name):
"""
Some i3status modules output a name and instance that are different
from their configuration name in i3status.conf.
For example the 'disk' module will output with name 'disk_info' so
we try to be clever and figure it out here, case by case.
Guessed modules:
- battery
- cpu_temperature
- disk_info
- ethernet
- run_watch
- volume
- wireless
"""
try:
# /sys/class/power_supply/BAT0/uevent and _first_
if name == 'battery':
for k, v in self.i3s_config.items():
if k.startswith('battery') and isinstance(v, dict) and \
v.get('response', {}).get('instance') == instance:
instance = k.split(' ', 1)[1]
break
else:
instance = str([int(s) for s in instance if s.isdigit()][
0])
# /sys/devices/platform/coretemp.0/temp1_input
elif name == 'cpu_temperature':
instance = str([int(s) for s in instance if s.isdigit()][0])
# disk_info /home
elif name == 'disk_info':
name = 'disk'
# ethernet _first_
elif name == 'ethernet':
for k, v in self.i3s_config.items():
if k.startswith('ethernet') and isinstance(v, dict) and \
v.get('response', {}).get('instance') == instance:
instance = k.split(' ', 1)[1]
# run_watch /var/run/openvpn.pid
elif name == 'run_watch':
for k, v in self.i3s_config.items():
if k.startswith('run_watch') and isinstance(v, dict) and \
v.get('pidfile') == instance:
instance = k.split(' ', 1)[1]
break
# volume default.Master.0
elif name == 'volume':
device, mixer, mixer_idx = instance.split('.')
for k, v in self.i3s_config.items():
if k.startswith('volume') and isinstance(v, dict) and \
v.get('device') == device and \
v.get('mixer') == mixer and \
str(v.get('mixer_idx')) == mixer_idx:
instance = k.split(' ', 1)[1]
break
else:
instance = 'master'
# wireless _first_
elif name == 'wireless':
for k, v in self.i3s_config.items():
if k.startswith('wireless') and isinstance(v, dict) and \
v.get('response', {}).get('instance') == instance:
instance = k.split(' ', 1)[1]
except:
pass
finally:
return (instance, name)
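    # Illustrative sketch (hypothetical event values): i3status reports the
    # configured 'disk /home' section with name='disk_info' and
    # instance='/home'; the guess above maps that back so the configured
    # module can be matched:
    #
    #     i3status_mod_guess('/home', 'disk_info')  ->  ('/home', 'disk')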
@profile
def run(self):
"""
Wait for an i3bar JSON event, then find the right module to dispatch
the message to based on the 'name' and 'instance' of the event.
In case the module does NOT support click_events, the default
implementation is to clear the module's cache
when the MIDDLE button (2) is pressed on it.
Example event:
{'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}
"""
while self.lock.is_set():
event = self.poller_inp.readline()
if not event:
continue
try:
with jsonify(event) as (prefix, event):
if self.config['debug']:
syslog(LOG_INFO, 'received event {}'.format(event))
# usage variables
button = event.get('button', 0)
default_event = False
dispatched = False
instance = event.get('instance', '')
name = event.get('name', '')
# i3status module name guess
instance, name = self.i3status_mod_guess(instance, name)
if self.config['debug']:
syslog(
LOG_INFO,
'trying to dispatch event to module "{}"'.format(
'{} {}'.format(name, instance).strip()))
# guess the module config name
module_name = '{} {}'.format(name, instance).strip()
# execute any configured i3-msg command
if self.on_click.get(module_name, {}).get(button):
self.on_click_dispatcher(
module_name,
self.on_click[module_name].get(button))
dispatched = True
# otherwise setup default action on button 2 press
elif button == 2:
default_event = True
for module in self.modules.values():
# skip modules not supporting click_events
# unless we have a default_event set
if not module.click_events and not default_event:
continue
# check for the method name/instance
for obj in module.methods.values():
if name == obj['name']:
if instance:
if instance == obj['instance']:
self.dispatch(module, obj, event)
dispatched = True
break
else:
self.dispatch(module, obj, event)
dispatched = True
break
# fall back to i3bar_click_events.py module if present
if not dispatched:
module = self.i3bar_click_events_module()
if module:
if self.config['debug']:
syslog(
LOG_INFO,
'dispatching event to i3bar_click_events')
self.dispatch(module, obj, event)
except Exception:
err = sys.exc_info()[1]
syslog(LOG_WARNING, 'event failed ({})'.format(err))
class Module(Thread):
"""
This class represents a user module (imported file).
    It is responsible for executing it every given interval and
caching its output based on user will.
"""
def __init__(self, lock, config, module, i3_thread, user_modules):
"""
We need quite some stuff to occupy ourselves don't we ?
"""
Thread.__init__(self)
self.click_events = False
self.config = config
self.has_kill = False
self.i3status_thread = i3_thread
self.last_output = []
self.lock = lock
self.methods = OrderedDict()
self.module_class = None
self.module_inst = ''.join(module.split(' ')[1:])
self.module_name = module.split(' ')[0]
#
self.load_methods(module, user_modules)
@staticmethod
def load_from_file(filepath):
"""
Return user-written class object from given path.
"""
class_inst = None
expected_class = 'Py3status'
module_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
if file_ext.lower() == '.py':
py_mod = imp.load_source(module_name, filepath)
if hasattr(py_mod, expected_class):
class_inst = py_mod.Py3status()
return class_inst
@staticmethod
def load_from_namespace(module_name):
"""
Load a py3status bundled module.
"""
class_inst = None
name = 'py3status.modules.{}'.format(module_name)
py_mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
py_mod = getattr(py_mod, comp)
class_inst = py_mod.Py3status()
return class_inst
def clear_cache(self):
"""
Reset the cache for all methods of this module.
"""
for meth in self.methods:
self.methods[meth]['cached_until'] = time()
if self.config['debug']:
syslog(LOG_INFO, 'clearing cache for method {}'.format(meth))
def load_methods(self, module, user_modules):
"""
Read the given user-written py3status class file and store its methods.
Those methods will be executed, so we will deliberately ignore:
- private methods starting with _
- decorated methods such as @property or @staticmethod
- 'on_click' methods as they'll be called upon a click_event
- 'kill' methods as they'll be called upon this thread's exit
"""
# user provided modules take precedence over py3status provided modules
if self.module_name in user_modules:
include_path, f_name = user_modules[self.module_name]
syslog(LOG_INFO,
'loading module "{}" from {}{}'.format(module, include_path,
f_name))
class_inst = self.load_from_file(include_path + f_name)
# load from py3status provided modules
else:
syslog(LOG_INFO,
'loading module "{}" from py3status.modules.{}'.format(
module, self.module_name))
class_inst = self.load_from_namespace(self.module_name)
if class_inst:
self.module_class = class_inst
# apply module configuration from i3status config
mod_config = self.i3status_thread.config.get(module, {})
for config, value in mod_config.items():
setattr(self.module_class, config, value)
# get the available methods for execution
for method in sorted(dir(class_inst)):
if method.startswith('_'):
continue
else:
m_type = type(getattr(class_inst, method))
if 'method' in str(m_type):
if method == 'on_click':
self.click_events = True
elif method == 'kill':
self.has_kill = True
else:
# the method_obj stores infos about each method
# of this module.
method_obj = {
'cached_until': time(),
'instance': None,
'last_output': {
'name': method,
'full_text': ''
},
'method': method,
'name': None
}
self.methods[method] = method_obj
# done, syslog some debug info
if self.config['debug']:
syslog(LOG_INFO,
'module "{}" click_events={} has_kill={} methods={}'.format(
module, self.click_events, self.has_kill,
self.methods.keys()))
def click_event(self, event):
"""
Execute the 'on_click' method of this module with the given event.
"""
try:
click_method = getattr(self.module_class, 'on_click')
click_method(self.i3status_thread.json_list,
self.i3status_thread.config['general'], event)
except Exception:
err = sys.exc_info()[1]
msg = 'on_click failed with ({}) for event ({})'.format(err, event)
syslog(LOG_WARNING, msg)
@profile
def run(self):
"""
On a timely fashion, execute every method found for this module.
We will respect and set a cache timeout for each method if the user
didn't already do so.
We will execute the 'kill' method of the module when we terminate.
"""
while self.lock.is_set():
# execute each method of this module
for meth, obj in self.methods.items():
my_method = self.methods[meth]
# always check the lock
if not self.lock.is_set():
break
# respect the cache set for this method
if time() < obj['cached_until']:
continue
try:
# execute method and get its output
method = getattr(self.module_class, meth)
response = method(self.i3status_thread.json_list,
self.i3status_thread.config['general'])
if isinstance(response, dict):
# this is a shiny new module giving a dict response
result = response
elif isinstance(response, tuple):
# this is an old school module reporting its position
position, result = response
if not isinstance(result, dict):
raise TypeError('response should be a dict')
else:
raise TypeError('response should be a dict')
# validate the response
if 'full_text' not in result:
raise KeyError('missing "full_text" key in response')
else:
result['instance'] = self.module_inst
result['name'] = self.module_name
# initialize method object
if my_method['name'] is None:
my_method['name'] = result['name']
if 'instance' in result:
my_method['instance'] = result['instance']
else:
my_method['instance'] = result['name']
# update method object cache
if 'cached_until' in result:
cached_until = result['cached_until']
else:
cached_until = time() + self.config['cache_timeout']
my_method['cached_until'] = cached_until
# update method object output
my_method['last_output'] = result
# debug info
if self.config['debug']:
syslog(LOG_INFO,
'method {} returned {} '.format(meth, result))
except Exception:
err = sys.exc_info()[1]
syslog(LOG_WARNING,
'user method {} failed ({})'.format(meth, err))
# don't be hasty mate, let's take it easy for now
sleep(self.config['interval'])
# check and execute the 'kill' method if present
if self.has_kill:
try:
kill_method = getattr(self.module_class, 'kill')
kill_method(self.i3status_thread.json_list,
self.i3status_thread.config['general'])
except Exception:
# this would be stupid to die on exit
pass
class Py3statusWrapper():
"""
This is the py3status wrapper.
"""
def __init__(self):
"""
Useful variables we'll need.
"""
self.last_refresh_ts = time()
self.lock = Event()
self.modules = {}
self.py3_modules = []
def get_config(self):
"""
        Create the py3status configuration based on command line options we received.
"""
# get home path
home_path = os.path.expanduser('~')
# defaults
config = {
'cache_timeout': 60,
'include_paths': ['{}/.i3/py3status/'.format(home_path)],
'interval': 1
}
# package version
try:
import pkg_resources
version = pkg_resources.get_distribution('py3status').version
except:
version = 'unknown'
config['version'] = version
# i3status config file default detection
# respect i3status' file detection order wrt issue #43
i3status_config_file_candidates = [
'{}/.i3status.conf'.format(home_path),
'{}/.config/i3status/config'.format(os.environ.get(
'XDG_CONFIG_HOME', home_path)), '/etc/i3status.conf',
'{}/i3status/config'.format(os.environ.get('XDG_CONFIG_DIRS',
'/etc/xdg'))
]
for fn in i3status_config_file_candidates:
if os.path.isfile(fn):
i3status_config_file_default = fn
break
else:
# if none of the default files exists, we will default
# to ~/.i3/i3status.conf
i3status_config_file_default = '{}/.i3/i3status.conf'.format(
home_path)
# command line options
parser = argparse.ArgumentParser(
description='The agile, python-powered, i3status wrapper')
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-c',
'--config',
action="store",
dest="i3status_conf",
type=str,
default=i3status_config_file_default,
help="path to i3status config file")
parser.add_argument('-d',
'--debug',
action="store_true",
help="be verbose in syslog")
parser.add_argument('-i',
'--include',
action="append",
dest="include_paths",
help="""include user-written modules from those
directories (default ~/.i3/py3status)""")
parser.add_argument('-n',
'--interval',
action="store",
dest="interval",
type=float,
default=config['interval'],
help="update interval in seconds (default 1 sec)")
parser.add_argument('-s',
'--standalone',
action="store_true",
help="standalone mode, do not use i3status")
parser.add_argument('-t',
'--timeout',
action="store",
dest="cache_timeout",
type=int,
default=config['cache_timeout'],
help="""default injection cache timeout in seconds
(default 60 sec)""")
parser.add_argument('-v',
'--version',
action="store_true",
help="""show py3status version and exit""")
parser.add_argument('cli_command', nargs='*', help=argparse.SUPPRESS)
options = parser.parse_args()
if options.cli_command:
config['cli_command'] = options.cli_command
# only asked for version
if options.version:
from platform import python_version
print('py3status version {} (python {})'.format(config['version'],
python_version()))
sys.exit(0)
# override configuration and helper variables
config['cache_timeout'] = options.cache_timeout
config['debug'] = options.debug
if options.include_paths:
config['include_paths'] = options.include_paths
config['interval'] = int(options.interval)
config['standalone'] = options.standalone
config['i3status_config_path'] = options.i3status_conf
# all done
return config
def get_user_modules(self):
"""
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
"""
user_modules = {}
for include_path in sorted(self.config['include_paths']):
include_path = os.path.abspath(include_path) + '/'
if not os.path.isdir(include_path):
continue
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith('.py'):
continue
module_name = f_name[:-3]
user_modules[module_name] = (include_path, f_name)
return user_modules
def get_all_modules(self):
"""
Search and yield all available py3status modules:
- in the current python's implementation site-packages
- provided by the user using the inclusion directories
User provided modules take precedence over py3status generic modules.
"""
all_modules = {}
for importer, module_name, ispkg in \
pkgutil.iter_modules(sitepkg_modules.__path__):
if not ispkg:
mod = importer.find_module(module_name)
all_modules[module_name] = (mod, None)
user_modules = self.get_user_modules()
all_modules.update(user_modules)
for module_name, module_info in sorted(all_modules.items()):
yield (module_name, module_info)
def get_user_configured_modules(self):
"""
Get a dict of all available and configured py3status modules
in the user's i3status.conf.
"""
user_modules = {}
if not self.py3_modules:
return user_modules
for module_name, module_info in self.get_user_modules().items():
for module in self.py3_modules:
if module_name == module.split(' ')[0]:
include_path, f_name = module_info
user_modules[module_name] = (include_path, f_name)
return user_modules
def load_modules(self, modules_list, user_modules):
"""
Load the given modules from the list (contains instance name) with
respect to the user provided modules dict.
modules_list: ['weather_yahoo paris', 'net_rate']
user_modules: {
'weather_yahoo': ('/etc/py3status.d/', 'weather_yahoo.py')
}
"""
for module in modules_list:
# ignore already provided modules (prevents double inclusion)
if module in self.modules:
continue
try:
my_m = Module(self.lock, self.config, module,
self.i3status_thread, user_modules)
# only start and handle modules with available methods
if my_m.methods:
my_m.start()
self.modules[module] = my_m
elif self.config['debug']:
syslog(LOG_INFO,
'ignoring module "{}" (no methods found)'.format(
module))
except Exception:
err = sys.exc_info()[1]
msg = 'loading module "{}" failed ({})'.format(module, err)
self.i3_nagbar(msg, level='warning')
def setup(self):
"""
Setup py3status and spawn i3status/events/modules threads.
"""
# set the Event lock
self.lock.set()
# setup configuration
self.config = self.get_config()
if self.config.get('cli_command'):
self.handle_cli_command(self.config['cli_command'])
sys.exit()
if self.config['debug']:
syslog(LOG_INFO,
'py3status started with config {}'.format(self.config))
# setup i3status thread
self.i3status_thread = I3status(self.lock,
self.config['i3status_config_path'],
self.config['standalone'])
if self.config['standalone']:
self.i3status_thread.mock()
else:
self.i3status_thread.start()
while not self.i3status_thread.ready:
if not self.i3status_thread.is_alive():
err = self.i3status_thread.error
raise IOError(err)
sleep(0.1)
if self.config['debug']:
syslog(LOG_INFO, 'i3status thread {} with config {}'.format(
'started' if not self.config['standalone'] else 'mocked',
self.i3status_thread.config))
# setup input events thread
self.events_thread = Events(self.lock, self.config, self.modules,
self.i3status_thread.config)
self.events_thread.start()
if self.config['debug']:
syslog(LOG_INFO, 'events thread started')
        # suppress modules' output wrt issue #20
if not self.config['debug']:
sys.stdout = open('/dev/null', 'w')
sys.stderr = open('/dev/null', 'w')
# get the list of py3status configured modules
self.py3_modules = self.i3status_thread.config['py3_modules']
# get a dict of all user provided modules
user_modules = self.get_user_configured_modules()
if self.config['debug']:
syslog(LOG_INFO, 'user_modules={}'.format(user_modules))
if self.py3_modules:
# load and spawn i3status.conf configured modules threads
self.load_modules(self.py3_modules, user_modules)
def i3_nagbar(self, msg, level='error'):
"""
Make use of i3-nagbar to display errors and warnings to the user.
We also make sure to log anything to keep trace of it.
"""
msg = 'py3status: {}. '.format(msg)
msg += 'please try to fix this and reload i3wm (Mod+Shift+R)'
try:
log_level = LOG_ERR if level == 'error' else LOG_WARNING
syslog(log_level, msg)
Popen(['i3-nagbar', '-m', msg, '-t', level],
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'))
except:
pass
def stop(self):
"""
Clear the Event lock, this will break all threads' loops.
"""
try:
self.lock.clear()
if self.config['debug']:
syslog(LOG_INFO, 'lock cleared, exiting')
self.i3status_thread.cleanup_tmpfile()
except:
pass
def sig_handler(self, signum, frame):
"""
SIGUSR1 was received, the user asks for an immediate refresh of the bar
so we force i3status to refresh by sending it a SIGUSR1
and we clear all py3status modules' cache.
To prevent abuse, we rate limit this function to 100ms.
"""
if time() > (self.last_refresh_ts + 0.1):
syslog(LOG_INFO, 'received USR1, forcing refresh')
# send SIGUSR1 to i3status
call(['killall', '-s', 'USR1', 'i3status'])
# clear the cache of all modules
self.clear_modules_cache()
# reset the refresh timestamp
self.last_refresh_ts = time()
else:
syslog(LOG_INFO,
'received USR1 but rate limit is in effect, calm down')
def clear_modules_cache(self):
"""
For every module, reset the 'cached_until' of all its methods.
"""
for module in self.modules.values():
module.clear_cache()
def terminate(self, signum, frame):
"""
Received request to terminate (SIGTERM), exit nicely.
"""
raise KeyboardInterrupt()
@profile
def run(self):
"""
Main py3status loop, continuously read from i3status and modules
and output it to i3bar for displaying.
"""
# SIGUSR1 forces a refresh of the bar both for py3status and i3status,
# this mimics the USR1 signal handling of i3status (see man i3status)
signal(SIGUSR1, self.sig_handler)
signal(SIGTERM, self.terminate)
# initialize usage variables
delta = 0
last_delta = -1
previous_json_list = []
# main loop
while True:
# check i3status thread
if not self.i3status_thread.is_alive():
err = self.i3status_thread.error
if not err:
err = 'i3status died horribly'
self.i3_nagbar(err)
break
# check events thread
if not self.events_thread.is_alive():
# don't spam the user with i3-nagbar warnings
if not hasattr(self.events_thread, 'i3_nagbar'):
self.events_thread.i3_nagbar = True
err = 'events thread died, click events are disabled'
self.i3_nagbar(err, level='warning')
# check that every module thread is alive
for module in self.modules.values():
if not module.is_alive():
# don't spam the user with i3-nagbar warnings
if not hasattr(module, 'i3_nagbar'):
module.i3_nagbar = True
msg = 'output frozen for dead module(s) {}'.format(
','.join(module.methods.keys()))
self.i3_nagbar(msg, level='warning')
# get output from i3status
prefix = self.i3status_thread.last_prefix
json_list = deepcopy(self.i3status_thread.json_list)
# transform time and tztime outputs from i3status
# every configured interval seconds
if self.config['interval'] <= 1 or \
int(delta) % self.config['interval'] == 0 \
and int(last_delta) != int(delta):
delta = 0
last_delta = 0
json_list = self.i3status_thread.tick_time_modules(json_list,
force=True)
else:
json_list = self.i3status_thread.tick_time_modules(json_list,
force=False)
# construct the global output
if self.modules and self.py3_modules:
# new style i3status configured ordering
json_list = self.i3status_thread.get_modules_output(
json_list, self.modules)
# dump the line to stdout only on change
if json_list != previous_json_list:
print_line('{}{}'.format(prefix, dumps(json_list)))
# remember the last json list output
previous_json_list = deepcopy(json_list)
# reset i3status json_list and json_list_ts
self.i3status_thread.update_json_list()
# sleep a bit before doing this again to avoid killing the CPU
delta += 0.1
sleep(0.1)
@staticmethod
def print_module_description(details, mod_name, mod_info):
"""Print module description extracted from its docstring.
"""
if mod_name == '__init__':
return
mod, f_name = mod_info
if f_name:
path = os.path.join(*mod_info)
with open(path) as f:
module = ast.parse(f.read())
else:
path = mod.get_filename(mod_name)
module = ast.parse(mod.get_source(mod_name))
try:
docstring = ast.get_docstring(module, clean=True)
if docstring:
short_description = docstring.split('\n')[0].rstrip('.')
print_stderr(' %-22s %s.' % (mod_name, short_description))
if details:
for description in docstring.split('\n')[1:]:
print_stderr(' ' * 25 + '%s' % description)
print_stderr(' ' * 25 + '---')
else:
print_stderr(' %-22s No docstring in %s' % (mod_name, path))
except Exception:
print_stderr(' %-22s Unable to parse %s' % (mod_name, path))
def handle_cli_command(self, cmd):
"""Handle a command from the CLI.
"""
# aliases
if cmd[0] in ['mod', 'module', 'modules']:
cmd[0] = 'modules'
# allowed cli commands
if cmd[:2] in (['modules', 'list'], ['modules', 'details']):
details = cmd[1] == 'details'
print_stderr('Available modules:')
for mod_name, mod_info in self.get_all_modules():
self.print_module_description(details, mod_name, mod_info)
elif cmd[:2] in (['modules', 'enable'], ['modules', 'disable']):
# TODO: to be implemented
pass
else:
print_stderr('Error: unknown command')
sys.exit(1)
def main():
try:
locale.setlocale(locale.LC_ALL, '')
py3 = Py3statusWrapper()
py3.setup()
except KeyboardInterrupt:
err = sys.exc_info()[1]
py3.i3_nagbar('setup interrupted (KeyboardInterrupt)')
sys.exit(0)
except Exception:
err = sys.exc_info()[1]
py3.i3_nagbar('setup error ({})'.format(err))
py3.stop()
sys.exit(2)
try:
py3.run()
except Exception:
err = sys.exc_info()[1]
py3.i3_nagbar('runtime error ({})'.format(err))
sys.exit(3)
except KeyboardInterrupt:
pass
finally:
py3.stop()
sys.exit(0)
if __name__ == '__main__':
main()
| Shir0kamii/py3status | py3status/__init__.py | Python | bsd-3-clause | 63,701 |
import csv
import gzip
import cPickle
import numpy as np
def convert_kaggle_data_to_npy(choice='train'):
print 'Converting data, please wait...'
    if choice == 'train':
X = np.zeros((105471, 769), dtype=np.float32)
y = np.zeros((105471, 1), dtype=np.float32)
one_percent_const = 1055
file_name = './Data/train_v2.csv'
else:
file_name = './Data/test_v2.csv'
X = np.zeros((316415, 769), dtype=np.float32)
one_percent_const = 3164
index = 0
with open(file_name, 'rb') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
for row in reader:
if row[0] == 'id':
continue
else:
row = np.array(row)
row[row == 'NA'] = np.nan
                if choice == 'train':
X[index, :] = row[1:-1]
y[index] = row[-1]
else:
X[index, :] = row[1:]
index += 1
if index % one_percent_const == 0:
print int(index/one_percent_const), '% done'
print 'Data Loaded!!'
    if choice == 'train':
f = open('Data/trainData.npy', 'wb')
np.save(f, X)
np.save(f, y)
f.close()
else:
f = open('Data/testData.npy', 'wb')
np.save(f, X)
f.close()
def load_data_set(file_name):
with open(file_name, 'rb') as f:
data = np.load(f)
return data
def mnist_loader():
# Load the dataset
with gzip.open('Data/mnist.pkl.gz', 'rb') as f:
train_set, valid_set, test_set = cPickle.load(f)
f.close()
return train_set, valid_set, test_set
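# Illustrative sketch (assumes the Data/ files referenced above exist):
#
#     convert_kaggle_data_to_npy('train')
#     X = load_data_set('Data/trainData.npy')  # loads the first saved array
#     train_set, valid_set, test_set = mnist_loader()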
| abhitopia/DimReductionMissingData | utils.py | Python | mit | 1,691 |
# init this directory
| genegis/genegis | Install/__init__.py | Python | mpl-2.0 | 23 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pycon', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PyConTutorialMessage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('message', models.TextField()),
('submitted_at', models.DateTimeField(default=datetime.datetime.now, editable=False)),
('tutorial', models.ForeignKey(related_name='tutorial_messages', to='pycon.PyConTutorialProposal')),
('user', models.ForeignKey(help_text='User who submitted the message', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-submitted_at'],
},
bases=(models.Model,),
),
]
| njl/pycon | pycon/tutorials/migrations/0001_initial.py | Python | bsd-3-clause | 1,093 |
from urllib.parse import urlencode
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from bedrock.base.middleware import LocaleURLMiddleware
@override_settings(DEV=True)
class TestLocaleURLMiddleware(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.middleware = LocaleURLMiddleware()
@override_settings(DEV_LANGUAGES=('de', 'fr'))
def test_redirects_to_correct_language(self):
"""Should redirect to lang prefixed url."""
path = '/the/dude/'
req = self.rf.get(path, HTTP_ACCEPT_LANGUAGE='de')
resp = self.middleware.process_request(req)
self.assertEqual(resp['Location'], '/de' + path)
@override_settings(DEV_LANGUAGES=('es', 'fr'),
LANGUAGE_CODE='en-US')
def test_redirects_to_default_language(self):
"""Should redirect to default lang if not in settings."""
path = '/the/dude/'
req = self.rf.get(path, HTTP_ACCEPT_LANGUAGE='de')
resp = self.middleware.process_request(req)
self.assertEqual(resp['Location'], '/en-US' + path)
@override_settings(DEV_LANGUAGES=('de', 'fr'))
def test_redirects_to_correct_language_despite_unicode_errors(self):
"""Should redirect to lang prefixed url, stripping invalid chars."""
path = '/the/dude/'
corrupt_querystring = '?' + urlencode(
{b'a\xa4\x91b\xa4\x91i\xc0de': 's'})
corrected_querystring = '?abide=s'
req = self.rf.get(path + corrupt_querystring,
HTTP_ACCEPT_LANGUAGE='de')
resp = self.middleware.process_request(req)
self.assertEqual(resp['Location'],
'/de' + path + corrected_querystring)
| sgarrity/bedrock | bedrock/base/tests/test_middleware.py | Python | mpl-2.0 | 1,763 |
#from django.conf.urls import patterns, include, url
from django.conf.urls import *
from mysite.views import *
from contact.views import *
from django.contrib import admin
#from django.views.generic.simple import direct_to_template
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', search),
# url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
#url(r'^search/$', search),
url(r'^meta/$', dislpay_meta),
(r'^foo/$', foobar_view, {'template_name': 'template1.html'}),
(r'^bar/$', foobar_view, {'template_name': 'template2.html'}),
    (r'^mydata/(?P<month>\w{3})/(?P<day>\d\d)/$', my_view),
# (r'^events/$', object_list, {'model': models.Event}),
# (r'^blog/entries/$', object_list, {'model': models.models.BlogEntry}),
# (R'^somepage/$', views.method_splitter, {'GET': views.some_page_get, 'POST': views_some_page_post}),
# (r'^about/$', direct_to_template, {
# 'template': 'about.html'
# }),
# (r'^about/(\w+)/$', about_pages),
)
urlpatterns += patterns('',
url(r'^contact/$', contact),
url(r'^contact/thanks/$', thanks),
)
#if setting.DEBUG:
# urlpatterns += patterns('',
# (r'^debuginfo/$', debug),
#)
| kk47/Python | django/mysite/mysite/urls.py | Python | lgpl-3.0 | 1,234 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 D. de Vries
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains the definition the `DisciplineComponent` class.
"""
from __future__ import absolute_import, division, print_function
from typing import Optional
from .abstract_discipline import AbstractDiscipline
from .xml_component import XMLComponent
class DisciplineComponent(XMLComponent):
"""Specialized `XMLComponent` wrapping an `AbstractDiscipline`.
This version of `XMLComponent` defines in- and output variables based on the in- and output
template XML files generated by a subclass of `AbstractDiscipline`. The `execute()` method
simply forwards to that of the discipline.
Attributes
----------
discipline
"""
def __init__(self, discipline, data_folder='', keep_files=False, base_file=None):
# type: (AbstractDiscipline, Optional[str], bool, Optional[str]) -> None
"""Initialize a `Component` using a given `discipline`.
Stores a reference to the given `discipline`. The in- and output XML templates should
already exist at the paths specified in the `discipline`. This constructor uses those files
to create the ``OpenMDAO`` `params` and `unknowns` using the methods exposed by the
`XMLComponent` class this class inherits from.
Parameters
----------
discipline : :obj:`AbstractDiscipline`
Instance of a subclass of `AbstractDiscipline` this `Component` will represent.
data_folder : str(''), optional
Path to folder in which (temporary) data of this `Component` is stored.
keep_files : bool(False), optional
Set to `True` to keep the data files generated by this `Component` during execution.
base_file : str, optional
Path to an XML file which should be kept up-to-date with the latest data.
Notes
-----
Although this constructor could use the supplied `discipline` to also automatically
generate its in- and output XML templates on the fly, the user is left in control of
their generation. This is to allow for a `discipline` to generate different in- and
output templates dynamically based on certain parameters. During execution only the
static methods of the `discipline`s are used. Hence, any instance variables will not be
accessible then. Therefore it is impossible to guarantee consistency if the in- and
output XML files are generated here.
"""
self._discipline = discipline
self.number_of_computes = 0
self.number_of_linearizes = 0
super(DisciplineComponent, self).__init__(discipline.in_file_content, discipline.out_file_content,
discipline.partials_file_content, data_folder, keep_files, base_file)
if not discipline.supplies_partials:
self.partials_from_xml = None
@property
def discipline(self):
# type: () -> AbstractDiscipline
""":obj:`AbstractDiscipline`: Read-only reference to the specific discipline this
`Component` wraps."""
return self._discipline
def execute(self, input_xml=None, output_xml=None):
# type: (str, str) -> None
"""Call the `execute()` method of this `Component`'s discipline.
Parameters
----------
input_xml : str
Path to the input XML file.
output_xml : str
Path to the output XML file.
Raises
------
ValueError
If either no `input_xml` or `output_xml` path was specified.
Notes
-----
        Since this class inherits from `XMLComponent`, the interface, including the optionality
        of its arguments, is left untouched. For this method that means the `input_xml` and
        `output_xml` parameters are formally optional. In the context of the
        `DisciplineComponent`, however, they should always be given, so an exception is raised
        here when either or both of them are omitted.
"""
if input_xml is None or output_xml is None:
raise ValueError('Both an input_xml and output_xml path are expected.')
self.discipline.execute(input_xml, output_xml)
self.number_of_computes += 1
def linearize(self, input_xml=None, partials_xml=None):
# type: (str, str) -> None
"""Call the `linearize()` method of this `Component`'s discipline.
Parameters
----------
input_xml : str
Path to the input XML file.
partials_xml : str
Path to the partials XML file.
Raises
------
ValueError
If either no `input_xml` or `partials_xml` path was specified.
"""
if self.discipline.supplies_partials:
if input_xml is None or partials_xml is None:
raise ValueError('Both an input_xml and a partials_xml path are expected.')
self.discipline.linearize(input_xml, partials_xml)
self.number_of_linearizes += 1
def cleanup(self):
super(DisciplineComponent, self).cleanup()
self._discipline.cleanup()
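# Minimal usage sketch (illustrative only, not part of this module): `MyDiscipline` is a
# hypothetical `AbstractDiscipline` subclass whose in-/output XML templates already exist
# on disk, as required by the constructor documented above.
#
#     component = DisciplineComponent(MyDiscipline(), data_folder='data', keep_files=True)
#     component.execute('input.xml', 'output.xml')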
| daniel-de-vries/OpenLEGO | openlego/core/discipline_component.py | Python | apache-2.0 | 5,874 |
import sys
import getopt
import os.path
from project import Project, RugError
import output
from version import __version__
def init(output_buffer, optdict, project_dir=None):
Project.init(project_dir, optdict.has_key('--bare'), output_buffer=output_buffer)
def clone(output_buffer, optdict, url=None, project_dir=None):
if not url:
raise RugError('url must be specified')
if optdict.has_key('-c'):
repo_config = dict(map(lambda x: x.split('='), optdict['-c'].split(',')))
else:
repo_config = None
Project.clone(
url=url,
project_dir=project_dir,
source=optdict.get('-o'),
revset=optdict.get('-b'),
bare=optdict.has_key('--bare'),
repo_config=repo_config,
output_buffer=output_buffer
)
def checkout(proj, optdict, rev=None, src=None):
if '-b' in optdict:
proj.revset_create(rev, src)
proj.checkout(rev)
def fetch(proj, optdict, repos=None):
proj.fetch(repos=repos)
def update(proj, optdict):
proj.update(recursive=optdict.has_key('-r'))
def status_recurse(project, project_status, level=0):
indent = ' '
output = []
for (path, (stat, child_stat)) in project_status.items():
r = project.repos[path]
output.append('%2s %s%s%s' % (stat, indent*level, level and '\\' or '', path))
if r['vcs'] == 'rug':
#subproject
output += status_recurse(r['repo'].project, child_stat, level+1)
else:
#repo
for (file_path, s) in child_stat.items():
output.append('%2s %s%s%s' % (s, indent*(level+1), level and '\\' or '', file_path))
return output
def status(proj, optdict):
porcelain = optdict.has_key('-p')
if porcelain:
stat = proj.status(porcelain=True)
return '\n'.join(status_recurse(proj, stat))
else:
return proj.status(porcelain=False)
def revset(proj, optdict, dst=None, src=None):
if dst is None:
return proj.revset().get_short_name()
else:
proj.revset_create(dst, src)
def revset_list(proj, optdict):
return '\n'.join(map(lambda rs: rs.get_short_name(), proj.revset_list()))
def add(proj, optdict, project_dir=None, name=None, remote=None, rev=None):
if not project_dir:
raise RugError('unspecified directory')
vcs = optdict.get('-v')
use_sha = optdict.has_key('-s')
#Command-line interprets relative to cwd,
#but python interface is relative to project root
abs_path = os.path.abspath(project_dir)
path = os.path.relpath(abs_path, proj.dir)
proj.add(path=path, name=name, remote=remote, rev=rev, vcs=vcs, use_sha=use_sha)
def remove(proj, optdict, project_dir=None):
if not project_dir:
raise RugError('unspecified directory')
#Command-line interprets relative to cwd,
#but python interface is relative to project root
abs_path = os.path.abspath(project_dir)
path = os.path.relpath(abs_path, proj.dir)
proj.remove(path=path)
def commit(proj, optdict):
proj.commit(message=optdict.get('-m'), all=optdict.has_key('-a'), recursive=optdict.has_key('-r'))
def publish(proj, optdict, source=None):
proj.publish(source)
def remote_list(proj, optdict):
return '\n'.join(proj.remote_list())
def remote_add(proj, optdict, remote=None, fetch=None):
proj.remote_add(remote, fetch)
def source_list(proj, optdict):
return '\n'.join(proj.source_list())
def source_add(proj, optdict, source=None, url=None):
proj.source_add(source, url)
#(function, pass project flag, options, long_options, return_stdout)
rug_commands = {
'init': (init, False, '', ['bare'], False),
'clone': (clone, False, 'b:o:c:', ['bare'], False),
'checkout': (checkout, True, 'b', [], False),
'fetch': (fetch, True, '', [], False),
'update': (update, True, 'r', [], False),
'status': (status, True, 'p', [], True),
'revset': (revset, True, '', [], True),
'revset_list': (revset_list, True, '', [], True),
'add': (add, True, 'sv:', [], False),
'remove': (remove, True, '', [], False),
'commit': (commit, True, 'm:ar', [], False),
'publish': (publish, True, '', [], False),
'remote_list': (remote_list, True, '', [], True),
'remote_add': (remote_add, True, '', [], False),
'source_list': (source_list, True, '', [], True),
'source_add': (source_add, True, '', [], False),
#'reset': (Project.reset, True, ['soft', 'mixed', 'hard']),
}
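# Hypothetical example of registering an additional command: assuming a function
# `tag_list(proj, optdict)` existed, an entry such as
#     'tag_list': (tag_list, True, '', [], True),
# would make `rug tag_list` call it with the current project and print its return
# value, since the final tuple element (return_stdout) is True.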
def main():
if (len(sys.argv) < 2):
#TODO: write usage
print 'rug usage'
else:
command = sys.argv[1]
if command == 'version':
print 'rug version %s' % __version__
elif command not in rug_commands:
print 'rug usage'
else:
(func, pass_project, optspec, long_options, return_stdout) = rug_commands[command]
[optlist, args] = getopt.gnu_getopt(sys.argv[2:], optspec, long_options)
optdict = dict(optlist)
if return_stdout:
file = sys.stderr
else:
file = sys.stdout
output_buffer = output.WriterOutputBuffer(output.FileWriter(file))
if pass_project:
ret = func(Project.find_project(output_buffer=output_buffer), optdict, *args)
else:
ret = func(output_buffer, optdict, *args)
if return_stdout:
print ret
if __name__ == '__main__':
main()
| abstrakraft/rug | rug/rug.py | Python | gpl-3.0 | 4,932 |
"""
Error types
"""
class AlreadyBoundError(Exception):
"""
Raised if a factory is already bound to a name.
"""
pass
class CyclicGraphError(Exception):
"""
Raised if a graph has a cycle.
"""
pass
class LockedGraphError(Exception):
"""
Raised when attempting to create a component in a locked object graph.
"""
pass
class NotBoundError(Exception):
"""
    Raised if no factory is bound to a name.
"""
pass
class ValidationError(Exception):
"""
Raised if a configuration value fails validation.
"""
pass
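# Illustrative sketch only (not part of microcosm's API): a simple registry keyed by
# name might raise these errors roughly as follows, where `factories` is a hypothetical
# dict of bound factories.
#
#     def bind(factories, name, factory):
#         if name in factories:
#             raise AlreadyBoundError(name)
#         factories[name] = factory
#
#     def resolve(factories, name):
#         if name not in factories:
#             raise NotBoundError(name)
#         return factories[name]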
| globality-corp/microcosm | microcosm/errors.py | Python | apache-2.0 | 595 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# Python AI Battle
#
# Copyright 2011 Matthew Thompson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
'''
Wander Brain
This is a sample wandering brain. It just drives until it
hits an obstacle then chooses a new direction. It also changes
direction periodically.
Variables available to brains:
color - string, tank color
position - tuple (x,y), current tank grid position
facing - symbol UP, DOWN, LEFT, RIGHT, current tank facing
direction - tuple (x,y), unit vector representing tank facing
shots - how many shots have been fired
tanks - list of other tanks in map
tank_positions - [(x,y)] other tank positions
tank_states - list of other tank states (see Tank States)
Functions available to brains:
memory() - returns [symbol], a read only copy of queued commands
forget() - clear all queued brain commands
face(symbol) - change tank facing to symbol UP, DOWN, LEFT, or RIGHT
forward() - move tank forward one space
backward() - move tank backward one space
shoot() - fire tank's weapon with current facing
radar(x,y) - get a tuple (tile, item) from the map's x,y coordinate
kill() - self destruct tank
Facings:
UP, DOWN, LEFT, RIGHT,
Brain Commands:
FORWARD, BACKWARD, SHOOT
Tank States:
IDLE, MOVING, TURNING, SHOOTING, DEAD
Tiles:
GRASS, DIRT, PLAIN, WATER
SAFE_TILES = (GRASS, DIRT, PLAIN) - can be driven on safely
UNSAFE_TILES = (WATER,) - will destroy your tank if you drive into them
Items:
ROCK, TREE - blocking items that can be destroyed
TANK_BLUE, TANK_RED - tanks located on a tile
Lookup Helper Dictionaries:
FACING_TO_VEC - takes a facing symbol and returns the (x,y) unit vector
'''
import random
counter = 1
def think(game):
#forget() # clear old commands
x, y = game.position
dx, dy = game.direction
global counter
print "counter is", counter
counter += 1
tile, item = game.radar(x + dx, y + dy)
print "at", x, y, "and facing", game.facing
print "will be moving into:", tile, item
def new_facing():
# out of all facing possibilities, choose one we don't have currently
new_facing = [game.UP, game.DOWN, game.LEFT, game.RIGHT]
new_facing.remove(game.facing)
# evaluate the possible facings and remove ones that will block tank
good_facing = []
for f in new_facing:
v = game.FACING_TO_VEC[f]
nt, ni = game.radar(x + v[0], y + v[1])
if ni is None and nt not in (None, game.WATER):
good_facing.append(f)
return random.choice(good_facing or new_facing)
# avoid moving into blocking items
if item is not None or tile in (game.WATER, None):
game.forget() # clear possibly bad commands
game.face(new_facing())
elif random.randint(0,5) == 0:
        # about 1 out of 6 times (randint(0,5) == 0) choose a new direction
game.face(new_facing())
if game.FORWARD not in game.memory:
game.forward()
if random.randint(0,3) == 0:
        # about 1 out of 4 times (randint(0,3) == 0) try to shoot
game.shoot()
print "brain queue:", game.memory
| rjm535/BrainTank | brains/wander.py | Python | gpl-3.0 | 3,925 |
# powerup.py
# this function defines classes for powerups
import pygame
import globalvars
import info
from pygame.locals import *
class PowerUp(pygame.sprite.Sprite):
""" PowerUp:
this class is the generic class for PowerUps. It will not be used directly, but other classes will
inherit PowerUp's memebers. """
p = 1 # the probability of appearing. for example: this class has a 1/p probability of appearing
def __init__(self, pos):
pygame.sprite.Sprite.__init__(self)
self.rect = self.image.get_rect()
self.rect.topleft = pos
self._next_update = 0
# members you should edit in descendants
self._speed = 1
self.activate = self.do_nothing
def do_nothing(self):
pass
def update(self, t):
if t > self._next_update:
self.move()
self._next_update = t + 15
# If we've reached a border, kill it.
if self.rect.top > globalvars.HEIGHT:
self.kill()
def set_image(self, filename):
self.image = pygame.image.load_basic(filename)
self.image.set_colorkey((255, 255, 255), RLEACCEL)
def move(self):
self.rect.top += self._speed
# TODO: FIX ME
class Health(PowerUp):
p = 1
def __init__(self, pos):
self.set_image("img/health.bmp")
PowerUp.__init__(self, pos)
self.activate = self.give_health
def give_health(self):
print 'yay2'
globalvars.hero.give_hp(1)
class Money(PowerUp):
p = 10 # probability of appearing
def __init__(self, pos):
self.set_image("img/money.bmp")
PowerUp.__init__(self, pos)
self.activate = self.give_money
def give_money(self):
globalvars.scr.add(1000) | prg318/invade-spacers | powerup.py | Python | gpl-2.0 | 1,863 |
#!/usr/bin/python
'''Test for an openGL based stereo renderer - test rendering same scene to multiple windows
David Dunn
Feb 2017 - created
www.qenops.com
'''
__author__ = ('David Dunn')
__version__ = '1.0'
import OpenGL
OpenGL.ERROR_CHECKING = False # Disabled for roughly 2x speed up; re-enable while debugging
OpenGL.ERROR_LOGGING = False # Disabled for speed up; re-enable while debugging
#OpenGL.FULL_LOGGING = True # Uncomment for verbose logging
#OpenGL.ERROR_ON_COPY = True # Comment for release
import OpenGL.GL as GL
import math, os
import numpy as np
import dGraph as dg
import dGraph.ui as ui
import dGraph.cameras as dgc
import dGraph.shapes as dgs
import dGraph.materials as dgm
import dGraph.shaders as dgshdr
import dGraph.config as config
import dGraph.util.imageManip as im
import time
MODELDIR = '%s/data'%os.path.dirname(__file__)
WINDOWS = [{
"name": 'HMD Right',
"location": (900, 0),
#"location": (2436, 1936), # px coordinates of the startup screen for window location
"size": (830, 800), # px size of the startup screen for centering
"center": (290,216), # center of the display
"refresh_rate": 60, # refreshrate of the display for precise time measuring
"px_size_mm": 0.09766, # px size of the display in mm
"distance_cm": 20, # distance from the viewer in cm,
#"is_hmd": False,
#"warp_path": 'data/calibration/newRight/',
},
{
"name": 'HMD Left',
"location": (0, 0),
#"location": (3266, 1936), # px coordinates of the startup screen for window location
"size": (830, 800), # px size of the startup screen for centering
"center": (290,216), # center of the display
"refresh_rate": 60, # refreshrate of the display for precise time measuring
"px_size_mm": 0.09766, # px size of the display in mm
"distance_cm": 20, # distance from the viewer in cm,
#"is_hmd": False,
#"warp_path": 'data/calibration/newRight/',
},
]
def loadScene(renderStacks,file=None):
    '''Load or create our sceneGraph'''
    scene = dg.SceneGraph(file)
    cam = dgc.Camera('cam', scene)
    # all render stacks share the same resolution here, so size the camera from the first one
    cam.setResolution((renderStacks[0].width, renderStacks[0].height))
    cam.setTranslate(0.,0.,0.)
    cam.setFOV(50.)
    for rs in renderStacks:
        rs.cameras.append(cam)
teapot = dgs.PolySurface('teapot', scene, file = '%s/teapot.obj'%MODELDIR)
teapot.setScale(.1,.1,.1)
teapot.setTranslate(.0,-.05,-2.)
teapot.setRotate(5.,0.,0.)
for rs in renderStacks:
rs.objects['teapot'] = teapot
material1 = dgm.Test('material1',ambient=(1,0,0), amb_coeff=0.2, diffuse=(1,1,1), diff_coeff=1)
teapot.setMaterial(material1)
#for obj in renderStack.objects.itervalues():
# obj.setMaterial(material1)
renderStacks[0].append(cam.right)
#warp = dgm.warp.Lookup('lookup1',lutFile='%s/warp_0020.npy'%MODELDIR)
#renderStacks[0].append(warp)
renderStacks[1].append(cam.left)
#warp = dgm.warp.Lookup('lookup1',lutFile='%s/warp_0000.npy'%MODELDIR)
#renderStacks[1].append(warp)
return scene
def addInput(renderStack):
ui.add_key_callback(arrowKey, ui.KEY_RIGHT, renderStack=renderStack, direction=3)
ui.add_key_callback(arrowKey, ui.KEY_LEFT, renderStack=renderStack, direction=2)
ui.add_key_callback(arrowKey, ui.KEY_UP, renderStack=renderStack, direction=1)
ui.add_key_callback(arrowKey, ui.KEY_DOWN, renderStack=renderStack, direction=0)
def arrowKey(window,renderStack,direction):
if direction == 3: # print "right"
renderStack.objects['teapot'].rotate += np.array((0.,5.,0.))
elif direction == 2: # print "left"
renderStack.objects['teapot'].rotate -= np.array((0.,5.,0.))
elif direction == 1: # print 'up'
renderStack.objects['teapot'].translate += np.array((0.,.01,0.))
else: # print "down"
renderStack.objects['teapot'].translate -= np.array((0.,.01,0.))
def drawScene(renderStack):
''' Render the stack '''
myStack = list(renderStack) # copy the renderStack so we can pop and do it again next frame
temp = myStack.pop()
temp.render(renderStack.width, renderStack.height, myStack) # Render our warp to screen
def setup():
ui.init()
renderStacks = []
windows = []
for idx, winData in enumerate(WINDOWS):
renderStack = ui.RenderStack()
renderStack.display = ui.Display(resolution=winData['size'])
share = None if idx == 0 else windows[0]
window = renderStack.addWindow(ui.open_window(winData['name'], winData['location'][0], winData['location'][1], renderStack.display.width, renderStack.display.height, share=share))
if not window:
ui.terminate()
exit(1)
ui.make_context_current(window)
dg.initGL()
windows.append(window)
renderStacks.append(renderStack)
ui.add_key_callback(ui.close_window, ui.KEY_ESCAPE)
scene = loadScene(renderStacks)
for rs in renderStacks:
rs.graphicsCardInit()
return renderStacks, scene, windows
def runLoop(renderStacks, windows):
# Print message to console, and kick off the loop to get it rolling.
print("Hit ESC key to quit.")
    frame = 0
    start = time.time()
while not ui.window_should_close(windows[0]):
for rs in renderStacks:
print rs
for window in rs.windows:
print window
ui.make_context_current(window)
drawScene(rs)
ui.swap_buffers(window)
ui.poll_events()
now = time.time()
        time.sleep(max((frame+1)/float(config.maxFPS)+start-now,0))
        frame += 1
#ui.wait_events()
ui.terminate()
exit(0)
if __name__ == '__main__':
    renderStacks, scene, windows = setup()
    addInput(renderStacks[0])
    runLoop(renderStacks, windows)
| qenops/dGraph | test/test7.py | Python | apache-2.0 | 5,724 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from nova.common import config
import nova.conf
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import rpc
from nova import version
CONF = nova.conf.CONF
def parse_args(argv, default_config_files=None, configure_db=True,
init_rpc=True):
log.register_options(CONF)
# We use the oslo.log default log levels which includes suds=INFO
# and add only the extra levels that Nova needs
if CONF.glance.debug:
extra_default_log_levels = ['glanceclient=DEBUG']
else:
extra_default_log_levels = ['glanceclient=WARN']
log.set_defaults(default_log_levels=log.get_default_log_levels() +
extra_default_log_levels)
rpc.set_defaults(control_exchange='nova')
config.set_middleware_defaults()
CONF(argv[1:],
project='nova',
version=version.version_string(),
default_config_files=default_config_files)
if init_rpc:
rpc.init(CONF)
if configure_db:
sqlalchemy_api.configure(CONF)
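# Typical call pattern (sketch, not part of this module): a service entry point parses
# configuration before setting up logging and starting services, roughly:
#
#     config.parse_args(sys.argv)
#     logging.setup(CONF, 'nova')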
| hanlind/nova | nova/config.py | Python | apache-2.0 | 1,807 |
import os
import sys
import sphinx_rtd_theme
sys.path.append(os.path.abspath('../'))
VERSION = open('../setup.py').read().split("version='")[1].split("'")[0]
project = 'facadedevice'
version = VERSION
author = 'Vincent Michel'
copyright = u'2016, MAX-IV'
master_doc = 'index'
highlight_language = 'python'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
suppress_warnings = ['image.nonlocal_uri']
| MaxIV-KitsControls/tango-facadedevice | docs/conf.py | Python | gpl-3.0 | 504 |