code | repo_name | path | language | license | size
---|---|---|---|---|---|
from .AlFeatureTemplate import AlFeatureTemplate
from .sensorCountRoutine import AlFeatureSensorCountRoutine
import numpy as np
class AlFeatureSensorCount(AlFeatureTemplate):
def __init__(self, normalize=False):
"""
Initialization of Template Class
:return:
"""
AlFeatureTemplate.__init__(self,
name='sensorCount',
description='Number of Events in the window related to the sensor',
per_sensor=True,
enabled=True,
routine=AlFeatureSensorCountRoutine())
# Normalize the number between 0 to 1
self.normalize = normalize
def get_feature_value(self, data_list, cur_index, window_size, sensor_name=None):
"""
        Counts the number of occurrences of the specified sensor in the current window.
:param data_list: list of sensor data
:param cur_index: current data record index
:param window_size: window size
:param sensor_name: name of sensor
:return: a double value
"""
        if self.normalize:
            return float(self.routine.sensor_count[sensor_name]) / (window_size * 2)
        else:
            return float(self.routine.sensor_count[sensor_name])
| TinghuiWang/ActivityLearning | actlearn/feature/sensorCount.py | Python | bsd-3-clause | 1,358 |
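The feature above counts how many events in the current sliding window came from a given sensor, optionally dividing by `window_size * 2` to push the value toward [0, 1]. A standalone sketch of that computation (it does not use the actlearn package; the event list and sensor names are made up):

```python
from collections import Counter

def sensor_count_feature(events, window_size, sensor_name, normalize=False):
    """Count how often sensor_name appears among the last window_size events."""
    window = events[-window_size:]           # current sliding window
    count = Counter(window)[sensor_name]     # occurrences of this sensor
    return count / (window_size * 2.0) if normalize else float(count)

# 3 of the last 5 events come from sensor "M001" (names are made up)
events = ["M001", "M002", "M001", "M003", "M001"]
print(sensor_count_feature(events, 5, "M001"))                  # 3.0
print(sensor_count_feature(events, 5, "M001", normalize=True))  # 0.3
```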
import sqlalchemy
import config, constants, util
def textclause_repr(self):
return 'text(%r)' % self.text
def table_repr(self):
data = {
'name': self.name,
'columns': constants.NLTAB.join([repr(cl) for cl in self.columns]),
'constraints': constants.NLTAB.join(
[repr(cn) for cn in self.constraints
if not isinstance(cn, sqlalchemy.PrimaryKeyConstraint)]),
'index': '',
'schema': self.schema != None and "schema='%s'" % self.schema or '',
}
if data['constraints']:
data['constraints'] = data['constraints'] + ','
return util.as_out_str(constants.TABLE % data)
def _repr_coltype_as(coltype, as_type):
"""repr a Type instance as a super type."""
specimen = object.__new__(as_type)
specimen.__dict__ = coltype.__dict__
return repr(specimen)
def column_repr(self):
kwarg = []
if self.key != self.name:
kwarg.append( 'key')
if hasattr(self, 'primary_key'):
kwarg.append( 'primary_key')
if not self.nullable:
kwarg.append( 'nullable')
if self.onupdate:
kwarg.append( 'onupdate')
if self.default:
kwarg.append( 'default')
ks = ', '.join('%s=%r' % (k, getattr(self, k)) for k in kwarg )
name = self.name
    if not (hasattr(config, 'options') and config.options.generictypes):
coltype = repr(self.type)
elif type(self.type).__module__ == 'sqlalchemy.types':
coltype = repr(self.type)
else:
# Try to 'cast' this column type to a cross-platform type
# from sqlalchemy.types, dropping any database-specific type
# arguments.
for base in type(self.type).__mro__:
if (base.__module__ == 'sqlalchemy.types' and
base.__name__ in sqlalchemy.__all__):
coltype = _repr_coltype_as(self.type, base)
break
# FIXME: if a dialect has a non-standard type that does not
# derive from an ANSI type, there's no choice but to ignore
# generic-types and output the exact type. However, import
# headers have already been output and lack the required
# dialect import.
else:
coltype = repr(self.type)
data = {'name': self.name,
'type': coltype,
'constraints': ', '.join([repr(cn) for cn in self.constraints]),
'args': ks and ks or '',
}
    if data['constraints']:
        data['constraints'] = ', ' + data['constraints']
    if data['args']:
        data['args'] = ', ' + data['args']
return util.as_out_str(constants.COLUMN % data)
def foreignkeyconstraint_repr(self):
data = {'name': repr(self.name),
'names': repr([x.parent.name for x in self.elements]),
'specs': repr([x._get_colspec() for x in self.elements])
}
return util.as_out_str(constants.FOREIGN_KEY % data)
def index_repr(index):
cols = []
for column in index.columns:
# FIXME: still punting on the issue of unicode table names
if util.is_python_identifier(column.name):
cols.append('%s.c.%s' % (column.table.name, column.name))
else:
cols.append('%s.c[%r]' % (column.table.name, column.name))
data = {'name': repr(index.name),
'columns': ', '.join(cols),
'unique': repr(index.unique),
}
return util.as_out_str(constants.INDEX % data)
def monkey_patch_sa():
sqlalchemy.sql.expression._TextClause.__repr__ = textclause_repr
sqlalchemy.schema.Table.__repr__ = table_repr
sqlalchemy.schema.Column.__repr__ = column_repr
sqlalchemy.schema.ForeignKeyConstraint.__repr__ = foreignkeyconstraint_repr
sqlalchemy.schema.Index.__repr__ = index_repr
| DarioGT/SqlAutoCode- | sqlautocode/formatter.py | Python | mit | 3,809 |
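The module works by monkey-patching `__repr__` onto SQLAlchemy's schema classes so that reflected tables print as generation-ready source code. A minimal sketch of that pattern on a stand-in class (not SQLAlchemy's `Column`):

```python
class Column(object):
    """Stand-in class for the sketch, not SQLAlchemy's Column."""
    def __init__(self, name, type_, nullable=True):
        self.name = name
        self.type = type_
        self.nullable = nullable

def column_as_source(self):
    """Render the column as source code instead of the default repr."""
    args = [repr(self.name), self.type]
    if not self.nullable:
        args.append('nullable=False')
    return 'Column(%s)' % ', '.join(args)

Column.__repr__ = column_as_source   # the monkey patch

print(Column('id', 'Integer', nullable=False))
# Column('id', Integer, nullable=False)
```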
# -*- coding: utf-8 -*-
"""Scripts for counting recently added users by email domain; pushes results
to the specified project.
"""
import csv
import datetime
import collections
from cStringIO import StringIO
import requests
from dateutil.relativedelta import relativedelta
from framework.auth import Auth
from framework.mongo import database
from website import models
from website.app import app, init_app
from website.addons.osfstorage import utils as storage_utils
NODE_ID = '95nv8'
USER_ID = 'icpnw'
FILE_NAME = 'daily-users.csv'
TIME_DELTA = relativedelta(days=1)
def get_emails(query=None):
users = database['user'].find(query, {'username': True})
counts = collections.Counter(
user['username'].split('@')[-1]
for user in users
)
return counts.most_common()
def get_emails_since(delta):
return get_emails({
'date_confirmed': {
'$gte': datetime.datetime.utcnow() - delta,
}
})
def main():
init_app()
node = models.Node.load(NODE_ID)
user = models.User.load(USER_ID)
emails = get_emails_since(TIME_DELTA)
sio = StringIO()
writer = csv.writer(sio)
writer.writerow(['affiliation', 'count'])
writer.writerows(emails)
nchar = sio.tell()
sio.seek(0)
with app.test_request_context():
upload_url = storage_utils.get_upload_url(
node,
user,
nchar,
'text/csv',
FILE_NAME,
)
requests.put(
upload_url,
data=sio,
headers={'Content-Type': 'text/csv'},
)
if __name__ == '__main__':
main()
| AndrewSallans/osf.io | scripts/analytics/tabulate_emails.py | Python | apache-2.0 | 1,617 |
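The heart of the script is `get_emails`, which tallies the domain part of each confirmed username with `collections.Counter`. The same idea on a plain list, with made-up usernames standing in for the MongoDB query result:

```python
import collections

def count_domains(usernames):
    """Tally email domains, most common first (same idea as get_emails)."""
    counts = collections.Counter(u.split('@')[-1] for u in usernames)
    return counts.most_common()

# Hypothetical input standing in for the 'user' collection
usernames = ['a@osf.io', 'b@example.edu', 'c@osf.io']
print(count_domains(usernames))
# [('osf.io', 2), ('example.edu', 1)]
```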
# -*- coding: UTF-8 -*-
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
import datetime
from south.modelsinspector import add_introspection_rules
from ckeditor.fields import RichTextField
from multimedia.models import Audio, Fotos, Videos, Adjuntos
add_introspection_rules ([], ["^ckeditor\.fields\.RichTextField"])
class Foros(models.Model):
nombre = models.CharField(max_length=200)
    creacion = models.DateField(default=datetime.datetime.now)  # pass the callable so the default is evaluated per save, not once at import
apertura = models.DateField('Apertura y recepción de aportes')
cierre = models.DateField('Cierre de aportes')
fecha_skype = models.DateField('Propuesta de reunión skype')
memoria = models.DateField('Propuesta entrega de memoria')
contenido = RichTextField()
contraparte = models.ForeignKey(User)
documentos = generic.GenericRelation(Adjuntos)
fotos = generic.GenericRelation(Fotos)
video = generic.GenericRelation(Videos)
audio = generic.GenericRelation(Audio)
class Meta:
verbose_name_plural = "Foros"
ordering = ['-creacion']
def __unicode__(self):
return self.nombre
def get_absolute_url(self):
return "/foros/ver/%d" % (self.id)
class Aportes(models.Model):
foro = models.ForeignKey(Foros)
    fecha = models.DateField(default=datetime.datetime.now)
contenido = RichTextField()
user = models.ForeignKey(User)
adjuntos = generic.GenericRelation(Adjuntos)
fotos = generic.GenericRelation(Fotos)
video = generic.GenericRelation(Videos)
audio = generic.GenericRelation(Audio)
class Meta:
verbose_name_plural = "Aportes"
def __unicode__(self):
return self.foro.nombre
class Comentarios(models.Model):
    fecha = models.DateField(default=datetime.datetime.now)
usuario = models.ForeignKey(User)
comentario = RichTextField()
aporte = models.ForeignKey(Aportes)
class Meta:
verbose_name_plural = "Comentarios"
def __unicode__(self):
return self.usuario.username
| CARocha/sitioreddes | foros/models.py | Python | mit | 2,070 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""DBCore is an abstract database package that forms the basis for beets'
Library.
"""
from __future__ import absolute_import
from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery
from .types import Type
from .queryparse import query_from_strings
from .queryparse import sort_from_strings
from .queryparse import parse_sorted_query
from .query import InvalidQueryError
# flake8: noqa
| LordSputnik/beets | beets/dbcore/__init__.py | Python | mit | 1,103 |
import sys
r, c = map(int, input().split())
while r and c:
lines = [input().strip() for i in range(r)]
rotatedLines = []
for i in range(c):
rotatedLines.append("".join([lines[j][i] for j in range(r)]))
rotatedLines.sort(key=lambda s: s.lower())
for i in range(r):
print("".join([rotatedLines[j][i] for j in range(c)]))
print()
r, c = map(int, input().split())
| SirDavidLudwig/KattisSolutions | problems/sidewayssorting/sidewayssorting.py | Python | gpl-3.0 | 372 |
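The solution transposes the character grid, sorts the resulting columns case-insensitively, and transposes back. The same steps as a self-contained function on a fixed grid, so it does not need stdin:

```python
def sort_columns(lines):
    """Transpose, sort columns case-insensitively, transpose back."""
    r, c = len(lines), len(lines[0])
    cols = ["".join(lines[j][i] for j in range(r)) for i in range(c)]
    cols.sort(key=str.lower)
    return ["".join(cols[j][i] for j in range(c)) for i in range(r)]

# Example grid (rows of equal length)
grid = ["word", "axes", "stop"]
for row in sort_columns(grid):
    print(row)
```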
# -*- coding: utf-8 -*-
"""
Generate commands and configs for codalab runs.
"""
# Copyright (c) 2018 Ben Zimmer. All rights reserved.
import os
import shutil
import attr
from handwriting import run_charclassml, config as cf
DATE = "20180610"
RUN_COMMAND_PREFIX = "source activate handwriting36; export OMP_NUM_THREADS=1; python -m "
CODE_BUNDLE = "code"
DATA_BUNDLE = "data"
CONFIG_BUNDLE = "config_charclass_" + DATE
PYTHON_MODULE = "handwriting.run_charclassml"
MODULE_ARGS = "train"
CONFIG_DEFAULT_FILENAME = "config/charclass_20180523.json"
OUTPUT_DIR = "C:/Ben/code/codalab/" + DATE
CONFIG_DEFAULT = run_charclassml.load_config(CONFIG_DEFAULT_FILENAME)
def main():
"""main program"""
def update(x, **kwargs):
"""functional dict update"""
res = dict(x)
for k, v in kwargs.items():
res[k] = v
return res
# make a subdirectory in the output dir for the config bundle
config_bundle_dirname = os.path.join(OUTPUT_DIR, CONFIG_BUNDLE)
if not os.path.exists(config_bundle_dirname):
os.makedirs(config_bundle_dirname)
# dictionary of configs
configs = {
"reg_0.001": attr.assoc(
CONFIG_DEFAULT,
nn_opt=update(CONFIG_DEFAULT.nn_opt, weight_decay=0.001)),
"reg_0.005": attr.assoc(
CONFIG_DEFAULT,
nn_opt=update(CONFIG_DEFAULT.nn_opt, weight_decay=0.005)),
"reg_0.01": attr.assoc(
CONFIG_DEFAULT,
nn_opt=update(CONFIG_DEFAULT.nn_opt, weight_decay=0.01))
}
# save config files to bundle subdir
for k, v in configs.items():
config_filename = os.path.join(config_bundle_dirname, k + ".json")
cf.save(v, config_filename)
# TODO: zip bundle subdir
shutil.make_archive(config_bundle_dirname, "zip", config_bundle_dirname)
# generate a text file of run commands
runs_filename = os.path.join(OUTPUT_DIR, "runs.txt")
with open(runs_filename, "w") as runs_file:
for k in configs.keys():
config_filename = CONFIG_BUNDLE + "/" + k + ".json"
run = " ".join([
"run",
"handwriting:" + CODE_BUNDLE + "/handwriting",
":" + DATA_BUNDLE,
":" + CONFIG_BUNDLE,
"\"" + " ".join([
RUN_COMMAND_PREFIX,
PYTHON_MODULE,
MODULE_ARGS,
config_filename,
"model.pkl"
]) + "\"",
"-n " + "run_" + k + "_" + DATE,
"--request-docker-image bdzimmer/handwriting:0.1",
"--request-memory 16g"
]) + "\n"
print(run, file=runs_file)
if __name__ == "__main__":
main()
| bdzimmer/handwriting | handwriting/runs.py | Python | bsd-3-clause | 2,766 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test graph equality of caffe2 models."""
import nnvm
from nnvm.compiler import graph_util, graph_attr
from model_zoo import c2_squeezenet, squeezenet
def compare_graph(init, predict, nnvm_sym, ishape):
caffe2_sym, params = nnvm.frontend.from_caffe2(init, predict)
g1 = nnvm.graph.create(caffe2_sym)
g2 = nnvm.graph.create(nnvm_sym)
input_name = predict.external_input[0]
ishapes = {input_name: ishape}
graph_attr.set_shape_inputs(g1, ishapes)
graph_attr.set_shape_inputs(g2, ishapes)
g1 = g1.apply("InferShape").apply("SimplifyInference")
g2 = g2.apply("InferShape").apply("SimplifyInference")
graph_util.check_graph_equal(g1, g2)
def test_squeeze_net():
symbol, params = squeezenet.get_workload(version='1.1')
compare_graph(c2_squeezenet.init_net, c2_squeezenet.predict_net, symbol, ishape=(1, 3, 224, 224))
if __name__ == '__main__':
test_squeeze_net()
| Huyuwei/tvm | nnvm/tests/python/frontend/caffe2/test_graph.py | Python | apache-2.0 | 1,699 |
import datetime
import os.path
from django.utils import timezone
from django.db import models
from tinymce.models import HTMLField
from django.contrib.auth.models import User
from django import forms
from django.forms import ModelForm
from captcha.fields import CaptchaField
class Category(models.Model):
name = models.CharField(max_length=100)
pub_date = models.DateTimeField("Added date")
flat = models.BooleanField("Flat Category")
def __unicode__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(User, blank=True, null=True)
category = models.ForeignKey(Category, blank=True, null=True, on_delete=models.SET_NULL)
pub_date = models.DateTimeField("posted date")
content = models.TextField()
def __unicode__(self):
return self.title
class Attachment(models.Model):
attachments = models.FileField(upload_to='attachments/%Y/%m/%d/', max_length=100, blank=True)
article = models.ForeignKey(Article,)
def filename(self):
return os.path.basename(self.attachments.name)
class Comment(models.Model):
name = models.CharField("Your name", max_length=100)
comment = models.TextField("Your Comment")
pub_date = models.DateTimeField("Posted date", auto_now=False, auto_now_add=True)
article = models.ForeignKey(Article)
class CommentForm(ModelForm):
class Meta:
model = Comment
| bolan/django_simple_blog | blog/models.py | Python | gpl-3.0 | 1,456 |
from sklearn import datasets
from fickle.testing import TestCase
from fickle.predictors import GenericSVMClassifier as Backend
class BackendTest(TestCase):
def test_load(self):
backend = Backend()
dataset = datasets.load_iris()
self.assertTrue(backend.load(dataset))
def test_isloaded(self):
backend = Backend()
dataset = datasets.load_iris()
self.assertFalse(backend.isloaded())
backend.load(dataset)
self.assertTrue(backend.isloaded())
backend.load(dataset)
self.assertTrue(backend.isloaded())
def test_fit_when_not_loaded(self):
backend = Backend()
with self.assertRaises(RuntimeError):
backend.fit()
def test_fit_when_loaded(self):
backend = Backend()
dataset = datasets.load_iris()
backend.load(dataset)
self.assertTrue(backend.fit())
def test_istrained_without_load(self):
backend = Backend()
dataset = datasets.load_iris()
self.assertFalse(backend.istrained())
backend.load(dataset)
self.assertFalse(backend.istrained())
backend.fit()
self.assertTrue(backend.istrained())
def test_istrained_with_load(self):
old_backend = Backend()
dataset = datasets.load_iris()
old_backend.load(dataset)
old_backend.fit()
new_backend = Backend()
self.assertTrue(new_backend.istrained(read=True))
def test_predict_when_trained(self):
backend = Backend()
dataset = datasets.load_iris()
backend.load(dataset)
backend.fit()
sample = dataset['data'][:10]
predictions = backend.predict(sample)
self.assertEqual(len(predictions), 10)
| norbert/fickle | test/backend_test.py | Python | mit | 1,757 |
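The tests drive a load → fit → predict lifecycle on the iris dataset. Assuming `GenericSVMClassifier` wraps something like scikit-learn's `SVC` (an assumption, not confirmed by the test file), the equivalent direct calls look like this:

```python
from sklearn import datasets
from sklearn.svm import SVC

# Rough equivalent of load() / fit() / predict() using scikit-learn directly;
# fickle's actual backend may differ.
dataset = datasets.load_iris()
clf = SVC()
clf.fit(dataset['data'], dataset['target'])
predictions = clf.predict(dataset['data'][:10])
print(len(predictions))  # 10
```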
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import importlib
import numpy as np
from tensorflow.python.eager import backprop as tfe_backprop
from tensorflow.python.eager import context as tfe_context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
sm = special_math
def _check_strictly_increasing(array_1d):
diff = np.diff(array_1d)
np.testing.assert_array_less(0, diff)
def _make_grid(dtype, grid_spec):
"""Returns a uniform grid + noise, reshaped to shape argument."""
rng = np.random.RandomState(0)
num_points = np.prod(grid_spec.shape)
grid = np.linspace(grid_spec.min, grid_spec.max, num=num_points).astype(dtype)
grid_spacing = (grid_spec.max - grid_spec.min) / num_points
grid += 0.1 * grid_spacing * rng.randn(*grid.shape) # pylint: disable=not-an-iterable
# More useful if it's sorted (e.g. for testing monotonicity, or debugging).
grid = np.sort(grid)
return np.reshape(grid, grid_spec.shape)
def _value_and_gradient(fn, *args):
"""Calls `fn` and computes the gradient of the result wrt `arg`."""
if tfe_context.executing_eagerly():
v, g = tfe_backprop.val_and_grad_function(fn)(args)
else:
v = fn(*args)
g = gradients_impl.gradients(v, args)
return v, g
GridSpec = collections.namedtuple("GridSpec", ["min", "max", "shape"])
ErrorSpec = collections.namedtuple("ErrorSpec", ["rtol", "atol"])
class NdtriTest(test.TestCase):
def assertAllFinite(self, x):
is_finite = np.isfinite(x)
all_true = np.ones_like(is_finite, dtype=np.bool_)
self.assertAllEqual(all_true, is_finite)
@test_util.run_in_graph_and_eager_modes
def testNdtri(self):
"""Verifies that ndtri computation is correct."""
if not special:
return
p = np.linspace(0., 1.0, 50).astype(np.float64)
# Quantile performs piecewise rational approximation so adding some
# special input values to make sure we hit all the pieces.
p = np.hstack((p, np.exp(-32), 1. - np.exp(-32), np.exp(-2),
1. - np.exp(-2)))
expected_x = special.ndtri(p)
x = special_math.ndtri(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
@test_util.run_deprecated_v1
def testNdtriDynamicShape(self):
"""Verifies that ndtri computation is correct."""
with self.cached_session() as sess:
if not special:
return
p = array_ops.placeholder(np.float32)
p_ = np.linspace(0., 1.0, 50).astype(np.float32)
x = special_math.ndtri(p)
x_ = sess.run(x, feed_dict={p: p_})
expected_x_ = special.ndtri(p_)
self.assertAllClose(expected_x_, x_, atol=0.)
def _baseNdtriFiniteGradientTest(self, dtype):
"""Verifies that ndtri has finite gradients at interesting points."""
# Tests gradients at 0, 1, and piece-wise boundaries.
p = constant_op.constant(
np.array([
0.,
np.exp(-32.),
np.exp(-2.),
1. - np.exp(-2.),
1. - np.exp(-32.),
1.,
]).astype(dtype))
# Not having the lambda sanitizer means we'd get an `IndexError` whenever
# the user supplied function has default args.
_, grads = _value_and_gradient(
lambda x: special_math.ndtri(x), p) # pylint: disable=unnecessary-lambda
self.assertAllFinite(self.evaluate(grads[0]))
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat32(self):
self._baseNdtriFiniteGradientTest(np.float32)
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat64(self):
self._baseNdtriFiniteGradientTest(np.float64)
@test_util.run_all_in_graph_and_eager_modes
class NdtrTest(test.TestCase):
_use_log = False
# Grid min/max chosen to ensure 0 < cdf(x) < 1.
_grid32 = GridSpec(min=-12.9, max=5., shape=[100])
_grid64 = GridSpec(min=-37.5, max=8., shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-6, atol=0.)
def _test_grid(self, dtype, grid_spec, error_spec):
if self._use_log:
self._test_grid_log(dtype, grid_spec, error_spec)
else:
self._test_grid_no_log(dtype, grid_spec, error_spec)
def _test_grid_log(self, dtype, grid_spec, error_spec):
if not special:
return
grid = _make_grid(dtype, grid_spec)
actual = self.evaluate(sm.log_ndtr(grid))
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, -inf < log_cdf(x) < 0. In this case, we should be able
# to use a huge grid because we have used tricks to escape numerical
# difficulties.
self.assertTrue((actual < 0).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.log_ndtr(grid)
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
actual.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
def _test_grid_no_log(self, dtype, grid_spec, error_spec):
if not special:
return
grid = _make_grid(dtype, grid_spec)
actual = self.evaluate(sm.ndtr(grid))
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due
# to numerical limitations of cdf.
self.assertTrue((actual > 0).all())
self.assertTrue((actual < 1).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.ndtr(grid)
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
actual.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
@test_util.run_deprecated_v1
def test_float32(self):
self._test_grid(np.float32, self._grid32, self._error32)
@test_util.run_deprecated_v1
def test_float64(self):
self._test_grid(np.float64, self._grid64, self._error64)
class LogNdtrTestLower(NdtrTest):
_use_log = True
_grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100])
_grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-4, atol=0.)
# The errors are quite large when the input is > 6 or so. Also,
# scipy.special.log_ndtr becomes zero very early, before 10,
# (due to ndtr becoming 1). We approximate Log[1 + epsilon] as epsilon, and
# avoid this issue.
class LogNdtrTestMid(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])
# Differences show up as soon as we're in the tail, so add some atol.
_error32 = ErrorSpec(rtol=0.1, atol=1e-7)
_error64 = ErrorSpec(rtol=0.1, atol=1e-7)
class LogNdtrTestUpper(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_UPPER,
max=12., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_UPPER,
max=35., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_error32 = ErrorSpec(rtol=1e-6, atol=1e-14)
_error64 = ErrorSpec(rtol=1e-6, atol=1e-14)
class NdtrGradientTest(test.TestCase):
_use_log = False
_grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8])
_error32 = ErrorSpec(rtol=1e-4, atol=0)
_error64 = ErrorSpec(rtol=1e-7, atol=0)
def assert_all_true(self, v):
self.assertAllEqual(np.ones_like(v, dtype=np.bool_), v)
def assert_all_false(self, v):
self.assertAllEqual(np.zeros_like(v, dtype=np.bool_), v)
def _test_grad_finite(self, dtype):
x = constant_op.constant([-100., 0., 100.], dtype=dtype)
output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x))
fn = sm.log_ndtr if self._use_log else sm.ndtr
# Not having the lambda sanitizer means we'd get an `IndexError` whenever
# the user supplied function has default args.
output, grad_output = _value_and_gradient(
lambda x_: fn(x_), x) # pylint: disable=unnecessary-lambda
# isfinite checks for NaN and Inf.
output_, grad_output_ = self.evaluate([output, grad_output])
self.assert_all_true(np.isfinite(output_))
self.assert_all_true(np.isfinite(grad_output_[0]))
def _test_grad_accuracy(self, dtype, grid_spec, error_spec):
raw_grid = _make_grid(dtype, grid_spec)
grid = ops.convert_to_tensor(raw_grid)
with self.cached_session():
fn = sm.log_ndtr if self._use_log else sm.ndtr
# If there are N points in the grid,
# grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
# the ith output point w.r.t. the jth grid point. We only expect the
# diagonal to be nonzero.
# TODO(b/31131137): Replace tf.compat.v1.test.compute_gradient with our
# own custom gradient evaluation to ensure we correctly handle small
# function delta.
grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,
fn(grid),
grid_spec.shape)
grad_eval = np.diag(grad_eval)
# Check for NaN separately in order to get informative failures.
self.assert_all_false(np.isnan(grad_eval))
self.assert_all_true(grad_eval > 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Do the same checks but explicitly compute the gradient.
# (We did this because we're not sure if we trust
# tf.test.compute_gradient.)
grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()
self.assert_all_false(np.isnan(grad_eval))
if self._use_log:
g = np.reshape(grad_eval, [-1])
half = np.ceil(len(g) / 2)
self.assert_all_true(g[:int(half)] > 0.)
self.assert_all_true(g[int(half):] >= 0.)
else:
# The ndtr gradient will only be non-zero in the range [-14, 14] for
# float32 and [-38, 38] for float64.
self.assert_all_true(grad_eval >= 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Versus scipy.
if not (special and stats):
return
expected = stats.norm.pdf(raw_grid)
if self._use_log:
expected /= special.ndtr(raw_grid)
expected[np.isnan(expected)] = 0.
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
grad_eval.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
@test_util.run_deprecated_v1
def test_float32(self):
self._test_grad_accuracy(np.float32, self._grid, self._error32)
self._test_grad_finite(np.float32)
@test_util.run_deprecated_v1
def test_float64(self):
self._test_grad_accuracy(np.float64, self._grid, self._error64)
self._test_grad_finite(np.float64)
class LogNdtrGradientTest(NdtrGradientTest):
_use_log = True
class ErfInvTest(test.TestCase):
def testErfInvValues(self):
with self.cached_session():
if not special:
return
x = np.linspace(0., 1.0, 50).astype(np.float64)
expected_x = special.erfinv(x)
x = special_math.erfinv(x)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testErfInvIntegerInput(self):
with self.cached_session():
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int32)
special_math.erfinv(x)
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int64)
special_math.erfinv(x)
class LogCDFLaplaceTest(test.TestCase):
# Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot
# rely on scipy to cross check the extreme values.
# Test will be done differently over different ranges. These are the values
# such that when exceeded by x, produce output that causes the naive (scipy)
# implementation to have numerical issues.
#
# If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps.
# With inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is
# the value of x such that any larger value will result in
# 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a
# log # of zero. We therefore choose these as our cutoffs for testing.
CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1.
CUTOFF_FLOAT32_UPPER = np.log(1. / (2. * np.finfo(np.float32).eps)) - 1.
def assertAllTrue(self, x):
self.assertAllEqual(np.ones_like(x, dtype=np.bool_), x)
def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec):
with self.cached_session():
grid = _make_grid(dtype, grid_spec)
actual = sm.log_cdf_laplace(grid).eval()
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual))
self.assertAllTrue((actual < 0))
_check_strictly_increasing(actual)
# Versus scipy.
if not stats:
return
scipy_dist = stats.laplace(loc=0., scale=1.)
expected = scipy_dist.logcdf(grid.astype(scipy_dtype))
self.assertAllClose(
expected.astype(np.float64),
actual.astype(np.float64),
rtol=error_spec.rtol,
atol=error_spec.atol)
@test_util.run_deprecated_v1
def test_float32_lower_and_mid_segment_scipy_float32_ok(self):
# Choose values mild enough that we can use scipy in float32, which will
# allow for a high accuracy match to scipy (since we both use float32).
self._test_grid_log(
np.float32, # dtype
np.float32, # scipy_dtype
GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]),
ErrorSpec(rtol=5e-4, atol=0))
@test_util.run_deprecated_v1
def test_float32_all_segments_with_scipy_float64_ok(self):
# Choose values outside the range where scipy float32 works.
# Let scipy use float64. This means we
# won't be exactly the same since we are in float32.
self._test_grid_log(
np.float32, # dtype
np.float64, # scipy_dtype
GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]),
ErrorSpec(rtol=0.05, atol=0))
@test_util.run_deprecated_v1
def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.cached_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float32, GridSpec(min=-200, max=80, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = self.evaluate([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
@test_util.run_deprecated_v1
def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.cached_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float64, GridSpec(min=-200, max=700, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = self.evaluate([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
if __name__ == "__main__":
test.main()
| frreiss/tensorflow-fred | tensorflow/python/kernel_tests/distributions/special_math_test.py | Python | apache-2.0 | 17,681 |
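Most of these tests compare TensorFlow's `ndtr`/`log_ndtr` against SciPy on a grid. As a quick standalone reference point, `scipy.special.ndtr` is the standard normal CDF and agrees with the erf formulation:

```python
import numpy as np
from scipy import special

# ndtr is the standard normal CDF: ndtr(x) = 0.5 * (1 + erf(x / sqrt(2)))
x = np.linspace(-3.0, 3.0, 7)
print(np.allclose(special.ndtr(x), 0.5 * (1.0 + special.erf(x / np.sqrt(2.0)))))
# True
```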
#!/usr/bin/env python
import argparse
import json
import os
import random
import numpy as np
import ray
from ray.tune import Trainable, run, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
class MyTrainableClass(Trainable):
"""Example agent whose learning curve is a random sigmoid.
The dummy hyperparameters "width" and "height" determine the slope and
maximum reward value reached.
"""
def _setup(self, config):
self.timestep = 0
def _train(self):
self.timestep += 1
v = np.tanh(float(self.timestep) / self.config.get("width", 1))
v *= self.config.get("height", 1)
# Here we use `episode_reward_mean`, but you can also report other
# objectives such as loss or accuracy.
return {"episode_reward_mean": v}
def _save(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, "checkpoint")
with open(path, "w") as f:
f.write(json.dumps({"timestep": self.timestep}))
return path
def _restore(self, checkpoint_path):
with open(checkpoint_path) as f:
self.timestep = json.loads(f.read())["timestep"]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--ray-address",
help="Address of Ray cluster for seamless distributed execution.")
args, _ = parser.parse_known_args()
ray.init(address=args.ray_address)
# asynchronous hyperband early stopping, configured with
# `episode_reward_mean` as the
# objective and `training_iteration` as the time unit,
# which is automatically filled by Tune.
ahb = AsyncHyperBandScheduler(
time_attr="training_iteration",
metric="episode_reward_mean",
mode="max",
grace_period=5,
max_t=100)
run(MyTrainableClass,
name="asynchyperband_test",
scheduler=ahb,
stop={"training_iteration": 1 if args.smoke_test else 99999},
num_samples=20,
resources_per_trial={
"cpu": 1,
"gpu": 0
},
config={
"width": sample_from(lambda spec: 10 + int(90 * random.random())),
"height": sample_from(lambda spec: int(100 * random.random())),
})
| stephanie-wang/ray | python/ray/tune/examples/async_hyperband_example.py | Python | apache-2.0 | 2,373 |
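The dummy trainable's reward is `height * tanh(timestep / width)`, so it rises quickly and then saturates near `height`; the scheduler stops the slow risers early. A quick standalone look at that curve, with arbitrarily chosen width and height:

```python
import numpy as np

# Reward curve used by MyTrainableClass._train: saturates near `height`
# as the timestep grows; `width` controls how fast. Values here are arbitrary.
width, height = 10, 100
for timestep in (1, 5, 10, 50):
    print(timestep, height * np.tanh(float(timestep) / width))
# 1 -> ~9.97, 5 -> ~46.2, 10 -> ~76.2, 50 -> ~99.99
```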
# coding=utf-8
# URL: https://pymedusa.com
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
"""Custom exceptions used or raised by indexer_api"""
from tvdb_api.tvdb_exceptions import (tvdb_exception, tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_showincomplete,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
indexerExcepts = ["indexer_exception", "indexer_error", "indexer_userabort", "indexer_shownotfound", "indexer_showincomplete",
"indexer_seasonnotfound", "indexer_episodenotfound", "indexer_attributenotfound"]
tvdbExcepts = ["tvdb_exception", "tvdb_error", "tvdb_userabort", "tvdb_shownotfound", "tvdb_showincomplete",
"tvdb_seasonnotfound", "tvdb_episodenotfound", "tvdb_attributenotfound"]
# link API exceptions to our exception handler
indexer_exception = tvdb_exception
indexer_error = tvdb_error
indexer_userabort = tvdb_userabort
indexer_attributenotfound = tvdb_attributenotfound
indexer_episodenotfound = tvdb_episodenotfound
indexer_seasonnotfound = tvdb_seasonnotfound
indexer_shownotfound = tvdb_shownotfound
indexer_showincomplete = tvdb_showincomplete
| Thraxis/pymedusa | sickbeard/indexers/indexer_exceptions.py | Python | gpl-3.0 | 1,776 |
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
import thinkbayes2
import thinkplot
from collections import Counter
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
"""Represents a survival function."""
def __init__(self, ts, ss, label=''):
self.ts = ts
self.ss = ss
self.label = label
def __len__(self):
return len(self.ts)
def __getitem__(self, t):
return self.Prob(t)
def Prob(self, t):
"""Returns S(t), the probability that corresponds to value t.
t: time
returns: float probability
"""
return np.interp(t, self.ts, self.ss, left=1.0)
def Probs(self, ts):
"""Gets probabilities for a sequence of values."""
return np.interp(ts, self.ts, self.ss, left=1.0)
def Items(self):
"""Sorted list of (t, s) pairs."""
return zip(self.ts, self.ss)
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, survival function)
"""
return self.ts, self.ss
def MakeHazardFunction(self, label=''):
"""Computes the hazard function.
This simple version does not take into account the
spacing between the ts. If the ts are not equally
spaced, it is not valid to compare the magnitude of
the hazard function across different time steps.
label: string
returns: HazardFunction object
"""
lams = pd.Series(index=self.ts)
prev = 1.0
for t, s in zip(self.ts, self.ss):
lams[t] = (prev - s) / prev
prev = s
return HazardFunction(lams, label=label)
def MakePmf(self, filler=None):
"""Makes a PMF of lifetimes.
filler: value to replace missing values
returns: Pmf
"""
cdf = thinkbayes2.Cdf(self.ts, 1-self.ss)
pmf = thinkbayes2.Pmf()
for val, prob in cdf.Items():
pmf.Set(val, prob)
cutoff = cdf.ps[-1]
if filler is not None:
pmf[filler] = 1-cutoff
return pmf
def RemainingLifetime(self, filler=None, func=thinkbayes2.Pmf.Mean):
"""Computes remaining lifetime as a function of age.
func: function from conditional Pmf to expected liftime
returns: Series that maps from age to remaining lifetime
"""
pmf = self.MakePmf(filler=filler)
d = {}
for t in sorted(pmf.Values())[:-1]:
pmf[t] = 0
if pmf.Total():
pmf.Normalize()
d[t] = func(pmf) - t
return pd.Series(d)
def MakeSurvivalFromSeq(values, label=''):
"""Makes a survival function based on a complete dataset.
values: sequence of observed lifespans
returns: SurvivalFunction
"""
counter = Counter(values)
ts, freqs = zip(*sorted(counter.items()))
ts = np.asarray(ts)
    ps = np.cumsum(freqs, dtype=float)
ps /= ps[-1]
ss = 1 - ps
return SurvivalFunction(ts, ss, label)
def MakeSurvivalFromCdf(cdf, label=''):
"""Makes a survival function based on a CDF.
cdf: Cdf
returns: SurvivalFunction
"""
ts = cdf.xs
ss = 1 - cdf.ps
return SurvivalFunction(ts, ss, label)
class HazardFunction(object):
"""Represents a hazard function."""
def __init__(self, d, label=''):
"""Initialize the hazard function.
d: dictionary (or anything that can initialize a series)
label: string
"""
self.series = pd.Series(d)
self.label = label
def __len__(self):
return len(self.series)
def __getitem__(self, t):
return self.series[t]
def Get(self, t, default=np.nan):
return self.series.get(t, default)
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, hazard function)
"""
return self.series.index, self.series.values
def MakeSurvival(self, label=''):
"""Makes the survival function.
returns: SurvivalFunction
"""
ts = self.series.index
ss = (1 - self.series).cumprod()
sf = SurvivalFunction(ts, ss, label=label)
return sf
def Extend(self, other):
"""Extends this hazard function by copying the tail from another.
other: HazardFunction
"""
last_index = self.series.index[-1] if len(self) else 0
more = other.series[other.series.index > last_index]
self.series = pd.concat([self.series, more])
def Truncate(self, t):
"""Truncates this hazard function at the given value of t.
t: number
"""
self.series = self.series[self.series.index < t]
def ConditionalSurvival(pmf, t0):
"""Computes conditional survival function.
Probability that duration exceeds t0+t, given that
duration >= t0.
pmf: Pmf of durations
t0: minimum time
returns: tuple of (ts, conditional survivals)
"""
cond = thinkbayes2.Pmf()
for t, p in pmf.Items():
if t >= t0:
cond.Set(t-t0, p)
cond.Normalize()
return MakeSurvivalFromCdf(cond.MakeCdf())
def PlotConditionalSurvival(durations):
"""Plots conditional survival curves for a range of t0.
durations: list of durations
"""
pmf = thinkbayes2.Pmf(durations)
times = [8, 16, 24, 32]
thinkplot.PrePlot(len(times))
for t0 in times:
sf = ConditionalSurvival(pmf, t0)
label = 't0=%d' % t0
thinkplot.Plot(sf, label=label)
thinkplot.Show()
def PlotSurvival(complete):
"""Plots survival and hazard curves.
complete: list of complete lifetimes
"""
thinkplot.PrePlot(3, rows=2)
cdf = thinkbayes2.Cdf(complete, label='cdf')
sf = MakeSurvivalFromCdf(cdf, label='survival')
print(cdf[13])
print(sf[13])
thinkplot.Plot(sf)
thinkplot.Cdf(cdf, alpha=0.2)
thinkplot.Config()
thinkplot.SubPlot(2)
hf = sf.MakeHazardFunction(label='hazard')
print(hf[39])
thinkplot.Plot(hf)
thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
"""Plots the hazard function and survival function.
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
"""
# plot S(t) based on only complete pregnancies
sf = MakeSurvivalFromSeq(complete)
thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
thinkplot.PrePlot(2)
# plot the hazard function
hf = EstimateHazardFunction(complete, ongoing)
thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
# plot the survival function
sf = hf.MakeSurvival()
thinkplot.Plot(sf, label='S(t)')
thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', verbose=False):
"""Estimates the hazard function by Kaplan-Meier.
http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
verbose: whether to display intermediate results
"""
if np.sum(np.isnan(complete)):
raise ValueError("complete contains NaNs")
if np.sum(np.isnan(ongoing)):
raise ValueError("ongoing contains NaNs")
hist_complete = Counter(complete)
hist_ongoing = Counter(ongoing)
ts = list(hist_complete | hist_ongoing)
ts.sort()
at_risk = len(complete) + len(ongoing)
lams = pd.Series(index=ts)
for t in ts:
ended = hist_complete[t]
censored = hist_ongoing[t]
lams[t] = ended / at_risk
if verbose:
print('%0.3g\t%d\t%d\t%d\t%0.2g' %
(t, at_risk, ended, censored, lams[t]))
at_risk -= ended + censored
return HazardFunction(lams, label=label)
def EstimateHazardNumpy(complete, ongoing, label=''):
"""Estimates the hazard function by Kaplan-Meier.
Just for fun, this is a version that uses NumPy to
eliminate loops.
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
"""
hist_complete = Counter(complete)
hist_ongoing = Counter(ongoing)
ts = set(hist_complete) | set(hist_ongoing)
at_risk = len(complete) + len(ongoing)
ended = [hist_complete[t] for t in ts]
ended_c = np.cumsum(ended)
censored_c = np.cumsum([hist_ongoing[t] for t in ts])
not_at_risk = np.roll(ended_c, 1) + np.roll(censored_c, 1)
not_at_risk[0] = 0
at_risk_array = at_risk - not_at_risk
hs = ended / at_risk_array
lams = dict(zip(ts, hs))
return HazardFunction(lams, label=label)
| AllenDowney/ProbablyOverthinkingIt | survival.py | Python | mit | 8,921 |
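`EstimateHazardFunction` is a Kaplan-Meier estimate: at each observed time, the hazard is the number of lifetimes ending there divided by the number still at risk, and censored (ongoing) lifetimes only shrink the risk set. A standalone numeric sketch with made-up lifetimes, returning a plain dict instead of a `HazardFunction`:

```python
from collections import Counter

def kaplan_meier_hazard(complete, ongoing):
    """Hazard at each time t: fraction of those still at risk that end at t."""
    hist_complete = Counter(complete)
    hist_ongoing = Counter(ongoing)
    at_risk = len(complete) + len(ongoing)
    lams = {}
    for t in sorted(set(hist_complete) | set(hist_ongoing)):
        ended = hist_complete[t]
        censored = hist_ongoing[t]
        lams[t] = ended / float(at_risk)
        at_risk -= ended + censored
    return lams

# 5 complete lifetimes and 2 censored (still ongoing) ones
print(kaplan_meier_hazard([1, 2, 2, 3, 3], [2, 3]))
# {1: 0.142857..., 2: 0.333333..., 3: 0.666666...}
```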
from django.db import models
from django.db.models import Count
import data
import datetime
import sys
class House(models.Model):
year_construct = models.IntegerField(verbose_name="Year of construction")
TYPE_FLAT = "FLAT"
TYPE_HOUSE_4FACE = "H4"
TYPE_HOUSE_3FACE = "H3"
TYPE_HOUSE_2FACE = "H2"
TYPE_OF_HOUSE = (
(TYPE_FLAT, "Flat"),
(TYPE_HOUSE_4FACE, "4 Face House"),
(TYPE_HOUSE_3FACE, "3 Face House"),
(TYPE_HOUSE_2FACE, "2 Face House"),
)
type = models.CharField(max_length=4,
choices=TYPE_OF_HOUSE,
default=TYPE_HOUSE_4FACE,
verbose_name = "Type of house")
LOCATION_URBAN = "URBAN"
LOCATION_PERIPHERAL = "PERIPHERAL"
LOCATION_RURAL = "RURAL"
LOCATION = (
(LOCATION_URBAN, "Urban"),
(LOCATION_PERIPHERAL, "Peripheral"),
(LOCATION_RURAL, "Rural"),
)
location = models.CharField(max_length=10,
choices=LOCATION,
default=LOCATION_URBAN)
length = models.IntegerField(verbose_name="Maximum length of the house")
width = models.IntegerField(verbose_name="Maximum width of the house")
name = models.CharField(max_length = 64)
def getDimension(self) :
"""Return the real size of the house (not the house.length/width, computed from the position of the walls of each floors"""
minCorner = Position(x=sys.maxint,y=sys.maxint)
maxCorner = Position(x=0,y=0)
for f in self.floors.all():
floorMinCorner,floorMaxCorner = f.getDimension()
minCorner.x = min(minCorner.x,floorMinCorner.x)
minCorner.y = min(minCorner.y,floorMinCorner.y)
maxCorner.x = max(maxCorner.x,floorMaxCorner.x)
maxCorner.y = max(maxCorner.y,floorMaxCorner.y)
return (minCorner,maxCorner)
def getEnergies(self):
"""Return the energies used in the house"""
q = ('SELECT e.* FROM builder_energy e ' +
'INNER JOIN builder_appliancetype_energies ate ON (ate.energy_id = e.id) '+
'INNER JOIN builder_appliancetype at ON (ate.appliancetype_id = at.id) '+
'INNER JOIN builder_appliancelink al ON (al.appliance_id = at.id) ' +
'INNER JOIN builder_floor f ON (f.id=al.floor_id) '+
'WHERE f.house_id = %s OR e.type = \'STATE\' ' +
'GROUP BY e.id')
return Energy.objects.raw( q, [self.id])
def get_default_wall_int(self):
"""Return the default size of an internal wall in cm"""
return 12;
def get_default_insulating_int(self):
"""Return the default size of the isolation of an internal wall in cm"""
return 3;
def get_default_wall_ext(self):
"""Return the default size of an external wall in cm"""
if (self.year_construct < 1960):
return 22;
else:
return 28;
def get_default_insulating_ext(self):
"""Return the default size of the isolation of an external wall in cm"""
if (self.year_construct < 1960):
return 14;
else:
return 18;
class Meta:
permissions = (
("view_house", "Can see house data"),
("edit_house", "Can change house data"),
)
class Position(models.Model):
x = models.FloatField()
y = models.FloatField()
def natural_key(self):
return {"x" : self.x, "y" : self.y}
class Floor(models.Model):
house = models.ForeignKey(House, related_name='floors')
height = models.FloatField()
FLOOR_FLOOR = "FLOOR"
FLOOR_CRAWL_SPACE = "CRAWL_SPACE"
FLOOR_SLAB = "SLAB"
FLOOR_CELLAR = "CELLAR"
FLOOR_ROOF_ATTIC = "ROOF_ATTIC"
FLOOR_ROOF_LIVING = "ROOF_LIVING"
FLOOR = ((FLOOR_FLOOR, "Normal floor"),
(FLOOR_CRAWL_SPACE, "Crawl space"),
(FLOOR_SLAB, "Slab"),
(FLOOR_CELLAR, "Cellar"),
(FLOOR_ROOF_ATTIC, "Stockage attic"),
(FLOOR_ROOF_LIVING, "Living attic"))
floor = models.CharField(max_length=12,
choices=FLOOR,
default=FLOOR_FLOOR)
def getDimension(self):
"""Returns the most upperleft position of a wall, and the most bottom down"""
leftCorner = Position(x=sys.maxint,y=sys.maxint)
maxCorner = Position(x=0,y=0)
for w in self.walls.select_related('start','end').all():
if w.start.x < leftCorner.x : leftCorner.x = w.start.x
if w.start.x > maxCorner.x : maxCorner.x = w.start.x
if w.start.y < leftCorner.y : leftCorner.y = w.start.y
if w.start.y > maxCorner.y : maxCorner.y = w.start.y
if w.end.x < leftCorner.x : leftCorner.x = w.end.x
if w.end.x > maxCorner.x : maxCorner.x = w.end.x
if w.end.y < leftCorner.y : leftCorner.y = w.end.y
if w.end.y > maxCorner.y : maxCorner.y = w.end.y
return (leftCorner,maxCorner)
def has_meters(self):
"""True if there is meters on this floor"""
for a in self.appliance_links.all():
if (a.meter.all().count() > 0):
return True;
return False
def has_appliances(self):
"""True if there is appliances on this floor"""
n = self.appliance_links.aggregate(c = Count('id'));
return n['c'] > 0
class Wall(models.Model):
start = models.OneToOneField(Position,related_name='wall_start')
end = models.OneToOneField(Position,related_name='wall_end')
floor = models.ForeignKey(Floor,related_name='walls')
insulating_size = models.FloatField()
wall_size = models.FloatField()
class Window(models.Model):
center = models.OneToOneField(Position)
width = models.FloatField()
height = models.FloatField()
floor = models.ForeignKey(Floor)
WINDOW_SIMPLE = "SIMPLE"
WINDOW_DOUBLE = "DOUBLE"
WINDOW_DOUBLE_HE = "HE"
WINDOW_TRIPLE = "TRIPLE"
WINDOW_TYPE = ((WINDOW_SIMPLE, "Simple"),
(WINDOW_DOUBLE, "Double"),
(WINDOW_DOUBLE_HE, "Double with high efficiency"),
(WINDOW_TRIPLE, "Triple"))
type = models.CharField(max_length=6,
choices=WINDOW_TYPE,
default=WINDOW_DOUBLE_HE)
WINDOW_ORIENTATION_H = "H"
WINDOW_ORIENTATION_V = "V"
WINDOW_ORIENTATION = ((WINDOW_ORIENTATION_H, "Horizontal"),
(WINDOW_ORIENTATION_V, "Vertical"))
orientation = models.CharField(max_length=1,
choices=WINDOW_ORIENTATION,
default=WINDOW_ORIENTATION_H)
class Person(models.Model):
age = models.PositiveSmallIntegerField(max_length=3)
house = models.ForeignKey(House)
name = models.CharField(max_length = 32)
WORK_UNEMPLOYED = "UNEMPLOYED"
WORK_LABORER = "LABORER"
WORK_STUDENT = "STUDENT"
WORK_EMPLOYEE = "EMPLOYEE"
WORK_FREELANCE = "FREELANCE"
WORK = ((WORK_UNEMPLOYED, "Unemployed"),
(WORK_LABORER, "Laborer"),
(WORK_EMPLOYEE, "Employee"),
(WORK_STUDENT, "Student"),
(WORK_FREELANCE, "Freelance"))
work = models.CharField(max_length=10,
choices=WORK,
default=WORK_UNEMPLOYED)
class Energy(models.Model):
title = models.CharField(max_length = 32)
short_name = models.CharField(max_length = 12)
unit = models.CharField(max_length = 8, blank = True, default="")
unit_instant = models.CharField(max_length = 8, blank = True, default="")
color = models.CharField(max_length = 15)
overall = models.BooleanField();
TYPE_POWER = "POWER"
TYPE_CONSU = "CONSU"
TYPE_STATE = "STATE"
TYPE = ((TYPE_POWER, "Power"),
(TYPE_CONSU, "Consumable"),
(TYPE_STATE, "State"))
type = models.CharField( max_length=10,
choices=TYPE,
default=TYPE_POWER)
lhv = models.FloatField()
price = models.FloatField()
def __unicode__(self):
return self.title
class ApplianceType(models.Model):
title = models.CharField(max_length = 32)
APPLIANCE_CATEGORY = (('H','Heater'),
('L','Lights'),
('C','Computing'),
('ET','TV'),
('K','Kitchen'),
('W','Washing'),
('ES','Sound'),
('B','Bathroom'),
('I','Invisible'),)
APPLIANCE_CATEGORY_VISIBLE = APPLIANCE_CATEGORY[:-1]
category = models.CharField(max_length=2,
choices=APPLIANCE_CATEGORY,
default='H')
energies = models.ManyToManyField(Energy,blank=True)
class ApplianceLink(models.Model):
appliance = models.ForeignKey(ApplianceType)
center = models.OneToOneField(Position)
floor = models.ForeignKey(Floor,related_name='appliance_links')
class Meter(models.Model):
energy = models.ForeignKey(Energy)
MODE_TOTAL = "TOT"
MODE_INSTANT = "INS"
MODE_DIFFERENCE = "DIF"
MODES = (
(MODE_TOTAL, "Total count"),
(MODE_INSTANT, "Instant consumption"),
(MODE_DIFFERENCE, "Difference"),
)
mode = models.CharField(max_length=3,
choices=MODES,
default=MODE_TOTAL,
verbose_name = "Mode of meter")
hash = models.CharField(max_length=12)
house = models.ForeignKey(House)
appliance_link = models.ForeignKey(ApplianceLink,blank=True,null=True,related_name='meter');
options = models.TextField();
def get_instant(self, delta):
"""Return the last instant consumption measured on this meter, return NODATA if no reading received
@param delta: consider only data which are maximum delta seconds old
"""
try:
latest = self.readings.latest('date')
import pytz
if (delta != None and (pytz.utc.localize(datetime.datetime.now()) - latest.date).seconds > float(delta)):
return 'NODATA'
except data.models.Reading.DoesNotExist:
return 'NODATA'
if (self.mode == Meter.MODE_DIFFERENCE):
try:
last = self.readings.order_by('-date')[1:2].get()
dt = latest.date - last.date
consumed = latest.amount
return (consumed / float(dt.seconds)) * 3600.0
except data.models.Reading.DoesNotExist:
return 'NODATA'
elif (self.mode == Meter.MODE_INSTANT or self.energy.type == Energy.TYPE_STATE):
return latest.amount
elif (self.mode == Meter.MODE_TOTAL):
try:
last = self.readings.filter(date__lt=latest.date).order_by('-date')[:1].get()
dt = latest.date - last.date
consumed = latest.amount - last.amount
return (consumed / float(dt.seconds)) * 3600.0
except data.models.Reading.DoesNotExist:
                return 'NODATA'
| tbarbette/monitoring | builder/models.py | Python | gpl-2.0 | 11,895 |
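In `Meter.get_instant`, a total-mode meter's instant consumption is the difference between the two most recent cumulative readings, divided by the elapsed seconds and scaled to an hourly rate. A small sketch of just that arithmetic, with made-up readings:

```python
import datetime

# (date, amount) pairs standing in for two stored Reading rows
last = (datetime.datetime(2014, 1, 1, 10, 0, 0), 1200.0)
latest = (datetime.datetime(2014, 1, 1, 10, 30, 0), 1203.5)

dt = latest[0] - last[0]
consumed = latest[1] - last[1]
print((consumed / float(dt.seconds)) * 3600.0)  # 7.0 units per hour
```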
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2016, Martin Guenther
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This is a converter for the Rawseeds Datasets to ROSbag files'''
import rospy
import rosbag
import numpy #Used for matrices and quaternion transform for IMU, don't forget to sudo apt-get install python-numpy
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Imu
from math import pi
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Quaternion
import tf
#The below snippet is taken from http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
        q *= 0.5 / numpy.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def make_tf_msg(x, y, theta, t): #This sets up the transform maker, how the ROSbag file makes the TF topic
trans = TransformStamped()
trans.header.stamp = t
trans.header.frame_id = '/odom'
trans.child_frame_id = '/base_footprint'
trans.transform.translation.x = x
trans.transform.translation.y = y
q = tf.transformations.quaternion_from_euler(0, 0, theta)
trans.transform.rotation.x = q[0]
trans.transform.rotation.y = q[1]
trans.transform.rotation.z = q[2]
trans.transform.rotation.w = q[3]
msg = TFMessage()
msg.transforms.append(trans)
return msg
def make_tf2_msg(t): #This sets up the transform maker, how the ROSbag file makes the TF topic
trans = TransformStamped()
trans.header.stamp = t
trans.header.frame_id = '/base_link'
trans.child_frame_id = '/SICK_FRONT'
trans.transform.translation.x = .08
trans.transform.translation.y = 0
trans.transform.translation.z = .450
q = tf.transformations.quaternion_from_euler(0, 0, 0)
trans.transform.rotation.x = q[0]
trans.transform.rotation.y = q[1]
trans.transform.rotation.z = q[2]
trans.transform.rotation.w = q[3]
msg = TFMessage()
msg.transforms.append(trans)
return msg
def make_tf3_msg(t): #This sets up the transform maker, how the ROSbag file makes the TF topic
trans = TransformStamped()
trans.header.stamp = t
trans.header.frame_id = '/base_footprint'
trans.child_frame_id = '/base_link'
trans.transform.translation.x = 0
trans.transform.translation.y = 0
trans.transform.translation.z = 0
q = tf.transformations.quaternion_from_euler(0, 0, 0)
trans.transform.rotation.x = q[0]
trans.transform.rotation.y = q[1]
trans.transform.rotation.z = q[2]
trans.transform.rotation.w = q[3]
msg = TFMessage()
msg.transforms.append(trans)
return msg
def make_tf4_msg(t): #This sets up the transform maker, how the ROSbag file makes the TF topic
trans = TransformStamped()
trans.header.stamp = t
trans.header.frame_id = '/base_link'
trans.child_frame_id = '/IMU'
trans.transform.translation.x = -0.192
trans.transform.translation.y = -0.007
trans.transform.translation.z = 0.537
q = tf.transformations.quaternion_from_euler(0, 0, 0)
trans.transform.rotation.x = q[0]
trans.transform.rotation.y = q[1]
trans.transform.rotation.z = q[2]
    trans.transform.rotation.w = q[3]
    msg = TFMessage()
    msg.transforms.append(trans)
    return msg
def make_tf5_msg(t): #This sets up the transform maker, how the ROSbag file makes the TF topic
trans = TransformStamped()
trans.header.stamp = t
trans.header.frame_id = '/base_link'
trans.child_frame_id = '/SICK_REAR'
trans.transform.translation.x = -0.463
trans.transform.translation.y = 0.001
trans.transform.translation.z = 0.454
q = tf.transformations.quaternion_from_euler(0, 0, pi)
trans.transform.rotation.x = q[0]
trans.transform.rotation.y = q[1]
trans.transform.rotation.z = q[2]
trans.transform.rotation.w = q[3]
msg = TFMessage()
msg.transforms.append(trans)
return msg
with rosbag.Bag('rawseeds.bag', 'w') as bag: #Create the rawseeds.bag bag file and use it
with open('SICK_FRONT_Matched_CSV.csv') as dataset: #Open the Sick_Front.csv and use it
for line in dataset.readlines(): #For each line in the dataset, which is the CSV file
line = line.strip() #Get the line
tokens = line.split(',') #And break it into an array of each CSV part
if len(tokens) <= 2: #Ignore the terms if they are less than 2
continue
if 1: #Ignore this line, we're not doing the .clf stuff
msg = LaserScan() #Sick_Front is a Laser Scan using the Sick sensor
                num_scans = int(tokens[1]) #The second field gives the number of range readings in this scan
'''if num_scans != 181 or len(tokens) < num_scans + 9:
rospy.logwarn("unsupported scan format")
continue''' #This part is a check to make sure you're using the right file
                msg.header.frame_id = 'SICK_FRONT' #frame_id ties this scan to the SICK_FRONT sensor frame
t = rospy.Time(float(tokens[0])) #The first term states the time in seconds
msg.header.stamp = t #And now it's the header
msg.angle_min = -90.0 / 180.0 * pi #This is the minimum angle of the sensor scan
msg.angle_max = 90.0 / 180.0 * pi #This is the maximum angle of the sensor scan
msg.angle_increment = pi / num_scans #Each increment is how far the sensor moves in angular movement between scans
msg.time_increment = 0.2 / 360.0 #This is how long each scan takes per angle?
msg.scan_time = 0.2 #This is how long each scan takes?
msg.range_min = 0.001 #This is the minimum range of the sensor?
msg.range_max = 50.0 #This is the maximum range of the sensor?
msg.ranges = [float(r) for r in tokens[2:(num_scans + 2)]] #This is the part where it pastes the data into that message of the bag file
msg.intensities = []
bag.write('SICK_FRONT', msg, t) #Create this and call it the "SICK_FRONT" topic in the bag file
    with open('SICK_REAR_Matched_CSV.csv') as dataset: #Open SICK_REAR_Matched_CSV.csv and use it
for line in dataset.readlines(): #For each line in the dataset, which is the CSV file
line = line.strip() #Get the line
tokens = line.split(',') #And break it into an array of each CSV part
if len(tokens) <= 2: #Ignore the terms if they are less than 2
continue
if 1: #Ignore this line, we're not doing the .clf stuff
msg = LaserScan() #Sick_Front is a Laser Scan using the Sick sensor
                num_scans = int(tokens[1]) #The second field gives the number of range readings in this scan
'''if num_scans != 181 or len(tokens) < num_scans + 9:
rospy.logwarn("unsupported scan format")
continue''' #This part is a check to make sure you're using the right file
                msg.header.frame_id = 'SICK_REAR' #frame_id ties this scan to the SICK_REAR sensor frame
t = rospy.Time(float(tokens[0])) #The first term states the time in seconds
msg.header.stamp = t #And now it's the header
msg.angle_min = -90.0 / 180.0 * pi #This is the minimum angle of the sensor scan
msg.angle_max = 90.0 / 180.0 * pi #This is the maximum angle of the sensor scan
msg.angle_increment = pi / num_scans #Each increment is how far the sensor moves in angular movement between scans
msg.time_increment = 0.2 / 360.0 #This is how long each scan takes per angle?
msg.scan_time = 0.2 #This is how long each scan takes?
msg.range_min = 0.001 #This is the minimum range of the sensor?
msg.range_max = 50.0 #This is the maximum range of the sensor?
msg.ranges = [float(r) for r in tokens[2:(num_scans + 2)]] #This is the part where it pastes the data into that message of the bag file
msg.intensities = []
bag.write('SICK_REAR', msg, t) #Create this and call it the "SICK_REAR" topic in the bag file
with open('IMU_Matched_CSV.csv') as dataset: #Open the IMU file and use it
for line in dataset.readlines(): #For each line in the dataset, which is the CSV file
line = line.strip() #Get the line
tokens = line.split(',') #And break it into an array of each CSV part
if len(tokens) <= 2: #Ignore the terms if they are less than 2
continue
            msg = Imu() #Each line of the IMU CSV becomes a sensor_msgs/Imu message
msg.header.frame_id = 'IMU' #The message header labels the topic
t = rospy.Time(float(tokens[0])) #The first term states the time in seconds
msg.header.stamp = t #And now it's the header
imumatrixelements = numpy.array([[float(tokens[11]), float(tokens[12]), float(tokens[13])], [float(tokens[14]), float(tokens[15]), float(tokens[16])], [float(tokens[17]), float(tokens[18]), float(tokens[19])]])
imuquaternion = quaternion_from_matrix(imumatrixelements) #This returns the quaternion from the matrix we just created
#print(imuquaternion)
msg.orientation.x = float(imuquaternion[1])
msg.orientation.y = float(imuquaternion[2])
msg.orientation.z = float(imuquaternion[3])
msg.orientation.w = float(imuquaternion[0])
'''We are now going to define the angular velocities and linear velocities in the IMU type in the bag file by their appropriate numbers from the rawseeds files'''
angvel = Vector3() #IMU's angular_velocity variable requires that the x, y, and z coordinates are given using Vector3() library. This sets that up. Therefore, Vector3 must be imported from geometry_msgs for this to work.
angvel.x = float(tokens[5])
angvel.y = float(tokens[6])
angvel.z = float(tokens[7])
msg.angular_velocity.x = angvel.x
msg.angular_velocity.y = angvel.y
msg.angular_velocity.z = angvel.z
linacc = Vector3()
linacc.x = float(tokens[2])
linacc.y = float(tokens[3])
linacc.z = float(tokens[4])
msg.linear_acceleration.x = linacc.x
msg.linear_acceleration.y = linacc.y
msg.linear_acceleration.z = linacc.z
bag.write('IMU', msg, t) #Create this and call it the "IMU" topic in the bag file
    with open('Bicocca_2009-02-25b-ODOMETRY_XYT_Matched.csv') as dataset: #Open the matched odometry CSV and use it
count = 0
for line in dataset.readlines(): #For each line in the dataset, which is the CSV file
line = line.strip() #Get the line
tokens = line.split(',') #And break it into an array of each CSV part
count = count + 1
if len(tokens) <= 2: #Ignore the terms if they are less than 2
continue
if 1: #Ignore this line, we're not doing the .clf stuff
t = rospy.Time(float(tokens[0]))
odom_x, odom_y, odom_theta = [float(r) for r in tokens[(4):(7)]] #Collects the odometry data in the file and loads it
                tf_msg = make_tf_msg(odom_x, odom_y, odom_theta, t) #Build the odom -> base_footprint transform from this odometry sample
bag.write('tf', tf_msg, t) #This writes the transform based on the odometry data
tf_msg = make_tf2_msg(t)
bag.write('tf', tf_msg, t) #This writes the transform for the SICK_FRONT to the base_link
tf_msg = make_tf3_msg(t)
bag.write('tf', tf_msg, t) #This writes the transform for the footprint based on the base link
'''tf_msg = make_tf4_msg(t)
bag.write('tf', tf_msg, t) #This writes the transform for the IMU based on the base link'''
tf_msg = make_tf5_msg(t)
bag.write('tf', tf_msg, t) #This writes the transform for the SICK_REAR based on the base link
'''elif tokens[0] == 'ODOM':
odom_x, odom_y, odom_theta = [float(t) for t in tokens[1:4]]
t = rospy.Time(float(tokens[7]))
tf_msg = make_tf_msg(odom_x, odom_y, odom_theta, t)
bag.write('tf', tf_msg, t)'''
| bobman192/ROS | csv2bag.py | Python | bsd-3-clause | 16,639 |
# flake8: NOQA
from cupyx.scipy.fft._fft import (
fft, ifft, fft2, ifft2, fftn, ifftn,
rfft, irfft, rfft2, irfft2, rfftn, irfftn,
hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn
)
from cupyx.scipy.fft._fft import (
__ua_domain__, __ua_convert__, __ua_function__)
from cupyx.scipy.fft._fft import _scipy_150, _scipy_160
from cupyx.scipy.fft._helper import next_fast_len # NOQA
from cupy.fft import fftshift, ifftshift, fftfreq, rfftfreq
from cupyx.scipy.fftpack import get_fft_plan
from cupyx.scipy.fft._realtransforms import (
dct, dctn, dst, dstn, idct, idctn, idst, idstn
)
| cupy/cupy | cupyx/scipy/fft/__init__.py | Python | mit | 591 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questionnaire', '0015_merge'),
]
operations = [
migrations.AlterModelOptions(
name='questionnaire',
options={'ordering': ['-updated'], 'permissions': (('review_questionnaire', 'Can review questionnaire'), ('publish_questionnaire', 'Can publish questionnaire'), ('assign_questionnaire', 'Can assign questionnaire (for review/publish)'), ('view_questionnaire', 'Can view questionnaire'), ('edit_questionnaire', 'Can edit questionnaire'), ('delete_questionnaire', 'Can delete questionnaire'), ('flag_unccd_questionnaire', 'Can flag UNCCD questionnaire'), ('unflag_unccd_questionnaire', 'Can unflag UNCCD questionnaire'))},
),
migrations.AlterField(
model_name='questionnairetranslation',
name='language',
field=models.CharField(max_length=63, choices=[('en', 'English'), ('fr', 'French'), ('es', 'Spanish'), ('ru', 'Russian'), ('km', 'Khmer'), ('ar', 'Arabic'), ('bs', 'Bosnian'), ('pt', 'Portuguese')]),
),
]
| CDE-UNIBE/qcat | apps/questionnaire/migrations/0016_auto_20170124_1119.py | Python | apache-2.0 | 1,191 |
"""
yamllog
===============
.. moduleauthor:: David Fallis
"""
import yaml
OUTPUT_ORDER = ['plot_name',
'variable',
'depth',
'plot_projection',
'plot_type',
'comp_file',
'dates',
'comp_dates',
'frequency',
'units',
'stats'
]
def convert(plot):
yamplot = {}
yamplot['plot_name'] = plot['plot_name']
yamplot['variable'] = plot['variable']
yamplot['depth'] = str(plot['plot_depth'])
yamplot['plot_projection'] = plot['plot_projection']
yamplot['plot_type'] = plot['plot_type']
try:
yamplot['comp_file'] = plot['comp_file']
    except KeyError:
yamplot['comp_file'] = 'N/A'
yamplot['dates'] = plot['dates']
yamplot['comp_dates'] = plot['comp_dates']
yamplot['frequency'] = plot['frequency']
yamplot['units'] = str(plot['units'])
try:
yamplot['stats'] = plot['stats']
    except KeyError:
yamplot['stats'] = 'N/A'
# for key in plot['stats']:
# if type(plot['stats'][key]) is dict:
# yamplot['stats'][key] = plot['stats'][key]
# else:
# yamplot['stats'][plot['obs']] = plot['stats']
return yamplot
def output(yamplot):
with open('logs/log.yml', 'a') as outfile:
outfile.write('\n-----\n\n')
for name in OUTPUT_ORDER:
printer = {name: yamplot[name]}
with open('logs/log.yml', 'a') as outfile:
outfile.write(yaml.dump(printer, default_flow_style=False))
def log(plot):
yamplot = convert(plot)
output(yamplot)
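# Illustrative example of the dictionary convert()/log() expect (the values
# below are hypothetical; only the keys matter):
# plot = {'plot_name': 'sst_map', 'variable': 'sst', 'plot_depth': 0,
#         'plot_projection': 'mercator', 'plot_type': 'climatology',
#         'comp_file': 'obs.nc', 'dates': '1980-2000', 'comp_dates': '1980-2000',
#         'frequency': 'mon', 'units': 'K', 'stats': {'rmse': 1.2}}
# log(plot)  # appends a '-----'-separated entry to logs/log.yml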
if __name__ == "__main__":
pass
| fallisd/validate | validate/yamllog.py | Python | gpl-2.0 | 1,682 |
import typing
from os import PathLike
from starlette.background import BackgroundTask
from starlette.responses import Response
from starlette.types import Receive, Scope, Send
try:
import jinja2
# @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1
if hasattr(jinja2, "pass_context"):
pass_context = jinja2.pass_context
else: # pragma: nocover
pass_context = jinja2.contextfunction
except ImportError: # pragma: nocover
jinja2 = None # type: ignore
class _TemplateResponse(Response):
media_type = "text/html"
def __init__(
self,
template: typing.Any,
context: dict,
status_code: int = 200,
headers: typing.Mapping[str, str] = None,
media_type: str = None,
background: BackgroundTask = None,
):
self.template = template
self.context = context
content = template.render(context)
super().__init__(content, status_code, headers, media_type, background)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
request = self.context.get("request", {})
extensions = request.get("extensions", {})
if "http.response.template" in extensions:
await send(
{
"type": "http.response.template",
"template": self.template,
"context": self.context,
}
)
await super().__call__(scope, receive, send)
class Jinja2Templates:
"""
templates = Jinja2Templates("templates")
return templates.TemplateResponse("index.html", {"request": request})
"""
def __init__(
self, directory: typing.Union[str, PathLike], **env_options: typing.Any
) -> None:
assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates"
self.env = self._create_env(directory, **env_options)
def _create_env(
self, directory: typing.Union[str, PathLike], **env_options: typing.Any
) -> "jinja2.Environment":
@pass_context
def url_for(context: dict, name: str, **path_params: typing.Any) -> str:
request = context["request"]
return request.url_for(name, **path_params)
loader = jinja2.FileSystemLoader(directory)
env_options.setdefault("loader", loader)
env_options.setdefault("autoescape", True)
env = jinja2.Environment(**env_options)
env.globals["url_for"] = url_for
return env
def get_template(self, name: str) -> "jinja2.Template":
return self.env.get_template(name)
def TemplateResponse(
self,
name: str,
context: dict,
status_code: int = 200,
headers: typing.Mapping[str, str] = None,
media_type: str = None,
background: BackgroundTask = None,
) -> _TemplateResponse:
if "request" not in context:
raise ValueError('context must include a "request" key')
template = self.get_template(name)
return _TemplateResponse(
template,
context,
status_code=status_code,
headers=headers,
media_type=media_type,
background=background,
)
| encode/starlette | starlette/templating.py | Python | bsd-3-clause | 3,288 |
from thefuck.rules.ls_all import match, get_new_command
from thefuck.types import Command
def test_match():
assert match(Command('ls', ''))
assert not match(Command('ls', 'file.py\n'))
def test_get_new_command():
assert get_new_command(Command('ls empty_dir', '')) == 'ls -A empty_dir'
assert get_new_command(Command('ls', '')) == 'ls -A'
| scorphus/thefuck | tests/rules/test_ls_all.py | Python | mit | 359 |
import os
import subprocess
import tempfile
from mozprocess import ProcessHandler
from tools.serve.serve import make_hosts_file
from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor, # noqa: F401
ServoWebDriverRefTestExecutor) # noqa: F401
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {
"product": "servodriver",
"check_args": "check_args",
"browser": "ServoWebDriverBrowser",
"executor": {
"testharness": "ServoWebDriverTestharnessExecutor",
"reftest": "ServoWebDriverRefTestExecutor",
},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"update_properties": "update_properties",
}
def check_args(**kwargs):
require_arg(kwargs, "binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {
"binary": kwargs["binary"],
"binary_args": kwargs["binary_args"],
"debug_info": kwargs["debug_info"],
"server_config": config,
"user_stylesheets": kwargs.get("user_stylesheets"),
}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
rv = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data, **kwargs)
return rv
def env_extras(**kwargs):
return []
def env_options():
return {"server_host": "127.0.0.1",
"testharnessreport": "testharnessreport-servodriver.js",
"supports_debugger": True}
def update_properties():
return ["debug", "os", "version", "processor", "bits"], None
def write_hosts_file(config):
hosts_fd, hosts_path = tempfile.mkstemp()
with os.fdopen(hosts_fd, "w") as f:
f.write(make_hosts_file(config, "127.0.0.1"))
return hosts_path
class ServoWebDriverBrowser(Browser):
used_ports = set()
init_timeout = 300 # Large timeout for cases where we're booting an Android emulator
def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1",
server_config=None, binary_args=None, user_stylesheets=None):
Browser.__init__(self, logger)
self.binary = binary
self.binary_args = binary_args or []
self.webdriver_host = webdriver_host
self.webdriver_port = None
self.proc = None
self.debug_info = debug_info
self.hosts_path = write_hosts_file(server_config)
self.server_ports = server_config.ports if server_config else {}
self.command = None
self.user_stylesheets = user_stylesheets if user_stylesheets else []
def start(self, **kwargs):
self.webdriver_port = get_free_port(4444, exclude=self.used_ports)
self.used_ports.add(self.webdriver_port)
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
env["EMULATOR_REVERSE_FORWARD_PORTS"] = ",".join(
str(port)
for _protocol, ports in self.server_ports.items()
for port in ports
if port
)
debug_args, command = browser_command(
self.binary,
self.binary_args + [
"--hard-fail",
"--webdriver=%s" % self.webdriver_port,
"about:blank",
],
self.debug_info
)
for stylesheet in self.user_stylesheets:
command += ["--user-stylesheet", stylesheet]
self.command = command
self.command = debug_args + self.command
if not self.debug_info or not self.debug_info.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
env=env,
storeOutput=False)
self.proc.run()
else:
self.proc = subprocess.Popen(self.command, env=env)
self.logger.debug("Servo Started")
def stop(self, force=False):
self.logger.debug("Stopping browser")
if self.proc is not None:
try:
self.proc.kill()
except OSError:
# This can happen on Windows if the process is already dead
pass
def pid(self):
if self.proc is None:
return None
try:
return self.proc.pid
except AttributeError:
return None
def on_output(self, line):
"""Write a line of output from the process to the log"""
self.logger.process_output(self.pid(),
line.decode("utf8", "replace"),
command=" ".join(self.command))
def is_alive(self):
        if self.proc is None:
            return False
        # poll() returns None while the browser process is still running
        return self.proc.poll() is None
def cleanup(self):
self.stop()
os.remove(self.hosts_path)
def executor_browser(self):
assert self.webdriver_port is not None
return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
"webdriver_port": self.webdriver_port,
"init_timeout": self.init_timeout}
| danlrobertson/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/servodriver.py | Python | mpl-2.0 | 5,486 |
class LocusItem(object):
def __init__(self):
self.Locus = ""
self.Chromosome = ""
self.Start = 0
self.End = 0
self._name = ""
self.Overlapped = False
def setLocus(self, chromosome, start, end):
self.Chromosome = chromosome
self.Start = start
self.End = end
def getLocusString(self, extend_bases=0):
return("%s:%d-%d" % (self.Chromosome, self.Start - extend_bases, self.End + extend_bases))
def setLocusString(self, locus):
#print(locus)
self.Locus = locus
parts = locus.split(":")
#print(parts)
self.Chromosome = parts[0]
positions = parts[1].split("-")
self.Start = int(positions[0])
self.End = int(positions[1])
def getName(self):
if self._name == "":
return self.getLocusString()
else:
return self._name
def setName(self, name):
self._name = name
def getLocusFileString(self):
return("%s_%d_%d" % (self.Chromosome, self.Start, self.End))
def str(self):
return self.getLocusString()
def overlapPosition(self, locus, distance = 0):
if self.Start > locus.End + distance:
return False
if locus.Start > self.End + distance:
return False
return True
def overlap(self, locus, distance = 0):
if self.Chromosome != locus.Chromosome:
return(False)
return self.overlapPosition(locus, distance)
def containsPosition(self, chromosome, position):
if self.Start > position:
return(False)
if self.End < position:
return(False)
return(True)
def contains(self, chromosome, position):
if self.Chromosome != chromosome:
return(False)
return self.containsPosition(chromosome, position)
def readBedFile(fileName):
"""Read bed file to list of LocusItem
Arguments:
fileName {str} -- bed file name
Returns:
Array of LocusItem
"""
result = []
with open(fileName, "r") as fin:
for line in fin:
if line.startswith("#"):
continue
parts = line.rstrip().split('\t')
chrom = parts[0]
start = int(parts[1])
end = int(parts[2])
locus = LocusItem()
locus.setLocus(chrom, start, end)
if len(parts) > 4:
locus.setName(parts[4])
result.append(locus)
return(result)
def getChromosomeMap(bedItems):
result = {}
for bi in bedItems:
result.setdefault(bi.Chromosome, []).append(bi)
return(result)
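# Illustrative usage (sketch; "regions.bed" is a hypothetical input file):
# loci = readBedFile("regions.bed")
# by_chrom = getChromosomeMap(loci)
# for chrom, items in by_chrom.items():
#     print("%s: %d loci" % (chrom, len(items)))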
| shengqh/ngsperl | lib/CQS/LocusItem.py | Python | apache-2.0 | 2,423 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.initialization Contains the FittingInitializer class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
# Import astronomical modules
from astropy.units import Unit, dimensionless_angles
from astropy import constants
from astropy.table import Table
# Import the relevant PTS classes and modules
from .component import FittingComponent
from ...core.tools import introspection, tables
from ...core.tools import filesystem as fs
from ...core.simulation.skifile import SkiFile, LabeledSkiFile
from ...core.basics.filter import Filter
from ..basics.models import SersicModel, DeprojectionModel
from ...magic.basics.coordinatesystem import CoordinateSystem
from ..decomposition.decomposition import load_parameters
from ...magic.basics.skyregion import SkyRegion
from ..basics.instruments import SEDInstrument, FrameInstrument
from ..core.sun import Sun
from ..core.mappings import Mappings
from ...magic.tools import wavelengths
from ...core.tools.logging import log
from ..basics.projection import GalaxyProjection
from ..core.sed import ObservedSED
from .wavelengthgrids import WavelengthGridGenerator
from .dustgrids import DustGridGenerator
from ...core.basics.range import IntegerRange, RealRange, QuantityRange
# -----------------------------------------------------------------
class FittingInitializer(FittingComponent):
"""
This class...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(FittingInitializer, self).__init__(config)
# -- Attributes --
# The ski file
self.ski = None
# The structural parameters
self.parameters = None
# The projection system
self.projection = None
# The truncation ellipse
self.ellipse = None
# The geometric bulge model
self.bulge = None
# The deprojection model
self.deprojection = None
self.deprojections = dict()
# The instrument
self.instrument = None
# The table of weights for each band
self.weights = None
# The observed SED
self.observed_sed = None
# Filters
self.i1 = None
self.fuv = None
# Solar luminosity units
self.sun_fuv = None
self.sun_i1 = None
# Coordinate system
self.reference_wcs = None
# The ski files for simulating the contributions of the various stellar components
self.ski_contributions = dict()
# The ski file for generating simulated images
self.ski_images = None
# The wavelength grid and dust grid generators
self.wg_generator = None
self.dg_generator = None
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the necessary input
self.load_input()
# 3. Create the wavelength grid
self.create_wavelength_grids()
# 4. Create the bulge model
self.create_bulge_model()
# 5. Create the deprojection model
self.create_deprojection_model()
# 6. Create the instrument
self.create_instrument()
# 7. Create the dust grids
self.create_dust_grids()
# 8. Adjust the ski file
self.adjust_ski()
# 9. Adjust the ski files for simulating the contributions of the various stellar components
self.adjust_ski_contributions()
# 10. Adjust the ski file for generating simulated images
self.adjust_ski_images()
# 11. Calculate the weight factor to give to each band
self.calculate_weights()
# 12. Writing
self.write()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(FittingInitializer, self).setup()
# Create filters
self.i1 = Filter.from_string("I1")
self.fuv = Filter.from_string("FUV")
# Solar properties
sun = Sun()
self.sun_fuv = sun.luminosity_for_filter_as_unit(self.fuv) # Get the luminosity of the Sun in the FUV band
self.sun_i1 = sun.luminosity_for_filter_as_unit(self.i1) # Get the luminosity of the Sun in the IRAC I1 band
# Reference coordinate system
reference_path = fs.join(self.truncation_path, self.reference_image + ".fits")
self.reference_wcs = CoordinateSystem.from_file(reference_path)
# Create a WavelengthGridGenerator
self.wg_generator = WavelengthGridGenerator()
# Create the DustGridGenerator
self.dg_generator = DustGridGenerator()
# Create the table to contain the weights
self.weights = Table(names=["Instrument", "Band", "Weight"], dtype=["S5", "S7", "float64"])
# -----------------------------------------------------------------
def load_input(self):
"""
This function ...
:return:
"""
# 1. Load the template ski file
self.load_template()
# 2. Load the structural parameters of the galaxy
self.load_parameters()
# 3. Load the projection system
self.load_projection()
# 4. Load the truncation ellipse
self.load_truncation_ellipse()
# 5. Load the observed SED
self.load_observed_sed()
# -----------------------------------------------------------------
def load_template(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the ski file template ...")
# Open the template ski file
self.ski = LabeledSkiFile(self.template_ski_path)
# -----------------------------------------------------------------
def load_parameters(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the decomposition parameters ...")
# Determine the path to the parameters file
path = fs.join(self.components_path, "parameters.dat")
# Load the parameters
self.parameters = load_parameters(path)
# -----------------------------------------------------------------
def load_projection(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the projection system ...")
# Determine the path to the projection file
path = fs.join(self.components_path, "earth.proj")
# Load the projection system
self.projection = GalaxyProjection.from_file(path)
# -----------------------------------------------------------------
def load_truncation_ellipse(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the ellipse region used for truncating the observed images ...")
# Determine the path
path = fs.join(self.truncation_path, "ellipse.reg")
# Get the ellipse
region = SkyRegion.from_file(path)
self.ellipse = region[0]
# -----------------------------------------------------------------
def load_observed_sed(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the observed SED ...")
# Load the SED
self.observed_sed = ObservedSED.from_file(self.observed_sed_path)
# -----------------------------------------------------------------
def create_wavelength_grids(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the wavelength grids ...")
# Create the range of npoints for the wavelength grids
npoints_range = IntegerRange(150, 500)
# Fixed wavelengths (always in the grid)
fixed = [self.i1.pivotwavelength(), self.fuv.pivotwavelength()]
# Generate the wavelength grids
self.wg_generator.run(npoints_range, 10, fixed=fixed)
# -----------------------------------------------------------------
def create_bulge_model(self):
"""
:return:
"""
# Inform the user
log.info("Creating the bulge model ...")
# Create a Sersic model for the bulge
self.bulge = SersicModel.from_galfit(self.parameters.bulge, self.parameters.inclination, self.parameters.disk.PA)
# -----------------------------------------------------------------
def create_deprojection_model(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the deprojection parameters ...")
filename = None
hz = None
# Get the galaxy distance, the inclination and position angle
distance = self.parameters.distance
inclination = self.parameters.inclination
pa = self.parameters.disk.PA
# Get the center pixel
pixel_center = self.parameters.center.to_pixel(self.reference_wcs)
xc = pixel_center.x
yc = pixel_center.y
# Get the pixelscale in physical units
pixelscale_angular = self.reference_wcs.average_pixelscale * Unit("pix") # in deg
pixelscale = (pixelscale_angular * distance).to("pc", equivalencies=dimensionless_angles())
# Get the number of x and y pixels
x_size = self.reference_wcs.xsize
y_size = self.reference_wcs.ysize
# Create the deprojection model
self.deprojection = DeprojectionModel(filename, pixelscale, pa, inclination, x_size, y_size, xc, yc, hz)
# -----------------------------------------------------------------
def create_instrument(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the instrument ...")
# Create an SED instrument
self.instrument = SEDInstrument.from_projection(self.projection)
# -----------------------------------------------------------------
def create_dust_grids(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the grids ...")
# Calculate the major radius of the truncation ellipse in physical coordinates (pc)
major_angular = self.ellipse.major # major axis length of the sky ellipse
radius_physical = (major_angular * self.parameters.distance).to("pc", equivalencies=dimensionless_angles())
# Get the pixelscale in physical units
distance = self.parameters.distance
pixelscale_angular = self.reference_wcs.average_pixelscale * Unit("pix") # in deg
pixelscale = (pixelscale_angular * distance).to("pc", equivalencies=dimensionless_angles())
# BINTREE: (smallest_cell_pixels, min_level, max_mass_fraction)
# Low-resolution: 10., 6, 1e-5
# High-resolution: 0.5, 9, 0.5e-6
# OCTTREE:
# Low-resolution: 10., 2, 1e-5
# High-resolution: 0.5, 3, 0.5e-6
# Because we (currently) can't position the grid exactly as the 2D pixels (rotation etc.),
# take half of the pixel size to avoid too much interpolation
min_scale = 0.5 * pixelscale
max_scale = 10. * pixelscale
scale_range = QuantityRange(min_scale, max_scale, invert=True)
# The range of the maximum depth level of the tree
level_range = IntegerRange(6, 9)
# The range of the max mass fraction
mass_fraction_range = RealRange(0.5e-6, 1e-5, invert=True)
# Set fixed grid properties
self.dg_generator.grid_type = "bintree" # set grid type
self.dg_generator.x_radius = radius_physical
self.dg_generator.y_radius = radius_physical
self.dg_generator.z_radius = 3. * Unit("kpc")
# Generate the dust grids
self.dg_generator.run(scale_range, level_range, mass_fraction_range, 10)
# -----------------------------------------------------------------
def adjust_ski(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adjusting the ski file parameters ...")
# Remove the existing instruments
self.ski.remove_all_instruments()
# Add the instrument
self.ski.add_instrument("earth", self.instrument)
# Set the number of photon packages
self.ski.setpackages(self.config.packages)
# Set the name of the wavelength grid file
self.ski.set_file_wavelength_grid("wavelengths_lowres.txt")
# Set the stellar and dust components
self.set_components()
# Set transient dust emissivity
self.ski.set_transient_dust_emissivity()
# Set the lowest-resolution dust grid
self.ski.set_dust_grid(self.dg_generator.grids[0])
# Set all-cells dust library
self.ski.set_allcells_dust_lib()
# Dust self-absorption
if self.config.selfabsorption: self.ski.enable_selfabsorption()
else: self.ski.disable_selfabsorption()
# Disable all writing options
self.ski.disable_all_writing_options()
# -----------------------------------------------------------------
def set_components(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the stellar and dust components ...")
# Set the evolved stellar bulge component
self.set_bulge_component()
# Set the evolved stellar disk component
self.set_old_stellar_component()
# Set the young stellar component
self.set_young_stellar_component()
# Set the ionizing stellar component
self.set_ionizing_stellar_component()
# The dust component
self.set_dust_component()
# -----------------------------------------------------------------
def set_bulge_component(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Configuring the bulge component ...")
# Like M31
bulge_template = "BruzualCharlot"
bulge_age = 12
#bulge_metallicity = 0.02
bulge_metallicity = 0.03
# Get the flux density of the bulge
fluxdensity = self.parameters.bulge.fluxdensity # In Jy
# Convert the flux density into a spectral luminosity
luminosity = fluxdensity_to_luminosity(fluxdensity, self.i1.pivotwavelength() * Unit("micron"), self.parameters.distance)
# Get the spectral luminosity in solar units
#luminosity = luminosity.to(self.sun_i1).value
# Set the parameters of the bulge
self.ski.set_stellar_component_geometry("Evolved stellar bulge", self.bulge)
self.ski.set_stellar_component_sed("Evolved stellar bulge", bulge_template, bulge_age, bulge_metallicity) # SED
#self.ski.set_stellar_component_luminosity("Evolved stellar bulge", luminosity, self.i1) # normalization by band
self.ski.set_stellar_component_luminosity("Evolved stellar bulge", luminosity, self.i1.centerwavelength() * Unit("micron"))
# -----------------------------------------------------------------
def set_old_stellar_component(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Configuring the old stellar component ...")
# Like M31
disk_template = "BruzualCharlot"
disk_age = 8
#disk_metallicity = 0.02
disk_metallicity = 0.03
# Get the scale height
#scale_height = 521. * Unit("pc") # first models
scale_height = self.parameters.disk.hr / 8.26 # De Geyter et al. 2014
# Get the 3.6 micron flux density with the bulge subtracted
i1_index = tables.find_index(self.observed_sed.table, "I1", "Band")
fluxdensity = self.observed_sed.table["Flux"][i1_index] * Unit("Jy") - self.parameters.bulge.fluxdensity
# Convert the flux density into a spectral luminosity
luminosity = fluxdensity_to_luminosity(fluxdensity, self.i1.pivotwavelength() * Unit("micron"), self.parameters.distance)
# Get the spectral luminosity in solar units
#luminosity = luminosity.to(self.sun_i1).value
# Set the parameters of the evolved stellar component
deprojection = self.deprojection.copy()
deprojection.filename = "old_stars.fits"
deprojection.scale_height = scale_height
self.deprojections["Old stars"] = deprojection
# Adjust the ski file
self.ski.set_stellar_component_geometry("Evolved stellar disk", deprojection)
self.ski.set_stellar_component_sed("Evolved stellar disk", disk_template, disk_age, disk_metallicity) # SED
#self.ski.set_stellar_component_luminosity("Evolved stellar disk", luminosity, self.i1) # normalization by band
self.ski.set_stellar_component_luminosity("Evolved stellar disk", luminosity, self.i1.centerwavelength() * Unit("micron"))
# -----------------------------------------------------------------
def set_young_stellar_component(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Configuring the young stellar component ...")
# Like M31
young_template = "BruzualCharlot"
young_age = 0.1
#young_metallicity = 0.02
young_metallicity = 0.03
# Get the scale height
#scale_height = 150 * Unit("pc") # first models
scale_height = 100. * Unit("pc") # M51
# Get the FUV flux density
fuv_index = tables.find_index(self.observed_sed.table, "FUV", "Band")
fluxdensity = 2. * self.observed_sed.table["Flux"][fuv_index] * Unit("Jy")
# Convert the flux density into a spectral luminosity
luminosity = fluxdensity_to_luminosity(fluxdensity, self.fuv.pivotwavelength() * Unit("micron"), self.parameters.distance)
# Get the spectral luminosity in solar units
#luminosity = luminosity.to(self.sun_fuv).value
# Set the parameters of the young stellar component
deprojection = self.deprojection.copy()
deprojection.filename = "young_stars.fits"
deprojection.scale_height = scale_height
self.deprojections["Young stars"] = deprojection
# Adjust the ski file
self.ski.set_stellar_component_geometry("Young stars", deprojection)
self.ski.set_stellar_component_sed("Young stars", young_template, young_age, young_metallicity) # SED
#self.ski.set_stellar_component_luminosity("Young stars", luminosity, self.fuv) # normalization by band
self.ski.set_stellar_component_luminosity("Young stars", luminosity, self.fuv.centerwavelength() * Unit("micron"))
# -----------------------------------------------------------------
def set_ionizing_stellar_component(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Configuring the ionizing stellar component ...")
# Like M51 and M31
#ionizing_metallicity = 0.02
ionizing_metallicity = 0.03 # XU KONG et al. 2000
ionizing_compactness = 6
ionizing_pressure = 1e12 * Unit("K/m3")
ionizing_covering_factor = 0.2
# Get the scale height
#scale_height = 150 * Unit("pc") # first models
scale_height = 100. * Unit("pc") # M51
# Convert the SFR into a FUV luminosity
sfr = 0.8 # The star formation rate # see Perez-Gonzalez 2006 (mentions Devereux et al 1995)
mappings = Mappings(ionizing_metallicity, ionizing_compactness, ionizing_pressure, ionizing_covering_factor, sfr)
luminosity = mappings.luminosity_for_filter(self.fuv)
#luminosity = luminosity.to(self.sun_fuv).value
# Set the parameters of the ionizing stellar component
deprojection = self.deprojection.copy()
deprojection.filename = "ionizing_stars.fits"
deprojection.scale_height = scale_height
self.deprojections["Ionizing stars"] = deprojection
# Adjust the ski file
self.ski.set_stellar_component_geometry("Ionizing stars", deprojection)
self.ski.set_stellar_component_mappingssed("Ionizing stars", ionizing_metallicity, ionizing_compactness, ionizing_pressure, ionizing_covering_factor) # SED
#self.ski.set_stellar_component_luminosity("Ionizing stars", luminosity, self.fuv) # normalization by band
self.ski.set_stellar_component_luminosity("Ionizing stars", luminosity, self.fuv.centerwavelength() * Unit("micron"))
# -----------------------------------------------------------------
def set_dust_component(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Configuring the dust component ...")
#scale_height = 260.5 * Unit("pc") # first models
scale_height = 200. * Unit("pc") # M51
dust_mass = 1.5e7 * Unit("Msun")
hydrocarbon_pops = 25
enstatite_pops = 25
forsterite_pops = 25
# Set the parameters of the dust component
deprojection = self.deprojection.copy()
deprojection.filename = "dust.fits"
deprojection.scale_height = scale_height
self.deprojections["Dust"] = deprojection
# Adjust the ski file
self.ski.set_dust_component_geometry(0, deprojection)
self.ski.set_dust_component_themis_mix(0, hydrocarbon_pops, enstatite_pops, forsterite_pops) # dust mix
self.ski.set_dust_component_mass(0, dust_mass) # dust mass
# -----------------------------------------------------------------
def adjust_ski_contributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adjusting ski files for simulating the contribution of the various stellar components ...")
# Loop over the different contributions, create seperate ski file instance
contributions = ["old", "young", "ionizing"]
component_names = {"old": ["Evolved stellar bulge", "Evolved stellar disk"],
"young": "Young stars",
"ionizing": "Ionizing stars"}
for contribution in contributions:
# Create a copy of the ski file instance
ski = self.ski.copy()
# Remove other stellar components
ski.remove_stellar_components_except(component_names[contribution])
# Add the ski file to the dictionary
self.ski_contributions[contribution] = ski
# -----------------------------------------------------------------
def adjust_ski_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adjusting ski files for generating simulated images ...")
# Create a copy of the ski file instance
self.ski_images = self.ski.copy()
# Remove all instruments
self.ski_images.remove_all_instruments()
# Create frame instrument to generate datacube
frame_instrument = FrameInstrument.from_projection(self.projection)
# Add the frame instrument
self.ski_images.add_instrument("earth", frame_instrument)
# Add the SED instrument
self.ski_images.add_instrument("earth", self.instrument)
# -----------------------------------------------------------------
def calculate_weights(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the weight to give to each band ...")
# Initialize lists to contain the filters of the different wavelength ranges
uv_bands = []
optical_bands = []
nir_bands = []
mir_bands = []
fir_bands = []
submm_bands = []
# Set the number of groups
number_of_groups = 6
# Loop over the observed SED filters
for fltr in self.observed_filters:
# Get the central wavelength
wavelength = fltr.centerwavelength() * Unit("micron")
# Get a string identifying which portion of the wavelength spectrum this wavelength belongs to
spectrum = wavelengths.name_in_spectrum(wavelength)
# Determine to which group
if spectrum[0] == "UV": uv_bands.append(fltr)
elif spectrum[0] == "Optical": optical_bands.append(fltr)
elif spectrum[0] == "Optical/IR": optical_bands.append(fltr)
elif spectrum[0] == "IR":
if spectrum[1] == "NIR": nir_bands.append(fltr)
elif spectrum[1] == "MIR": mir_bands.append(fltr)
elif spectrum[1] == "FIR": fir_bands.append(fltr)
else: raise RuntimeError("Unknown IR range")
elif spectrum[0] == "Submm": submm_bands.append(fltr)
else: raise RuntimeError("Unknown wavelength range")
# Determine the weight for each group of filters
number_of_data_points = len(self.observed_sed.table)
uv_weight = 1. / (len(uv_bands) * number_of_groups) * number_of_data_points
optical_weight = 1. / (len(optical_bands) * number_of_groups) * number_of_data_points
nir_weight = 1. / (len(nir_bands) * number_of_groups) * number_of_data_points
mir_weight = 1. / (len(mir_bands) * number_of_groups) * number_of_data_points
fir_weight = 1. / (len(fir_bands) * number_of_groups) * number_of_data_points
submm_weight = 1. / (len(submm_bands) * number_of_groups) * number_of_data_points
#print("UV", len(uv_bands), uv_weight)
#print("Optical", len(optical_bands), optical_weight)
#print("NIR", len(nir_bands), nir_weight)
#print("MIR", len(mir_bands), mir_weight)
#print("FIR", len(fir_bands), fir_weight)
#print("Submm", len(submm_bands), submm_weight)
# Loop over the bands in each group and set the weight in the weights table
for fltr in uv_bands: self.weights.add_row([fltr.instrument, fltr.band, uv_weight])
for fltr in optical_bands: self.weights.add_row([fltr.instrument, fltr.band, optical_weight])
for fltr in nir_bands: self.weights.add_row([fltr.instrument, fltr.band, nir_weight])
for fltr in mir_bands: self.weights.add_row([fltr.instrument, fltr.band, mir_weight])
for fltr in fir_bands: self.weights.add_row([fltr.instrument, fltr.band, fir_weight])
for fltr in submm_bands: self.weights.add_row([fltr.instrument, fltr.band, submm_weight])
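        # Worked example with hypothetical numbers: 30 observed bands over the
        # 6 groups means each group carries a summed weight of 30/6 = 5; a UV
        # group holding 2 bands then gives each UV band a weight of
        # 1/(2*6)*30 = 2.5, so sparsely sampled regions are not drowned out.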
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the ski file
self.write_ski_file()
# Write the ski files for simulating the contributions of the various stellar components
self.write_ski_files_contributions()
# Write the ski file for generating simulated images
self.write_ski_file_images()
# Write the weights table
self.write_weights()
# Write the geometries
self.write_geometries()
# Write the wavelength grids
self.write_wavelength_grids()
# Write the dust grids
self.write_dust_grids()
# -----------------------------------------------------------------
def write_ski_file(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the ski file to " + self.fit_ski_path + " ...")
# Save the ski file to the specified location
self.ski.saveto(self.fit_ski_path)
# -----------------------------------------------------------------
def write_ski_files_contributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the ski files for simulating the contribution of the various stellar components ...")
# Loop over the ski files
for contribution in self.ski_contributions:
# Determine the path to the ski file
ski_path = fs.join(self.fit_best_contribution_paths[contribution], self.galaxy_name + ".ski")
# Write the ski file
self.ski_contributions[contribution].saveto(ski_path)
# -----------------------------------------------------------------
def write_ski_file_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the ski file for creating simulated images ...")
# Determine the path to the ski file
ski_path = fs.join(self.fit_best_images_path, self.galaxy_name + ".ski")
# Write the ski file
self.ski_images.saveto(ski_path)
# -----------------------------------------------------------------
def write_weights(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the table with weights to " + self.weights_table_path + " ...")
# Write the table with weights
tables.write(self.weights, self.weights_table_path, format="ascii.ecsv")
# -----------------------------------------------------------------
def write_geometries(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the Sersic model for the bulge and the deprojection model for the other components ...")
# Write the bulge model
bulge_path = fs.join(self.fit_geometries_path, "bulge.mod")
self.bulge.save(bulge_path)
# Write the deprojection models
for label in self.deprojections:
# Save the deprojection model
path = fs.join(self.fit_geometries_path, label + ".mod")
self.deprojections[label].save(path)
# -----------------------------------------------------------------
def write_wavelength_grids(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the wavelength grids ...")
# Loop over the grids
index = 0
for grid in self.wg_generator.grids:
# Determine the path to the grid
path = fs.join(self.fit_wavelength_grids_path, str(index) + ".txt")
# Save the wavelength grid
grid.to_skirt_input(path)
# Increment the index
index += 1
# Write the wavelength grids table
table_path = fs.join(self.fit_wavelength_grids_path, "grids.dat")
tables.write(self.wg_generator.table, table_path)
# -----------------------------------------------------------------
def write_dust_grids(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the dust grids ...")
# Loop over the grids
index = 0
for grid in self.dg_generator.grids:
# Determine the path to the grid
path = fs.join(self.fit_dust_grids_path, str(index) + ".dg")
# Save the dust grid
grid.save(path)
# Increment the index
index += 1
# Write the dust grids table
table_path = fs.join(self.fit_dust_grids_path, "grids.dat")
tables.write(self.dg_generator.table, table_path)
# -----------------------------------------------------------------
# The speed of light
speed_of_light = constants.c
# -----------------------------------------------------------------
def spectral_factor_hz_to_micron(wavelength):
"""
This function ...
:param wavelength:
:return:
"""
wavelength_unit = "micron"
frequency_unit = "Hz"
# Convert string units to Unit objects
if isinstance(wavelength_unit, basestring): wavelength_unit = Unit(wavelength_unit)
if isinstance(frequency_unit, basestring): frequency_unit = Unit(frequency_unit)
conversion_factor_unit = wavelength_unit / frequency_unit
# Calculate the conversion factor
factor = (wavelength ** 2 / speed_of_light).to(conversion_factor_unit).value
return 1. / factor
# -----------------------------------------------------------------
def fluxdensity_to_luminosity(fluxdensity, wavelength, distance):
"""
This function ...
:param fluxdensity:
:param wavelength:
:param distance:
:return:
"""
luminosity = (fluxdensity * 4. * math.pi * distance ** 2.).to("W/Hz")
# 3 ways:
#luminosity_ = luminosity.to("W/micron", equivalencies=spectral_density(wavelength)) # does not work
luminosity_ = (speed_of_light * luminosity / wavelength**2).to("W/micron")
luminosity = luminosity.to("W/Hz").value * spectral_factor_hz_to_micron(wavelength) * Unit("W/micron")
#print(luminosity_, luminosity) # is OK!
return luminosity
# -----------------------------------------------------------------
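# Illustrative example (not part of the pipeline; the numbers are made up):
# converting a 1 Jy flux density observed at 3.6 micron for a galaxy at 10 Mpc.
# fluxdensity = 1. * Unit("Jy")
# wavelength = 3.6 * Unit("micron")
# distance = 10. * Unit("Mpc")
# luminosity = fluxdensity_to_luminosity(fluxdensity, wavelength, distance)  # -> W/micron
# -----------------------------------------------------------------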
| Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/fitting/initialization.py | Python | mit | 34,092 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygtk, gtk, os
pygtk.require("2.0")
from tab_channels import GuiTabChannels
class GuiConfig:
def __init__(self, desc, text = None, choices = None, integer = None, boolean = None, mini = None, maxi = None):
self.tab = [desc]
if text != None:
self.tab.append(str(text))
self.tab.append(choices == None)
else:
self.tab.append('')
self.tab.append(False)
if boolean != None:
self.tab.append(bool(boolean))
self.tab.append(True)
else:
self.tab.append(False)
self.tab.append(False)
ls = gtk.ListStore(str)
if choices:
for a in choices:
ls.append((str(a),))
self.tab.append(ls)
self.tab.append(True)
else:
self.tab.append(ls)
self.tab.append(False)
if integer != None:
self.tab.append(int(integer))
self.tab.append(True)
self.tab.append(gtk.Adjustment(value=integer, lower=mini, upper=maxi, step_incr=1))
else:
self.tab.append(0)
self.tab.append(False)
self.tab.append(gtk.Adjustment())
def __iter__(self):
return self.tab.__iter__()
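# Illustrative GuiConfig rows (hypothetical settings, listed only to document
# the constructor arguments; how the main window renders them is up to the UI):
# GuiConfig("Server", text="irc.example.org")                -> free text value
# GuiConfig("Mode", text="auto", choices=["auto", "manual"]) -> value restricted to the listed choices
# GuiConfig("Port", integer=6667, mini=1, maxi=65535)        -> bounded integer value
# GuiConfig("Logging", boolean=True)                         -> boolean value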
class Gui(GuiTabChannels, object):
def __init__(self):
# Window loading
self.builder = gtk.Builder()
self.builder.add_from_file(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.ui"))
# Init tabs
GuiTabChannels.__init__(self)
# Connecting signals
self.builder.connect_signals(self)
# Viewing
self.window = self.builder.get_object("w_main")
self.window.show()
gtk.main()
def on_quit(self,widget=None,data=None):
widget.show()
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, "Do you really want to quit ?")
dialog.set_title('Closing Iris Monitor')
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_YES:
if self.channel:
self.channel.thread_stop()
gtk.main_quit()
else:
return True
| JackDesBwa/IrisMonitor | gui/__init__.py | Python | mit | 1,900 |
from setuptools import setup, find_packages
install_requires = ["execnet>=1.1", "pytest>=4.4.0", "pytest-forked", "six"]
with open("README.rst") as f:
long_description = f.read()
setup(
name="pytest-xdist",
use_scm_version={"write_to": "src/xdist/_version.py"},
description="pytest xdist plugin for distributed testing"
" and loop-on-failing modes",
long_description=long_description,
license="MIT",
author="holger krekel and contributors",
author_email="pytest-dev@python.org,holger@merlinux.eu",
url="https://github.com/pytest-dev/pytest-xdist",
platforms=["linux", "osx", "win32"],
packages=find_packages(where="src"),
package_dir={"": "src"},
extras_require={"testing": ["filelock"]},
entry_points={
"pytest11": ["xdist = xdist.plugin", "xdist.looponfail = xdist.looponfail"]
},
zip_safe=False,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=install_requires,
setup_requires=["setuptools_scm"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Pytest",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Utilities",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
| RonnyPfannschmidt/pytest-xdist | setup.py | Python | mit | 1,890 |
from ..route_handler import BaseModelRouter, SUCCESS, ERROR
from ..serializers.model_serializer import ModelSerializer
from .dragon_test_case import DragonTestCase
from swampdragon.tests.models import TwoFieldModel
class Serializer(ModelSerializer):
class Meta:
update_fields = ('text', 'number')
model = TwoFieldModel
class Router(BaseModelRouter):
model = TwoFieldModel
serializer_class = Serializer
class TestBaseModelRouter(DragonTestCase):
def setUp(self):
self.router = Router(self.connection)
def test_successful_create(self):
data = {'text': 'text', 'number': 3}
self.router.create(**data)
model = self.router.model.objects.get()
self.assertIsNotNone(model)
def test_error_on_create(self):
data = {'text': 'text'}
self.router.create(**data)
actual = self.connection.last_message
self.assertEqual(actual['context']['state'], ERROR)
self.assertIn('number', actual['data'])
def test_created(self):
data = {'text': 'text', 'number': 3}
self.router.create(**data)
model = self.router.model.objects.get()
actual = self.connection.last_message
self.assertEqual(actual['context']['state'], SUCCESS)
self.assertDictEqual(actual['data'], Serializer(instance=model).serialize())
| h-hirokawa/swampdragon | swampdragon/tests/test_base_model_router_create.py | Python | bsd-3-clause | 1,357 |
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a list of integers
    # BFS level-order traversal: keep the last node's value from each level.
def rightSideView(self, root):
if not root: return []
q = [root]
ret = [root.val]
while q != []:
tmp = []
while q != []:
node = q.pop(0)
if node.left:
tmp.append(node.left)
if node.right:
tmp.append(node.right)
if tmp != []:
ret.append(tmp[-1].val)
q += tmp
return ret
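# Illustrative walk-through (assuming the TreeNode class sketched above):
#       1
#      / \
#     2   3
#      \    \
#       5    4
# root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
# root.left.right = TreeNode(5); root.right.right = TreeNode(4)
# Solution().rightSideView(root)  # -> [1, 3, 4]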
| linyaoli/acm | tree/intermediate/binary_tree_right_side_view.py | Python | gpl-2.0 | 787 |
# encoding: utf-8
"""
Expose the multiengine controller over the Foolscap network protocol.
"""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import cPickle as pickle
from types import FunctionType
from zope.interface import Interface, implements
from twisted.internet import defer
from twisted.python import components, failure, log
from foolscap import Referenceable
from IPython.kernel import error
from IPython.kernel.util import printer
from IPython.kernel import map as Map
from IPython.kernel.parallelfunction import ParallelFunction
from IPython.kernel.mapper import (
MultiEngineMapper,
IMultiEngineMapperFactory,
IMapper
)
from IPython.kernel.twistedutil import gatherBoth
from IPython.kernel.multiengine import (MultiEngine,
IMultiEngine,
IFullSynchronousMultiEngine,
ISynchronousMultiEngine)
from IPython.kernel.multiengineclient import wrapResultList
from IPython.kernel.pendingdeferred import PendingDeferredManager
from IPython.kernel.pickleutil import (can, canDict,
canSequence, uncan, uncanDict, uncanSequence)
from IPython.kernel.clientinterfaces import (
IFCClientInterfaceProvider,
IBlockingClientAdaptor
)
# Needed to access the true globals from __main__.__dict__
import __main__
#-------------------------------------------------------------------------------
# The Controller side of things
#-------------------------------------------------------------------------------
def packageResult(wrappedMethod):
def wrappedPackageResult(self, *args, **kwargs):
d = wrappedMethod(self, *args, **kwargs)
d.addCallback(self.packageSuccess)
d.addErrback(self.packageFailure)
return d
return wrappedPackageResult
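# packageResult is applied to the remote_* methods below: whatever the wrapped
# call produces (a result or a Failure) is pickled by packageSuccess /
# packageFailure, and the client side unpickles it again in
# FCFullSynchronousMultiEngineClient.unpackage.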
class IFCSynchronousMultiEngine(Interface):
"""Foolscap interface to `ISynchronousMultiEngine`.
The methods in this interface are similar to those of
`ISynchronousMultiEngine`, but their arguments and return values are pickled
    if they are not already simple Python types that can be sent over XML-RPC.
See the documentation of `ISynchronousMultiEngine` and `IMultiEngine` for
documentation about the methods.
Most methods in this interface act like the `ISynchronousMultiEngine`
versions and can be called in blocking or non-blocking mode.
"""
pass
class FCSynchronousMultiEngineFromMultiEngine(Referenceable):
"""Adapt `IMultiEngine` -> `ISynchronousMultiEngine` -> `IFCSynchronousMultiEngine`.
"""
implements(IFCSynchronousMultiEngine, IFCClientInterfaceProvider)
addSlash = True
def __init__(self, multiengine):
# Adapt the raw multiengine to `ISynchronousMultiEngine` before saving
        # it. This allows this class to do two adaptation steps.
self.smultiengine = ISynchronousMultiEngine(multiengine)
self._deferredIDCallbacks = {}
#---------------------------------------------------------------------------
# Non interface methods
#---------------------------------------------------------------------------
def packageFailure(self, f):
f.cleanFailure()
return self.packageSuccess(f)
def packageSuccess(self, obj):
serial = pickle.dumps(obj, 2)
return serial
#---------------------------------------------------------------------------
# Things related to PendingDeferredManager
#---------------------------------------------------------------------------
@packageResult
def remote_get_pending_deferred(self, deferredID, block):
d = self.smultiengine.get_pending_deferred(deferredID, block)
try:
callback = self._deferredIDCallbacks.pop(deferredID)
except KeyError:
callback = None
if callback is not None:
d.addCallback(callback[0], *callback[1], **callback[2])
return d
@packageResult
def remote_clear_pending_deferreds(self):
return defer.maybeDeferred(self.smultiengine.clear_pending_deferreds)
def _addDeferredIDCallback(self, did, callback, *args, **kwargs):
self._deferredIDCallbacks[did] = (callback, args, kwargs)
return did
#---------------------------------------------------------------------------
# IEngineMultiplexer related methods
#---------------------------------------------------------------------------
@packageResult
def remote_execute(self, lines, targets, block):
return self.smultiengine.execute(lines, targets=targets, block=block)
@packageResult
def remote_push(self, binaryNS, targets, block):
try:
namespace = pickle.loads(binaryNS)
except:
d = defer.fail(failure.Failure())
else:
d = self.smultiengine.push(namespace, targets=targets, block=block)
return d
@packageResult
def remote_pull(self, keys, targets, block):
d = self.smultiengine.pull(keys, targets=targets, block=block)
return d
@packageResult
def remote_push_function(self, binaryNS, targets, block):
try:
namespace = pickle.loads(binaryNS)
except:
d = defer.fail(failure.Failure())
else:
namespace = uncanDict(namespace)
d = self.smultiengine.push_function(namespace, targets=targets, block=block)
return d
def _canMultipleKeys(self, result):
return [canSequence(r) for r in result]
@packageResult
def remote_pull_function(self, keys, targets, block):
def can_functions(r, keys):
if len(keys)==1 or isinstance(keys, str):
result = canSequence(r)
elif len(keys)>1:
result = [canSequence(s) for s in r]
return result
d = self.smultiengine.pull_function(keys, targets=targets, block=block)
if block:
d.addCallback(can_functions, keys)
else:
d.addCallback(lambda did: self._addDeferredIDCallback(did, can_functions, keys))
return d
@packageResult
def remote_push_serialized(self, binaryNS, targets, block):
try:
namespace = pickle.loads(binaryNS)
except:
d = defer.fail(failure.Failure())
else:
d = self.smultiengine.push_serialized(namespace, targets=targets, block=block)
return d
@packageResult
def remote_pull_serialized(self, keys, targets, block):
d = self.smultiengine.pull_serialized(keys, targets=targets, block=block)
return d
@packageResult
def remote_get_result(self, i, targets, block):
if i == 'None':
i = None
return self.smultiengine.get_result(i, targets=targets, block=block)
@packageResult
def remote_reset(self, targets, block):
return self.smultiengine.reset(targets=targets, block=block)
@packageResult
def remote_keys(self, targets, block):
return self.smultiengine.keys(targets=targets, block=block)
@packageResult
def remote_kill(self, controller, targets, block):
return self.smultiengine.kill(controller, targets=targets, block=block)
@packageResult
def remote_clear_queue(self, targets, block):
return self.smultiengine.clear_queue(targets=targets, block=block)
@packageResult
def remote_queue_status(self, targets, block):
return self.smultiengine.queue_status(targets=targets, block=block)
@packageResult
def remote_set_properties(self, binaryNS, targets, block):
try:
ns = pickle.loads(binaryNS)
except:
d = defer.fail(failure.Failure())
else:
d = self.smultiengine.set_properties(ns, targets=targets, block=block)
return d
@packageResult
def remote_get_properties(self, keys, targets, block):
if keys=='None':
keys=None
return self.smultiengine.get_properties(keys, targets=targets, block=block)
@packageResult
def remote_has_properties(self, keys, targets, block):
return self.smultiengine.has_properties(keys, targets=targets, block=block)
@packageResult
def remote_del_properties(self, keys, targets, block):
return self.smultiengine.del_properties(keys, targets=targets, block=block)
@packageResult
def remote_clear_properties(self, targets, block):
return self.smultiengine.clear_properties(targets=targets, block=block)
#---------------------------------------------------------------------------
# IMultiEngine related methods
#---------------------------------------------------------------------------
def remote_get_ids(self):
"""Get the ids of the registered engines.
This method always blocks.
"""
return self.smultiengine.get_ids()
#---------------------------------------------------------------------------
# IFCClientInterfaceProvider related methods
#---------------------------------------------------------------------------
def remote_get_client_name(self):
return 'IPython.kernel.multienginefc.FCFullSynchronousMultiEngineClient'
# The __init__ method of `FCSynchronousMultiEngineFromMultiEngine` first adapts the
# `IMultiEngine` to `ISynchronousMultiEngine` so this is actually doing a
# two phase adaptation.
components.registerAdapter(FCSynchronousMultiEngineFromMultiEngine,
IMultiEngine, IFCSynchronousMultiEngine)
#-------------------------------------------------------------------------------
# The Client side of things
#-------------------------------------------------------------------------------
class FCFullSynchronousMultiEngineClient(object):
implements(
IFullSynchronousMultiEngine,
IBlockingClientAdaptor,
IMultiEngineMapperFactory,
IMapper
)
def __init__(self, remote_reference):
self.remote_reference = remote_reference
self._deferredIDCallbacks = {}
# This class manages some pending deferreds through this instance. This
# is required for methods like gather/scatter as it enables us to
# create our own pending deferreds for composite operations.
self.pdm = PendingDeferredManager()
#---------------------------------------------------------------------------
# Non interface methods
#---------------------------------------------------------------------------
def unpackage(self, r):
return pickle.loads(r)
#---------------------------------------------------------------------------
# Things related to PendingDeferredManager
#---------------------------------------------------------------------------
def get_pending_deferred(self, deferredID, block=True):
# Because we are managing some pending deferreds locally (through
# self.pdm) and some remotely (on the controller), we first try the
# local one and then the remote one.
if self.pdm.quick_has_id(deferredID):
d = self.pdm.get_pending_deferred(deferredID, block)
return d
else:
d = self.remote_reference.callRemote('get_pending_deferred', deferredID, block)
d.addCallback(self.unpackage)
try:
callback = self._deferredIDCallbacks.pop(deferredID)
except KeyError:
callback = None
if callback is not None:
d.addCallback(callback[0], *callback[1], **callback[2])
return d
def clear_pending_deferreds(self):
# This clear both the local (self.pdm) and remote pending deferreds
self.pdm.clear_pending_deferreds()
d2 = self.remote_reference.callRemote('clear_pending_deferreds')
d2.addCallback(self.unpackage)
return d2
def _addDeferredIDCallback(self, did, callback, *args, **kwargs):
self._deferredIDCallbacks[did] = (callback, args, kwargs)
return did
#---------------------------------------------------------------------------
# IEngineMultiplexer related methods
#---------------------------------------------------------------------------
def execute(self, lines, targets='all', block=True):
d = self.remote_reference.callRemote('execute', lines, targets, block)
d.addCallback(self.unpackage)
return d
def push(self, namespace, targets='all', block=True):
serial = pickle.dumps(namespace, 2)
d = self.remote_reference.callRemote('push', serial, targets, block)
d.addCallback(self.unpackage)
return d
def pull(self, keys, targets='all', block=True):
d = self.remote_reference.callRemote('pull', keys, targets, block)
d.addCallback(self.unpackage)
return d
def push_function(self, namespace, targets='all', block=True):
cannedNamespace = canDict(namespace)
serial = pickle.dumps(cannedNamespace, 2)
d = self.remote_reference.callRemote('push_function', serial, targets, block)
d.addCallback(self.unpackage)
return d
def pull_function(self, keys, targets='all', block=True):
def uncan_functions(r, keys):
if len(keys)==1 or isinstance(keys, str):
return uncanSequence(r)
elif len(keys)>1:
return [uncanSequence(s) for s in r]
d = self.remote_reference.callRemote('pull_function', keys, targets, block)
if block:
d.addCallback(self.unpackage)
d.addCallback(uncan_functions, keys)
else:
d.addCallback(self.unpackage)
d.addCallback(lambda did: self._addDeferredIDCallback(did, uncan_functions, keys))
return d
def push_serialized(self, namespace, targets='all', block=True):
cannedNamespace = canDict(namespace)
serial = pickle.dumps(cannedNamespace, 2)
d = self.remote_reference.callRemote('push_serialized', serial, targets, block)
d.addCallback(self.unpackage)
return d
def pull_serialized(self, keys, targets='all', block=True):
d = self.remote_reference.callRemote('pull_serialized', keys, targets, block)
d.addCallback(self.unpackage)
return d
def get_result(self, i=None, targets='all', block=True):
if i is None: # This is because None cannot be marshalled by xml-rpc
i = 'None'
d = self.remote_reference.callRemote('get_result', i, targets, block)
d.addCallback(self.unpackage)
return d
def reset(self, targets='all', block=True):
d = self.remote_reference.callRemote('reset', targets, block)
d.addCallback(self.unpackage)
return d
def keys(self, targets='all', block=True):
d = self.remote_reference.callRemote('keys', targets, block)
d.addCallback(self.unpackage)
return d
def kill(self, controller=False, targets='all', block=True):
d = self.remote_reference.callRemote('kill', controller, targets, block)
d.addCallback(self.unpackage)
return d
def clear_queue(self, targets='all', block=True):
d = self.remote_reference.callRemote('clear_queue', targets, block)
d.addCallback(self.unpackage)
return d
def queue_status(self, targets='all', block=True):
d = self.remote_reference.callRemote('queue_status', targets, block)
d.addCallback(self.unpackage)
return d
def set_properties(self, properties, targets='all', block=True):
serial = pickle.dumps(properties, 2)
d = self.remote_reference.callRemote('set_properties', serial, targets, block)
d.addCallback(self.unpackage)
return d
def get_properties(self, keys=None, targets='all', block=True):
        if keys is None:
keys='None'
d = self.remote_reference.callRemote('get_properties', keys, targets, block)
d.addCallback(self.unpackage)
return d
def has_properties(self, keys, targets='all', block=True):
d = self.remote_reference.callRemote('has_properties', keys, targets, block)
d.addCallback(self.unpackage)
return d
def del_properties(self, keys, targets='all', block=True):
d = self.remote_reference.callRemote('del_properties', keys, targets, block)
d.addCallback(self.unpackage)
return d
def clear_properties(self, targets='all', block=True):
d = self.remote_reference.callRemote('clear_properties', targets, block)
d.addCallback(self.unpackage)
return d
#---------------------------------------------------------------------------
# IMultiEngine related methods
#---------------------------------------------------------------------------
def get_ids(self):
d = self.remote_reference.callRemote('get_ids')
return d
#---------------------------------------------------------------------------
# ISynchronousMultiEngineCoordinator related methods
#---------------------------------------------------------------------------
def _process_targets(self, targets):
def create_targets(ids):
if isinstance(targets, int):
engines = [targets]
elif targets=='all':
engines = ids
elif isinstance(targets, (list, tuple)):
engines = targets
for t in engines:
                if t not in ids:
raise error.InvalidEngineID("engine with id %r does not exist"%t)
return engines
d = self.get_ids()
d.addCallback(create_targets)
return d
def scatter(self, key, seq, dist='b', flatten=False, targets='all', block=True):
# Note: scatter and gather handle pending deferreds locally through self.pdm.
        # This enables us to collect a bunch of deferred ids and make a secondary
# deferred id that corresponds to the entire group. This logic is extremely
# difficult to get right though.
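        # For illustration only (relies on an assumption about the 'b' block
        # distribution, which is not defined in this file):
        # scatter('x', range(10)) across three engines would push roughly
        # contiguous chunks, e.g. [0..3] to engine 0, [4..6] to engine 1 and
        # [7..9] to engine 2, which gather() later rejoins in order.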
def do_scatter(engines):
nEngines = len(engines)
mapClass = Map.dists[dist]
mapObject = mapClass()
d_list = []
# Loop through and push to each engine in non-blocking mode.
# This returns a set of deferreds to deferred_ids
for index, engineid in enumerate(engines):
partition = mapObject.getPartition(seq, index, nEngines)
if flatten and len(partition) == 1:
d = self.push({key: partition[0]}, targets=engineid, block=False)
else:
d = self.push({key: partition}, targets=engineid, block=False)
d_list.append(d)
# Collect the deferred to deferred_ids
d = gatherBoth(d_list,
fireOnOneErrback=0,
consumeErrors=1,
logErrors=0)
# Now d has a list of deferred_ids or Failures coming
d.addCallback(error.collect_exceptions, 'scatter')
def process_did_list(did_list):
"""Turn a list of deferred_ids into a final result or failure."""
new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
final_d = gatherBoth(new_d_list,
fireOnOneErrback=0,
consumeErrors=1,
logErrors=0)
final_d.addCallback(error.collect_exceptions, 'scatter')
final_d.addCallback(lambda lop: [i[0] for i in lop])
return final_d
            # Now, depending on block, we need to handle the list of
            # deferred_ids coming down the pipe differently.
if block:
# If we are blocking register a callback that will transform the
# list of deferred_ids into the final result.
d.addCallback(process_did_list)
return d
else:
# Here we are going to use a _local_ PendingDeferredManager.
deferred_id = self.pdm.get_deferred_id()
# This is the deferred we will return to the user that will fire
# with the local deferred_id AFTER we have received the list of
# primary deferred_ids
d_to_return = defer.Deferred()
def do_it(did_list):
"""Produce a deferred to the final result, but first fire the
deferred we will return to the user that has the local
deferred id."""
d_to_return.callback(deferred_id)
return process_did_list(did_list)
d.addCallback(do_it)
# Now save the deferred to the final result
self.pdm.save_pending_deferred(d, deferred_id)
return d_to_return
d = self._process_targets(targets)
d.addCallback(do_scatter)
return d
def gather(self, key, dist='b', targets='all', block=True):
# Note: scatter and gather handle pending deferreds locally through self.pdm.
        # This enables us to collect a bunch of deferred ids and make a secondary
# deferred id that corresponds to the entire group. This logic is extremely
# difficult to get right though.
def do_gather(engines):
nEngines = len(engines)
mapClass = Map.dists[dist]
mapObject = mapClass()
d_list = []
            # Loop through and pull from each engine in non-blocking mode.
# This returns a set of deferreds to deferred_ids
for index, engineid in enumerate(engines):
d = self.pull(key, targets=engineid, block=False)
d_list.append(d)
# Collect the deferred to deferred_ids
d = gatherBoth(d_list,
fireOnOneErrback=0,
consumeErrors=1,
logErrors=0)
# Now d has a list of deferred_ids or Failures coming
            d.addCallback(error.collect_exceptions, 'gather')
def process_did_list(did_list):
"""Turn a list of deferred_ids into a final result or failure."""
new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
final_d = gatherBoth(new_d_list,
fireOnOneErrback=0,
consumeErrors=1,
logErrors=0)
final_d.addCallback(error.collect_exceptions, 'gather')
final_d.addCallback(lambda lop: [i[0] for i in lop])
final_d.addCallback(mapObject.joinPartitions)
return final_d
            # Now, depending on block, we need to handle the list of
            # deferred_ids coming down the pipe differently.
if block:
# If we are blocking register a callback that will transform the
# list of deferred_ids into the final result.
d.addCallback(process_did_list)
return d
else:
# Here we are going to use a _local_ PendingDeferredManager.
deferred_id = self.pdm.get_deferred_id()
# This is the deferred we will return to the user that will fire
# with the local deferred_id AFTER we have received the list of
# primary deferred_ids
d_to_return = defer.Deferred()
def do_it(did_list):
"""Produce a deferred to the final result, but first fire the
deferred we will return to the user that has the local
deferred id."""
d_to_return.callback(deferred_id)
return process_did_list(did_list)
d.addCallback(do_it)
# Now save the deferred to the final result
self.pdm.save_pending_deferred(d, deferred_id)
return d_to_return
d = self._process_targets(targets)
d.addCallback(do_gather)
return d
def raw_map(self, func, sequences, dist='b', targets='all', block=True):
"""
A parallelized version of Python's builtin map.
This has a slightly different syntax than the builtin `map`.
This is needed because we need to have keyword arguments and thus
can't use *args to capture all the sequences. Instead, they must
be passed in a list or tuple.
raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
Most users will want to use parallel functions or the `mapper`
and `map` methods for an API that follows that of the builtin
`map`.
"""
if not isinstance(sequences, (list, tuple)):
raise TypeError('sequences must be a list or tuple')
max_len = max(len(s) for s in sequences)
for s in sequences:
if len(s)!=max_len:
raise ValueError('all sequences must have equal length')
if isinstance(func, FunctionType):
d = self.push_function(dict(_ipython_map_func=func), targets=targets, block=False)
d.addCallback(lambda did: self.get_pending_deferred(did, True))
sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, *zip(*_ipython_map_seq))'
elif isinstance(func, str):
d = defer.succeed(None)
sourceToRun = \
'_ipython_map_seq_result = map(%s, *zip(*_ipython_map_seq))' % func
else:
raise TypeError("func must be a function or str")
d.addCallback(lambda _: self.scatter('_ipython_map_seq', zip(*sequences), dist, targets=targets))
d.addCallback(lambda _: self.execute(sourceToRun, targets=targets, block=False))
d.addCallback(lambda did: self.get_pending_deferred(did, True))
d.addCallback(lambda _: self.gather('_ipython_map_seq_result', dist, targets=targets, block=block))
return d
def map(self, func, *sequences):
"""
A parallel version of Python's builtin `map` function.
This method applies a function to sequences of arguments. It
follows the same syntax as the builtin `map`.
This method creates a mapper objects by calling `self.mapper` with
no arguments and then uses that mapper to do the mapping. See
the documentation of `mapper` for more details.
"""
return self.mapper().map(func, *sequences)
def mapper(self, dist='b', targets='all', block=True):
"""
Create a mapper object that has a `map` method.
This method returns an object that implements the `IMapper`
interface. This method is a factory that is used to control how
the map happens.
:Parameters:
dist : str
What decomposition to use, 'b' is the only one supported
currently
targets : str, int, sequence of ints
Which engines to use for the map
block : boolean
Should calls to `map` block or not
"""
return MultiEngineMapper(self, dist, targets, block)
def parallel(self, dist='b', targets='all', block=True):
"""
A decorator that turns a function into a parallel function.
This can be used as:
@parallel()
def f(x, y)
...
f(range(10), range(10))
This causes f(0,0), f(1,1), ... to be called in parallel.
:Parameters:
dist : str
What decomposition to use, 'b' is the only one supported
currently
targets : str, int, sequence of ints
Which engines to use for the map
block : boolean
Should calls to `map` block or not
"""
mapper = self.mapper(dist, targets, block)
pf = ParallelFunction(mapper)
return pf
#---------------------------------------------------------------------------
# ISynchronousMultiEngineExtras related methods
#---------------------------------------------------------------------------
def _transformPullResult(self, pushResult, multitargets, lenKeys):
if not multitargets:
result = pushResult[0]
elif lenKeys > 1:
result = zip(*pushResult)
        elif lenKeys == 1:
result = list(pushResult)
return result
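    # Rough illustration of the transform above (values are made up): pulling
    # keys ['a', 'b'] from two targets yields a pushResult like
    # [[a0, b0], [a1, b1]]; zip(*pushResult) regroups it per key as
    # [(a0, a1), (b0, b1)], which is what zip_pull hands back to the caller.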
def zip_pull(self, keys, targets='all', block=True):
multitargets = not isinstance(targets, int) and len(targets) > 1
lenKeys = len(keys)
d = self.pull(keys, targets=targets, block=block)
if block:
d.addCallback(self._transformPullResult, multitargets, lenKeys)
else:
d.addCallback(lambda did: self._addDeferredIDCallback(did, self._transformPullResult, multitargets, lenKeys))
return d
def run(self, fname, targets='all', block=True):
fileobj = open(fname,'r')
source = fileobj.read()
fileobj.close()
# if the compilation blows, we get a local error right away
try:
code = compile(source,fname,'exec')
except:
return defer.fail(failure.Failure())
# Now run the code
d = self.execute(source, targets=targets, block=block)
return d
#---------------------------------------------------------------------------
# IBlockingClientAdaptor related methods
#---------------------------------------------------------------------------
def adapt_to_blocking_client(self):
from IPython.kernel.multiengineclient import IFullBlockingMultiEngineClient
return IFullBlockingMultiEngineClient(self)
| mastizada/kuma | vendor/packages/ipython/IPython/kernel/multienginefc.py | Python | mpl-2.0 | 30,932 |
# coding: utf-8
import unittest
from processor.processor import *
from bs4 import BeautifulSoup
class TestProcessor(unittest.TestCase):
def setUp(self):
html = """
<html>
<head>
<link href="link_link" rel="stylesheet" />
<script></script>
<script src="link_script"></script>
</head>
<body>
<img src="link_img" />
<embed src="link_embed">
<object data="link_object"></object>
</body>
</html>
"""
        self.soup = BeautifulSoup(html, 'html.parser')
def test_img_is_resource(self):
tags_imgs = self.soup.find_all('img')
for img in tags_imgs:
self.assertTrue(ImgProcessor.is_resource(img))
def test_img_get_resource_url(self):
tags_imgs = self.soup.find_all('img')
for img in tags_imgs:
imagem = ImgProcessor(img)
if ImgProcessor.is_resource(img):
self.assertEqual(imagem.get_resource_url(), 'link_img')
def test_script_is_resource(self):
tags_script = self.soup.find_all('script')
self.assertFalse(ScriptProcessor.is_resource(tags_script[0]))
self.assertTrue(ScriptProcessor.is_resource(tags_script[1]))
def test_script_get_resource_url(self):
tags_script = self.soup.find_all('script')
for script in tags_script:
scr = ScriptProcessor(script)
if ScriptProcessor.is_resource(script):
self.assertEqual(scr.get_resource_url(), 'link_script')
def test_link_is_resource(self):
tags_links = self.soup.find_all('link')
for link in tags_links:
self.assertTrue(LinkProcessor.is_resource(link))
def test_link_get_resource_url(self):
tags_link = self.soup.find_all('link')
for link in tags_link:
lnk = LinkProcessor(link)
if LinkProcessor.is_resource(link):
self.assertEqual(lnk.get_resource_url(), 'link_link')
def test_embed_is_resource(self):
tags_embeds = self.soup.find_all('embed')
for embed in tags_embeds:
self.assertTrue(EmbedProcessor.is_resource(embed))
def test_embed_get_resource_url(self):
tags_embeds = self.soup.find_all('embed')
for embed in tags_embeds:
ebd = EmbedProcessor(embed)
if EmbedProcessor.is_resource(embed):
self.assertEqual(ebd.get_resource_url(), 'link_embed')
def test_object_is_resource(self):
tags_objects = self.soup.find_all('object')
for objecte in tags_objects:
self.assertTrue(ObjectProcessor.is_resource(objecte))
def test_object_get_resource_url(self):
tags_objects = self.soup.find_all('object')
for objecte in tags_objects:
obj = ObjectProcessor(objecte)
if ObjectProcessor.is_resource(objecte):
self.assertEqual(obj.get_resource_url(), 'link_object')
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "appservercms.settings.dev")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| markussitzmann/webapps | wagtail/project_template/appservercms/manage.py | Python | mit | 259 |
import os
import logging
import traceback
import shlex
import subprocess
import string
from collections import defaultdict
from loader import Loader, LoadResult, Timeout, TimeoutError
NODE = '/usr/bin/env node'
NODEHTTP2 = 'node-http2/example/objloader_client.js' # Put your path here
class NodeJsLoader(Loader):
'''Subclass of :class:`Loader` that loads pages using NODE.JS.
.. note:: The :class:`NodeJsLoader` currently does not support caching.
.. note:: The :class:`NodeJsLoader` currently does not support full page loading (i.e., fetching a page's subresources).
.. note:: The :class:`NodeJsLoader` currently does not support disabling network caches.
.. note:: The :class:`NodeJsLoader` currently does not support saving HARs.
.. note:: The :class:`NodeJsLoader` currently does not support saving screenshots.
.. note:: The :class:`NodeJsLoader` currently does not support saving content.
'''
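    # Minimal usage sketch.  The keyword names below are assumptions inferred
    # from the attributes checked in __init__ (they belong to the Loader base
    # class, which is not shown here):
    #   loader = NodeJsLoader(http2=True, disable_local_cache=True,
    #                         full_page=False)
    #   loader._load_page('https://example.com', outdir='/tmp')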
def __init__(self, **kwargs):
super(NodeJsLoader, self).__init__(**kwargs)
if not self._http2:
raise NotImplementedError('NodeJsLoader does not support HTTP1.1')
if not self._disable_local_cache:
raise NotImplementedError('NodeJsLoader does not support local caching')
if self._full_page:
raise NotImplementedError('NodeJsLoader does not support loading a full page')
if self._disable_network_cache:
raise NotImplementedError('NodeJsLoader does not support disabling network caches.')
if self._save_har:
raise NotImplementedError('NodeJsLoader does not support saving HARs.')
if self._save_screenshot:
raise NotImplementedError('NodeJsLoader does not support saving screenshots.')
if self._delay_after_onload != 0:
raise NotImplementedError('NodeJsLoader does not support delay after onload')
if self._save_content != 'never':
raise NotImplementedError('NodeJsLoader does not support saving content')
self._image_paths_by_url = defaultdict(list)
def _load_page(self, url, outdir, trial_num=-1):
# load the specified URL
logging.info('Loading page: %s', url)
try:
# prepare the NODE command
node_cmd = NODE+' '
node_cmd += NODEHTTP2+' ' # Location of node.js client HTTP2 program
node_cmd += url
# load the page
logging.debug('Running node.js: %s', node_cmd)
with Timeout(seconds=self._timeout+5):
output = subprocess.check_output(shlex.split(node_cmd))
logging.debug('NODE returned: %s', output.strip())
# NODE returned, but may or may not have succeeded
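            # The client script is expected to print one line of 'key=value'
            # pairs separated by ';' (format inferred from the parsing below),
            # for example:
            #   http_code=200;final_url=https://example.com/;time=1,234;size=5678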
returnvals = {field.split('=')[0]: field.split('=')[1] for field in output.split(';')}
if returnvals['http_code'] != '200':
return LoadResult(LoadResult.FAILURE_NO_200, url)
else:
# Report status and time
return LoadResult(LoadResult.SUCCESS,
url,
final_url=returnvals['final_url'],
                                  time=float(returnvals['time'].replace(',', '.')),
size=returnvals['size'])
# problem running NODE
except TimeoutError:
logging.exception('Timeout fetching %s', url)
return LoadResult(LoadResult.FAILURE_TIMEOUT, url)
except subprocess.CalledProcessError as e:
logging.exception('Error loading %s: %s\n%s' % (url, e, e.output))
if e.returncode == 28:
return LoadResult(LoadResult.FAILURE_TIMEOUT, url)
else:
return LoadResult(LoadResult.FAILURE_UNKNOWN, url)
except Exception as e:
logging.exception('Error loading %s: %s\n%s' % (url, e, traceback.format_exc()))
return LoadResult(LoadResult.FAILURE_UNKNOWN, url)
| dtnaylor/web-profiler | webloader/nodejs_loader.py | Python | mit | 3,959 |
from flask import render_template, flash, request, redirect, url_for
from flask_login import login_required
from kernel import agileCalendar
from kernel.DataBoard import Data
from kernel.NM_Aggregates import WorkBacklog, DevBacklog, RiskBacklog
from kconfig import coordinationBookByName
from . import coordination
__author__ = 'Manuel Escriche'
@coordination.route("/")
@coordination.route("/overview")
@login_required
def overview():
return redirect(url_for('coordination.delivery'))
@coordination.route("/success-stories")
@login_required
def success_stories():
cmp = coordinationBookByName['SuccessStories']
backlog = RiskBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/success_stories.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/friendliness")
@login_required
def friendliness():
cmp = coordinationBookByName['Friendliness']
backlog = RiskBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/friendliness.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/qualityassurance")
@login_required
def qualityassurance():
cmp = coordinationBookByName['QualityAssurance']
backlog = RiskBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/quality_assurance.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/issues")
@login_required
def issues():
cmp = coordinationBookByName['Issues']
backlog = RiskBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/issues.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/risks")
@login_required
def risks():
cmp = coordinationBookByName['Risks']
backlog = RiskBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/risks.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/delivery")
@login_required
def delivery():
cmp = coordinationBookByName['Deliverables']
backlog = WorkBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/delivery.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/docs")
@login_required
def docs():
cmp = coordinationBookByName['Documentation']
backlog = WorkBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/docs.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/agile")
@login_required
def agile():
cmp = coordinationBookByName['Agile']
backlog = WorkBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/agile.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
@coordination.route("/scrum-master")
@login_required
def scrumtools():
cmp = coordinationBookByName['SMTools']
backlog = DevBacklog(*Data.getGlobalComponent(cmp.key))
if backlog.source == 'store':
flash('Data from local storage obtained at {}'.format(backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'timeSlot'
return render_template('coordination/scrum_tools.html',
comp=cmp,
reporter=backlog,
sortedby=sortedby,
calendar=agileCalendar)
| flopezag/fiware-backlog | app/coordination/views.py | Python | apache-2.0 | 6,105 |
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django import forms
from models import *
class WaiverForm(forms.ModelForm):
"A form for submitting waivers"
class Meta:
model = RGUser
fields = ['name', 'email', 'address', 'zip_code', 'emergency_contact', 'emergency_care', 'url']
| RobotGarden/rgdjapps | views.py | Python | bsd-2-clause | 377 |
import datetime
from mock import patch
import urllib
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from django.conf import settings
from django.test.utils import override_settings
import django.contrib.auth.models as auth_models
from toolkit.members.models import Member
import toolkit.members.member_views as member_views
from .common import MembersTestsMixin
class AddMemberIPAuth(TestCase):
def setUp(self):
factory = RequestFactory()
self.url = reverse("add-member")
self.request = factory.get(self.url)
self.request.user = auth_models.AnonymousUser()
def test_auth_by_ip_matching_ip_denied(self):
# Request should be denied from 127.0.0.1
# Check that this shouldn't work
self.assertNotIn('127.0.0.1', settings.CUBE_IP_ADDRESSES)
# Issue the request
response = member_views.add_member(self.request)
expected_redirect = (
"{0}?next={1}"
.format(reverse("login"), self.url)
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], expected_redirect)
def test_auth_by_ip_matching_ip_permitted(self):
# Request should be permitted from IP in settings
# Check that this should work:
self.assertTrue(len(settings.CUBE_IP_ADDRESSES))
# set source IP:
self.request.META['REMOTE_ADDR'] = settings.CUBE_IP_ADDRESSES[0]
# Issue the request
response = member_views.add_member(self.request)
self.assertEqual(response.status_code, 200)
self.assertNotIn('Location', response)
@patch('toolkit.members.models.timezone_now')
class TestMemberModelManagerBase(MembersTestsMixin):
def test_email_recipients(self, now_mock):
recipients = Member.objects.mailout_recipients()
self.assertEqual(recipients.count(), 6)
for member in recipients:
self.assertTrue(member.mailout)
self.assertFalse(member.mailout_failed)
self.assertTrue(member.email)
def test_expired(self, now_mock):
now_mock.return_value.date.return_value = \
datetime.date(day=1, month=6, year=2010)
members = Member.objects.expired().all()
self.assertEqual(len(members), 1)
self.assertEqual(members[0], self.mem_2)
def test_unexpired(self, now_mock):
now_mock.return_value.date.return_value = \
datetime.date(day=1, month=6, year=2010)
members = Member.objects.unexpired().all()
self.assertEqual(len(members), 8)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
class TestMemberModelManagerExpiryEnabled(TestMemberModelManagerBase,
TestCase):
pass
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
class TestMemberModelManagerExpiryDisabled(TestMemberModelManagerBase,
TestCase):
pass
class TestMemberModel(TestCase):
def setUp(self):
member_one = Member(name="Member One", number="1",
email="one@example.com")
member_one.save()
def test_membership_number_no_existing(self):
new_member = Member(name="Member two", email="two@example.com")
new_member.save()
self.assertEqual(str(new_member.pk), new_member.number)
def test_membership_number_exists(self):
old_member = Member.objects.get(id=1)
old_member.number = "2"
old_member.save()
new_member = Member(name="Member two", email="two@example.com")
new_member.save()
self.assertEqual("100002", new_member.number)
def test_membership_number_exists_twice(self):
old_member = Member.objects.get(id=1)
old_member.number = "3"
old_member.save()
new_member_one = Member(name="Member two", email="two@example.com")
new_member_one.save()
new_member_one.number = "100003"
new_member_one.save()
new_member_two = Member(name="Member two", email="two@example.com")
new_member_two.save()
self.assertEqual("200003", new_member_two.number)
def test_membership_number_custom(self):
new_member = Member(name="Member two", email="two@example.com")
new_member.number = "Orange squash"
new_member.save()
new_member = Member.objects.get(id=new_member.pk)
self.assertEqual(new_member.number, "Orange squash")
def test_membership_number_custom_edit(self):
old_member = Member.objects.get(id=1)
old_member.number = "Orange squash"
old_member.save()
old_member = Member.objects.get(id=1)
self.assertEqual(old_member.number, "Orange squash")
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
@override_settings(MEMBERSHIP_LENGTH_DAYS=100)
@patch('toolkit.members.models.timezone_now')
def test_default_expiry_expiry_enabled(self, now_mock):
now_mock.return_value.date.return_value = \
datetime.date(day=1, month=1, year=2000)
new_member = Member(name="New Member")
new_member.save()
new_member.refresh_from_db()
self.assertEqual(new_member.membership_expires,
datetime.date(2000, 4, 10))
self.assertFalse(new_member.has_expired())
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
def test_default_expiry_expiry_disabled(self):
new_member = Member(name="New Member")
new_member.save()
new_member.refresh_from_db()
self.assertIsNone(new_member.membership_expires)
self.assertFalse(new_member.has_expired())
class TestAddMemberView(MembersTestsMixin, TestCase):
def setUp(self):
super(TestAddMemberView, self).setUp()
self.assertTrue(self.client.login(
username="admin", password="T3stPassword!"))
def tearDown(self):
self.client.logout()
def test_get_form(self):
url = reverse("add-member")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_new_member.html")
def _test_post_form_common(self, now_mock, expiry_enabled):
now_mock.return_value.date.return_value = \
datetime.date(day=1, month=1, year=2000)
new_name = u"Some New \u20acejit"
self.assertEqual(Member.objects.filter(name=new_name).count(), 0)
url = reverse("add-member")
response = self.client.post(url, data={
u"name": new_name,
u"email": u"blah.blah-blah@hard-to-tell-if-genuine.uk",
u"postcode": "SW1A 1AA",
u"mailout": "on",
}, follow=True)
self.assertRedirects(response, url)
self.assertTemplateUsed(response, "form_new_member.html")
member = Member.objects.get(name=new_name)
self.assertEqual(
member.email, u"blah.blah-blah@hard-to-tell-if-genuine.uk")
self.assertEqual(member.postcode, u"SW1A 1AA")
self.assertEqual(member.mailout, True)
if expiry_enabled:
self.assertEqual(member.membership_expires,
datetime.date(2000, 4, 11))
else:
self.assertIsNone(member.membership_expires)
self.assertContains(
response, u"Added member: {0}".format(member.number))
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
@patch('toolkit.members.models.timezone_now')
def test_post_form_expiry_disabled(self, now_mock):
self._test_post_form_common(now_mock, expiry_enabled=False)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
@override_settings(MEMBERSHIP_LENGTH_DAYS=101)
@patch('toolkit.members.models.timezone_now')
def test_post_form_expiry_enabled(self, now_mock):
self._test_post_form_common(now_mock, expiry_enabled=True)
def test_post_minimal_submission(self):
new_name = u"Another New \u20acejit"
self.assertEqual(Member.objects.filter(name=new_name).count(), 0)
url = reverse("add-member")
response = self.client.post(url, data={
u"name": new_name,
}, follow=True)
self.assertRedirects(response, url)
self.assertTemplateUsed(response, "form_new_member.html")
member = Member.objects.get(name=new_name)
self.assertEqual(member.email, u"")
self.assertEqual(member.postcode, u"")
self.assertEqual(member.is_member, False)
self.assertContains(
response, u"Added member: {0}".format(member.number))
def test_post_form_invalid_data_missing(self):
count_before = Member.objects.count()
url = reverse("add-member")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_new_member.html")
self.assertFormError(response, 'form', 'name',
u'This field is required.')
self.assertEqual(count_before, Member.objects.count())
def test_post_form_invalid_duplicate_email(self):
count_before = Member.objects.count()
url = reverse("add-member")
response = self.client.post(url, data={
"name": "another new member",
"email": self.mem_1.email,
"mailout": "on",
}, follow=True)
# Should have redirected to the search form, with the email address as
# the search term:
expected_url = reverse("search-members")
self.assertRedirects(response, expected_url
+ "?email=%s&q=" % self.mem_1.email)
self.assertTemplateUsed(response, "search_members_results.html")
# A new shouldn't have been created
self.assertEqual(count_before, Member.objects.count())
def test_invalid_method(self):
url = reverse("add-member")
response = self.client.put(url)
self.assertEqual(response.status_code, 405)
class TestSearchMemberView(MembersTestsMixin, TestCase):
def setUp(self):
super(TestSearchMemberView, self).setUp()
self.assertTrue(self.client.login(
username="admin", password="T3stPassword!"))
def tearDown(self):
self.client.logout()
@patch('toolkit.members.member_views.Member')
def test_no_query(self, member_patch):
url = reverse("search-members")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "search_members.html")
self.assertFalse(member_patch.objects.filter.called)
def _common_test_query_with_results(self, now_mock, expiry_enabled):
now_mock.return_value.date.return_value = \
datetime.date(day=1, month=6, year=2010)
url = reverse("search-members")
response = self.client.get(url, data={'q': u'member'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "search_members_results.html")
self.assertContains(
response, u"<td><a href='/members/1'>Member On\u0205</a></td>",
html=True)
self.assertContains(
response, u'<a href="mailto:one@example.com">one@example.com</a>',
html=True)
self.assertContains(response, u"<td>BS1 1AA</td>", html=True)
self.assertContains(
response, u"<td><a href='/members/2'>Tw\u020d Member</a></td>",
html=True)
self.assertContains(
response, u'<a href="mailto:two@example.com">two@example.com</a>',
html=True)
self.assertContains(
response, u"<td><a href='/members/3'>Some Third Chap</a></td>",
html=True)
self.assertContains(
response,
u'<td><a href="mailto:two@member.test">two@member.test</a></td>',
html=True)
self.assertContains(response, u"<td>NORAD</td>", html=True)
if expiry_enabled:
self.assertContains(response,
"<th>Membership expires</th>",
html=True)
self.assertContains(response,
'<td class="expired">31/05/2010</td>',
html=True)
self.assertContains(response,
'<td>01/06/2010</td>',
html=True)
self.assertContains(response, "expires")
else:
self.assertNotContains(response, "expires")
# Should have Edit / Delete buttons:
self.assertContains(
response, u'<input type="submit" value="Edit">', html=True)
self.assertContains(
response, u'<input type="submit" value="Delete">', html=True)
expected_edit_form = ('<form method="get" action="{0}">'
'<input type="submit" value="Edit"></form>'
.format(reverse(
"edit-member", kwargs={"member_id": 3})))
expected_delete_form = ('<form class="delete" method="post" '
'action="{0}">'
.format(reverse(
"delete-member", kwargs={"member_id": 3})))
self.assertContains(response, expected_edit_form)
self.assertContains(response, expected_delete_form)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
@patch('toolkit.members.models.timezone_now')
def test_query_with_results_expiry_disabled(self, now_mock):
self._common_test_query_with_results(now_mock, expiry_enabled=False)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
@patch('toolkit.members.models.timezone_now')
def test_query_with_results_expiry_enabled(self, now_mock):
self._common_test_query_with_results(now_mock, expiry_enabled=True)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
@patch('toolkit.members.models.timezone_now')
def test_email_query_with_results_expiry_disabled(self, now_mock):
url = reverse("search-members")
response = self.client.get(url, data={'email': self.mem_2.email})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "search_members_results.html")
self.assertContains(
response, u"<td><a href='/members/2'>Tw\u020d Member</a></td>",
html=True)
self.assertContains(
response, u'<a href="mailto:two@example.com">two@example.com</a>',
html=True)
self.assertNotContains(response, "expires")
def test_query_no_results(self):
url = reverse("search-members")
testcases = [
("q", {'q': 'toast'}),
("email", {'email': 'toast@infinite.monkey'}),
("both", {'q': 'tost', 'email': 'toast@infinite.monkey'}),
]
for name, testcase in testcases:
with self.subTest(name):
response = self.client.get(url, data=testcase)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
"search_members_results.html")
def test_email_query_no_results(self):
url = reverse("search-members")
response = self.client.get(url, data={'email': u'toast@infinity.com'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "search_members_results.html")
class TestDeleteMemberViewLoggedIn(MembersTestsMixin, TestCase):
def setUp(self):
super().setUp()
self.assertTrue(self.client.login(
username="admin", password="T3stPassword!"))
def tearDown(self):
self.client.logout()
def test_delete_non_volunteer(self):
self.assertEqual(Member.objects.filter(id=1).count(), 1)
url = reverse("delete-member", kwargs={"member_id": 1})
response = self.client.post(url, follow=True)
self.assertRedirects(response, reverse("search-members"))
self.assertContains(response, u"Deleted member: 1 (Member On\u0205)")
self.assertEqual(Member.objects.filter(id=1).count(), 0)
def test_delete_volunteer(self):
mem = self.vol_1.member
self.assertTrue(Member.objects.filter(id=mem.id).exists())
url = reverse("delete-member", kwargs={"member_id": mem.id})
response = self.client.post(url, follow=True)
self.assertRedirects(response, reverse("search-members"))
self.assertContains(
response, "Can't delete active volunteer %s" % mem.name)
self.assertTrue(Member.objects.filter(id=mem.id).exists())
def test_delete_nonexistent(self):
url = reverse("delete-member", kwargs={"member_id": 1000})
response = self.client.post(url)
self.assertEqual(response.status_code, 404)
def test_delete_get_form_no_key_logged_in(self):
self.assertEqual(Member.objects.filter(id=1).count(), 1)
url = reverse("delete-member", kwargs={"member_id": 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 405)
self.assertEqual(Member.objects.filter(id=1).count(), 1)
class TestDeleteMemberViewNotLoggedIn(MembersTestsMixin, TestCase):
def _assert_redirect_to_login(self, response, url, extra_parameters=""):
expected_redirect = (
reverse("login") +
"?next=" +
urllib.parse.quote(url + extra_parameters)
)
self.assertRedirects(response, expected_redirect)
def setUp(self):
super().setUp()
self.assertEqual(Member.objects.filter(id=self.mem_1.id).count(), 1)
def tearDown(self):
super().tearDown()
def test_delete_get_form_no_key(self):
url = reverse("delete-member", kwargs={"member_id": 1})
response = self.client.get(url)
self._assert_redirect_to_login(response, url)
self.assertEqual(Member.objects.filter(id=1).count(), 1)
def test_delete_get_form_wrong_key(self):
url = reverse("delete-member", kwargs={"member_id": 1})
response = self.client.get(url, data={"k": "badkey"})
self._assert_redirect_to_login(response, url, "?k=badkey")
self.assertEqual(Member.objects.filter(id=self.mem_1.id).count(), 1)
def test_delete_get_form_valid_key_no_confirmation(self):
url = reverse("delete-member", kwargs={"member_id": self.mem_1.id})
for confirmed in ["no", "nope", "", None, "1", "0", "yeees", "Yes"]:
data = {"k": self.mem_1.mailout_key}
with self.subTest(confirmed_string=confirmed):
if confirmed is not None:
data["confirmed"] = confirmed
response = self.client.get(url, data=data)
# Shouldn't have been deleted yet:
self.assertEqual(
Member.objects.filter(id=self.mem_1.id).count(), 1)
# Should have used the "pls confirm" form:
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "confirm-deletion.html")
def test_delete_get_form_valid_key_confirm(self):
url = reverse("delete-member", kwargs={"member_id": self.mem_1.id})
response = self.client.get(
url,
data={
"k": self.mem_1.mailout_key,
"confirmed": "yes",
})
# Should have been deleted:
self.assertEqual(Member.objects.filter(id=self.mem_1.id).count(), 0)
self.assertRedirects(response, reverse("goodbye"))
def test_delete_active_volunteer_fails(self):
mem = self.vol_1.member
self.assertTrue(Member.objects.filter(id=mem.id).exists())
url = reverse("delete-member", kwargs={"member_id": mem.id})
response = self.client.get(
url,
data={
"k": mem.mailout_key,
# confirmed shouldn't make a difference, but belt+braces:
"confirmed": "yes",
})
# Should not have been deleted:
self.assertTrue(Member.objects.filter(id=mem.id).exists())
# Should have been politely told to email the admins:
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "email_admin.html")
def test_delete_inactive_volunteer_succeeds(self):
mem = self.vol_1.member
self.assertTrue(Member.objects.filter(id=mem.id).exists())
# Retire:
self.vol_1.active = False
self.vol_1.save()
url = reverse("delete-member", kwargs={"member_id": mem.id})
response = self.client.get(
url,
data={
"k": mem.mailout_key,
"confirmed": "yes",
})
# Should have been deleted:
self.assertEqual(Member.objects.filter(id=mem.id).count(), 0)
self.assertRedirects(response, reverse("goodbye"))
class TestEditMemberViewNotLoggedIn(MembersTestsMixin, TestCase):
def setUp(self):
super(TestEditMemberViewNotLoggedIn, self).setUp()
def _assert_redirect_to_login(self, response, url, extra_parameters=""):
expected_redirect = (
reverse("login") +
"?next=" +
urllib.parse.quote(url + extra_parameters)
)
self.assertRedirects(response, expected_redirect)
# GET tests ###########################################
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
def test_edit_get_form(self):
member = Member.objects.get(pk=2)
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.get(url, data={
'k': member.mailout_key,
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
# Shouldn't have these fields
self.assertNotContains(response, "expires:")
self.assertNotContains(response, "Is member")
def test_edit_get_form_no_key(self):
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.get(url)
self._assert_redirect_to_login(response, url)
def test_edit_get_form_incorrect_key(self):
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.get(url, data={
'k': "the WRONG KEY",
})
self._assert_redirect_to_login(response, url, "?k=the+WRONG+KEY")
def test_edit_get_form_invalid_memberid(self):
url = reverse("edit-member", kwargs={"member_id": 21212})
response = self.client.get(url, data={
'k': "the WRONG KEY",
})
# If the member doesn't exist then don't give a specific error to that
# effect, just redirect to the login page:
self._assert_redirect_to_login(response, url, "?k=the+WRONG+KEY")
# POST tests ###########################################
def test_edit_post_form_minimal_data(self):
new_name = u'N\u018EW Name'
member = Member.objects.get(pk=2)
self.assertEqual(member.name, u"Tw\u020d Member")
member_mailout_key = member.mailout_key
self.assertTrue(member.is_member)
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'name': new_name,
'k': member_mailout_key,
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
member = Member.objects.get(pk=2)
self.assertEqual(member.name, new_name)
self.assertEqual(member.email, "")
self.assertEqual(member.address, "")
self.assertEqual(member.posttown, "")
self.assertEqual(member.postcode, "")
self.assertEqual(member.country, "")
self.assertEqual(member.website, "")
self.assertEqual(member.phone, "")
self.assertEqual(member.altphone, "")
self.assertEqual(member.notes, "")
self.assertFalse(member.mailout)
self.assertFalse(member.mailout_failed)
# Shouldn't have been changed:
self.assertTrue(member.is_member)
# Shouldn't have been changed:
self.assertEqual(member.mailout_key, member_mailout_key)
self.assertContains(response, new_name)
self.assertContains(response, "Member 02 updated")
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
def test_edit_post_form_all_data(self):
new_name = u'N\u018EW Name'
member = Member.objects.get(pk=2)
self.assertEqual(member.name, u"Tw\u020d Member")
member_mailout_key = member.mailout_key
self.assertTrue(member.is_member)
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'name': new_name,
'email': 'snoo@whatver.com',
'k': member_mailout_key,
'address': "somewhere over the rainbow, I guess",
'posttown': "Town Town Town!",
'postcode': "< Sixteen chars?",
'country': "Suriname",
'website': "http://don't_care/",
'phone': "+44 0000000000000001",
'altphone': "-1 3202394 2352 23 234",
'notes': "plays the balalaika really badly",
'mailout': "t",
'mailout_failed': "t",
'is_member': "t",
# Should be ignored:
"mailout_key": "sinister",
'membership_expires': "01/01/2020",
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
member = Member.objects.get(pk=2)
self.assertEqual(member.name, new_name)
self.assertEqual(member.email, 'snoo@whatver.com')
self.assertEqual(member.address, "somewhere over the rainbow, I guess")
self.assertEqual(member.posttown, "Town Town Town!")
self.assertEqual(member.postcode, "< Sixteen chars?")
self.assertEqual(member.country, "Suriname")
self.assertEqual(member.website, "http://don't_care/")
self.assertEqual(member.phone, "+44 0000000000000001")
self.assertEqual(member.altphone, "-1 3202394 2352 23 234")
self.assertEqual(member.notes, "plays the balalaika really badly")
self.assertTrue(member.mailout)
self.assertTrue(member.is_member)
# Shouldn't have been changed:
self.assertEqual(member.mailout_key, member_mailout_key)
self.assertEqual(member.membership_expires,
datetime.date(day=31, month=5, year=2010))
self.assertContains(response, new_name)
self.assertContains(response, "Member 02 updated")
def test_edit_post_form_invalid_emails(self):
new_name = u'N\u018EW Name'
member = Member.objects.get(pk=2)
self.assertEqual(member.name, u"Tw\u020d Member")
member_mailout_key = member.mailout_key
self.assertTrue(member.is_member)
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'name': new_name,
'email': 'definitely_invalid@example/com',
'k': member_mailout_key,
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
self.assertFormError(response, 'form', 'email',
u'Enter a valid email address.')
member = Member.objects.get(pk=2)
self.assertNotEqual(member.name, new_name)
self.assertEqual(member.email, "two@example.com")
self.assertEqual(member.mailout_key, member_mailout_key)
def test_edit_post_form_invalid_data_missing(self):
member = Member.objects.get(pk=2)
start_name = member.name
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'k': member.mailout_key,
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
# Only mandatory field is "name":
self.assertFormError(response, 'form', 'name',
u'This field is required.')
member = Member.objects.get(pk=2)
self.assertEqual(start_name, member.name)
def test_edit_post_form_no_key(self):
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url)
self._assert_redirect_to_login(response, url)
def test_edit_post_form_incorrect_key(self):
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'k': "the WRONG KEY",
})
self._assert_redirect_to_login(response, url)
def test_edit_post_form_invalid_memberid(self):
url = reverse("edit-member", kwargs={"member_id": 21212})
response = self.client.post(url, data={
'k': "the WRONG KEY",
})
# If the member doesn't exist then don't give a specific error to that
# effect, just redirect to the login page:
self._assert_redirect_to_login(response, url)
class TestEditMemberViewLoggedIn(MembersTestsMixin, TestCase):
def setUp(self):
super(TestEditMemberViewLoggedIn, self).setUp()
self.assertTrue(self.client.login(
username="admin", password="T3stPassword!"))
def tearDown(self):
self.client.logout()
# GET tests ###########################################
def test_edit_get_form(self):
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
def test_edit_get_form_invalid_memberid(self):
url = reverse("edit-member", kwargs={"member_id": 21212})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# POST tests ###########################################
# Only test differences from not logged in view...
def _test_edit_post_form_minimal_data_common(self):
new_name = u'N\u018EW Name'
member = Member.objects.get(pk=2)
self.assertEqual(member.name, u"Tw\u020d Member")
member_mailout_key = member.mailout_key
membership_expires = member.membership_expires
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(
url, data={'name': new_name, }, follow=True)
member = Member.objects.get(pk=2)
# New name set:
self.assertEqual(member.name, new_name)
# Secret key shouldn't have been changed:
self.assertEqual(member.mailout_key, member_mailout_key)
# Expiry date shouldn't have changed:
self.assertEqual(member.membership_expires, membership_expires)
# Should redirect to search page, with a success message inserted:
self.assertRedirects(response, reverse("search-members"))
self.assertContains(response, "Member 02 updated")
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
def test_edit_post_form_minimal_data_expiry_enabled(self):
self._test_edit_post_form_minimal_data_common()
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
def test_edit_post_form_minimal_data_expiry_disabled(self):
self._test_edit_post_form_minimal_data_common()
def _test_edit_post_form_modify_expiry(self, expiry_enabled):
member = Member.objects.get(pk=2)
membership_expires = member.membership_expires
url = reverse("edit-member", kwargs={"member_id": 2})
self.client.post(url, data={
'name': member.name,
# Always try to set. Should only succeed if expiry is enabled.
'membership_expires': "01/02/1980",
}, follow=True)
member = Member.objects.get(pk=2)
        # Expiry date should only have been changed if expiry editing is enabled:
if expiry_enabled:
self.assertEqual(member.membership_expires,
datetime.date(1980, 2, 1))
else:
self.assertEqual(member.membership_expires, membership_expires)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
def test_edit_post_modify_expiry_expiry_enabled(self):
self._test_edit_post_form_modify_expiry(expiry_enabled=True)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=False)
def test_edit_post_form_modify_expiry_expiry_disabled(self):
self._test_edit_post_form_modify_expiry(expiry_enabled=False)
@override_settings(MEMBERSHIP_EXPIRY_ENABLED=True)
def test_edit_post_form_invalid_data_missing(self):
member = Member.objects.get(pk=2)
start_name = member.name
url = reverse("edit-member", kwargs={"member_id": 2})
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member.html")
# Only mandatory field is "name":
self.assertFormError(response, 'form', 'name',
u'This field is required.')
member = Member.objects.get(pk=2)
self.assertEqual(start_name, member.name)
def test_edit_post_form_invalid_memberid(self):
url = reverse("edit-member", kwargs={"member_id": 21212})
response = self.client.post(url)
self.assertEqual(response.status_code, 404)
class TestUnsubscribeMemberView(MembersTestsMixin, TestCase):
def setUp(self):
super(TestUnsubscribeMemberView, self).setUp()
def _assert_redirect_to_login(self, response, url, extra_parameters=""):
expected_redirect = (
reverse("login") +
"?next=" +
urllib.parse.quote(url + extra_parameters)
)
self.assertRedirects(response, expected_redirect)
def _assert_subscribed(self, member_id):
member = Member.objects.get(pk=member_id)
self.assertTrue(member.mailout)
def _assert_unsubscribed(self, member_id):
member = Member.objects.get(pk=member_id)
self.assertFalse(member.mailout)
# GET tests ###########################################
def test_unsubscribe_get_form(self):
self._assert_subscribed(2)
member = Member.objects.get(pk=2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.get(url, data={
'k': member.mailout_key,
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member_edit_subs.html")
# Still subscribed:
self._assert_subscribed(2)
def test_unsubscribe_get_form_no_key(self):
self._assert_subscribed(2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.get(url)
self._assert_redirect_to_login(response, url)
# Still subscribed:
self._assert_subscribed(2)
def test_unsubscribe_get_form_incorrect_key(self):
self._assert_subscribed(2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.get(url, data={
'k': "the WRONG KEY",
})
self._assert_redirect_to_login(response, url, "?k=the+WRONG+KEY")
self._assert_subscribed(2)
def test_unsubscribe_get_form_invalid_memberid(self):
url = reverse("unsubscribe-member", kwargs={"member_id": 21212})
response = self.client.get(url, data={
'k': "the WRONG KEY",
})
# If the member doesn't exist then don't give a specific error to that
# effect, just redirect to the login page:
self._assert_redirect_to_login(response, url, "?k=the+WRONG+KEY")
# POST tests ##########################################
def test_unsubscribe_post_form(self):
self._assert_subscribed(2)
member = Member.objects.get(pk=2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'k': member.mailout_key,
'action': 'unsubscribe',
'confirm': 'yes',
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member_edit_subs.html")
self.assertContains(response, u"Member 02 unsubscribed")
# Not subscribed:
self._assert_unsubscribed(2)
def test_subscribe_post_form(self):
member = Member.objects.get(pk=2)
member.mailout = False
member.save()
self._assert_unsubscribed(2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'k': member.mailout_key,
'action': 'subscribe',
'confirm': 'yes',
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member_edit_subs.html")
self.assertContains(response, u"Member 02 subscribed")
# subscribed:
self._assert_subscribed(2)
def test_unsubscribe_post_form_no_confirm(self):
self._assert_subscribed(2)
member = Member.objects.get(pk=2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'k': member.mailout_key,
'action': 'unsubscribe',
})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "form_member_edit_subs.html")
self.assertNotContains(response, u"Member 02 unsubscribed")
# Still subscribed:
self._assert_subscribed(2)
def test_unsubscribe_post_form_invalid_key(self):
self._assert_subscribed(2)
member = Member.objects.get(pk=2)
url = reverse("unsubscribe-member", kwargs={"member_id": 2})
response = self.client.post(url, data={
'k': member.mailout_key + "x",
'action': 'unsubscribe',
'confirm': 'yes',
})
self._assert_redirect_to_login(response, url)
# Still subscribed:
self._assert_subscribed(2)
# TODO: Should add further tests for when the user is logged in. But
# it's not actually used, so don't bother...
class TestMemberMiscViews(MembersTestsMixin, TestCase):
def setUp(self):
super(TestMemberMiscViews, self).setUp()
self.assertTrue(self.client.login(
username="admin", password="T3stPassword!"))
def tearDown(self):
self.client.logout()
# The SQL query used for the stats doesn't work with SQLite!
# def test_get_stats(self):
# url = reverse("member-statistics")
# response = self.client.get(url)
# self.assertTemplateUsed(response, "stats.html")
def test_post_stats(self):
url = reverse("member-statistics")
response = self.client.post(url)
self.assertEqual(response.status_code, 405)
def test_get_homepages(self):
url = reverse("member-homepages")
response = self.client.get(url)
self.assertTemplateUsed(response, "homepages.html")
self.assertContains(
response,
u'<a href="http://1.foo.test/" '
u'rel="nofollow">http://1.foo.test/</a>',
html=True)
self.assertContains(
response,
u'<a href="http://two.foo.test/" '
u'rel="nofollow">http://two.foo.test/</a>',
html=True)
def test_post_homepages(self):
url = reverse("member-homepages")
response = self.client.post(url)
self.assertEqual(response.status_code, 405)
def test_view_member(self):
url = reverse("view-member", kwargs={"member_id": 3})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "view_member.html")
self.assertContains(
response, u"Some Third Chap")
self.assertContains(
response, "two@member.test")
self.assertContains(
response, u"NORAD")
def test_view_non_existant_member(self):
url = reverse("view-member", kwargs={"member_id": 999})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
| BenMotz/cubetoolkit | toolkit/members/tests/test_members.py | Python | agpl-3.0 | 40,693 |
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Starts the server for Mac. If a globe is specified,
# the server is started with that globe.
"""Starts the server in the Mac OS."""
import os
import sys
import time
import urllib
import portable_config
def IsServerRunning(port):
"""Returns whether server is already running."""
try:
fp = urllib.urlopen("http://localhost:%s/ping" % port)
fp.close()
  except:
    # Ok, if there isn't a server running.
    return False
return True
def StopServer(port):
"""Stops server already running on the config port."""
try:
fp = urllib.urlopen("http://localhost:%s/setup?cmd=quit" % port)
fp.close()
except:
pass
def StartServer(globe=""):
"""Starts server on the config port."""
globe = globe.replace("'", "\\'")
cmd = ("../../../portable_server.app/Contents/MacOS/portable_server '%s' &" %
globe)
print "Running %s" % cmd
os.system(cmd)
def main(argv):
port = portable_config.PortableConfig().Port()
if IsServerRunning(port):
StopServer(port)
if len(argv) > 1:
StartServer(argv[1])
else:
StartServer()
# Give the server a chance to get started.
time.sleep(2)
cmd = "open http://localhost:%s" % port
print "Running %s" % cmd
os.system(cmd)
print "Done."
if __name__ == "__main__":
main(sys.argv)
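# Example invocation (a sketch; the globe file name and extension are
# illustrative, and the served port comes from portable_config):
#
#   python globe_selector.py                 # restart the server with no globe
#   python globe_selector.py my_globe.glb    # restart the server with a globe
#
# Either way the script then opens http://localhost:<port> in the default browser.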
| tst-mswartz/earthenterprise | earth_enterprise/src/fusion/portableglobe/servers/mac/globe_selector.py | Python | apache-2.0 | 1,895 |
import json
import os
import tempfile
import shutil
from git.remote import PushInfo
import pytest
import requests_mock
from click.testing import CliRunner
# By doing this import we make sure that the plugin is made available
# but the entry points loading inside gg.main.
# An alternative would we to set `PYTHONPATH=. py.test` (or something)
# but then that wouldn't test the entry point loading.
from gg.main import Config
from gg.builtins.push.gg_push import push
@pytest.fixture(autouse=True)
def requestsmock():
"""Return a context where requests are all mocked.
Usage::
def test_something(requestsmock):
requestsmock.get(
'https://example.com/path'
content=b'The content'
)
# Do stuff that involves requests.get('http://example.com/path')
"""
with requests_mock.mock() as m:
yield m
@pytest.yield_fixture
def temp_configfile():
tmp_dir = tempfile.mkdtemp("gg-start")
fp = os.path.join(tmp_dir, "state.json")
with open(fp, "w") as f:
json.dump({}, f)
yield fp
shutil.rmtree(tmp_dir)
def test_push(temp_configfile, mocker):
mocked_git = mocker.patch("git.Repo")
mocked_git().working_dir = "gg-start-test"
mocked_git().remotes.__getitem__().push.return_value = [
PushInfo(0, "All is well", None, None, "origin")
]
state = json.load(open(temp_configfile))
state["FORK_NAME"] = "peterbe"
with open(temp_configfile, "w") as f:
json.dump(state, f)
runner = CliRunner()
config = Config()
config.configfile = temp_configfile
result = runner.invoke(push, [], obj=config)
if result.exception:
raise result.exception
assert result.exit_code == 0
assert not result.exception
mocked_git().remotes.__getitem__().push.assert_called_with()
| peterbe/gg | gg/builtins/push/tests/test_gg_push.py | Python | mit | 1,854 |
class Person(object):
def __init__(self, name):
self.na##|me = name
def __str__(self):
return "Person " + self.name
Person("Lancelot").name
robin = Person("Sir Robin")
print robin
print robin.name
##r
class Person(object):
def __init__(self, name):
self.p = name
def __str__(self):
return "Person " + self.p
Person("Lancelot").p
robin = Person("Sir Robin")
print robin
print robin.p
| aptana/Pydev | tests/org.python.pydev.refactoring.tests/src/python/coderefactoring/rename/successful/testRenameAttribute1.py | Python | epl-1.0 | 455 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# nodectl
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Ryan Barry <rbarry@redhat.com>
#
import json
import logging
import re
import sys
from imgbased.bootloader import BootConfiguration
from .utils import string_types
log = logging.getLogger()
class Info(object):
"""Fetches and displays some information about the running node:
Bootloader information
Layout information
"""
results = dict()
def __init__(self, app, machine=False):
self.app = app
self.machine = machine
self._fetch_information()
def _fetch_information(self):
self._get_bootloader_info()
self._get_layout()
def _get_bootloader_info(self):
b = BootConfiguration()
bootinfo = dict()
bootinfo["default"] = b.get_default()
bootinfo["entries"] = dict()
for k, v in b.list().items():
# FIXME: this isn't very nice. GrubbyEntry should present
# a clean way for a dict which can be JSON serializable.
# json chokes with __repr__, so maybe a custom decoder?
for entry in v:
bootinfo["entries"][entry.title] = entry.__dict__
self.results["bootloader"] = bootinfo
def _get_layout(self):
layout = LayoutParser(self.app.imgbase.layout()).parse()
self.results["layers"] = layout
self.results["current_layer"] = \
str(self.app.imgbase.current_layer())
def write(self):
def pretty_print(k, indent=0):
sys.stdout.write('{0}{1}: '.format(' ' * indent, k[0]))
if isinstance(k[1], string_types):
sys.stdout.write('{0}\n'.format(k[1]))
elif isinstance(k[1], dict):
sys.stdout.write('\n')
items = list(k[1].items())
if k[0] == "entries": # bootloader entries
items.sort(key=lambda x: x[1]["index"])
for item in items:
pretty_print(item, indent+2)
elif isinstance(k[1], list):
sys.stdout.write('\n')
for item in k[1]:
print('{0}{1}'.format(' ' * (indent + 2), item))
sys.stdout.flush()
if self.machine:
print(json.dumps(self.results))
else:
# Neither JSON nor YAML gives a very nice output here, so use
# our own formatter, since pprint includes sigils
for k in self.results.items():
pretty_print(k)
class LayoutParser(object):
"""This parser grabs the output of "imgbase layout" and turns it into
something which is easily consumable by regular Python (until imgbased
itself can get some tweaking to make this better
"""
layout = None
def __init__(self, layout):
self.layout = layout
def parse(self):
result = dict()
        layouts = re.split(r'\n(?=\w)', self.layout, flags=re.M)
for current_layout in layouts:
lines = current_layout.splitlines()
parent = lines.pop(0)
result[parent] = []
for line in lines:
line = re.sub(r'^.*?(\w+)', r'\1', line)
result[parent].append(line)
return result
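# Minimal usage sketch for LayoutParser. The layout text below is illustrative
# rather than verbatim "imgbase layout" output, and this module's relative
# imports mean it is normally used as a library, so the example is left as a
# comment:
#
#   layout_text = ("Image-0.0\n"
#                  " +- Image-0.0+1\n"
#                  "Image-0.1\n"
#                  " +- Image-0.1+1\n")
#   LayoutParser(layout_text).parse()
#   # -> {'Image-0.0': ['Image-0.0+1'], 'Image-0.1': ['Image-0.1+1']}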
| oVirt/ovirt-node-ng | src/nodectl/info.py | Python | gpl-2.0 | 3,934 |
#!/usr/bin/python
from hwdata import PCI, USB
# for obtaining real id of your devices you can use package python-gudev
pci_vendor_id = '0e11'
pci_device_id = 'b01e'
usb_vendor_id = '03f0'
usb_device_id = '1f12'
pci = PCI()
print("Vendor: %s" % pci.get_vendor(pci_vendor_id))
print("Device: %s" % pci.get_device(pci_vendor_id, pci_device_id))
usb = USB()
print("Vendor: %s" % usb.get_vendor(usb_vendor_id))
print("Device: %s" % usb.get_device(usb_vendor_id, usb_device_id))
| colloquium/spacewalk | projects/python-hwdata/example.py | Python | gpl-2.0 | 484 |
# coding: utf-8
"""
Initializes flask server and assigns all routes by importing modules
"""
import flask
#import config
import util
from model.config import Config # NB The model module needs to be imported *after* setting CURRENT_VERSION_TIMESTAMP,
# since model.ndbModelBase uses it as default value for version_r property
app = flask.Flask(__name__) # pylint: disable=invalid-name
# note:Flask server doesn't need DEBUG parameter while developing, since server restarting is taken care by GAE SDK
#SECRET_KEY = CONFIG_DB.flask_secret.encode('ascii')
#model.AuthProvider.init()
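# Note: the local Config class below shadows the imported model.config.Config.
# Inside its class body the name "Config" still refers to the imported model
# (the new class name is only bound once the class statement completes), so
# CONFIG_DB is read from the datastore-backed master config.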
class Config(object):
DEVELOPMENT = util.DEVT
SECRET_KEY = util.randomB64()
CONFIG_DB = Config.get_master_db()
config = Config()
app.config.from_object(config)
util.debugDict(config,'my config ')
util.debugDict(app.config,'flask app config ')
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.line_comment_prefix = '##'
import auth # pylint: disable=unused-import
import control.error
import control.index
import control.user
import model # pylint: disable=unused-import
import task # pylint: disable=unused-import
from api import helpers
API = helpers.Api(app)
import api.v1 # pylint: disable=unused-import
import logging
logging.debug('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ main @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#logging.debug('####################################################### app id: %r ' , config.APPLICATION_ID)
# logging.debug('####################################################### cur ver id: %r' , config.CURRENT_VERSION_ID)
# logging.debug('####################################################### cur ver name: %r' , config.CURRENT_VERSION_NAME)
# logging.debug('####################################################### cur ver timestamp: %r',config.CURRENT_VERSION_TIMESTAMP)
#logging.debug('####################################################### cur ver datetime: %r', config.CURRENT_VERSION_DATE)
# shorts = [i for i[0] in config.authProviders]
# longs = [i for i[1] in config.authProviders]
# assert len(shorts) == len(set(shorts)), 'no short duplicates'
# assert len(longs ) == len(set(longs )), 'no long duplicates'
| chdb/DhammaMap1 | main/main.py | Python | mit | 2,227 |
from django.db import models
from datetime import datetime
# from django.contrib.auth.models import AbstractBaseUser
from mongoengine import *
from mongoengine.django.auth import *
from slugify import slugify
from django.core.urlresolvers import reverse
class Author(User):
firstname = StringField(required=True, max_length=45)
lastname = StringField(required=True, max_length=45)
email = EmailField(required=True, max_length=45)
def __unicode__(self):
return self.firstname+" "+self.lastname
class Category(Document):
user = ReferenceField(Author)
name = StringField(max_length=200,required=True)
def __unicode__(self):
return self.name
class Tag(Document):
user = ReferenceField(Author)
name = StringField(max_length=200,required=True)
def __unicode__(self):
return self.name
class Post(Document):
user = ReferenceField(User, reverse_delete_rule=CASCADE)
title = StringField(max_length=200, required=True)
content = StringField(required=True)
date_modified = DateTimeField(default=datetime.now)
is_published = BooleanField()
slug = StringField(max_length=200)
# image_url = StringField(max_length=200)
categories = ReferenceField(Category)
tags = ListField(ReferenceField(Tag),default=list)
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
return super(Post, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('blog:detail',args=[self.id])
class Comment(Document):
user = ReferenceField(User,reverse_delete_rule=CASCADE)
post = ReferenceField(Post, reverse_delete_rule=CASCADE)
text = StringField(required=True)
date = DateTimeField(default=datetime.now)
def __unicode__(self):
return self.text
| gusaul/gigsblog | apps/blog/models.py | Python | mit | 1,734 |
# Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
from __future__ import absolute_import
from heapq import heapify, heappop, heappush
from collections import deque
from . import BaseAgent
class AStarAgent(BaseAgent):
def __init__(self, name, nb_actions, visualizer=None):
super(AStarAgent, self).__init__(name, nb_actions, visualizer)
def _find_shortest_path(self, start, end, **kwargs):
came_from, cost_so_far = {}, {}
explorer = []
heapify(explorer)
heappush(explorer, (0, start))
came_from[start] = None
cost_so_far[start] = 0
current = None
while len(explorer) > 0:
_, current = heappop(explorer)
if self.matches(current, end):
break
for nb in self.neighbors(current, **kwargs):
cost = nb.cost if hasattr(nb, "cost") else 1
new_cost = cost_so_far[current] + cost
if nb not in cost_so_far or new_cost < cost_so_far[nb]:
cost_so_far[nb] = new_cost
priority = new_cost + self.heuristic(end, nb, **kwargs)
heappush(explorer, (priority, nb))
came_from[nb] = current
# build path:
path = deque()
while current is not start:
path.appendleft(current)
current = came_from[current]
return path, cost_so_far
def neighbors(self, pos, **kwargs):
raise NotImplementedError()
def heuristic(self, a, b, **kwargs):
raise NotImplementedError()
def matches(self, a, b):
return a == b
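class _GridAStarAgentExample(AStarAgent):
    """Hypothetical 4-connected grid agent, included only as a usage sketch.

    Not part of the malmopy API: it just shows how neighbors(), heuristic()
    and matches() are expected to cooperate with _find_shortest_path().
    Positions are (x, y) tuples; walls is a set of blocked cells.
    """
    def __init__(self, walls=frozenset()):
        super(_GridAStarAgentExample, self).__init__('grid-example', nb_actions=4)
        self._walls = walls
    def act(self, new_state, reward, done, is_training=False):
        # BaseAgent hook; unused in this path-finding sketch.
        return 0
    def neighbors(self, pos, **kwargs):
        # 4-connected moves, excluding blocked cells.
        x, y = pos
        candidates = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
        return [c for c in candidates if c not in self._walls]
    def heuristic(self, a, b, **kwargs):
        # Manhattan distance is admissible on a 4-connected grid.
        return abs(a[0] - b[0]) + abs(a[1] - b[1])
# Example: path, costs = _GridAStarAgentExample()._find_shortest_path((0, 0), (2, 2))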
| domin1101/malmo-challenge | malmopy/agent/astar.py | Python | mit | 2,799 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
PcrasterMapstackVisualisationDialog
A QGIS plugin
PCRaster Mapstack visualisation
-------------------
begin : 2014-06-28
copyright : (C) 2014 by Leon
email : mugwizal@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4 import QtCore, QtGui
from ui_pcrastermapstackvisualisation import Ui_PcrasterMapstackVisualisation
# create the dialog for zoom to point
class PcrasterMapstackVisualisationDialog(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self, None, QtCore.Qt.WindowStaysOnTopHint)
# Set up the user interface from Designer.
self.ui = Ui_PcrasterMapstackVisualisation()
self.ui.setupUi(self)
| mugwizaleon/PCRasterMapstacks | pcrastermapstackvisualisationdialog.py | Python | apache-2.0 | 1,556 |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An instance of FSimGate that works naturally on Google's Sycamore chip"""
import numpy as np
import cirq
from cirq._doc import document
class SycamoreGate(cirq.FSimGate):
"""The Sycamore gate is a two-qubit gate equivalent to FSimGate(π/2, π/6).
The unitary of this gate is
[[1, 0, 0, 0],
[0, 0, -1j, 0],
[0, -1j, 0, 0],
[0, 0, 0, exp(- 1j * π/6)]]
This gate can be performed on the Google's Sycamore chip and
is close to the gates that were used to demonstrate quantum
supremacy used in this paper:
https://www.nature.com/articles/s41586-019-1666-5
"""
def __init__(self):
super().__init__(theta=np.pi / 2, phi=np.pi / 6)
def __repr__(self) -> str:
return 'cirq_google.SYC'
def __str__(self) -> str:
return 'SYC'
def _circuit_diagram_info_(self, args: cirq.CircuitDiagramInfoArgs):
return 'SYC', 'SYC'
def _json_dict_(self):
return cirq.obj_to_dict_helper(self, [])
SYC = SycamoreGate()
document(
SYC,
"""The Sycamore gate is a two-qubit gate equivalent to FSimGate(π/2, π/6).
The unitary of this gate is
[[1, 0, 0, 0],
[0, 0, -1j, 0],
[0, -1j, 0, 0],
[0, 0, 0, exp(- 1j * π/6)]]
This gate can be performed on the Google's Sycamore chip and
is close to the gates that were used to demonstrate quantum
supremacy used in this paper:
https://www.nature.com/articles/s41586-019-1666-5
""",
)
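# Optional self-check, included only as a sketch (not part of the module's
# public API): confirms that SYC's unitary matches the matrix quoted above.
def _syc_matches_documented_unitary() -> bool:
    expected = np.array(
        [
            [1, 0, 0, 0],
            [0, 0, -1j, 0],
            [0, -1j, 0, 0],
            [0, 0, 0, np.exp(-1j * np.pi / 6)],
        ]
    )
    return bool(np.allclose(cirq.unitary(SYC), expected))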
| quantumlib/Cirq | cirq-google/cirq_google/ops/sycamore_gate.py | Python | apache-2.0 | 2,159 |
__author__= "barun"
__date__ = "$20 May, 2011 12:19:25 PM$"
## Defines a collection of metrics that can be used to analyze the performance
# of a network.
class Metrics(object):
## Calculate average throughput as: total_bytes_rcvd / duration.
#
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @param duration Time duration (in s) over which thruput is to be computed. Typically it is the simulation period.
    # @return Average throughput in Kbps; if duration is not positive, the span between the first and last packet timestamps is used instead
@staticmethod
def average_throughput(pkts_list, duration):
#print 'Average throughput'
avg_thruput = 0
start = -1
stop = 0
if pkts_list:
for record in pkts_list:
#print record
try:
avg_thruput += long(record[1])
if start == -1:
start = float(record[0])
stop = float(record[0])
#print record[0], record[1]
except IndexError:
pass
if duration <= 0:
duration = stop - start + 0.00000001
#print 'duration:', duration
avg_thruput = 8 * float(avg_thruput) / (1024 * duration) # Since pkt len is in bytes
return avg_thruput
@staticmethod
## Calculate instantaneous throughput as total bytes_rcvd at each time instant.
#
# <b>Logic</b>: To determine total bytes received at any instant, say, at t = 5, sum
# up sizes of all packets received in the interval 5.00000... to 5.99999...
#
# This procedure is repeated for all the time instances.
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @return A list in the form [(time_instance, total_Kbytes),]
def instantaneous_throughput(pkts_list=None):
#print 'Instantaneous throughput'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
#i_duration = long(duration)
if pkts_list:
for record in pkts_list:
try:
if start_time < 0: # This is the first record encountered
start_time = float(record[0])
#print start_time
this_instance = int(start_time)
#print this_instance
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
bytes_this_instance += long(record[1])
else:
result.append( (this_instance, bytes_this_instance * 8 / 1024) )
this_instance += 1
bytes_this_instance = long(record[1])
except IndexError:
pass
# Append the last record
result.append( (this_instance, bytes_this_instance * 8 / 1024) )
return result
@staticmethod
def cumulative_bytes_received(pkts_list=None):
#print 'Cumulative plot of bytes received'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
if pkts_list:
for record in pkts_list:
try:
if start_time < 0:
start_time = float(record[0])
this_instance = int(start_time)
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
bytes_this_instance += long(record[1])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
continue
else:
result.append( (this_instance, ( float(bytes_this_instance / 1024) ) * 8 ) )
this_instance += 1
#print cur_time
except IndexError:
pass
# Append the last record
result.append( (this_instance, ( float(bytes_this_instance / 1024) ) * 8 ) )
return result
@staticmethod
## Calculate throughput as total bytes_rcvd upto current instance of time / total duration upto current instance
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @return A list in the form [(time_instance, total_bytes),]
def cumulative_throughput(pkts_list=None):
#print 'Current throughput'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
if pkts_list:
for record in pkts_list:
try:
if start_time < 0:
start_time = float(record[0])
this_instance = int(start_time)
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
bytes_this_instance += long(record[1])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
continue
else:
result.append( (this_instance, ( float(bytes_this_instance / 1024) / ( this_instance - int(start_time) + 1) ) * 8 ) )
this_instance += 1
except IndexError:
pass
# Append the last record
result.append( (this_instance, ( float(bytes_this_instance / 1024) / ( this_instance - int(start_time) + 1) ) * 8 ) )
return result
## Return the end to end delay for each packet moving between a source and
# destination node, and identified by a flow ID. The delay is computed as
# the difference between sending time of the packet at source node and
# receiving time of the packet at the destination node.
# @param send_pkts_list An iterator object in the format [(seq_num, timestamp)]
# @param rcvd_pkts_list An iterator object in the format [(seq_num, timestamp)]
# @return A list in the form [(seq_num, delay),]
@staticmethod
def end2end_delay(send_pkts_list=None, rcvd_pkts_list=None):
#print 'End to end delay'
send_pkts = {}
rcvd_pkts = {}
for pkt in send_pkts_list:
send_pkts[pkt[0]] = float(pkt[1])
for pkt in rcvd_pkts_list:
rcvd_pkts[pkt[0]] = float(pkt[1])
pkt_delay = []
for seq_num in send_pkts:
if seq_num in rcvd_pkts:
if rcvd_pkts[seq_num] >= send_pkts[seq_num]:
delay = rcvd_pkts[seq_num] - send_pkts[seq_num]
pkt_delay.append( (seq_num, delay) )
# Sort pkt_delay in integer order of seq_num -- otherwise displayed
# graph would be garbage
pkt_delay = [ ( int(e[0]), e[1], ) for e in pkt_delay ]
pkt_delay.sort()
return pkt_delay
# @param send_pkts_list An iterator object in the format [seq_num]
@staticmethod
def packet_retransmissions(send_pkts_list=None):
#print 'Packet retransmissions'
send_pkts = {}
send_pkts_list = [ int(item) for item in send_pkts_list ]
for seq_num in send_pkts_list:
if seq_num in send_pkts:
send_pkts[seq_num] += 1
else:
send_pkts[seq_num] = 0
pkt_retransmits = []
for (seq_num, retransmits) in send_pkts.items():
if retransmits != 0:
pkt_retransmits.append( (seq_num, retransmits) )
pkt_retransmits.sort()
return pkt_retransmits
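# Worked example (made-up packet records, shown as a comment only): three
# 1024-byte packets received at t = 1.2 s, 1.8 s and 2.5 s.
#
#   pkts = [("1.2", "1024"), ("1.8", "1024"), ("2.5", "1024")]
#   Metrics.average_throughput(pkts, duration=3)
#   # -> 8.0 Kbps (3 * 1024 bytes * 8 bits / 1024 / 3 s)
#   Metrics.instantaneous_throughput(pkts)
#   # -> [(1, 16), (2, 8)]  (Kbits received during seconds 1 and 2)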
| barun-saha/ns2web | ns2trace/metrics.py | Python | gpl-2.0 | 8,308 |
import numpy as np
from openglider.vector.functions import normalize
class Plane(object):
def __init__(self, p0, v1, v2):
self.p0 = np.array(p0)
self.v1 = np.array(v1)
self.v2 = np.array(v2)
def point(self, x1, x2):
return self.p0 + x1 * self.v1 + x2 * self.v2
def cut(self, p1, p2):
"""
cut two points
        eq: p1 + x1*(p2-p1) = self.p0 + x2 * self.v1 + x3 * self.v2
- x1*(p2-p1) + x2 * self.v1 + x3 * self.v2 = p1 - self.p0
"""
lhs = np.matrix([p1-p2, self.v1, self.v2]).transpose()
rhs = p1 - self.p0
res = np.linalg.solve(lhs, rhs)
return res[0], res[1:], self.point(res[1], res[2])
def projection(self, point):
diff = point - self.p0
return [self.v1.dot(diff), self.v2.dot(diff)]
@property
def translation_matrix(self):
return np.matrix([self.v1, self.v2, self.normvector]).transpose()
def align(self, point_3d):
return self.p0 + self.translation_matrix.dot(point_3d)
def normalize(self):
self.v1 = normalize(self.v1)
self.v2 = normalize(self.v2 - self.v1 * self.v1.dot(self.v2))
@property
def normvector(self):
return np.cross(self.v1, self.v2)
    @normvector.setter
    def normvector(self, normvector):
        #assert isinstance(normvector, np.ndarray)
        # todo: write test
        normvector = np.array(normvector)
        # Start from an arbitrary vector and subtract its component along the
        # normal, so that v1 lies in the plane (degenerates if the normal is
        # parallel to [1, 1, 1]).
        self.v1 = np.array([1., 1., 1.])
        self.v1 -= normvector * self.v1.dot(normvector) / normvector.dot(normvector)
        self.v2 = np.cross(self.v1, normvector)
@classmethod
    def from_point_cloud(cls, points):
        # Least-squares plane through a point cloud: the normal comes from the
        # SVD of the homogeneous point matrix, and the centroid of the cloud
        # is used as the plane origin.
        points = np.array(points)
        p0 = points.mean(axis=0)
        mat = points.T
        mat = np.array([mat[0], mat[1], mat[2], np.ones(len(mat[0]))])
        u, d, v = np.linalg.svd(mat.T)
        n = v[-1][0:3]
        l_n = np.linalg.norm(n)
        n /= l_n
        x = np.cross(n, n[::-1])
        y = np.cross(n, x)
        return cls(p0, x, y)
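# Usage sketch with illustrative values (not part of the library): cutting the
# segment p1 -> p2 with the x-y plane through the origin.
#
#   plane = Plane([0., 0., 0.], [1., 0., 0.], [0., 1., 0.])
#   x1, (x2, x3), point = plane.cut(np.array([0., 0., -1.]), np.array([0., 0., 1.]))
#   # x1 == 0.5 (halfway along the segment), point == [0., 0., 0.]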
| hiaselhans/OpenGlider | openglider/vector/plane.py | Python | gpl-3.0 | 1,988 |
from django.db import models
from django.contrib.postgres.fields.jsonb import JSONField
class Supplier(models.Model):
name = models.CharField(max_length=50)
tax_id = models.CharField(max_length=10)
def __str__(self):
return self.name
class Bargain(models.Model):
sku = models.CharField(max_length=20)
price = models.DecimalField(max_digits=7, decimal_places=2)
supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE,
db_index=True)
info = JSONField(db_index=True) # This will create a btree index, not GIN
def __str__(self):
return self.sku
@property
def description(self):
return self.info.get('description', '')
@property
def sale_price(self):
return self.info.get('sale_price', '')
@property
def acquire_cost(self):
return self.info.get('acquire_cost', '')
@property
def color(self):
return self.info.get('color', '')
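# Note: as the comment on `info` says, db_index=True gives a btree index. If a
# GIN index is wanted for JSONB containment queries, one option (a sketch,
# assuming django.contrib.postgres is available) is to drop db_index=True and
# declare the index on the model's Meta instead:
#
#   from django.contrib.postgres.indexes import GinIndex
#
#   class Meta:
#       indexes = [GinIndex(fields=['info'])]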
| sebastian-code/jsonb-test | jsonb/emporium/models.py | Python | mit | 986 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import six
from pyface.action.menu_manager import MenuManager
from pyface.tasks.traits_dock_pane import TraitsDockPane
from traits.api import Int, Property, Button, Instance
from traits.has_traits import MetaHasTraits
from traitsui.api import (
View,
UItem,
VGroup,
InstanceEditor,
HGroup,
VSplit,
Handler,
TabularEditor,
TreeEditor,
)
from traitsui.menu import Action
from traitsui.tabular_adapter import TabularAdapter
from traitsui.tree_node import TreeNode
from uncertainties import nominal_value, std_dev
from pychron.core.configurable_tabular_adapter import ConfigurableMixin
from pychron.core.helpers.color_generators import colornames
from pychron.core.helpers.formatting import floatfmt
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.qt.tree_editor import PipelineEditor
from pychron.core.ui.table_configurer import TableConfigurer
from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.browser.view import PaneBrowserView
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.pipeline.engine import Pipeline, PipelineGroup, NodeGroup
from pychron.pipeline.nodes import FindReferencesNode
from pychron.pipeline.nodes.base import BaseNode
from pychron.pipeline.nodes.data import DataNode, InterpretedAgeNode
from pychron.pipeline.nodes.figure import IdeogramNode, SpectrumNode, SeriesNode
from pychron.pipeline.nodes.filter import FilterNode, MSWDFilterNode
from pychron.pipeline.nodes.find import FindFluxMonitorsNode
from pychron.pipeline.nodes.fit import (
FitIsotopeEvolutionNode,
FitBlanksNode,
FitICFactorNode,
FitFluxNode,
)
from pychron.pipeline.nodes.grouping import GroupingNode, SubGroupingNode
from pychron.pipeline.nodes.persist import PDFNode, DVCPersistNode
from pychron.pipeline.nodes.review import ReviewNode
from pychron.pipeline.tasks.tree_node import (
SeriesTreeNode,
PDFTreeNode,
GroupingTreeNode,
SpectrumTreeNode,
IdeogramTreeNode,
FilterTreeNode,
DataTreeNode,
DBSaveTreeNode,
FindTreeNode,
FitTreeNode,
PipelineTreeNode,
ReviewTreeNode,
PipelineGroupTreeNode,
NodeGroupTreeNode,
)
from pychron.pipeline.template import (
PipelineTemplate,
PipelineTemplateGroup,
PipelineTemplateRoot,
)
from pychron.pychron_constants import PLUSMINUS_ONE_SIGMA, LIGHT_RED, LIGHT_YELLOW
class TemplateTreeNode(TreeNode):
def get_icon(self, obj, is_expanded):
icon = obj.icon
if not icon:
icon = super(TemplateTreeNode, self).get_icon(obj, is_expanded)
return icon
def node_adder(name):
def wrapper(obj, info, o):
# print name, info.object
f = getattr(info.object, name)
f(o)
return wrapper
class PipelineHandlerMeta(MetaHasTraits):
def __new__(cls, *args, **kwargs):
klass = MetaHasTraits.__new__(cls, *args, **kwargs)
for t in (
"review",
"pdf_figure",
"iso_evo_persist",
"data",
"filter",
"mswd_filter",
"ideogram",
"spectrum",
"series",
"isotope_evolution",
"blanks",
"detector_ic",
"flux",
"find_blanks",
"find_airs",
"icfactor",
"push",
"audit",
"inverse_isochron",
"grouping",
"graph_grouping",
"subgrouping",
"set_interpreted_age",
"interpreted_ages",
):
name = "add_{}".format(t)
setattr(klass, name, node_adder(name))
for c in ("isotope_evolution", "blanks", "ideogram", "spectrum", "icfactors"):
name = "chain_{}".format(c)
setattr(klass, name, node_adder(name))
return klass
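# PipelineHandlerMeta stamps handler methods onto the class at creation time:
# for each node type t (e.g. "ideogram") it attaches an add_<t>(self, info, obj)
# method via node_adder, whose body simply forwards to info.object.add_<t>(obj);
# likewise chain_<c> for the chained nodes. This is why the menu Actions below
# can reference e.g. action="add_ideogram" even though no such method is
# written out by hand.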
class PipelineHandler(six.with_metaclass(PipelineHandlerMeta, Handler)):
def save_template(self, info, obj):
info.object.save_pipeline_template()
def review_node(self, info, obj):
info.object.review_node(obj)
def delete_node(self, info, obj):
info.object.remove_node(obj)
def enable(self, info, obj):
self._toggle_enable(info, obj, True)
def disable(self, info, obj):
self._toggle_enable(info, obj, False)
def enable_permanent(self, info, obj):
self._toggle_permanent(info, obj, True)
def disable_permanent(self, info, obj):
self._toggle_permanent(info, obj, False)
def toggle_skip_configure(self, info, obj):
obj.skip_configure = not obj.skip_configure
info.object.update_needed = True
def configure(self, info, obj):
info.object.configure(obj)
def move_up(self, info, obj):
info.object.pipeline.move_up(obj)
info.object.selected = obj
def move_down(self, info, obj):
info.object.pipeline.move_down(obj)
info.object.selected = obj
def _toggle_permanent(self, info, obj, state):
info.object.set_review_permanent(state)
self._toggle_enable(info, obj, state)
def _toggle_enable(self, info, obj, state):
obj.enabled = state
info.object.refresh_all_needed = True
info.object.update_needed = True
class PipelinePane(TraitsDockPane):
name = "Pipeline"
id = "pychron.pipeline.pane"
def traits_view(self):
def enable_disable_menu_factory():
return MenuManager(
Action(
name="Enable", action="enable", visible_when="not object.enabled"
),
Action(name="Disable", action="disable", visible_when="object.enabled"),
Action(
name="Enable Permanent",
action="enable_permanent",
visible_when="not object.enabled",
),
Action(
name="Disable Permanent",
action="disable_permanent",
visible_when="object.enabled",
),
name="Enable/Disable",
)
def menu_factory(*actions):
return MenuManager(
Action(name="Configure", action="configure"),
Action(
name="Enable Auto Configure",
action="toggle_skip_configure",
visible_when="object.skip_configure",
),
Action(
name="Disable Auto Configure",
action="toggle_skip_configure",
visible_when="not object.skip_configure",
),
Action(name="Move Up", action="move_up"),
Action(name="Move Down", action="move_down"),
Action(name="Delete", action="delete_node"),
Action(name="Save Template", action="save_template"),
*actions
)
def add_menu_factory():
fig_menu = MenuManager(
Action(name="Add Inverse Isochron", action="add_inverse_isochron"),
Action(name="Add Ideogram", action="add_ideogram"),
Action(name="Add Spectrum", action="add_spectrum"),
Action(name="Add Series", action="add_series"),
name="Figure",
)
grp_menu = MenuManager(
Action(name="Add Grouping", action="add_grouping"),
Action(name="Add Graph Grouping", action="add_graph_grouping"),
Action(name="Add SubGrouping", action="add_subgrouping"),
name="Grouping",
)
filter_menu = MenuManager(
Action(name="Add Filter", action="add_filter"),
Action(name="Add MSWD Filter", action="add_mswd_filter"),
name="Filter",
)
return MenuManager(
Action(name="Add Unknowns", action="add_data"),
Action(name="Add Interpreted Ages", action="add_interpreted_ages"),
grp_menu,
filter_menu,
fig_menu,
Action(name="Add Set IA", action="add_set_interpreted_age"),
Action(name="Add Review", action="add_review"),
Action(name="Add Audit", action="add_audit"),
Action(name="Add Push"),
name="Add",
)
def fit_menu_factory():
return MenuManager(
Action(name="Isotope Evolution", action="add_isotope_evolution"),
Action(name="Blanks", action="add_blanks"),
Action(name="IC Factor", action="add_icfactor"),
Action(name="Detector IC", enabled=False, action="add_detector_ic"),
Action(name="Flux", enabled=False, action="add_flux"),
name="Fit",
)
def save_menu_factory():
return MenuManager(
Action(name="Save PDF Figure", action="add_pdf_figure"),
Action(name="Save Iso Evo", action="add_iso_evo_persist"),
Action(name="Save Blanks", action="add_blanks_persist"),
Action(name="Save ICFactor", action="add_icfactor_persist"),
name="Save",
)
def find_menu_factory():
return MenuManager(
Action(name="Blanks", action="add_find_blanks"),
Action(name="Airs", action="add_find_airs"),
name="Find",
)
def chain_menu_factory():
return MenuManager(
Action(name="Chain Ideogram", action="chain_ideogram"),
Action(
name="Chain Isotope Evolution", action="chain_isotope_evolution"
),
Action(name="Chain Spectrum", action="chain_spectrum"),
Action(name="Chain Blanks", action="chain_blanks"),
Action(name="Chain ICFactors", action="chain_icfactors"),
name="Chain",
)
# ------------------------------------------------
def data_menu_factory():
return menu_factory(
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
chain_menu_factory(),
find_menu_factory(),
)
def filter_menu_factory():
return menu_factory(
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
chain_menu_factory(),
)
def figure_menu_factory():
return menu_factory(
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
chain_menu_factory(),
save_menu_factory(),
)
def ffind_menu_factory():
return menu_factory(
Action(name="Review", action="review_node"),
enable_disable_menu_factory(),
add_menu_factory(),
fit_menu_factory(),
)
nodes = [
PipelineGroupTreeNode(
node_for=[PipelineGroup], children="pipelines", auto_open=True
),
PipelineTreeNode(
node_for=[Pipeline],
children="nodes",
icon_open="",
label="name",
auto_open=True,
),
NodeGroupTreeNode(
node_for=[NodeGroup], children="nodes", auto_open=True, label="name"
),
DataTreeNode(
node_for=[DataNode, InterpretedAgeNode], menu=data_menu_factory()
),
FilterTreeNode(
node_for=[FilterNode, MSWDFilterNode], menu=filter_menu_factory()
),
IdeogramTreeNode(node_for=[IdeogramNode], menu=figure_menu_factory()),
SpectrumTreeNode(node_for=[SpectrumNode], menu=figure_menu_factory()),
SeriesTreeNode(node_for=[SeriesNode], menu=figure_menu_factory()),
PDFTreeNode(node_for=[PDFNode], menu=menu_factory()),
GroupingTreeNode(
node_for=[GroupingNode, SubGroupingNode], menu=data_menu_factory()
),
DBSaveTreeNode(node_for=[DVCPersistNode], menu=data_menu_factory()),
FindTreeNode(
node_for=[FindReferencesNode, FindFluxMonitorsNode],
menu=ffind_menu_factory(),
),
FitTreeNode(
node_for=[
FitIsotopeEvolutionNode,
FitICFactorNode,
FitBlanksNode,
FitFluxNode,
],
menu=ffind_menu_factory(),
),
ReviewTreeNode(node_for=[ReviewNode], menu=enable_disable_menu_factory()),
PipelineTreeNode(node_for=[BaseNode], label="name"),
]
editor = PipelineEditor(
nodes=nodes,
editable=False,
selected="selected",
dclick="dclicked",
hide_root=True,
lines_mode="off",
show_disabled=True,
refresh_all_icons="refresh_all_needed",
update="update_needed",
)
tnodes = [
TreeNode(node_for=[PipelineTemplateRoot], children="groups"),
TemplateTreeNode(
node_for=[PipelineTemplateGroup], label="name", children="templates"
),
TemplateTreeNode(
node_for=[
PipelineTemplate,
],
label="name",
),
]
teditor = TreeEditor(
nodes=tnodes,
editable=False,
selected="selected_pipeline_template",
dclick="dclicked_pipeline_template",
hide_root=True,
lines_mode="off",
)
v = View(
VSplit(
UItem("pipeline_template_root", editor=teditor),
VGroup(
HGroup(
icon_button_editor(
"run_needed", "start", visible_when="run_enabled"
),
icon_button_editor(
"run_needed", "edit-redo-3", visible_when="resume_enabled"
),
icon_button_editor("add_pipeline", "add"),
),
UItem("pipeline_group", editor=editor),
),
),
handler=PipelineHandler(),
)
return v
class BaseAnalysesAdapter(TabularAdapter, ConfigurableMixin):
font = "arial 10"
rundate_text = Property
record_id_width = Int(80)
tag_width = Int(50)
sample_width = Int(80)
def _get_rundate_text(self):
try:
r = self.item.rundate.strftime("%m-%d-%Y %H:%M")
except AttributeError:
r = ""
return r
def get_bg_color(self, obj, trait, row, column=0):
if self.item.tag == "invalid":
c = "#C9C5C5"
elif self.item.is_omitted():
c = "#FAC0C0"
else:
c = super(BaseAnalysesAdapter, self).get_bg_color(obj, trait, row, column)
return c
class UnknownsAdapter(BaseAnalysesAdapter):
columns = [
("Run ID", "record_id"),
("Sample", "sample"),
("Age", "age"),
("Comment", "comment"),
("Tag", "tag"),
("GroupID", "group_id"),
]
all_columns = [
("RunDate", "rundate"),
("Run ID", "record_id"),
("Aliquot", "aliquot"),
("Step", "step"),
("UUID", "display_uuid"),
("Sample", "sample"),
("Project", "project"),
("RepositoryID", "repository_identifier"),
("Age", "age"),
("Age {}".format(PLUSMINUS_ONE_SIGMA), "age_error"),
("F", "f"),
("F {}".format(PLUSMINUS_ONE_SIGMA), "f_error"),
("Saved J", "j"),
("Saved J {}".format(PLUSMINUS_ONE_SIGMA), "j_error"),
("Model J", "model_j"),
("Model J {}".format(PLUSMINUS_ONE_SIGMA), "model_j_error"),
("Model J Kind", "model_j_kind"),
("Comment", "comment"),
("Tag", "tag"),
("GroupID", "group_id"),
("GraphID", "graph_id"),
]
age_width = Int(70)
error_width = Int(60)
graph_id_width = Int(30)
age_text = Property
age_error_text = Property
j_error_text = Property
j_text = Property
f_error_text = Property
f_text = Property
model_j_error_text = Property
model_j_text = Property
def __init__(self, *args, **kw):
super(UnknownsAdapter, self).__init__(*args, **kw)
# self._ncolors = len(colornames)
self.set_colors(colornames)
def set_colors(self, colors):
self._colors = colors
self._ncolors = len(colors)
def get_menu(self, obj, trait, row, column):
grp = MenuManager(
Action(name="Group Selected", action="unknowns_group_by_selected"),
Action(name="Aux Group Selected", action="unknowns_aux_group_by_selected"),
Action(name="Group by Sample", action="unknowns_group_by_sample"),
Action(name="Group by Aliquot", action="unknowns_group_by_aliquot"),
Action(name="Group by Identifier", action="unknowns_group_by_identifier"),
Action(name="Clear Group", action="unknowns_clear_grouping"),
Action(name="Clear All Group", action="unknowns_clear_all_grouping"),
name="Plot Grouping",
)
return MenuManager(
Action(name="Recall", action="recall_unknowns"),
Action(
name="Graph Group Selected", action="unknowns_graph_group_by_selected"
),
Action(name="Save Analysis Group", action="save_analysis_group"),
Action(name="Toggle Status", action="unknowns_toggle_status"),
Action(name="Configure", action="configure_unknowns"),
Action(name="Play Video...", action="play_analysis_video"),
grp,
)
def _get_f_text(self):
r = floatfmt(self.item.f, n=4)
return r
def _get_f_error_text(self):
r = floatfmt(self.item.f_err, n=4)
return r
def _get_j_text(self):
r = floatfmt(nominal_value(self.item.j), n=8)
return r
def _get_j_error_text(self):
r = floatfmt(std_dev(self.item.j), n=8)
return r
def _get_model_j_text(self):
r = ""
if self.item.modeled_j:
r = floatfmt(nominal_value(self.item.modeled_j), n=8)
return r
def _get_model_j_error_text(self):
r = ""
if self.item.modeled_j:
r = floatfmt(std_dev(self.item.modeled_j), n=8)
return r
def _get_age_text(self):
r = floatfmt(nominal_value(self.item.uage), n=3)
return r
def _get_age_error_text(self):
r = floatfmt(std_dev(self.item.uage), n=4)
return r
def get_text_color(self, obj, trait, row, column=0):
color = "black"
item = getattr(obj, trait)[row]
gid = item.group_id or item.aux_id
cid = gid % self._ncolors if self._ncolors else 0
try:
color = self._colors[cid]
except IndexError:
pass
return color
class ReferencesAdapter(BaseAnalysesAdapter):
columns = [("Run ID", "record_id"), ("Comment", "comment")]
all_columns = [
("RunDate", "rundate"),
("Run ID", "record_id"),
("Aliquot", "aliquot"),
("UUID", "display_uuid"),
("Sample", "sample"),
("Project", "project"),
("RepositoryID", "repository_identifier"),
("Comment", "comment"),
("Tag", "tag"),
]
def get_menu(self, object, trait, row, column):
return MenuManager(
Action(name="Recall", action="recall_references"),
Action(name="Configure", action="configure_references"),
)
class AnalysesPaneHandler(Handler):
def unknowns_group_by_sample(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_group_by("sample")
def unknowns_group_by_identifier(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_group_by("identifier")
def unknowns_group_by_aliquot(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_group_by("aliquot")
def unknowns_graph_group_by_selected(self, info, obj):
obj = info.ui.context["object"]
obj.group_selected("graph_id")
def unknowns_group_by_selected(self, info, obj):
obj = info.ui.context["object"]
obj.group_selected("group_id")
def unknowns_aux_group_by_selected(self, info, obj):
obj = info.ui.context["object"]
obj.group_selected("aux_id")
def unknowns_clear_grouping(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_clear_grouping()
def unknowns_clear_all_grouping(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_clear_all_grouping()
def unknowns_toggle_status(self, info, obj):
obj = info.ui.context["object"]
obj.unknowns_toggle_status()
def save_analysis_group(self, info, obj):
obj = info.ui.context["object"]
obj.save_analysis_group()
def play_analysis_video(self, info, obj):
obj = info.ui.context["object"]
obj.play_analysis_video()
def recall_unknowns(self, info, obj):
obj = info.ui.context["object"]
obj.recall_unknowns()
def recall_references(self, info, obj):
obj = info.ui.context["object"]
obj.recall_references()
def configure_unknowns(self, info, obj):
pane = info.ui.context["pane"]
pane.configure_unknowns()
def configure_references(self, info, obj):
pane = info.ui.context["pane"]
pane.configure_references()
class UnknownsTableConfigurer(TableConfigurer):
id = "unknowns_pane"
class ReferencesTableConfigurer(TableConfigurer):
id = "references_pane"
class AnalysesPane(TraitsDockPane):
name = "Analyses"
id = "pychron.pipeline.analyses"
unknowns_adapter = Instance(UnknownsAdapter)
unknowns_table_configurer = Instance(UnknownsTableConfigurer, ())
references_adapter = Instance(ReferencesAdapter)
references_table_configurer = Instance(ReferencesTableConfigurer, ())
def configure_unknowns(self):
self.unknowns_table_configurer.edit_traits()
def configure_references(self):
self.references_table_configurer.edit_traits()
def _unknowns_adapter_default(self):
a = UnknownsAdapter()
self.unknowns_table_configurer.set_adapter(a)
return a
def _references_adapter_default(self):
a = ReferencesAdapter()
self.references_table_configurer.set_adapter(a)
return a
def traits_view(self):
v = View(
VGroup(
UItem(
"object.selected.unknowns",
width=200,
editor=TabularEditor(
adapter=self.unknowns_adapter,
update="refresh_table_needed",
multi_select=True,
column_clicked="object.selected.column_clicked",
# drag_external=True,
# drop_factory=self.model.drop_factory,
dclicked="dclicked_unknowns",
selected="selected_unknowns",
operations=["delete"],
),
),
UItem(
"object.selected.references",
visible_when="object.selected.references",
editor=TabularEditor(
adapter=self.references_adapter,
update="refresh_table_needed",
# drag_external=True,
multi_select=True,
dclicked="dclicked_references",
selected="selected_references",
operations=["delete"],
),
),
),
handler=AnalysesPaneHandler(),
)
return v
class RepositoryTabularAdapter(TabularAdapter):
columns = [("Name", "name"), ("Ahead", "ahead"), ("Behind", "behind")]
def get_menu(self, obj, trait, row, column):
return MenuManager(
Action(name="Refresh Status", action="refresh_repository_status"),
Action(name="Get Changes", action="pull"),
Action(name="Share Changes", action="push"),
Action(name="Delete Local Changes", action="delete_local_changes"),
)
def get_bg_color(self, obj, trait, row, column=0):
if self.item.behind:
c = LIGHT_RED
elif self.item.ahead:
c = LIGHT_YELLOW
else:
c = "white"
return c
class RepositoryPaneHandler(Handler):
def refresh_repository_status(self, info, obj):
obj.refresh_repository_status()
def pull(self, info, obj):
obj.pull()
def push(self, info, obj):
obj.push()
def delete_local_changes(self, info, obj):
obj.delete_local_changes()
obj.refresh_repository_status()
class RepositoryPane(TraitsDockPane):
name = "Repositories"
id = "pychron.pipeline.repository"
def traits_view(self):
v = View(
UItem(
"object.repositories",
editor=myTabularEditor(
adapter=RepositoryTabularAdapter(),
editable=False,
multi_select=True,
refresh="object.refresh_needed",
selected="object.selected_repositories",
),
),
handler=RepositoryPaneHandler(),
)
return v
class EditorOptionsPane(TraitsDockPane):
name = "Editor Options"
id = "pychron.pipeline.editor_options"
def traits_view(self):
v = View(
UItem(
"object.active_editor_options", style="custom", editor=InstanceEditor()
)
)
return v
class BrowserPane(TraitsDockPane, PaneBrowserView):
id = "pychron.browser.pane"
name = "Analysis Selection"
class SearcherPane(TraitsDockPane):
name = "Search"
id = "pychron.browser.searcher.pane"
add_search_entry_button = Button
def _add_search_entry_button_fired(self):
self.model.add_search_entry()
def traits_view(self):
v = View(
VGroup(
HGroup(
UItem("search_entry"),
UItem(
"search_entry",
editor=myEnumEditor(name="search_entries"),
width=-35,
),
icon_button_editor("pane.add_search_entry_button", "add"),
),
UItem(
"object.table.analyses",
editor=myTabularEditor(
adapter=self.model.table.tabular_adapter,
operations=["move", "delete"],
column_clicked="object.table.column_clicked",
refresh="object.table.refresh_needed",
selected="object.table.selected",
dclicked="object.table.dclicked",
),
),
)
)
return v
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/pipeline/tasks/panes.py | Python | apache-2.0 | 28,668 |
import spirit.spiritlib as spiritlib
import ctypes
### Load Library
_spirit = spiritlib.LoadSpiritLibrary()
### Read an image from disk
_Image_Read = _spirit.IO_Image_Read
_Image_Read.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Image_Read.restype = None
def Image_Read(p_state, filename, fileformat=0, idx_image=-1, idx_chain=-1):
spiritlib.WrapFunction(_Image_Read, [p_state, ctypes.c_char_p(filename.encode('utf-8')), fileformat, idx_image, idx_chain])
# _Image_Read(p_state, ctypes.c_char_p(filename), fileformat, idx_image, idx_chain)
### Write an image to disk
_Image_Write = _spirit.IO_Image_Write
_Image_Write.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Image_Write.restype = None
def Image_Write(p_state, filename, fileformat=0, idx_image=-1, idx_chain=-1):
spiritlib.WrapFunction(_Image_Write, [p_state, ctypes.c_char_p(filename.encode('utf-8')), fileformat, idx_image, idx_chain])
# _Image_Write(p_state, ctypes.c_char_p(filename), fileformat, idx_image, idx_chain)
### Append an image to an existing file
_Image_Append = _spirit.IO_Image_Append
_Image_Append.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Image_Append.restype = None
def Image_Append(p_state, filename, iteration=0, fileformat=0, idx_image=-1, idx_chain=-1):
spiritlib.WrapFunction(_Image_Append, [p_state, ctypes.c_char_p(filename.encode('utf-8')), iteration, fileformat, idx_image, idx_chain])
# _Image_Append(p_state, ctypes.c_char_p(filename), iteration, fileformat, idx_image, idx_chain)
### Read a chain of images from disk
_Chain_Read = _spirit.IO_Chain_Read
_Chain_Read.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Chain_Read.restype = None
def Chain_Read(p_state, filename, idx_image=-1, idx_chain=-1):
spiritlib.WrapFunction(_Chain_Read, [p_state, ctypes.c_char_p(filename.encode('utf-8')), idx_image, idx_chain])
# _Chain_Read(p_state, ctypes.c_char_p(filename), idx_image, idx_chain)
### Write a chain of images to disk
_Chain_Write = _spirit.IO_Chain_Write
_Chain_Write.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_Chain_Write.restype = None
def Chain_Write(p_state, filename, idx_image=-1, idx_chain=-1):
spiritlib.WrapFunction(_Chain_Write, [p_state, ctypes.c_char_p(filename.encode('utf-8')), idx_image, idx_chain])
# _Chain_Write(p_state, ctypes.c_char_p(filename), idx_image, idx_chain)
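### Illustrative usage sketch (not part of the original module). It assumes the
### Spirit Python API exposes a `spirit.state.State` context manager that yields
### `p_state`; treat the exact names and file formats here as assumptions:
#
#   from spirit import state, io
#
#   with state.State("input.cfg") as p_state:
#       io.Image_Read(p_state, "initial_spins.ovf")
#       # ... run a solver here ...
#       io.Image_Write(p_state, "final_spins.ovf")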
| Disselkamp/spirit | core/python/spirit/io.py | Python | mit | 2,661 |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""Setup for YDK
"""
from os import path
from setuptools import setup, find_packages
HERE = path.abspath(path.dirname(__file__))
INSTALL_REQUIREMENTS = ['ydk>=0.6.1']
NMSP_PKG_NAME = "$PACKAGE$"
NMSP_PKG_VERSION = "$VERSION$"
NMSP_PKG_DEPENDENCIES = ['$DEPENDENCY$']
# Extend the requirements only when the $DEPENDENCY$ placeholder has been substituted
if NMSP_PKG_DEPENDENCIES != ['$DEPENDENCY$']:
INSTALL_REQUIREMENTS.extend(NMSP_PKG_DEPENDENCIES)
NMSP_PACKAGES = ['ydk', 'ydk.models']
YDK_PACKAGES = find_packages(exclude=['contrib', 'docs*', 'tests*',
'ncclient', 'samples'])
DESCRIPTION = "$DESCRIPTION$"
LONG_DESCRIPTION = "$LONG_DESCRIPTION$"
setup(
name=NMSP_PKG_NAME,
version=NMSP_PKG_VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='https://github.com/CiscoDevNet/ydk-py',
author='Cisco Systems',
author_email='yang-dk@cisco.com',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: C++'
],
keywords='yang, C++11, python bindings',
packages=YDK_PACKAGES,
namespace_packages=NMSP_PACKAGES,
install_requires=INSTALL_REQUIREMENTS,
include_package_data=True
)
| psykokwak4/ydk-gen | sdk/python/packages/setup.py | Python | apache-2.0 | 2,597 |
def rm_dup(s):
    # Count how many occurrences of each character remain.
    d = {}
    for i in s:
        d[i] = d.get(i, 0) + 1
    in_stk, stk = set(), []
    for i in s:
        # Consume this occurrence regardless of whether it enters the stack,
        # so d[x] always reflects how many x's are still ahead.
        d[i] -= 1
        if i not in in_stk:
            # Pop larger characters that will appear again later.
            while stk and stk[-1] > i and d[stk[-1]]:
                in_stk.remove(stk.pop())
            stk.append(i)
            in_stk.add(i)
    return ''.join(stk)
if __name__ == '__main__':
s = 'cbacdcbc'
print(rm_dup(s))
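    # Illustrative checks (a sketch added for clarity; expected values follow
    # the usual "smallest subsequence containing each letter once" semantics):
    assert rm_dup('bcabc') == 'abc'
    assert rm_dup('bba') == 'ba'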
| LeonardCohen/coding | py/remove_duplicate_letters.py | Python | gpl-2.0 | 447 |
from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_train_classifier(
training_data_path: InputPath('CSV'),
model_path: OutputPath('CatBoostModel'),
starting_model_path: InputPath('CatBoostModel') = None,
label_column: int = 0,
loss_function: str = 'Logloss',
num_iterations: int = 500,
learning_rate: float = None,
depth: int = 6,
random_seed: int = 0,
cat_features: list = None,
text_features: list = None,
additional_training_options: dict = {},
):
'''Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'Logloss'
num_iterations: Number of trees to add to the ensemble.
        learning_rate: Step size shrinkage used in update to prevent overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
text_features: A list of Text features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostClassifier
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <alexey.volkov@ark-kun.com>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostClassifier, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
text_features=text_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
if __name__ == '__main__':
catboost_train_classifier_op = create_component_from_func(
catboost_train_classifier,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "Alexey Volkov <alexey.volkov@ark-kun.com>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_classifier/from_CSV/component.yaml",
},
)
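    # Illustrative pipeline usage sketch (names such as `get_training_data_op`
    # are hypothetical placeholders for an upstream component producing a CSV):
    #
    #   import kfp.dsl as dsl
    #
    #   @dsl.pipeline(name='catboost-train-demo')
    #   def demo_pipeline():
    #       training_data = get_training_data_op()
    #       catboost_train_classifier_op(
    #           training_data=training_data.output,
    #           label_column=0,
    #           num_iterations=200,
    #       )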
| kubeflow/pipelines | components/contrib/CatBoost/Train_classifier/from_CSV/component.py | Python | apache-2.0 | 3,545 |
from distutils.core import setup
from sys import argv
import shutil
import py2exe
from os import path, getcwd, system
# get the name of this directory
name = path.basename(getcwd())
# seed the install command line with what we want
argv += ["py2exe"]
setup(
windows = [
{
"script": name + ".py",
"icon_resources": [(1, "media/RjzServer.ico")]
}
],
options = {"py2exe": {
"packages": ["mako.cache"],
}},
)
shutil.copytree("media", path.join("dist", "media"))
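# Illustrative invocation (sketch): running `python build-windows.py` is enough,
# since the "py2exe" command is appended to argv above; the built exe ends up in
# dist/ together with a copy of the media directory.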
| chr15m/rjzserver | build-windows.py | Python | lgpl-3.0 | 549 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Logging system
Log level
---------
======= ======
Level number
------- ------
DEBUG4 9
DEBUG3 8
DEBUG2 7
DEBUG1 6
DEBUG 5
INFO 4
NOTE 3
WARN 2
ERROR 1
QUIET 0
======= ======
Large value means more noise in the output file.
.. note::
Error and warning messages are written to stderr.
Each Logger object has its own output destination and verbose level. So
multiple Logger objects can be created to manage the message system without
affecting each other.
The methods provided by the Logger class are tied directly to the log level.
E.g. :func:`info` prints messages if the verbose level >= 4 (INFO):
>>> import sys
>>> from pyscf import lib
>>> log = lib.logger.Logger(sys.stdout, 4)
>>> log.info('info level')
info level
>>> log.verbose = 3
>>> log.info('info level')
>>> log.note('note level')
note level
timer
-----
Logger object provides timer method for timing. Set :attr:`TIMER_LEVEL` to
control at which level the timing information should be output. It is 5
(DEBUG) by default.
>>> import sys, time
>>> from pyscf import lib
>>> log = lib.logger.Logger(sys.stdout, 4)
>>> t0 = lib.logger.process_clock()
>>> log.timer('test', t0)
>>> lib.logger.TIMER_LEVEL = 4
>>> log.timer('test', t0)
CPU time for test 0.00 sec
'''
import sys
import time
if sys.version_info < (3, 0):
process_clock = time.clock
perf_counter = time.time
else:
process_clock = time.process_time
perf_counter = time.perf_counter
from pyscf.lib import parameters as param
import pyscf.__config__
DEBUG4 = param.VERBOSE_DEBUG + 4
DEBUG3 = param.VERBOSE_DEBUG + 3
DEBUG2 = param.VERBOSE_DEBUG + 2
DEBUG1 = param.VERBOSE_DEBUG + 1
DEBUG = param.VERBOSE_DEBUG
INFO = param.VERBOSE_INFO
NOTE = param.VERBOSE_NOTICE
NOTICE = NOTE
WARN = param.VERBOSE_WARN
WARNING = WARN
ERR = param.VERBOSE_ERR
ERROR = ERR
QUIET = param.VERBOSE_QUIET
CRIT = param.VERBOSE_CRIT
ALERT = param.VERBOSE_ALERT
PANIC = param.VERBOSE_PANIC
TIMER_LEVEL = getattr(pyscf.__config__, 'TIMER_LEVEL', DEBUG)
sys.verbose = NOTE
def flush(rec, msg, *args):
rec.stdout.write(msg%args)
rec.stdout.write('\n')
rec.stdout.flush()
def log(rec, msg, *args):
if rec.verbose > QUIET:
flush(rec, msg, *args)
def error(rec, msg, *args):
if rec.verbose >= ERROR:
flush(rec, '\nERROR: '+msg+'\n', *args)
sys.stderr.write('ERROR: ' + (msg%args) + '\n')
def warn(rec, msg, *args):
if rec.verbose >= WARN:
flush(rec, '\nWARN: '+msg+'\n', *args)
if rec.stdout is not sys.stdout:
sys.stderr.write('WARN: ' + (msg%args) + '\n')
def info(rec, msg, *args):
if rec.verbose >= INFO:
flush(rec, msg, *args)
def note(rec, msg, *args):
if rec.verbose >= NOTICE:
flush(rec, msg, *args)
def debug(rec, msg, *args):
if rec.verbose >= DEBUG:
flush(rec, msg, *args)
def debug1(rec, msg, *args):
if rec.verbose >= DEBUG1:
flush(rec, msg, *args)
def debug2(rec, msg, *args):
if rec.verbose >= DEBUG2:
flush(rec, msg, *args)
def debug3(rec, msg, *args):
if rec.verbose >= DEBUG3:
flush(rec, msg, *args)
def debug4(rec, msg, *args):
if rec.verbose >= DEBUG4:
flush(rec, msg, *args)
def stdout(rec, msg, *args):
if rec.verbose >= DEBUG:
flush(rec, msg, *args)
sys.stdout.write('>>> %s\n' % msg)
def timer(rec, msg, cpu0=None, wall0=None):
if cpu0 is None:
cpu0 = rec._t0
if wall0:
rec._t0, rec._w0 = process_clock(), perf_counter()
if rec.verbose >= TIMER_LEVEL:
flush(rec, ' CPU time for %s %9.2f sec, wall time %9.2f sec'
% (msg, rec._t0-cpu0, rec._w0-wall0))
return rec._t0, rec._w0
else:
rec._t0 = process_clock()
if rec.verbose >= TIMER_LEVEL:
flush(rec, ' CPU time for %s %9.2f sec' % (msg, rec._t0-cpu0))
return rec._t0
def timer_debug1(rec, msg, cpu0=None, wall0=None):
if rec.verbose >= DEBUG1:
return timer(rec, msg, cpu0, wall0)
elif wall0:
rec._t0, rec._w0 = process_clock(), perf_counter()
return rec._t0, rec._w0
else:
rec._t0 = process_clock()
return rec._t0
class Logger(object):
'''
Attributes:
stdout : file object or sys.stdout
The file to dump output message.
verbose : int
Large value means more noise in the output file.
'''
def __init__(self, stdout=sys.stdout, verbose=NOTE):
self.stdout = stdout
self.verbose = verbose
self._t0 = process_clock()
self._w0 = perf_counter()
log = log
error = error
warn = warn
note = note
info = info
debug = debug
debug1 = debug1
debug2 = debug2
debug3 = debug3
debug4 = debug4
timer = timer
timer_debug1 = timer_debug1
def new_logger(rec=None, verbose=None):
'''Create and return a :class:`Logger` object
Args:
rec : An object which carries the attributes stdout and verbose
verbose : a Logger object, or integer or None
The verbose level. If verbose is a Logger object, the Logger
object is returned. If verbose is not specified (None),
rec.verbose will be used in the new Logger object.
'''
if isinstance(verbose, Logger):
log = verbose
elif isinstance(verbose, int):
if getattr(rec, 'stdout', None):
log = Logger(rec.stdout, verbose)
else:
log = Logger(sys.stdout, verbose)
else:
log = Logger(rec.stdout, rec.verbose)
return log
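if __name__ == '__main__':
    # Minimal usage sketch (added for illustration): exercise the Logger and
    # new_logger helpers defined above at different verbosity levels.
    log = Logger(sys.stdout, INFO)
    log.note('note-level message')
    log.info('info-level message')
    log.debug('debug message is suppressed at INFO verbosity')
    t0 = process_clock()
    log.timer('example step', t0)  # printed only when verbose >= TIMER_LEVEL
    debug_log = new_logger(verbose=DEBUG)
    debug_log.debug('debug message is visible at DEBUG verbosity')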
| sunqm/pyscf | pyscf/lib/logger.py | Python | apache-2.0 | 6,296 |
"""
Course API Views
"""
from completion.exceptions import UnavailableCompletionData
from completion.utilities import get_key_to_last_completed_block
from django.urls import reverse
from django.utils.translation import gettext as _
from edx_django_utils.cache import TieredCache
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from rest_framework import status
from rest_framework.exceptions import NotFound
from rest_framework.generics import RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.util.views import expose_header
from lms.djangoapps.edxnotes.helpers import is_feature_enabled
from lms.djangoapps.certificates.api import get_certificate_url
from lms.djangoapps.certificates.models import GeneratedCertificate
from lms.djangoapps.course_api.api import course_detail
from lms.djangoapps.course_goals.models import UserActivity
from lms.djangoapps.course_goals.api import get_course_goal
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.access_response import (
CoursewareMicrofrontendDisabledAccessError,
)
from lms.djangoapps.courseware.context_processor import user_timezone_locale_prefs
from lms.djangoapps.courseware.courses import check_course_access
from lms.djangoapps.courseware.masquerade import (
is_masquerading_as_specific_student,
setup_masquerade,
is_masquerading_as_non_audit_enrollment,
)
from lms.djangoapps.courseware.models import LastSeenCoursewareTimezone
from lms.djangoapps.courseware.module_render import get_module_by_usage_id
from lms.djangoapps.courseware.tabs import get_course_tab_list
from lms.djangoapps.courseware.toggles import (
courseware_legacy_is_visible,
courseware_mfe_is_visible,
course_exit_page_is_active,
)
from lms.djangoapps.courseware.views.views import get_cert_data
from lms.djangoapps.grades.api import CourseGradeFactory
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.agreements.api import get_integrity_signature
from openedx.core.djangoapps.agreements.toggles import is_integrity_signature_enabled as integrity_signature_toggle
from openedx.core.djangoapps.courseware_api.utils import get_celebrations_dict
from openedx.core.djangoapps.programs.utils import ProgramProgressMeter
from openedx.core.lib.api.authentication import BearerAuthenticationAllowInactiveUser
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
from openedx.core.lib.courses import get_course_by_id
from openedx.features.course_experience import DISPLAY_COURSE_SOCK_FLAG
from openedx.features.course_experience import ENABLE_COURSE_GOALS
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.course_duration_limits.access import get_access_expiration_data
from openedx.features.discounts.utils import generate_offer_data
from common.djangoapps.student.models import (
CourseEnrollment,
CourseEnrollmentCelebration,
LinkedInAddToProfileConfiguration
)
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.search import path_to_location # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.x_module import PUBLIC_VIEW, STUDENT_VIEW # lint-amnesty, pylint: disable=wrong-import-order
from .serializers import CourseInfoSerializer
from .utils import serialize_upgrade_info
class CoursewareMeta:
"""
Encapsulates courseware and enrollment metadata.
"""
def __init__(self, course_key, request, username=''):
self.request = request
self.overview = course_detail(
self.request,
username or self.request.user.username,
course_key,
)
# We must compute course load access *before* setting up masquerading,
# else course staff (who are not enrolled) will not be able view
# their course from the perspective of a learner.
self.load_access = check_course_access(
self.overview,
self.request.user,
'load',
check_if_enrolled=True,
check_if_authenticated=True,
)
self.original_user_is_staff = has_access(self.request.user, 'staff', self.overview).has_access
self.original_user_is_global_staff = self.request.user.is_staff
self.course_key = course_key
self.course = get_course_by_id(self.course_key)
self.course_masquerade, self.effective_user = setup_masquerade(
self.request,
course_key,
staff_access=self.original_user_is_staff,
)
self.request.user = self.effective_user
self.is_staff = has_access(self.effective_user, 'staff', self.overview).has_access
self.enrollment_object = CourseEnrollment.get_enrollment(self.effective_user, self.course_key,
select_related=['celebration', 'user__celebration'])
self.can_view_legacy_courseware = courseware_legacy_is_visible(
course_key=course_key,
is_global_staff=self.original_user_is_global_staff,
)
def __getattr__(self, name):
return getattr(self.overview, name)
def is_microfrontend_enabled_for_user(self):
"""
Can this user see the MFE for this course?
"""
return courseware_mfe_is_visible(
course_key=self.course_key,
is_global_staff=self.original_user_is_global_staff,
is_course_staff=self.original_user_is_staff
)
@property
def enrollment(self):
"""
Return enrollment information.
"""
if self.effective_user.is_anonymous or not self.enrollment_object:
mode = None
is_active = False
else:
mode = self.enrollment_object.mode
is_active = self.enrollment_object.is_active
return {'mode': mode, 'is_active': is_active}
@property
def access_expiration(self):
return get_access_expiration_data(self.effective_user, self.overview)
@property
def offer(self):
return generate_offer_data(self.effective_user, self.overview)
@property
def content_type_gating_enabled(self):
return ContentTypeGatingConfig.enabled_for_enrollment(
user=self.effective_user,
course_key=self.course_key,
)
@property
def can_show_upgrade_sock(self):
return DISPLAY_COURSE_SOCK_FLAG.is_enabled(self.course_key)
@property
def license(self):
return self.course.license
@property
def username(self):
return self.effective_user.username
@property
def course_access(self) -> dict:
"""
Can the user load this course in the learning micro-frontend?
Return a JSON-friendly access response.
"""
# Only check whether the MFE is enabled if the user would otherwise be allowed to see it
# This means that if the user was denied access, they'll see a meaningful message first if
# there is one.
if self.load_access and not self.is_microfrontend_enabled_for_user():
return CoursewareMicrofrontendDisabledAccessError().to_json()
return self.load_access.to_json()
@property
def tabs(self):
"""
Return course tab metadata.
"""
tabs = []
for priority, tab in enumerate(get_course_tab_list(self.effective_user, self.overview)):
title = tab.title or tab.get('name', '')
tabs.append({
'title': _(title), # pylint: disable=translation-of-non-string
'slug': tab.tab_id,
'priority': priority,
'type': tab.type,
'url': tab.link_func(self.overview, reverse),
})
return tabs
@property
def verified_mode(self):
"""
Return verified mode information, or None.
"""
return serialize_upgrade_info(self.effective_user, self.overview, self.enrollment_object)
@property
def notes(self):
"""
Return whether edxnotes is enabled and visible.
"""
return {
'enabled': is_feature_enabled(self.overview, self.effective_user),
'visible': self.overview.edxnotes_visibility,
}
@property
def celebrations(self):
"""
Returns a dict of celebrations that should be performed.
"""
browser_timezone = self.request.query_params.get('browser_timezone', None)
celebrations = get_celebrations_dict(self.effective_user, self.enrollment_object, self.course, browser_timezone)
return celebrations
@property
def course_goals(self):
"""
Returns a dict of course goals
"""
course_goals = {
'selected_goal': None,
'weekly_learning_goal_enabled': False,
}
user_is_enrolled = CourseEnrollment.is_enrolled(self.effective_user, self.course_key)
if (user_is_enrolled and ENABLE_COURSE_GOALS.is_enabled(self.course_key)):
course_goals['weekly_learning_goal_enabled'] = True
selected_goal = get_course_goal(self.effective_user, self.course_key)
if selected_goal:
course_goals['selected_goal'] = {
'days_per_week': selected_goal.days_per_week,
'subscribed_to_reminders': selected_goal.subscribed_to_reminders,
}
return course_goals
@property
def user_has_passing_grade(self):
""" Returns a boolean on if the effective_user has a passing grade in the course """
if not self.effective_user.is_anonymous:
user_grade = CourseGradeFactory().read(self.effective_user, self.course).percent
return user_grade >= self.course.lowest_passing_grade
return False
@property
def course_exit_page_is_active(self):
""" Returns a boolean on if the course exit page is active """
return course_exit_page_is_active(self.course_key)
@property
def certificate_data(self):
"""
Returns certificate data if the effective_user is enrolled.
Note: certificate data can be None depending on learner and/or course state.
"""
if self.enrollment_object:
return get_cert_data(self.effective_user, self.course, self.enrollment_object.mode)
@property
def verify_identity_url(self):
"""
Returns a String to the location to verify a learner's identity
Note: This might return an absolute URL (if the verification MFE is enabled) or a relative
URL. The serializer will make the relative URL absolute so any consumers can treat this
as a full URL.
"""
if self.enrollment_object and self.enrollment_object.mode in CourseMode.VERIFIED_MODES:
verification_status = IDVerificationService.user_status(self.effective_user)['status']
if verification_status == 'must_reverify':
return IDVerificationService.get_verify_location()
else:
return IDVerificationService.get_verify_location(self.course_key)
@property
def verification_status(self):
"""
Returns a String of the verification status of learner.
"""
if self.enrollment_object and self.enrollment_object.mode in CourseMode.VERIFIED_MODES:
return IDVerificationService.user_status(self.effective_user)['status']
        # I know this looks weird (and is), but this is just so it is in line with what the
# IDVerificationService.user_status method would return before a verification was created
return 'none'
@property
def linkedin_add_to_profile_url(self):
"""
Returns a URL to add a certificate to a LinkedIn profile (will autofill fields).
Requires LinkedIn sharing to be enabled, either via a site configuration or a
LinkedInAddToProfileConfiguration object being enabled.
"""
if self.effective_user.is_anonymous:
return
linkedin_config = LinkedInAddToProfileConfiguration.current()
if linkedin_config.is_enabled():
try:
user_certificate = GeneratedCertificate.eligible_certificates.get(
user=self.effective_user, course_id=self.course_key
)
except GeneratedCertificate.DoesNotExist:
return
cert_url = self.request.build_absolute_uri(
get_certificate_url(course_id=self.course_key, uuid=user_certificate.verify_uuid)
)
return linkedin_config.add_to_profile_url(
self.overview.display_name, user_certificate.mode, cert_url, certificate=user_certificate,
)
@property
def is_integrity_signature_enabled(self):
"""
Course waffle flag for the integrity signature feature.
"""
return integrity_signature_toggle(self.course_key)
@property
def user_needs_integrity_signature(self):
"""
Boolean describing whether the user needs to sign the integrity agreement for a course.
"""
integrity_signature_required = (
self.enrollment_object
# Master's enrollments are excluded here as honor code is handled separately
and self.enrollment_object.mode in CourseMode.CREDIT_MODES + CourseMode.CREDIT_ELIGIBLE_MODES
)
if not integrity_signature_required:
# Check masquerading as a non-audit enrollment
integrity_signature_required = is_masquerading_as_non_audit_enrollment(
self.effective_user,
self.course_key,
self.course_masquerade
)
if (
integrity_signature_toggle(self.course_key)
and integrity_signature_required
):
signature = get_integrity_signature(self.effective_user.username, str(self.course_key))
if not signature:
return True
return False
@property
def related_programs(self):
"""
Returns related program data if the effective_user is enrolled.
Note: related programs can be None depending on the course.
"""
if self.effective_user.is_anonymous:
return
meter = ProgramProgressMeter(self.request.site, self.effective_user)
inverted_programs = meter.invert_programs()
related_programs = inverted_programs.get(str(self.course_key))
if related_programs is None:
return
related_progress = meter.progress(programs=related_programs)
progress_by_program = {
progress['uuid']: progress for progress in related_progress
}
programs = [{
'progress': progress_by_program[program['uuid']],
'title': program['title'],
'slug': program['type_attrs']['slug'],
'url': program['detail_url'],
'uuid': program['uuid']
} for program in related_programs]
return programs
@property
def user_timezone(self):
"""Returns the user's timezone setting (may be None)"""
user_timezone_locale = user_timezone_locale_prefs(self.request)
return user_timezone_locale['user_timezone']
class CoursewareInformation(RetrieveAPIView):
"""
**Use Cases**
Request details for a course
**Example Requests**
GET /api/courseware/course/{course_key}
**Response Values**
Body consists of the following fields:
* access_expiration: An object detailing when access to this course will expire
* expiration_date: (str) When the access expires, in ISO 8601 notation
* masquerading_expired_course: (bool) Whether this course is expired for the masqueraded user
* upgrade_deadline: (str) Last chance to upgrade, in ISO 8601 notation (or None if can't upgrade anymore)
            * upgrade_url: (str) Upgrade link (or None if can't upgrade anymore)
* celebrations: An object detailing which celebrations to render
* first_section: (bool) If the first section celebration should render
Note: Also uses information from frontend so this value is not final
* streak_length_to_celebrate: (int) The streak length to celebrate for the learner
* streak_discount_enabled: (bool) If the frontend should render an upgrade discount for hitting the streak
* weekly_goal: (bool) If the weekly goal celebration should render
* course_goals:
* selected_goal:
* days_per_week: (int) The number of days the learner wants to learn per week
* subscribed_to_reminders: (bool) Whether the learner wants email reminders about their goal
* weekly_learning_goal_enabled: Flag indicating if this feature is enabled for this call
* effort: A textual description of the weekly hours of effort expected
in the course.
* end: Date the course ends, in ISO 8601 notation
* enrollment: Enrollment status of authenticated user
* mode: `audit`, `verified`, etc
* is_active: boolean
* enrollment_end: Date enrollment ends, in ISO 8601 notation
* enrollment_start: Date enrollment begins, in ISO 8601 notation
* id: A unique identifier of the course; a serialized representation
of the opaque key identifying the course.
* media: An object that contains named media items. Included here:
* course_image: An image to show for the course. Represented
as an object with the following fields:
* uri: The location of the image
* name: Name of the course
* number: Catalog number of the course
* offer: An object detailing upgrade discount information
* code: (str) Checkout code
* expiration_date: (str) Expiration of offer, in ISO 8601 notation
* original_price: (str) Full upgrade price without checkout code; includes currency symbol
* discounted_price: (str) Upgrade price with checkout code; includes currency symbol
* percentage: (int) Amount of discount
* upgrade_url: (str) Checkout URL
* org: Name of the organization that owns the course
* related_programs: A list of objects that contains program data related to the given course including:
* progress: An object containing program progress:
* complete: (int) Number of complete courses in the program (a course is completed if the user has
earned a certificate for any of the nested course runs)
* in_progress: (int) Number of courses in the program that are in progress (a course is in progress if
the user has enrolled in any of the nested course runs)
* not_started: (int) Number of courses in the program that have not been started
* slug: (str) The program type
* title: (str) The title of the program
* url: (str) The link to the program's landing page
* uuid: (str) A unique identifier of the program
* short_description: A textual description of the course
* start: Date the course begins, in ISO 8601 notation
* start_display: Readably formatted start of the course
* start_type: Hint describing how `start_display` is set. One of:
* `"string"`: manually set by the course author
* `"timestamp"`: generated from the `start` timestamp
* `"empty"`: no start date is specified
* pacing: Course pacing. Possible values: instructor, self
* tabs: Course tabs
* user_timezone: User's chosen timezone setting (or null for browser default)
* can_load_course: Whether the user can view the course (AccessResponse object)
* is_staff: Whether the effective user has staff access to the course
* original_user_is_staff: Whether the original user has staff access to the course
* can_view_legacy_courseware: Indicates whether the user is able to see the legacy courseware view
* user_has_passing_grade: Whether or not the effective user's grade is equal to or above the courses minimum
passing grade
* course_exit_page_is_active: Flag for the learning mfe on whether or not the course exit page should display
* certificate_data: data regarding the effective user's certificate for the given course
* verify_identity_url: URL for a learner to verify their identity. Only returned for learners enrolled in a
verified mode. Will update to reverify URL if necessary.
* linkedin_add_to_profile_url: URL to add the effective user's certificate to a LinkedIn Profile.
* user_needs_integrity_signature: Whether the user needs to sign the integrity agreement for the course
**Parameters:**
requested_fields (optional) comma separated list:
If set, then only those fields will be returned.
username (optional) username to masquerade as (if requesting user is staff)
**Returns**
* 200 on success with above fields.
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the course is not available or cannot be seen.
"""
authentication_classes = (
JwtAuthentication,
BearerAuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
serializer_class = CourseInfoSerializer
def set_last_seen_courseware_timezone(self, user):
"""
The timezone in the user's account is frequently not set.
This method sets a user's recent timezone that can be used as a fallback
"""
if not user.id:
return
cache_key = 'browser_timezone_{}'.format(str(user.id))
browser_timezone = self.request.query_params.get('browser_timezone', None)
cached_value = TieredCache.get_cached_response(cache_key)
if not cached_value.is_found:
if browser_timezone:
TieredCache.set_all_tiers(cache_key, str(browser_timezone), 86400) # Refresh the cache daily
LastSeenCoursewareTimezone.objects.update_or_create(
user=user,
defaults={'last_seen_courseware_timezone': browser_timezone},
)
def get_object(self):
"""
Return the requested course object, if the user has appropriate
permissions.
"""
original_user = self.request.user
if self.request.user.is_staff:
username = self.request.GET.get('username', '') or self.request.user.username
else:
username = self.request.user.username
course_key = CourseKey.from_string(self.kwargs['course_key_string'])
overview = CoursewareMeta(
course_key,
self.request,
username=username,
)
# Record course goals user activity for learning mfe courseware on web
UserActivity.record_user_activity(self.request.user, course_key)
# Record a user's browser timezone
self.set_last_seen_courseware_timezone(original_user)
return overview
def get_serializer_context(self):
"""
Return extra context to be used by the serializer class.
"""
context = super().get_serializer_context()
context['requested_fields'] = self.request.GET.get('requested_fields', None)
return context
def finalize_response(self, request, response, *args, **kwargs):
"""
Return the final response, exposing the 'Date' header for computing relative time to the dates in the data.
Important dates such as 'access_expiration' are enforced server-side based on correct time; client-side clocks
are frequently substantially far off which could lead to inaccurate messaging and incorrect expectations.
Therefore, any messaging about those dates should be based on the server time and preferably in relative terms
(time remaining); the 'Date' header is a straightforward and generalizable way for client-side code to get this
reference.
"""
response = super().finalize_response(request, response, *args, **kwargs)
# Adding this header should be moved to global middleware, not just this endpoint
return expose_header('Date', response)
class SequenceMetadata(DeveloperErrorViewMixin, APIView):
"""
**Use Cases**
Request details for a sequence/subsection
**Example Requests**
GET /api/courseware/sequence/{usage_key}
**Response Values**
Body consists of the following fields:
TODO
**Returns**
* 200 on success with above fields.
* 400 if an invalid parameter was sent.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the course/usage_key is not available or cannot be seen.
* 422 if the usage key is valid but does not have sequence metadata (like a unit or a problem)
"""
authentication_classes = (
JwtAuthentication,
SessionAuthenticationAllowInactiveUser,
)
def get(self, request, usage_key_string, *args, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Return response to a GET request.
"""
try:
usage_key = UsageKey.from_string(usage_key_string)
except InvalidKeyError as exc:
raise NotFound(f"Invalid usage key: '{usage_key_string}'.") from exc
_, request.user = setup_masquerade(
request,
usage_key.course_key,
staff_access=has_access(request.user, 'staff', usage_key.course_key),
reset_masquerade_data=True,
)
sequence, _ = get_module_by_usage_id(
self.request,
str(usage_key.course_key),
str(usage_key),
disable_staff_debug_info=True,
will_recheck_access=True)
if not hasattr(sequence, 'get_metadata'):
# Looks like we were asked for metadata on something that is not a sequence (or section).
return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY)
view = STUDENT_VIEW
if request.user.is_anonymous:
view = PUBLIC_VIEW
context = {'specific_masquerade': is_masquerading_as_specific_student(request.user, usage_key.course_key)}
return Response(sequence.get_metadata(view=view, context=context))
class Resume(DeveloperErrorViewMixin, APIView):
"""
**Use Cases**
Request the last completed block in a course
**Example Requests**
GET /api/courseware/resume/{course_key}
**Response Values**
Body consists of the following fields:
* block: the last completed block key
* section: the key to the section
* unit: the key to the unit
If no completion data is available, the keys will be null
**Returns**
* 200 on success with above fields.
* 400 if an invalid parameter was sent.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the course is not available or cannot be seen.
"""
authentication_classes = (
JwtAuthentication,
SessionAuthenticationAllowInactiveUser,
)
permission_classes = (IsAuthenticated,)
def get(self, request, course_key_string, *args, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Return response to a GET request.
"""
course_id = CourseKey.from_string(course_key_string)
resp = {
'block_id': None,
'section_id': None,
'unit_id': None,
}
try:
block_key = get_key_to_last_completed_block(request.user, course_id)
path = path_to_location(modulestore(), block_key, request, full_path=True)
resp['section_id'] = str(path[2])
resp['unit_id'] = str(path[3])
resp['block_id'] = str(block_key)
except (ItemNotFoundError, NoPathToItem, UnavailableCompletionData):
pass # leaving all the IDs as None indicates a redirect to the first unit in the course, as a backup
return Response(resp)
class Celebration(DeveloperErrorViewMixin, APIView):
"""
**Use Cases**
Marks a particular celebration as complete
**Example Requests**
POST /api/courseware/celebration/{course_key}
**Request Parameters**
Body consists of the following fields:
* first_section (bool): whether we should celebrate when a user finishes their first section of a course
* weekly_goal (bool): whether we should celebrate when a user hits their weekly learning goal in a course
**Returns**
* 200 or 201 or 202 on success with above fields.
* 400 if an invalid parameter was sent.
* 404 if the course is not available or cannot be seen.
"""
authentication_classes = (
JwtAuthentication,
BearerAuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
permission_classes = (IsAuthenticated,)
http_method_names = ['post']
def post(self, request, course_key_string, *args, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Handle a POST request.
"""
course_key = CourseKey.from_string(course_key_string)
# Check if we're masquerading as someone else. If so, we should just ignore this request.
_, user = setup_masquerade(
request,
course_key,
staff_access=has_access(request.user, 'staff', course_key),
reset_masquerade_data=True,
)
if user != request.user:
return Response(status=202) # "Accepted"
data = dict(request.data)
first_section = data.pop('first_section', None)
weekly_goal = data.pop('weekly_goal', None)
if data:
return Response(status=400) # there were parameters we didn't recognize
enrollment = CourseEnrollment.get_enrollment(request.user, course_key)
if not enrollment:
return Response(status=404)
defaults = {}
if first_section is not None:
defaults['celebrate_first_section'] = first_section
if weekly_goal is not None:
defaults['celebrate_weekly_goal'] = weekly_goal
if defaults:
_, created = CourseEnrollmentCelebration.objects.update_or_create(enrollment=enrollment, defaults=defaults)
return Response(status=201 if created else 200)
else:
return Response(status=200) # just silently allow it
| arbrandes/edx-platform | openedx/core/djangoapps/courseware_api/views.py | Python | agpl-3.0 | 31,995 |
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibConvert tool to convert bibliographic records from any format to any format."""
__revision__ = "$Id$"
import fileinput
import string
import os
import re
import sys
from time import strftime, localtime
#FIXME: pu
from invenio.config import CFG_OAI_ID_PREFIX
from invenio.legacy.search_engine import perform_request_search
from .registry import kb
### Matching records with database content
def parse_query_string(query_string):
"""Parse query string, e.g.:
Input: 245__a::REP(-, )::SHAPE::SUP(SPACE, )::MINL(4)::MAXL(8)::EXPW(PUNCT)::WORDS(4,L)::SHAPE::SUP(SPACE, )||700__a::MINL(2)::REP(COMMA,).
Output:[['245__a','REP(-,)','SHAPE','SUP(SPACE, )','MINL(4)','MAXL(8)','EXPW(PUNCT)','WORDS(4,L)','SHAPE','SUP(SPACE, )'],['700__a','MINL(2)','REP(COMMA,)']]
"""
query_string_out = []
query_string_out_in = []
query_string_split_1 = query_string.split('||')
for item_1 in query_string_split_1:
query_string_split_2 = item_1.split('::')
query_string_out_in = []
for item in query_string_split_2:
query_string_out_in.append(item)
query_string_out.append(query_string_out_in)
return query_string_out
def set_conv():
"""
bibconvert common settings
=======================
minimal length of output line = 1
maximal length of output line = 4096
"""
conv_setting = [
1,
4096
]
return conv_setting
def get_pars(fn):
"Read function and its parameters into list"
out = []
out.append(re.split('\(|\)', fn)[0])
out.append(re.split(',', re.split('\(|\)', fn)[1]))
return out
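# Illustrative examples (added sketch, not part of the original module):
#   get_pars("MINL(4)")    -> ['MINL', ['4']]
#   get_pars("WORDS(4,L)") -> ['WORDS', ['4', 'L']]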
def get_other_par(par, cfg):
"Get other parameter (par) from the configuration file (cfg)"
out = ""
other_parameters = {
'_QRYSTR_' : '_QRYSTR_---.*$',
'_MATCH_' : '_MATCH_---.*$',
'_RECSEP_' : '_RECSEP_---.*$',
'_EXTCFG_' : '_EXTCFG_---.*$',
'_SRCTPL_' : '_SRCTPL_---.*$',
'_DSTTPL_' : '_DSTTPL_---.*$',
'_RECHEAD_': '_RECHEAD_---.*$',
'_RECFOOT_': '_RECFOOT_---.*$',
'_HEAD_' : '_HEAD_---.*$',
'_FOOT_' : '_FOOT_---.*$',
'_EXT_' : '_EXT_---.*$',
'_SEP_' : '_SEP_---.*$',
'_COD_' : '_COD_---.*$',
'_FRK_' : '_FRK_---.*$',
'_NC_' : '_NC_---.*$',
'_MCH_' : '_MCH_---.*$',
'_UPL_' : '_UPL_---.*$',
'_AUTO_' : '_AUTO_---.*$'
}
parameters = other_parameters.keys()
for line in fileinput.input(cfg):
pattern = re.compile(other_parameters[par])
items = pattern.findall(line)
for item in items:
out = item.split('---')[1]
return out
def append_to_output_file(filename, output):
"bibconvert output file creation by output line"
try:
file = open(filename, 'a')
file.write(output)
file.close()
except IOError as e:
exit_on_error("Cannot write into %s" % filename)
return 1
def sub_keywd(out):
"bibconvert keywords literal substitution"
out = string.replace(out, "EOL", "\n")
out = string.replace(out, "_CR_", "\r")
out = string.replace(out, "_LF_", "\n")
out = string.replace(out, "\\", '\\')
out = string.replace(out, "\r", '\r')
out = string.replace(out, "BSLASH", '\\')
out = string.replace(out, "COMMA", ',')
out = string.replace(out, "LEFTB", '[')
out = string.replace(out, "RIGHTB", ']')
out = string.replace(out, "LEFTP", '(')
out = string.replace(out, "RIGHTP", ')')
return out
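# Illustrative examples (added sketch, not part of the original module):
#   sub_keywd("LEFTB245RIGHTB") -> '[245]'
#   sub_keywd("aCOMMAb")        -> 'a,b'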
def check_split_on(data_item_split, sep, tpl_f):
"""
bibconvert conditional split with following conditions
===================================================
::NEXT(N,TYPE,SIDE) - next N chars are of the TYPE having the separator on the SIDE
::PREV(N,TYPE,SIDE) - prev.N chars are of the TYPE having the separator on the SIDE
"""
fn = get_pars(tpl_f)[0]
par = get_pars(tpl_f)[1]
done = 0
while (done == 0):
if ( (( fn == "NEXT" ) and ( par[2]=="R" )) or
(( fn == "PREV" ) and ( par[2]=="L" )) ):
test_value = data_item_split[0][-(string.atoi(par[0])):]
elif ( ((fn == "NEXT") and ( par[2]=="L")) or
((fn == "PREV") and ( par[2]=="R")) ):
test_value = data_item_split[1][:(string.atoi(par[0]))]
data_item_split_tmp = []
if ((FormatField(test_value, "SUP(" + par[1] + ",)") != "") \
or (len(test_value) < string.atoi(par[0]))):
data_item_split_tmp = data_item_split[1].split(sep, 1)
if(len(data_item_split_tmp)==1):
done = 1
data_item_split[0] = data_item_split[0] + sep + \
data_item_split_tmp[0]
data_item_split[1] = ""
else:
data_item_split[0] = data_item_split[0] + sep + \
data_item_split_tmp[0]
data_item_split[1] = data_item_split_tmp[1]
else:
done = 1
return data_item_split
def get_subfields(data, subfield, src_tpl):
"Get subfield according to the template"
out = []
for data_item in data:
found = 0
for src_tpl_item in src_tpl:
if (src_tpl_item[:2] == "<:"):
if (src_tpl_item[2:-2] == subfield):
found = 1
else:
sep_in_list = src_tpl_item.split("::")
sep = sep_in_list[0]
data_item_split = data_item.split(sep, 1)
if (len(data_item_split)==1):
data_item = data_item_split[0]
else:
if (len(sep_in_list) > 1):
data_item_split = check_split_on(data_item.split(sep, 1),
sep_in_list[0],
sep_in_list[1])
if(found == 1):
data_item = data_item_split[0]
else:
data_item = string.join(data_item_split[1:], sep)
out.append(data_item)
return out
def exp_n(word):
"Replace newlines and carriage return's from string."
out = ""
for ch in word:
if ((ch != '\n') and (ch != '\r')):
out = out + ch
return out
def exp_e(list):
"Expunge empty elements from a list"
out = []
for item in list:
item = exp_n(item)
if ((item != '\r\n' and item != '\r' \
and item != '\n' and item !="" \
and len(item)!=0)):
out.append(item)
return out
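# Illustrative example (added sketch, not part of the original module):
#   exp_e(['title\n', '', '\r\n', 'author']) -> ['title', 'author']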
def sup_e(word):
"Replace spaces"
out = ""
for ch in word:
if (ch != ' '):
out = out + ch
return out
def select_line(field_code, list):
"Return appropriate item from a list"
out = ['']
for field in list:
field[0] = sup_e(field[0])
field_code = sup_e(field_code)
if (field[0] == field_code):
out = field[1]
return out
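# Illustrative example (added sketch, not part of the original module):
#   select_line('700__a', [['245__a', ['Title']], ['700__a', ['Author']]]) -> ['Author']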
def parse_field_definition(source_field_definition):
"Create list of source_field_definition"
word_list = []
out = []
word = ""
counter = 0
if (len(source_field_definition.split("---"))==4):
out = source_field_definition.split("---")
else:
element_list_high = source_field_definition.split("<:")
for word_high in element_list_high:
element_list_low = word_high.split(':>')
for word_low in element_list_low:
word_list.append(word_low)
word_list.append(":>")
word_list.pop()
word_list.append("<:")
word_list.pop()
for item in word_list:
word = word + item
if (item == "<:"):
counter = counter + 1
if (item == ":>"):
counter = counter - 1
if counter == 0:
out.append(word)
word = ""
return out
def parse_template(template):
"""
bibconvert parse template
======================
in - template filename
out - [ [ field_code , [ field_template_parsed ] , [] ]
"""
out = []
for field_def in read_file(template, 1):
field_tpl_new = []
if ((len(field_def.split("---", 1)) > 1) and (field_def[:1] != "#")):
field_code = field_def.split("---", 1)[0]
field_tpl = parse_field_definition(field_def.split("---", 1)[1])
field_tpl_new = field_tpl
field_tpl = exp_e(field_tpl_new)
out_data = [field_code, field_tpl]
out.append(out_data)
return out
def parse_common_template(template, part):
"""
bibconvert parse template
=========================
in - template filename
out - [ [ field_code , [ field_template_parsed ] , [] ]
"""
out = []
counter = 0
for field_def in read_file(template, 1):
if (exp_n(field_def)[:3] == "==="):
counter = counter + 1
elif (counter == part):
field_tpl_new = []
if ((len(field_def.split("---", 1)) > 1) and (field_def[:1]!="#")):
field_code = field_def.split("---", 1)[0]
field_tpl = parse_field_definition(field_def.split("---", 1)[1])
field_tpl_new = field_tpl
field_tpl = exp_e(field_tpl_new)
out_data = [field_code, field_tpl]
out.append(out_data)
return out
def parse_input_data_f(source_data_open, source_tpl):
"""
bibconvert parse input data
========================
in - input source data location (filehandle)
source data template
source_field_code list of source field codes
         source_field_data list of source field data values (repetitive fields: one occurrence per line)
out - [ [ source_field_code , [ source_field_data ] ] , [] ]
source_data_template entry - field_code---[const]<:subfield_code:>[const][<:subfield_code:>][]
    destination_template entry - [::GFF()]---[const]<:field_code::subfield_code[::FF()]:>[]
input data file; by line: - fieldcode value
"""
global separator
out = [['', []]]
count = 0
values = []
while (count < 1):
line = source_data_open.readline()
if (line == ""):
return(-1)
line_split = line.split(" ", 1)
if (re.sub("\s", "", line) == separator):
count = count + 1
if (len(line_split) == 2):
field_code = line_split[0]
field_value = exp_n(line_split[1])
values.append([field_code, field_value])
item_prev = ""
stack = ['']
for item in values:
if ((item[0]==item_prev)or(item_prev == "")):
stack.append(item[1])
item_prev = item[0]
else:
out.append([item_prev, stack])
item_prev = item[0]
stack = []
stack.append(item[1])
try:
if (stack[0] != ""):
if (out[0][0]==""):
out = []
out.append([field_code, stack])
except IndexError as e:
out = out
return out
def parse_input_data_fx(source_tpl):
"""
bibconvert parse input data
========================
in - input source data location (filehandle)
source data template
source_field_code list of source field codes
         source_field_data list of source field data values (repetitive fields: one occurrence per line)
out - [ [ source_field_code , [ source_field_data ] ] , [] ]
extraction_template_entry -
input data file - specified by extract_tpl
"""
global separator
count = 0
record = ""
field_data_1_in_list = []
out = [['', []]]
while (count <10):
line = sys.stdin.readline()
if (line == ""):
count = count + 1
if (record == "" and count):
return (-1)
if (re.sub("\s", "", line) == separator):
count = count + 10
else:
record = record + line
for field_defined in extract_tpl_parsed:
try:
field_defined[1][0] = sub_keywd(field_defined[1][0])
field_defined[1][1] = sub_keywd(field_defined[1][1])
except IndexError as e:
field_defined = field_defined
try:
field_defined[1][2] = sub_keywd(field_defined[1][2])
except IndexError as e:
field_defined = field_defined
field_data_1 =""
if ((field_defined[1][0][0:2] == '//') and \
(field_defined[1][0][-2:] == '//')):
field_defined_regexp = field_defined[1][0][2:-2]
try:
####
if (len(re.split(field_defined_regexp, record)) == 1):
field_data_1 = ""
field_data_1_in_list = []
else:
field_data_1_tmp = re.split(field_defined_regexp, record, 1)[1]
field_data_1_in_list = field_data_1_tmp.split(field_defined_regexp)
except IndexError as e:
field_data_1 = ""
else:
try:
if (len(record.split(field_defined[1][0])) == 1):
field_data_1 = ""
field_data_1_in_list = []
else:
field_data_1_tmp = record.split(field_defined[1][0], 1)[1]
field_data_1_in_list = field_data_1_tmp.split(field_defined[1][0])
except IndexError as e:
field_data_1 = ""
spliton = []
outvalue = ""
field_data_2 = ""
field_data = ""
try:
if ((field_defined[1][1])=="EOL"):
spliton = ['\n']
elif ((field_defined[1][1])=="MIN"):
spliton = ['\n']
elif ((field_defined[1][1])=="MAX"):
for item in extract_tpl_parsed:
try:
spliton.append(item[1][0])
except IndexError as e:
spliton = spliton
elif (field_defined[1][1][0:2] == '//') and \
(field_defined[1][1][-2:] == '//'):
spliton = [field_defined[1][1][2:-2]]
else:
spliton = [field_defined[1][1]]
except IndexError as e:
spliton = ""
outvalues = []
for field_data in field_data_1_in_list:
outvalue = ""
for splitstring in spliton:
field_data_2 = ""
if (len(field_data.split(splitstring))==1):
if (outvalue == ""):
field_data_2 = field_data
else:
field_data_2 = outvalue
else:
field_data_2 = field_data.split(splitstring)[0]
outvalue = field_data_2
field_data = field_data_2
outvalues.append(outvalue)
outvalues = exp_e(outvalues)
if (len(outvalues) > 0):
if (out[0][0]==""):
out = []
outstack = []
if (len(field_defined[1])==3):
spliton = [field_defined[1][2]]
if (field_defined[1][2][0:2] == '//') and \
(field_defined[1][2][-2:] == '//'):
spliton = [field_defined[1][2][2:-2]]
for item in outvalues:
stack = re.split(spliton[0], item)
for stackitem in stack:
outstack.append(stackitem)
else:
outstack = outvalues
out.append([field_defined[0], outstack])
return out
def parse_input_data_d(source_data, source_tpl):
"""
bibconvert parse input data
========================
in - input source data location (directory)
source data template
source_field_code list of source field codes
source_field_data list of source field data values (repetitive fields each line one occurence)
out - [ [ source_field_code , [ source_field_data ] ] , [] ]
source_data_template entry - field_code---[const]<:subfield_code:>[const][<:subfield_code:>][]
    destination_template entry - [::GFF()]---[const]<:field_code::subfield_code[::FF()]:>[]
input data dir; by file: - fieldcode value per line
"""
out = []
for source_field_tpl in read_file(source_tpl, 1):
source_field_code = source_field_tpl.split("---")[0]
source_field_data = read_file(source_data + source_field_code, 0)
source_field_data = exp_e(source_field_data)
out_data = [source_field_code, source_field_data]
out.append(out_data)
return out
def sub_empty_lines(value):
out = re.sub('\n\n+', '', value)
return out
def set_par_defaults(par1, par2):
"Set default parameter when not defined"
par_new_in_list = par2.split(",")
i = 0
out = []
for par in par_new_in_list:
if (len(par1)>i):
if (par1[i] == ""):
out.append(par)
else:
out.append(par1[i])
else:
out.append(par)
i = i + 1
return out
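# Illustrative sketch (added for clarity, hypothetical values):
#   set_par_defaults(["", "R"], "5,L") returns ["5", "R"] -- empty or missing
#   positions in par1 are filled from the comma-separated defaults in par2.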
def generate(keyword):
"""
    bibconvert generated values:
=========================
SYSNO() - generate date as '%w%H%M%S'
WEEK(N) - generate date as '%V' with shift (N)
    DATE(format) - generate date in the specified date FORMAT
    VALUE(value) - enter value literally
OAI() - generate oai_identifier, starting value given at command line as -o<value>
"""
out = keyword
fn = keyword + "()"
par = get_pars(fn)[1]
fn = get_pars(fn)[0]
par = set_par_defaults(par, "")
if (fn == "SYSNO"):
out = sysno500
if (fn == "SYSNO330"):
out = sysno
if (fn == "WEEK"):
par = set_par_defaults(par, "0")
out = "%02d" % (string.atoi(strftime("%V", localtime())) \
+ string.atoi(par[0]))
if (string.atoi(out)<0):
out = "00"
if (fn == "VALUE"):
par = set_par_defaults(par, "")
out = par[0]
if (fn == "DATE"):
par = set_par_defaults(par, "%w%H%M%S," + "%d" % set_conv()[1])
out = strftime(par[0], localtime())
out = out[:string.atoi(par[1])]
if (fn == "XDATE"):
par = set_par_defaults(par,"%w%H%M%S," + ",%d" % set_conv()[1])
out = strftime(par[0], localtime())
out = par[1] + out[:string.atoi(par[2])]
if (fn == "OAI"):
out = "%s:%d" % (CFG_OAI_ID_PREFIX, tcounter + oai_identifier_from)
return out
def read_file(filename, exception):
"Read file into list"
out = []
if (os.path.isfile(filename)):
file = open(filename,'r')
out = file.readlines()
file.close()
else:
if exception:
exit_on_error("Cannot access file: %s" % filename)
return out
def crawl_KB(filename, value, mode):
"""
bibconvert look-up value in KB_file in one of following modes:
===========================================================
1 - case sensitive / match (default)
2 - not case sensitive / search
3 - case sensitive / search
4 - not case sensitive / match
5 - case sensitive / search (in KB)
6 - not case sensitive / search (in KB)
7 - case sensitive / search (reciprocal)
8 - not case sensitive / search (reciprocal)
9 - replace by _DEFAULT_ only
R - not case sensitive / search (reciprocal) (8) replace
"""
if (os.path.isfile(filename) != 1):
# Look for KB in same folder as extract_tpl, if exists
try:
pathtmp = string.split(extract_tpl,"/")
pathtmp.pop()
path = string.join(pathtmp,"/")
filename = path + "/" + filename
except NameError:
# File was not found. Try to look inside default KB
# directory
filename = kb.get(filename, '')
# FIXME: Remove \n from returned value?
if (os.path.isfile(filename)):
file_to_read = open(filename,"r")
file_read = file_to_read.readlines()
for line in file_read:
code = string.split(line, "---")
if (mode == "2"):
value_to_cmp = string.lower(value)
code[0] = string.lower(code[0])
if ((len(string.split(value_to_cmp, code[0])) > 1) \
or (code[0]=="_DEFAULT_")):
value = code[1]
return value
elif ((mode == "3") or (mode == "0")):
if ((len(string.split(value, code[0])) > 1) or \
(code[0] == "_DEFAULT_")):
value = code[1]
return value
elif (mode == "4"):
value_to_cmp = string.lower(value)
code[0] = string.lower(code[0])
if ((code[0] == value_to_cmp) or \
(code[0] == "_DEFAULT_")):
value = code[1]
return value
elif (mode == "5"):
if ((len(string.split(code[0], value)) > 1) or \
(code[0] == "_DEFAULT_")):
value = code[1]
return value
elif (mode == "6"):
value_to_cmp = string.lower(value)
code[0] = string.lower(code[0])
if ((len(string.split(code[0], value_to_cmp)) > 1) or \
(code[0] == "_DEFAULT_")):
value = code[1]
return value
elif (mode == "7"):
if ((len(string.split(code[0], value)) > 1) or \
(len(string.split(value,code[0])) > 1) or \
(code[0] == "_DEFAULT_")):
value = code[1]
return value
elif (mode == "8"):
value_to_cmp = string.lower(value)
code[0] = string.lower(code[0])
if ((len(string.split(code[0], value_to_cmp)) > 1) or \
(len(string.split(value_to_cmp, code[0])) > 1) or \
(code[0] == "_DEFAULT_")):
value = code[1]
return value
elif (mode == "9"):
if (code[0]=="_DEFAULT_"):
value = code[1]
return value
elif (mode == "R"):
value_to_cmp = string.lower(value)
code[0] = string.lower(code[0])
if ((len(string.split(code[0], value_to_cmp)) > 1) or \
(len(string.split(value_to_cmp, code[0])) > 1) or \
(code[0] == "_DEFAULT_")):
value = value.replace(code[0], code[1])
else:
if ((code[0] == value) or (code[0]=="_DEFAULT_")):
value = code[1]
return value
else:
sys.stderr.write("Warning: given KB could not be found. \n")
return value
def FormatField(value, fn):
"""
bibconvert formatting functions:
================================
ADD(prefix,suffix) - add prefix/suffix
KB(kb_file,mode) - lookup in kb_file and replace value
ABR(N,suffix) - abbreviate to N places with suffix
ABRX() - abbreviate exclusively words longer
ABRW() - abbreviate word (limit from right)
REP(x,y) - replace
SUP(type) - remove characters of certain TYPE
LIM(n,side) - limit to n letters from L/R
LIMW(string,side) - L/R after split on string
WORDS(n,side) - limit to n words from L/R
IF(value,valueT,valueF) - replace on IF condition
MINL(n) - replace words shorter than n
MINLW(n) - replace words shorter than n
MAXL(n) - replace words longer than n
EXPW(type) - replace word from value containing TYPE
EXP(STR,0/1) - replace word from value containing string
NUM() - take only digits in given string
SHAPE() - remove extra space
UP() - to uppercase
DOWN() - to lowercase
CAP() - make capitals each word
SPLIT(n,h,str,from) - only for final Aleph field, i.e. AB , maintain whole words
SPLITW(sep,h,str,from) - only for final Aleph field, split on string
    CONF(field,value,0/1) - confirm validity of output line (check other field)
CONFL(substr,0/1) - confirm validity of output line (check field being processed)
CUT(prefix,postfix) - remove substring from side
RANGE(MIN,MAX) - select items in repetitive fields
RE(regexp) - regular expressions
IFDEFP(field,value,0/1) - confirm validity of output line (check other field)
NOTE: This function works for CONSTANT
lines - those without any variable values in
them.
JOINMULTILINES(prefix,suffix) - Given a field-value with newlines in it,
split the field on the new lines (\n), separating
them with prefix, then suffix. E.g.:
For the field XX with the value:
Test
Case, A
And the function call:
<:XX^::XX::JOINMULTILINES(<subfield code="a">,</subfield>):>
The results would be:
<subfield code="a">Test</subfield><subfield code="a">Case, A</subfield>
One note on this: <:XX^::XX:
Without the ^ the newlines will be lost as
bibconvert will remove them, so you'll
never see an effect from this function.
bibconvert character TYPES
==========================
ALPHA - alphabetic
    NALPHA - not alphabetic
NUM - numeric
NNUM - not numeric
ALNUM - alphanumeric
NALNUM - non alphanumeric
LOWER - lowercase
UPPER - uppercase
    PUNCT - punctuation
    NPUNCT - non punctuation
SPACE - space
"""
global data_parsed
out = value
fn = fn + "()"
par = get_pars(fn)[1]
fn = get_pars(fn)[0]
regexp = "//"
NRE = len(regexp)
value = sub_keywd(value)
par_tmp = []
for item in par:
item = sub_keywd(item)
par_tmp.append(item)
par = par_tmp
if (fn == "RE"):
new_value = ""
par = set_par_defaults(par,".*,0")
if (re.search(par[0], value) and (par[1] == "0")):
new_value = value
out = new_value
if (fn == "KB"):
new_value = ""
par = set_par_defaults(par, "KB,0")
new_value = crawl_KB(par[0], value, par[1])
out = new_value
elif (fn == "ADD"):
par = set_par_defaults(par, ",")
out = par[0] + value + par[1]
elif (fn == "ABR"):
par = set_par_defaults(par, "1,.")
out = value[:string.atoi(par[0])] + par[1]
elif (fn == "ABRW"):
tmp = FormatField(value, "ABR(1,.)")
tmp = tmp.upper()
out = tmp
elif (fn == "ABRX"):
par = set_par_defaults(par, ",")
toout = []
tmp = value.split(" ")
for wrd in tmp:
if (len(wrd) > string.atoi(par[0])):
wrd = wrd[:string.atoi(par[0])] + par[1]
toout.append(wrd)
out = string.join(toout, " ")
elif (fn == "SUP"):
par = set_par_defaults(par, ",")
if(par[0]=="NUM"):
out = re.sub('\d+', par[1], value)
if(par[0]=="NNUM"):
out = re.sub('\D+', par[1], value)
if(par[0]=="ALPHA"):
out = re.sub('[a-zA-Z]+', par[1], value)
if(par[0]=="NALPHA"):
out = re.sub('[^a-zA-Z]+', par[1], value)
if((par[0]=="ALNUM") or (par[0] == "NPUNCT")):
out = re.sub('\w+', par[1], value)
if(par[0]=="NALNUM"):
out = re.sub('\W+', par[1], value)
if(par[0]=="PUNCT"):
out = re.sub('\W+', par[1], value)
if(par[0]=="LOWER"):
out = re.sub('[a-z]+', par[1], value)
if(par[0]=="UPPER"):
out = re.sub('[A-Z]+', par[1], value)
if(par[0]=="SPACE"):
out = re.sub('\s+', par[1], value)
elif (fn == "LIM"):
par = set_par_defaults(par,",")
if (par[1] == "L"):
out = value[(len(value) - string.atoi(par[0])):]
if (par[1] == "R"):
out = value[:string.atoi(par[0])]
elif (fn == "LIMW"):
par = set_par_defaults(par,",")
if (par[0]!= ""):
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if re.search(par[0], value):
par[0] = re.search(par[0], value).group()
else:
par[0] = None
if par[0]:
tmp = value.split(par[0], 1) # split into two parts only
if (par[1] == "L"):
out = par[0] + tmp[1]
if (par[1] == "R"):
out = tmp[0] + par[0]
else:
# split by empty par means keep value full
out = value
elif (fn == "WORDS"):
par = set_par_defaults(par, ",")
words = value.split(" ")
try:
max_num_words = int(par[0])
except ValueError:
max_num_words = len(words)
if (par[1] == "L"):
words.reverse()
wordlist = words[:max_num_words]
wordlist.reverse()
else:
wordlist = words[:max_num_words]
out = " ".join(wordlist)
elif (fn == "MINL"):
par = set_par_defaults(par, "1")
tmp = value.split(" ")
tmp2 = []
i = 0
for wrd in tmp:
if (len(wrd) >= string.atoi(par[0])):
tmp2.append(wrd)
out = string.join(tmp2, " ")
elif (fn == "MINLW"):
par = set_par_defaults(par, "1")
if (len(value) >= string.atoi(par[0])):
out = value
else:
out = ""
elif (fn == "MAXL"):
par = set_par_defaults(par, "4096")
tmp = value.split(" ")
tmp2 = []
i = 0
for wrd in tmp:
if (len(wrd) <= string.atoi(par[0])):
tmp2.append(wrd)
out = string.join(tmp2, " ")
elif (fn == "REP"):
        par = set_par_defaults(par, ",")
if (par[0]!= ""):
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
out = re.sub(par[0], par[1], value)
else:
out = value.replace(par[0], par[1])
elif (fn == "SHAPE"):
if (value != ""):
out = value.strip()
elif (fn == "UP"):
out = unicode(value,'utf-8').upper().encode('utf-8')
elif (fn == "DOWN"):
out = unicode(value,'utf-8').lower().encode('utf-8')
elif (fn == "CAP"):
tmp = value.split(" ")
out2 = []
for wrd in tmp:
wrd2 = wrd.capitalize()
out2.append(wrd2)
out = string.join(out2, " ")
elif (fn == "IF"):
par = set_par_defaults(par, ",,")
N = 0
while N < 3:
if (par[N][0:NRE] == regexp and par[N][-NRE:] == regexp):
par[N] = par[N][NRE:-NRE]
if re.search(par[N], value):
par[N] = re.search(par[N], value).group()
N += 1
if (value == par[0]):
out = par[1]
else:
out = par[2]
if (out == "ORIG"):
out = value
elif (fn == "EXP"):
par = set_par_defaults(par, ",0")
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if re.search(par[0], value):
par[0] = re.search(par[0], value).group()
tmp = value.split(" ")
out2 = []
for wrd in tmp:
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if ((re.search(par[0], wrd).group() == wrd) and \
(par[1] == "1")):
out2.append(wrd)
if ((re.search(par[0], wrd).group() != wrd) and \
(par[1] == "0")):
out2.append(wrd)
else:
if ((len(wrd.split(par[0])) == 1) and \
(par[1] == "1")):
out2.append(wrd)
if ((len(wrd.split(par[0])) != 1) and \
(par[1] == "0")):
out2.append(wrd)
out = string.join(out2," ")
elif (fn == "EXPW"):
par = set_par_defaults(par,",0")
tmp = value.split(" ")
out2 = []
for wrd in tmp:
if ((FormatField(wrd,"SUP(" + par[0] + ")") == wrd) and \
(par[1] == "1")):
out2.append(wrd)
if ((FormatField(wrd,"SUP(" + par[0] + ")") != wrd) and \
(par[1] == "0")):
out2.append(wrd)
out = string.join(out2," ")
elif fn == "JOINMULTILINES":
## Take a string, split it on newlines, and join them together, with
## a prefix and suffix for each segment. If prefix and suffix are
## empty strings, make suffix a single space.
prefix = par[0]
suffix = par[1]
if prefix == "" and suffix == "":
## Values should at least be separated by something;
## make suffix a space:
suffix = " "
new_value = ""
vals_list = value.split("\n")
for item in vals_list:
new_value += "%s%s%s" % (prefix, item, suffix)
        new_value = new_value.rstrip(" ")
## Update "out" with the newly created value:
out = new_value
elif (fn == "SPLIT"):
par = set_par_defaults(par, "%d,0,,1" % conv_setting[1])
length = string.atoi(par[0]) + (string.atoi(par[1]))
header = string.atoi(par[1])
headerplus = par[2]
starting = string.atoi(par[3])
line = ""
tmp2 = []
tmp3 = []
tmp = value.split(" ")
linenumber = 1
if (linenumber >= starting):
tmp2.append(headerplus)
line = line + headerplus
for wrd in tmp:
line = line + " " + wrd
tmp2.append(wrd)
if (len(line) > length):
linenumber = linenumber + 1
line = tmp2.pop()
toout = string.join(tmp2)
tmp3.append(toout)
tmp2 = []
line2 = value[:header]
if (linenumber >= starting):
line3 = line2 + headerplus + line
else:
line3 = line2 + line
line = line3
tmp2.append(line)
tmp3.append(line)
out = string.join(tmp3, "\n")
out = FormatField(out, "SHAPE()")
elif (fn == "SPLITW"):
par = set_par_defaults(par, ",0,,1")
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
stri = re.search(par[0], value)
if stri:
stri = stri.group(0)
else:
stri = ""
header = string.atoi(par[1])
headerplus = par[2]
starting = string.atoi(par[3])
counter = 1
tmp2 = []
tmp = re.split(par[0], value)
last = tmp.pop()
for wrd in tmp:
counter = counter + 1
if (counter >= starting):
tmp2.append(value[:header] + headerplus + wrd + stri)
else:
tmp2.append(value[:header] + wrd + stri)
if (last != ""):
counter = counter + 1
if (counter >= starting):
tmp2.append(value[:header] + headerplus + last)
else:
tmp2.append(value[:header] + last)
out = string.join(tmp2,"\n")
elif (fn == "CONF"):
par = set_par_defaults(par, ",,1")
found = 0
par1 = ""
data = select_line(par[0], data_parsed)
for line in data:
if (par[1][0:NRE] == regexp and par[1][-NRE:] == regexp):
par1 = par[1][NRE:-NRE]
else:
par1 = par[1]
if (par1 == ""):
if (line == ""):
found = 1
elif (len(re.split(par1,line)) > 1 ):
found = 1
if ((found == 1) and (string.atoi(par[2]) == 1)):
out = value
if ((found == 1) and (string.atoi(par[2]) == 0)):
out = ""
if ((found == 0) and (string.atoi(par[2]) == 1)):
out = ""
if ((found == 0) and (string.atoi(par[2]) == 0)):
out = value
return out
elif (fn == "IFDEFP"):
par = set_par_defaults(par, ",,1")
found = 0
par1 = ""
data = select_line(par[0], data_parsed)
if len(data) == 0 and par[1] == "":
## The "found" condition is that the field was empty
found = 1
else:
## Seeking a value in the field - conduct the search:
for line in data:
if (par[1][0:NRE] == regexp and par[1][-NRE:] == regexp):
par1 = par[1][NRE:-NRE]
else:
par1 = par[1]
if (par1 == ""):
if (line == ""):
found = 1
elif (len(re.split(par1,line)) > 1 ):
found = 1
if ((found == 1) and (string.atoi(par[2]) == 1)):
out = value
if ((found == 1) and (string.atoi(par[2]) == 0)):
out = ""
if ((found == 0) and (string.atoi(par[2]) == 1)):
out = ""
if ((found == 0) and (string.atoi(par[2]) == 0)):
out = value
return out
elif (fn == "CONFL"):
        par = set_par_defaults(par, ",1")
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if (re.search(par[0], value)):
if (string.atoi(par[1]) == 1):
out = value
else:
out = ""
else:
if (string.atoi(par[1]) == 1):
out = ""
else:
out = value
return out
elif (fn == "CUT"):
par = set_par_defaults(par, ",")
left = value[:len(par[0])]
right = value[-(len(par[1])):]
if (left == par[0]):
out = out[len(par[0]):]
if (right == par[1]):
out = out[:-(len(par[1]))]
return out
elif (fn == "NUM"):
tmp = re.findall('\d', value)
out = string.join(tmp, "")
return out
def format_field(value, fn):
"""
bibconvert formatting functions:
================================
ADD(prefix,suffix) - add prefix/suffix
KB(kb_file,mode) - lookup in kb_file and replace value
ABR(N,suffix) - abbreviate to N places with suffix
ABRX() - abbreviate exclusively words longer
ABRW() - abbreviate word (limit from right)
REP(x,y) - replace
SUP(type) - remove characters of certain TYPE
LIM(n,side) - limit to n letters from L/R
LIMW(string,side) - L/R after split on string
WORDS(n,side) - limit to n words from L/R
IF(value,valueT,valueF) - replace on IF condition
MINL(n) - replace words shorter than n
MINLW(n) - replace words shorter than n
MAXL(n) - replace words longer than n
EXPW(type) - replace word from value containing TYPE
EXP(STR,0/1) - replace word from value containing string
NUM() - take only digits in given string
SHAPE() - remove extra space
UP() - to uppercase
DOWN() - to lowercase
CAP() - make capitals each word
SPLIT(n,h,str,from) - only for final Aleph field, i.e. AB , maintain whole words
SPLITW(sep,h,str,from) - only for final Aleph field, split on string
    CONF(field,value,0/1) - confirm validity of output line (check other field)
CONFL(substr,0/1) - confirm validity of output line (check field being processed)
CUT(prefix,postfix) - remove substring from side
RANGE(MIN,MAX) - select items in repetitive fields
RE(regexp) - regular expressions
bibconvert character TYPES
==========================
ALPHA - alphabetic
    NALPHA - not alphabetic
NUM - numeric
NNUM - not numeric
ALNUM - alphanumeric
NALNUM - non alphanumeric
LOWER - lowercase
UPPER - uppercase
    PUNCT - punctuation
    NPUNCT - non punctuation
SPACE - space
"""
global data_parsed
out = value
fn = fn + "()"
par = get_pars(fn)[1]
fn = get_pars(fn)[0]
regexp = "//"
NRE = len(regexp)
value = sub_keywd(value)
par_tmp = []
for item in par:
item = sub_keywd(item)
par_tmp.append(item)
par = par_tmp
if (fn == "RE"):
new_value = ""
par = set_par_defaults(par, ".*,0")
if (re.search(par[0], value) and (par[1] == "0")):
new_value = value
out = new_value
if (fn == "KB"):
new_value = ""
par = set_par_defaults(par, "KB,0")
new_value = crawl_KB(par[0], value, par[1])
out = new_value
elif (fn == "ADD"):
par = set_par_defaults(par, ",")
out = par[0] + value + par[1]
elif (fn == "ABR"):
par = set_par_defaults(par, "1,.")
out = value[:string.atoi(par[0])] + par[1]
elif (fn == "ABRW"):
tmp = format_field(value,"ABR(1,.)")
tmp = tmp.upper()
out = tmp
elif (fn == "ABRX"):
par = set_par_defaults(par, ",")
toout = []
tmp = value.split(" ")
for wrd in tmp:
if (len(wrd) > string.atoi(par[0])):
wrd = wrd[:string.atoi(par[0])] + par[1]
toout.append(wrd)
out = string.join(toout, " ")
elif (fn == "SUP"):
par = set_par_defaults(par, ",")
if(par[0] == "NUM"):
out = re.sub('\d+', par[1], value)
if(par[0] == "NNUM"):
out = re.sub('\D+', par[1], value)
if(par[0] == "ALPHA"):
out = re.sub('[a-zA-Z]+', par[1], value)
if(par[0] == "NALPHA"):
out = re.sub('[^a-zA-Z]+', par[1], value)
if((par[0] == "ALNUM") or (par[0] == "NPUNCT")):
out = re.sub('\w+', par[1], value)
if(par[0] == "NALNUM"):
out = re.sub('\W+', par[1], value)
if(par[0] == "PUNCT"):
out = re.sub('\W+', par[1], value)
if(par[0] == "LOWER"):
out = re.sub('[a-z]+', par[1], value)
if(par[0] == "UPPER"):
out = re.sub('[A-Z]+', par[1], value)
if(par[0] == "SPACE"):
out = re.sub('\s+', par[1], value)
elif (fn == "LIM"):
par = set_par_defaults(par, ",")
if (par[1] == "L"):
out = value[(len(value) - string.atoi(par[0])):]
if (par[1] == "R"):
out = value[:string.atoi(par[0])]
elif (fn == "LIMW"):
par = set_par_defaults(par, ",")
if (par[0]!= ""):
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
par[0] = re.search(par[0], value).group()
tmp = value.split(par[0])
if (par[1] == "L"):
out = par[0] + tmp[1]
if (par[1] == "R"):
out = tmp[0] + par[0]
elif (fn == "WORDS"):
tmp2 = [value]
par = set_par_defaults(par, ",")
if (par[1] == "R"):
tmp = value.split(" ")
tmp2 = []
i = 0
while (i < string.atoi(par[0])):
tmp2.append(tmp[i])
i = i + 1
if (par[1] == "L"):
tmp = value.split(" ")
tmp.reverse()
tmp2 = []
i = 0
while (i < string.atoi(par[0])):
tmp2.append(tmp[i])
i = i + 1
tmp2.reverse()
out = string.join(tmp2, " ")
elif (fn == "MINL"):
par = set_par_defaults(par, "1")
tmp = value.split(" ")
tmp2 = []
i = 0
for wrd in tmp:
if (len(wrd) >= string.atoi(par[0])):
tmp2.append(wrd)
out = string.join(tmp2, " ")
elif (fn == "MINLW"):
par = set_par_defaults(par, "1")
if (len(value) >= string.atoi(par[0])):
out = value
else:
out = ""
elif (fn == "MAXL"):
par = set_par_defaults(par, "4096")
tmp = value.split(" ")
tmp2 = []
i = 0
for wrd in tmp:
if (len(wrd) <= string.atoi(par[0])):
tmp2.append(wrd)
out = string.join(tmp2, " ")
elif (fn == "REP"):
        par = set_par_defaults(par, ",")
if (par[0]!= ""):
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
out = re.sub(par[0], par[1], value)
else:
out = value.replace(par[0], par[1])
elif (fn == "SHAPE"):
if (value != ""):
out = value.strip()
elif (fn == "UP"):
out = unicode(value,'utf-8').upper().encode('utf-8')
elif (fn == "DOWN"):
out = unicode(value,'utf-8').lower().encode('utf-8')
elif (fn == "CAP"):
tmp = value.split(" ")
out2 = []
for wrd in tmp:
wrd2 = wrd.capitalize()
out2.append(wrd2)
out = string.join(out2," ")
elif (fn == "IF"):
par = set_par_defaults(par,",,")
N = 0
while N < 3:
if (par[N][0:NRE] == regexp and par[N][-NRE:] == regexp):
par[N] = par[N][NRE:-NRE]
par[N] = re.search(par[N], value).group()
N += 1
if (value == par[0]):
out = par[1]
else:
out = par[2]
if (out == "ORIG"):
out = value
elif (fn == "EXP"):
par = set_par_defaults(par, ",0")
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if re.search(par[0], value):
par[0] = re.search(par[0], value).group()
tmp = value.split(" ")
out2 = []
for wrd in tmp:
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if ((re.search(par[0], wrd).group() == wrd) and \
(par[1] == "1")):
out2.append(wrd)
if ((re.search(par[0], wrd).group() != wrd) and \
(par[1] == "0")):
out2.append(wrd)
else:
if ((len(wrd.split(par[0])) == 1) and \
(par[1] == "1")):
out2.append(wrd)
if ((len(wrd.split(par[0])) != 1) and \
(par[1] == "0")):
out2.append(wrd)
out = string.join(out2," ")
elif (fn == "EXPW"):
par = set_par_defaults(par,",0")
tmp = value.split(" ")
out2 = []
for wrd in tmp:
if ((format_field(wrd,"SUP(" + par[0] + ")") == wrd) and \
(par[1] == "1")):
out2.append(wrd)
if ((format_field(wrd,"SUP(" + par[0] + ")") != wrd) and \
(par[1] == "0")):
out2.append(wrd)
out = string.join(out2," ")
elif (fn == "SPLIT"):
par = set_par_defaults(par, "%d,0,,1" % conv_setting[1])
length = string.atoi(par[0]) + (string.atoi(par[1]))
header = string.atoi(par[1])
headerplus = par[2]
starting = string.atoi(par[3])
line = ""
tmp2 = []
tmp3 = []
tmp = value.split(" ")
linenumber = 1
if (linenumber >= starting):
tmp2.append(headerplus)
line = line + headerplus
for wrd in tmp:
line = line + " " + wrd
tmp2.append(wrd)
if (len(line) > length):
linenumber = linenumber + 1
line = tmp2.pop()
toout = string.join(tmp2)
tmp3.append(toout)
tmp2 = []
line2 = value[:header]
if (linenumber >= starting):
line3 = line2 + headerplus + line
else:
line3 = line2 + line
line = line3
tmp2.append(line)
tmp3.append(line)
out = string.join(tmp3, "\n")
out = format_field(out, "SHAPE()")
elif (fn == "SPLITW"):
par = set_par_defaults(par, ",0,,1")
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
str = re.search(par[0], value)
header = string.atoi(par[1])
headerplus = par[2]
starting = string.atoi(par[3])
counter = 1
tmp2 = []
tmp = re.split(par[0], value)
last = tmp.pop()
for wrd in tmp:
counter = counter + 1
if (counter >= starting):
                tmp2.append(value[:header] + headerplus + wrd + stri)
            else:
                tmp2.append(value[:header] + wrd + stri)
if (last != ""):
counter = counter + 1
if (counter >= starting):
tmp2.append(value[:header] + headerplus + last)
else:
tmp2.append(value[:header] + last)
out = string.join(tmp2, "\n")
elif (fn == "CONF"):
par = set_par_defaults(par, ",,1")
found = 0
par1 = ""
data = select_line(par[0], data_parsed)
for line in data:
if (par[1][0:NRE] == regexp and par[1][-NRE:] == regexp):
par1 = par[1][NRE:-NRE]
else:
par1 = par[1]
if (par1 == ""):
if (line == ""):
found = 1
elif (len(re.split(par1,line)) > 1 ):
found = 1
if ((found == 1) and (string.atoi(par[2]) == 1)):
out = value
if ((found == 1) and (string.atoi(par[2]) == 0)):
out = ""
if ((found == 0) and (string.atoi(par[2]) == 1)):
out = ""
if ((found == 0) and (string.atoi(par[2]) == 0)):
out = value
return out
elif (fn == "CONFL"):
        par = set_par_defaults(par, ",1")
if (par[0][0:NRE] == regexp and par[0][-NRE:] == regexp):
par[0] = par[0][NRE:-NRE]
if (re.search(par[0], value)):
if (string.atoi(par[1]) == 1):
out = value
else:
out = ""
else:
if (string.atoi(par[1]) == 1):
out = ""
else:
out = value
return out
elif (fn == "CUT"):
par = set_par_defaults(par, ",")
left = value[:len(par[0])]
right = value[-(len(par[1])):]
if (left == par[0]):
out = out[len(par[0]):]
if (right == par[1]):
out = out[:-(len(par[1]))]
return out
elif (fn == "NUM"):
tmp = re.findall('\d', value)
out = string.join(tmp, "")
return out
## Match records with the database content
##
def match_in_database(record, query_string):
"Check if record is in alreadey in database with an oai identifier. Returns recID if present, 0 otherwise."
query_string_parsed = parse_query_string(query_string)
search_pattern = []
search_field = []
for query_field in query_string_parsed:
ind1 = query_field[0][3:4]
if ind1 == "_":
ind1 = ""
ind2 = query_field[0][4:5]
if ind2 == "_":
ind2 = ""
stringsplit = "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">" % (query_field[0][0:3], ind1, ind2, query_field[0][5:6])
formatting = query_field[1:]
record1 = string.split(record, stringsplit)
if len(record1) > 1:
matching_value = string.split(record1[1], "<")[0]
for fn in formatting:
matching_value = FormatField(matching_value, fn)
search_pattern.append(matching_value)
search_field.append(query_field[0])
search_field.append("")
search_field.append("")
search_field.append("")
search_pattern.append("")
search_pattern.append("")
search_pattern.append("")
recID_list = perform_request_search(p1=search_pattern[0],
f1=search_field[0],
p2=search_pattern[1],
f2=search_field[1],
p3=search_pattern[2],
f3=search_field[2])
return recID_list
def exit_on_error(error_message):
"exit when error occured"
sys.stderr.write("\n bibconvert data convertor\n")
sys.stderr.write(" Error: %s\n" % error_message)
sys.exit()
return 0
def create_record(begin_record_header,
ending_record_footer,
query_string,
match_mode,
Xcount):
"Create output record"
global data_parsed
out_to_print = ""
out = []
field_data_item_LIST = []
ssn5cnt = "%3d" % Xcount
sysno = generate("DATE(%w%H%M%S)")
sysno500 = generate("XDATE(%w%H%M%S)," + ssn5cnt)
for T_tpl_item_LIST in target_tpl_parsed:
# the line is printed only if the variables inside are not empty
print_line = 0
to_output = []
rows = 1
for field_tpl_item_STRING in T_tpl_item_LIST[1]:
save_field_newlines = 0
DATA = []
if (field_tpl_item_STRING[:2]=="<:"):
field_tpl_item_STRING = field_tpl_item_STRING[2:-2]
field = field_tpl_item_STRING.split("::")[0]
if (len(field_tpl_item_STRING.split("::")) == 1):
value = generate(field)
to_output.append([value])
else:
subfield = field_tpl_item_STRING.split("::")[1]
if (field[-1] == "*"):
repetitive = 1
field = field[:-1]
elif field[-1] == "^":
## Keep the newlines in a field's value:
repetitive = 0
save_field_newlines = 1
field = field[:-1]
else:
repetitive = 0
if dirmode:
DATA = select_line(field, data_parsed)
else:
DATA = select_line(field, data_parsed)
if save_field_newlines == 1:
## put newlines back into the element value:
DATA = [string.join(DATA, "\n")]
elif (repetitive == 0):
DATA = [string.join(DATA, " ")]
SRC_TPL = select_line(field, source_tpl_parsed)
try:
## Get the components that this field is composed of:
field_components = field_tpl_item_STRING.split("::")
num_field_components = len(field_components)
## Test the number of components. If it is greater that 2,
## some kind of functions must be called on the value of
## the field, and it should therefore be evaluated. If however,
## the field is made-up of only 2 components, (i.e. no functions
## are called on its value, AND the value is empty, do not bother
## to evaluate it.
##
## E.g. In the following line:
## 300---<Pages><:Num::Num:><:Num::Num::IF(,mult. p):></Pages>
##
## If we have a value "3" for page number (Num), we want the following result:
## <Pages>3 p</Pages>
## If however, we have no value for page number (Num), we want this result:
## <Pages>mult. p</Pages>
## The functions relating to the datafield must therefore be executed
##
## If however, the template contains this line:
## 300---<Pages><:Num::Num:></Pages>
##
## If we have a value "3" for page number (Num), we want the following result:
## <Pages>3</Pages>
## If however, we have no value for page number (Num), we do NOT want the line
## to be printed at all - we should SKIP the element and not return an empty
## value (<Pages></Pages> would be pointless.)
if (DATA[0] != "" or num_field_components > 2):
DATA = get_subfields(DATA, subfield, SRC_TPL)
FF = field_tpl_item_STRING.split("::")
if (len(FF) > 2):
FF = FF[2:]
for fn in FF:
# DATAFORMATTED = []
if (len(DATA) != 0):
DATA = get_subfields(DATA, subfield, SRC_TPL)
FF = field_tpl_item_STRING.split("::")
if (len(FF) > 2):
FF = FF[2:]
for fn2 in FF:
DATAFORMATTED = []
for item in DATA:
item = FormatField(item, fn)
if item != "":
DATAFORMATTED.append(item)
DATA = DATAFORMATTED
if (len(DATA) > rows):
rows = len(DATA)
if DATA[0] != "":
print_line = 1
to_output.append(DATA)
except IndexError as e:
pass
else:
to_output.append([field_tpl_item_STRING])
current = 0
default_print = 0
while (current < rows):
line_to_print = []
for item in to_output:
if (item == []):
item = ['']
if (len(item) <= current):
printout = item[0]
else:
printout = item[current]
line_to_print.append(printout)
output = exp_n(string.join(line_to_print,""))
global_formatting_functions = T_tpl_item_LIST[0].split("::")[1:]
for GFF in global_formatting_functions:
if (GFF[:5] == "RANGE"):
parR = get_pars(GFF)[1]
parR = set_par_defaults(parR,"MIN,MAX")
if (parR[0]!="MIN"):
if (string.atoi(parR[0]) > (current+1)):
output = ""
if (parR[1]!="MAX"):
if (string.atoi(parR[1]) < (current+1)):
output = ""
elif (GFF[:6] == "IFDEFP"):
## Like a DEFP and a CONF combined. I.e. Print the line
## EVEN if its a constant, but ONLY IF the condition in
## the IFDEFP is met.
## If the value returned is an empty string, no line will
## be printed.
output = FormatField(output, GFF)
print_line = 1
elif (GFF[:4] == "DEFP"):
default_print = 1
else:
output = FormatField(output, GFF)
if ((len(output) > set_conv()[0] and print_line == 1) or default_print):
out_to_print = out_to_print + output + "\n"
current = current + 1
###
out_flag = 0
if query_string:
recID = match_in_database(out_to_print, query_string)
if len(recID) == 1 and match_mode == 1:
ctrlfield = "<controlfield tag=\"001\">%d</controlfield>" % (recID[0])
out_to_print = ctrlfield + "\n" + out_to_print
out_flag = 1
if len(recID) == 0 and match_mode == 0:
out_flag = 1
if len(recID) > 1 and match_mode == 2:
out_flag = 1
if out_flag or match_mode == -1:
if begin_record_header != "":
out_to_print = begin_record_header + "\n" + out_to_print
if ending_record_footer != "":
out_to_print = out_to_print + "\n" + ending_record_footer
else:
out_to_print = ""
return out_to_print
def convert(ar_):
global dirmode, Xcount, conv_setting, sysno, sysno500, separator, tcounter, source_data, query_string, match_mode, begin_record_header, ending_record_footer, output_rec_sep, begin_header, ending_footer, oai_identifier_from, source_tpl, source_tpl_parsed, target_tpl, target_tpl_parsed, extract_tpl, extract_tpl_parsed, data_parsed
dirmode, Xcount, conv_setting, sysno, sysno500, separator, tcounter, source_data, query_string, match_mode, begin_record_header, ending_record_footer, output_rec_sep, begin_header, ending_footer, oai_identifier_from, source_tpl, source_tpl_parsed, target_tpl, target_tpl_parsed, extract_tpl, extract_tpl_parsed = ar_
# separator = spt
# Added by Alberto
separator = sub_keywd(separator)
if dirmode:
if (os.path.isdir(source_data)):
data_parsed = parse_input_data_d(source_data, source_tpl)
record = create_record(begin_record_header, ending_record_footer, query_string, match_mode, Xcount)
if record != "":
print record
tcounter = tcounter + 1
if output_rec_sep != "":
print output_rec_sep
else:
exit_on_error("Cannot access directory: %s" % source_data)
else:
done = 0
print begin_header
while (done == 0):
data_parsed = parse_input_data_fx(source_tpl)
if (data_parsed == -1):
done = 1
else:
if (data_parsed[0][0]!= ''):
record = create_record(begin_record_header, ending_record_footer, query_string, match_mode, Xcount)
Xcount += 1
if record != "":
print record
tcounter = tcounter + 1
if output_rec_sep != "":
print output_rec_sep
print ending_footer
return
| MSusik/invenio | invenio/legacy/bibconvert/api.py | Python | gpl-2.0 | 66,991 |
# encoding=utf8
# -*- coding: utf-8 -*-
import requests
from pyquery import PyQuery as pq
# Fetch all book titles from Amazon
# python3
# URL of the category listing page
clc_url = 'https://www.amazon.cn/Kindle%E7%94%B5%E5%AD%90%E4%B9%A6/b?ie=UTF8&node=116169071&ref_=nav_topnav_giftcert'
'''
Headers for visiting the home page
'''
headers = {'Host': 'www.amazon.cn',
'Connection': 'keep-alive',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'DNT': '1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9', }
# Headers for visiting a specific category page
headers2 = {'Host': 'www.amazon.cn',
'Connection': 'keep-alive',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'DNT': '1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Referer': 'https://www.amazon.cn/Kindle%E7%94%B5%E5%AD%90%E4%B9%A6/b?ie=UTF8&node=116169071&ref_=nav_topnav_giftcert',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9', }
# Fetch the categories
def get_classification():
    requests.DEFAULT_RETRIES = 2 # increase the number of connection retries
s = requests.session()
    s.keep_alive = False # close redundant connections
url = clc_url
    # s.get(url) # the URL you need
response = requests.get(url, headers=headers)
# print(response.text)
resp_obj = pq(response.text)
divs = resp_obj('#s-refinements').children('div')
# print(divs)
# if 1==1:
# return
cf_ul = pq(divs[2]).find('ul li')
i = 0
file_classfication = open('cfc.csv', 'w')
for ul in cf_ul:
i = i + 1
if i < 3:
            # the first two items are not category names to scrape
continue
print('==============')
ss = pq(ul).find('a').attr('href')
c_name = pq(ul).text()
        # Category ID, spliced into the URL https://www.amazon.cn/s?rh=n%3A{}&fs=true&ref=lp_{}_sar ; when requesting it, remember the Referer URL is the long link below
c_id = ss[56:65]
c_url = 'https://www.amazon.cn/s?rh=n%3A{}&fs=true&ref=lp_{}_sar'.format(c_id, c_id)
file_classfication.write((c_id + ',' + c_name + ',' + c_url + '\n'))
# print(c_id + ',' + c_name + ',' + c_url)
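# Illustrative sketch (added for clarity): each line written to cfc.csv has the form
# <c_id>,<category name>,https://www.amazon.cn/s?rh=n%3A<c_id>&fs=true&ref=lp_<c_id>_sar
# e.g. with c_id 143359071 as used in main() below.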
# Fetch book titles from the URL and write them to a file, using c_id as the file name
def get_books_by_url(c_id, c_url):
response = requests.get(c_url, headers=headers2)
# print(response.text)
resp_obj = pq(response.text)
    # book title
a = resp_obj('#search > div.s-desktop-width-max.s-opposite-dir > div > div.s-matching-dir.sg-col-16-of-20.sg-col.sg-col-8-of-12.sg-col-12-of-16 > div > span:nth-child(4) > div.s-main-slot.s-result-list.s-search-results.sg-row > div:nth-child(1) > div > span > div > div > div:nth-child(2) > div.sg-col.sg-col-4-of-12.sg-col-8-of-16.sg-col-12-of-20 > div > div.a-section.a-spacing-none > h2 > a > span')
    # second book
b = resp_obj('#search > div.s-desktop-width-max.s-opposite-dir > div > div.s-matching-dir.sg-col-16-of-20.sg-col.sg-col-8-of-12.sg-col-12-of-16 > div > span:nth-child(4) > div.s-main-slot.s-result-list.s-search-results.sg-row > div:nth-child(2)')
    # author and year
a2 = resp_obj('#search > div.s-desktop-width-max.s-opposite-dir > div > div.s-matching-dir.sg-col-16-of-20.sg-col.sg-col-8-of-12.sg-col-12-of-16 > div > span:nth-child(4) > div.s-main-slot.s-result-list.s-search-results.sg-row > div:nth-child(1) > div > span > div > div > div:nth-child(2) > div.sg-col.sg-col-4-of-12.sg-col-8-of-16.sg-col-12-of-20 > div > div.a-section.a-spacing-none > div')
print(a)
print(a2)
# print(resp_obj)
pass
def main():
    # On the first run, fetch all category information first and write it to a file
# get_classification()
c_id = '143359071'
c_url = 'https://www.amazon.cn/s?rh=n%3A143359071&fs=true&ref=lp_143359071_sar'
get_books_by_url(c_id, c_url)
# a = '123'
# print ("This website name is %s %s" % (a,a))
pass
if __name__ == '__main__':
main()
'''
This is the link to all books under a category; just replace the 144154071 here. Among the links captured above, only this number differs; all other values are the same.
https://www.amazon.cn/s?rh=n%3A144154071&fs=true&ref=lp_144154071_sar
/s?bbn=116169071&rh=n%3A116087071%2Cn%3A116169071%2Cn%3A144154071&dc&qid=1618411517&rnid=116169071&ref=lp_116169071_nr_n_0
/s?bbn=116169071&rh=n%3A116087071%2Cn%3A116169071%2Cn%3A143468071&dc&qid=1618412690&rnid=116169071&ref=lp_116169071_nr_n_27
/s?bbn=116169071&rh=n%3A116087071%2Cn%3A116169071%2Cn%3A143553071&dc&qid=1618413163&rnid=116169071&ref=lp_116169071_nr_n_32
/s?bbn=116169071&rh=n%3A116087071%2Cn%3A116169071%2Cn%3A143579071&dc&qid=1618413163&rnid=116169071&ref=lp_116169071_nr_n_33
/s?bbn=116169071&rh=n%3A116087071%2Cn%3A116169071%2Cn%3A116170071&dc&qid=1618413163&rnid=116169071&ref=lp_116169071_nr_n_34
https://www.amazon.cn/s?rh=n%3A144180071&fs=true&ref=lp_144180071_sar
https://www.amazon.cn/s?i=digital-text&rh=n%3A144180071&fs=true&page=2&qid=1618411782&ref=sr_pg_2
https://www.amazon.cn/s?i=digital-text&rh=n%3A144180071&fs=true&page=3&qid=1618413388&ref=sr_pg_3
https://www.amazon.cn/s?i=digital-text&rh=n%3A144180071&fs=true&page=4&qid=1618413415&ref=sr_pg_3
https://www.amazon.cn/s?i=digital-text&rh=n%3A144180071&fs=true&qid=1618413452&ref=sr_pg_1
https://www.amazon.cn/s?i=digital-text&rh=n%3A144180071&fs=true&page=2&qid=1618413470&ref=sr_pg_2
There is a puzzling qid parameter; it seems to be a value stored in the corresponding cookie.
'''
| wang153723482/HelloWorld_my | HelloWorld_python/http_spider/amazon/get_books.py | Python | apache-2.0 | 6,587 |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import six
import testtools
from oslo_log import log as logging
from tempest.api.image import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BasicOperationsImagesTest(base.BaseV2ImageTest):
"""Here we test the basic operations of images"""
@decorators.attr(type='smoke')
@decorators.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
def test_register_upload_get_image_file(self):
"""Here we test these functionalities
Register image, upload the image file, get image and get image
file api's
"""
uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private',
ramdisk_id=uuid)
self.assertIn('name', image)
self.assertEqual(image_name, image['name'])
self.assertIn('visibility', image)
self.assertEqual('private', image['visibility'])
self.assertIn('status', image)
self.assertEqual('queued', image['status'])
# Now try uploading an image file
file_content = data_utils.random_bytes()
image_file = six.BytesIO(file_content)
self.client.store_image_file(image['id'], image_file)
# Now try to get image details
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual(image_name, body['name'])
self.assertEqual(uuid, body['ramdisk_id'])
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
# Now try get image file
body = self.client.show_image_file(image['id'])
self.assertEqual(file_content, body.data)
@decorators.attr(type='smoke')
@decorators.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
def test_delete_image(self):
# Deletes an image by image_id
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
# Delete Image
self.client.delete_image(image['id'])
self.client.wait_for_resource_deletion(image['id'])
# Verifying deletion
images = self.client.list_images()['images']
images_id = [item['id'] for item in images]
self.assertNotIn(image['id'], images_id)
@decorators.attr(type='smoke')
@decorators.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
def test_update_image(self):
# Updates an image by image_id
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
self.assertEqual('queued', image['status'])
# Now try uploading an image file
image_file = six.BytesIO(data_utils.random_bytes())
self.client.store_image_file(image['id'], image_file)
# Update Image
new_image_name = data_utils.rand_name('new-image')
body = self.client.update_image(image['id'], [
dict(replace='/name', value=new_image_name)])
# Verifying updating
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual(new_image_name, body['name'])
@testtools.skipUnless(CONF.image_feature_enabled.deactivate_image,
'deactivate-image is not available.')
@decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
def test_deactivate_reactivate_image(self):
# Create image
image_name = data_utils.rand_name('image')
image = self.create_image(name=image_name,
container_format='bare',
disk_format='raw',
visibility='private')
# Upload an image file
content = data_utils.random_bytes()
image_file = six.BytesIO(content)
self.client.store_image_file(image['id'], image_file)
# Deactivate image
self.client.deactivate_image(image['id'])
body = self.client.show_image(image['id'])
self.assertEqual("deactivated", body['status'])
# User unable to download deactivated image
self.assertRaises(lib_exc.Forbidden, self.client.show_image_file,
image['id'])
# Reactivate image
self.client.reactivate_image(image['id'])
body = self.client.show_image(image['id'])
self.assertEqual("active", body['status'])
# User able to download image after reactivation
body = self.client.show_image_file(image['id'])
self.assertEqual(content, body.data)
class ListUserImagesTest(base.BaseV2ImageTest):
"""Here we test the listing of image information"""
@classmethod
def resource_setup(cls):
super(ListUserImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
container_fmts = CONF.image.container_formats
disk_fmts = CONF.image.disk_formats
all_pairs = [(container_fmt, disk_fmt)
for container_fmt in container_fmts
for disk_fmt in disk_fmts]
for (container_fmt, disk_fmt) in all_pairs[:6]:
LOG.debug("Creating an image"
"(Container format: %s, Disk format: %s).",
container_fmt, disk_fmt)
cls._create_standard_image(container_fmt, disk_fmt)
@classmethod
def _create_standard_image(cls, container_format, disk_format):
"""Create a new standard image and return the newly-registered image-id
Note that the size of the new image is a random number between
1024 and 4096
"""
size = random.randint(1024, 4096)
image_file = six.BytesIO(data_utils.random_bytes(size))
tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
image = cls.create_image(container_format=container_format,
disk_format=disk_format,
visibility='private',
tags=tags)
cls.client.store_image_file(image['id'], data=image_file)
# Keep the data of one test image so it can be used to filter lists
cls.test_data = image
return image['id']
def _list_by_param_value_and_assert(self, params):
"""Perform list action with given params and validates result."""
# Retrieve the list of images that meet the filter
images_list = self.client.list_images(params=params)['images']
# Validating params of fetched images
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
for image in images_list:
for key in params:
msg = "Failed to list images by %s" % key
self.assertEqual(params[key], image[key], msg)
def _list_sorted_by_image_size_and_assert(self, params, desc=False):
"""Validate an image list that has been sorted by size
Perform list action with given params and validates the results are
sorted by image size in either ascending or descending order.
"""
# Retrieve the list of images that meet the filter
images_list = self.client.list_images(params=params)['images']
# Validate that the list was fetched sorted accordingly
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
sorted_list = [image['size'] for image in images_list]
msg = 'The list of images was not sorted correctly.'
self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
@decorators.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
def test_list_no_params(self):
# Simple test to see all fixture images returned
images_list = self.client.list_images()['images']
image_list = [image['id'] for image in images_list]
for image in self.created_images:
self.assertIn(image, image_list)
@decorators.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
def test_list_images_param_container_format(self):
# Test to get all images with a specific container_format
params = {"container_format": self.test_data['container_format']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
def test_list_images_param_disk_format(self):
# Test to get all images with disk_format = raw
params = {"disk_format": "raw"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
def test_list_images_param_visibility(self):
# Test to get all images with visibility = private
params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
def test_list_images_param_size(self):
# Test to get all images by size
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
params = {"size": image['size']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
def test_list_images_param_min_max_size(self):
# Test to get all images with size between 2000 to 3000
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
size = image['size']
params = {"size_min": size - 500, "size_max": size + 500}
images_list = self.client.list_images(params=params)['images']
image_size_list = map(lambda x: x['size'], images_list)
for image_size in image_size_list:
self.assertGreaterEqual(image_size, params['size_min'],
"Failed to get images by size_min")
self.assertLessEqual(image_size, params['size_max'],
"Failed to get images by size_max")
@decorators.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
def test_list_images_param_status(self):
# Test to get all active images
params = {"status": "active"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
def test_list_images_param_limit(self):
# Test to get images by limit
params = {"limit": 1}
images_list = self.client.list_images(params=params)['images']
self.assertEqual(len(images_list), params['limit'],
"Failed to get images by limit")
@decorators.idempotent_id('e9a44b91-31c8-4b40-a332-e0a39ffb4dbb')
def test_list_image_param_owner(self):
# Test to get images by owner
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
params = {"owner": image['owner']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('55c8f5f5-bfed-409d-a6d5-4caeda985d7b')
def test_list_images_param_name(self):
# Test to get images by name
params = {'name': self.test_data['name']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('aa8ac4df-cff9-418b-8d0f-dd9c67b072c9')
def test_list_images_param_tag(self):
# Test to get images matching a tag
params = {'tag': self.test_data['tags'][0]}
images_list = self.client.list_images(params=params)['images']
# Validating properties of fetched images
self.assertNotEmpty(images_list)
for image in images_list:
msg = ("The image {image_name} does not have the expected tag "
"{expected_tag} among its tags: {observerd_tags}."
.format(image_name=image['name'],
expected_tag=self.test_data['tags'][0],
                           observed_tags=image['tags']))
self.assertIn(self.test_data['tags'][0], image['tags'], msg)
@decorators.idempotent_id('eeadce49-04e0-43b7-aec7-52535d903e7a')
def test_list_images_param_sort(self):
params = {'sort': 'size:desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('9faaa0c2-c3a5-43e1-8f61-61c54b409a49')
def test_list_images_param_sort_key_dir(self):
params = {'sort_key': 'size', 'sort_dir': 'desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
def test_get_image_schema(self):
# Test to get image schema
schema = "image"
body = self.schemas_client.show_schema(schema)
self.assertEqual("image", body['name'])
@decorators.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
def test_get_images_schema(self):
# Test to get images schema
schema = "images"
body = self.schemas_client.show_schema(schema)
self.assertEqual("images", body['name'])
class ListSharedImagesTest(base.BaseV2ImageTest):
"""Here we test the listing of a shared image information"""
credentials = ['primary', 'alt']
@classmethod
def setup_clients(cls):
super(ListSharedImagesTest, cls).setup_clients()
cls.image_member_client = cls.os_primary.image_member_client_v2
cls.alt_img_client = cls.os_alt.image_client_v2
@decorators.idempotent_id('3fa50be4-8e38-4c02-a8db-7811bb780122')
def test_list_images_param_member_status(self):
# Create an image to be shared using default visibility
image_file = six.BytesIO(data_utils.random_bytes(2048))
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(container_format=container_format,
disk_format=disk_format)
self.client.store_image_file(image['id'], data=image_file)
# Share the image created with the alt user
self.image_member_client.create_image_member(
image_id=image['id'], member=self.alt_img_client.tenant_id)
# As an image consumer you need to provide the member_status parameter
# along with the visibility=shared parameter in order for it to show
# results
params = {'member_status': 'pending', 'visibility': 'shared'}
fetched_images = self.alt_img_client.list_images(params)['images']
self.assertEqual(1, len(fetched_images))
self.assertEqual(image['id'], fetched_images[0]['id'])
| vedujoshi/tempest | tempest/api/image/v2/test_images.py | Python | apache-2.0 | 16,568 |
#!/usr/bin/env python
# encoding: utf-8
"""
misc.EventSystem.py
Copyright (C) 2018 Stefan Braun
registered handlers (a bag of handlers) getting called when event gets fired
using ideas from "axel events" https://github.com/axel-events/axel
and "event system" from http://www.valuedlessons.com/2008/04/events-in-python.html
(it seems that axel.Event leaks threads in my tests; it didn't kill old ones on synchronous execution, which is why I created my own event system)
=>differences to "axel events":
-executes handlers synchronously in same thread
-no timeouts for executing handler/callback functions,
(in Python it seems not possible to cleanly kill another thread,
and killing a subprocess would lead to problems for user with IPC and broken queues)
=>differences to "event system from valuedlessons.com":
-using list of handlers instead of set (fixed execution order, allowing registration of callback multiple times)
The execution result is returned as a list containing all results per handler having this structure:
exec_result = [
(True, result, handler), # on success
(False, error_info, handler), # on error
(None, None, handler), ... # asynchronous execution
]
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import threading
import queue
import time
import copy
import sys
import logging
# pause in busy-waiting-loop
SLEEP_TIMEBASE = 0.001
# setup of logging
# (based on tutorial https://docs.python.org/2/howto/logging.html )
# create logger =>set level to DEBUG if you want to catch all log messages!
logger = logging.getLogger('misc.EventSystem')
logger.setLevel(logging.WARN)
# create console handler
# =>set level to DEBUG if you want to see everything on console!
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
class EventSystem(object):
''' lightweight event system with many ideas from "axel event" '''
# one background thread for all asynchronous event handler functions
# (shared by EventSystem instances in asynchronous mode)
_alock = threading.Lock()
_async_thread = None
_async_queue = None
def __init__(self, sync_mode = True, exc_info=True, traceback=False):
self._sync_mode = sync_mode
self._exc_info = exc_info
self._traceback = traceback
self._handler_list = []
self._hlock = threading.RLock()
if not sync_mode:
EventSystem._setup_async_thread()
self._time_secs_old = 0.0
self.duration_secs = 0.0
@staticmethod
def _setup_async_thread():
with EventSystem._alock:
if not EventSystem._async_queue:
EventSystem._async_queue = queue.Queue()
if EventSystem._async_thread:
logger.info('EventSystem._setup_async_thread(): using existing background thread for asynchronous handler execution...')
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
EventSystem._async_thread.inc_nof_eventsources()
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
else:
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
EventSystem._async_thread = _Async_Executor_thread(target_q=EventSystem._async_queue)
EventSystem._async_thread.start()
logger.info(
'EventSystem._setup_async_thread(): started background thread for asynchronous handler execution...')
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
def handle(self, handler):
""" register a handler (add a callback function) """
with self._hlock:
self._handler_list.append(handler)
return self
def unhandle(self, handler):
""" unregister handler (removing callback function) """
with self._hlock:
try:
self._handler_list.remove(handler)
except:
raise ValueError("Handler is not handling this event, so cannot unhandle it.")
return self
def fire(self, *args, **kargs):
""" collects results of all executed handlers """
self._time_secs_old = time.time()
		# allow register/unregister during execution
		# (a shallow copy should be okay... https://docs.python.org/2/library/copy.html )
with self._hlock:
handler_list = copy.copy(self._handler_list)
result_list = []
for handler in handler_list:
one_res_tuple = (None, None, None)
if self._sync_mode:
# grab results of all handlers
result = self._execute(handler, *args, **kargs)
if isinstance(result, tuple) and len(result) == 3 and isinstance(result[1], Exception):
# error occurred
one_res_tuple = (False, self._error(result), handler)
else:
one_res_tuple = (True, result, handler)
else:
# execute handlers in background, ignoring result
EventSystem._async_queue.put((handler, args, kargs))
one_res_tuple = (None, None, handler)
result_list.append(one_res_tuple)
# update statistics
time_secs_new = time.time()
self.duration_secs = time_secs_new - self._time_secs_old
self._time_secs_old = time_secs_new
return result_list
def _execute(self, handler, *args, **kwargs):
""" executes one callback function """
# difference to Axel Events: we don't use a timeout and execute all handlers in same thread
		# FIXME: =>possible problem: event firing blocks when the user supplies a long- or infinitely-running callback function
		# (in Python it doesn't seem possible to forcefully kill a thread with clean resource releasing,
		# a thread has to cooperate and behave nicely...
		# executing and killing a subprocess would be possible, but that has data corruption problems with queues
# and possible interprocess communication problems with given handler/callback function)
result = None
exc_info = None
try:
result = handler(*args, **kwargs)
except:
exc_info = sys.exc_info()
if exc_info:
return exc_info
return result
def _error(self, exc_info):
""" Retrieves the error info """
if self._exc_info:
if self._traceback:
return exc_info
return exc_info[:2]
return exc_info[1]
def getHandlerCount(self):
with self._hlock:
return len(self._handler_list)
def clear(self):
""" Discards all registered handlers """
with self._hlock:
self._handler_list = []
def __del__(self):
if not self._sync_mode:
if hasattr(EventSystem, "_async_thread"):
# only if thread-instance is still existing (on teardown Python interpreter removes symbols of objects):
# update number of asynchronous instances
EventSystem._async_thread.dec_nof_eventsources()
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = getHandlerCount
def __repr__(self):
""" developer representation of this object """
return u'EventSystem(' + repr(self._handler_list) + u')'
class _Async_Executor_thread(threading.Thread):
""" executing handler functions asynchronously in background """
# =>attention: if EventSystem doesn't keep "self._nof_eventsources" up to date, then this thread will keep whole Python program running!
def __init__(self, target_q):
threading.Thread.__init__(self)
# trying to cleanup thread, so no daemon...
# https://stackoverflow.com/questions/20596918/python-exception-in-thread-thread-1-most-likely-raised-during-interpreter-shutd/20598791#20598791
self.daemon = False
self._target_q = target_q
#self.keep_running = True
self._nof_eventsources = 1
self._lock = threading.Lock()
def run(self):
while self._nof_eventsources > 0:
try:
_target, _args, _kwargs = self._target_q.get(block=False)
except queue.Empty:
# give other threads some CPU time...
time.sleep(SLEEP_TIMEBASE)
else:
				# (this optional else clause runs when no exception occurred)
try:
_res = _target(*_args, **_kwargs)
logger.debug('_Async_Executor_thread.run(): handler function ' + repr(_target) + '(args=' + repr(_args) + ', kwargs=' + repr(_kwargs) + ') has result "' + repr(_res) + '"')
except:
logger.error('_Async_Executor_thread.run(): exception in handler function ' + repr(_target) + '(args=' + repr(_args) + ', kwargs=' + repr(_kwargs) + '):'+ repr(sys.exc_info()[:2]))
finally:
del _target, _args, _kwargs
def inc_nof_eventsources(self):
with self._lock:
self._nof_eventsources += 1
logger.debug('_Async_Executor_thread.inc_nof_eventsources(): self._nof_eventsources=' + str(self._nof_eventsources))
def dec_nof_eventsources(self):
with self._lock:
self._nof_eventsources -= 1
logger.debug('_Async_Executor_thread.dec_nof_eventsources(): self._nof_eventsources=' + str(self._nof_eventsources))
if __name__ == '__main__':
test_set = set([1, 2])
def cb1(event):
logger.info('### cb1: enter callback: event=' + str(event))
time.sleep(2)
logger.info('*** cb1: exit callback: event=' + str(event))
return True
def cb2(event):
# takes too long for execution
logger.info('### cb2: enter callback: event=' + str(event))
time.sleep(10)
logger.info('*** cb2: exit callback: event=' + str(event))
def cb3(event):
# raises exception
logger.info('### cb3: enter callback: event=' + str(event))
print(str(1/0))
logger.info('*** cb3: exit callback: event=' + str(event) + str(1 / 0))
def cb4(event):
# raises exception
logger.info('### cb4: enter callback: event=' + str(event))
raise Exception()
logger.info('*** cb4: exit callback: event=' + str(event))
sync_mode = True
if 1 in test_set:
sync_mode = False
if 2 in test_set:
if sync_mode:
logger.error('testing synchronous events...')
else:
logger.error('testing asynchronous events...')
event = EventSystem(sync_mode=sync_mode)
event += cb1
event += cb2
event += cb3
event += cb4
CALLBACK_DURATION_WARNLEVEL = 10
for x in range(2):
result = event('TEST no.' + str(x))
if result:
for idx, res in enumerate(result):
if res[0] == None:
logger.debug('event-firing: asynchronously started callback no.' + str(idx) + ': handler=' + repr(res[2]))
else:
logger.debug('event-firing: synchronous callback no.' + str(idx) + ': success=' + str(res[0]) + ', result=' + str(res[1]) + ', handler=' + repr(res[2]))
if res[0] == False:
# example: res[1] without traceback: (<type 'exceptions.TypeError'>, TypeError("cannot concatenate 'str' and 'int' objects",))
# =>when traceback=True, then ID of traceback object is added to the part above.
# Assumption: traceback is not needed. It would be useful when debugging client code...
logger.error('event-firing: synchronous callback no.' + str(idx) + ' failed: ' + str(res[1]) + ' [handler=' + repr(res[2]) + ']')
else:
			logger.info('event-firing had no effect (no handler in EventSystem object)')
# diagnostic values
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
if event.duration_secs > CALLBACK_DURATION_WARNLEVEL:
logger.warn('event-firing took ' + str(event.duration_secs) + ' seconds... =>you should shorten your callback functions!')
# give callbacks time for execution
time.sleep(15)
logger.info('test is done, trying to clean up...')
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
logger.info('manually deleting reference to EventSystem instance...')
event = None
time.sleep(3)
logger.debug('[number of active threads: ' + repr(threading.enumerate()) + ']')
logger.info('main thread will exit...')
| stefanbraun-private/pyVisiToolkit | src/misc/EventSystem.py | Python | gpl-3.0 | 12,113 |
"""
create specializer projects
basically copies all files and directories from a template.
"""
from __future__ import print_function
import sys
import argparse
import collections
import shutil
import os
import ctree
from ctree.tools.generators.builder import Builder
if sys.version_info >= (3, 0, 0): # python 3
# noinspection PyPep8Naming
import configparser as ConfigParser
else:
# noinspection PyPep8Naming
import ConfigParser
__author__ = 'chick'
def main(*args):
"""run ctree utility stuff, currently only the project generator"""
if sys.argv:
args = sys.argv[1:]
parser = argparse.ArgumentParser(prog="ctree", description="ctree is a python SEJITS framework")
parser.add_argument('-sp', '--startproject', help='generate a specializer project')
parser.add_argument(
'-wu', '--wattsupmeter', help="start interactive watts up meter shell", action="store_true"
)
parser.add_argument('-p', '--port', help="/dev name to use for wattsup meter port")
parser.add_argument('-v', '--verbose', help='show more debug than you like', action="store_true")
parser.add_argument('-dc', '--disable_cache', help='disable and delete the persistent cache', action="store_true")
parser.add_argument('-ec', '--enable_cache', help='enable the persistent cache', action="store_true")
parser.add_argument('-cc', '--clear_cache', help='clear the persistent cache', action="store_true")
args = parser.parse_args(args)
if args.startproject:
specializer_name = args.startproject
print("create project specializer %s" % specializer_name)
builder = Builder("create", specializer_name, verbose=args.verbose)
builder.build(None, None)
elif args.wattsupmeter:
from ctree.metrics.watts_up_reader import WattsUpReader
port = args.port if args.port else WattsUpReader.guess_port()
meter = WattsUpReader(port_name=port)
meter.interactive_mode()
elif args.enable_cache:
ctree.CONFIG.set("jit", "CACHE", value="True")
write_success = write_to_config('jit', 'CACHE', True)
if write_success:
print("[SUCCESS] ctree caching enabled.")
elif args.disable_cache:
wipe_cache()
ctree.CONFIG.set("jit", "CACHE", value="False")
write_success = write_to_config('jit', 'CACHE', False)
args.clear_cache = True
if write_success:
print("[SUCCESS] ctree caching disabled.")
elif args.clear_cache:
wipe_cache()
else:
parser.print_usage()
def get_responsible(section, key):
"""
:param section: Section to search for
:param key: key to search for
:return: path of config file responsible for setting
"""
first = ctree.CFG_PATHS[-1]
paths = reversed(ctree.CFG_PATHS)
for path in paths:
config = ConfigParser.ConfigParser()
config.read(path)
if config.has_option(section, key):
return path
return first
def write_to_config(section, key, value):
"""
This method handles writing to the closest config file to the current
project, but does not write to the defaults.cfg file in ctree.
:return: return True if write is successful. False otherwise.
"""
if ctree.CFG_PATHS:
target = get_responsible(section, key)
config = ConfigParser.ConfigParser()
config.read(target)
print(target)
if not config.has_section(section):
config.add_section(section)
config.set(section, key, value)
with open(target, 'w') as configfile:
config.write(configfile)
configfile.close()
return True
else:
print("[FAILURE] No config file detected. Please create a '.ctree.cfg' file in your project directory.")
return False
def wipe_cache():
"""
    if path is absolute, just remove the directory
    if the path is relative, recursively look from the current directory down
    for matching paths. This can take a long time on large directory trees.
:return:
"""
cache_name = os.path.expanduser(ctree.CONFIG.get('jit', 'COMPILE_PATH'))
if os.path.isabs(cache_name):
if os.path.exists(cache_name):
result = shutil.rmtree(cache_name)
print("removed cache directory {} {}".format(
cache_name, result if result else ""))
exit(0)
splitted = cache_name.split(os.sep)
while splitted:
first = splitted[0]
if first == '.':
splitted.pop(0)
elif first == '..':
os.chdir('../')
splitted.pop(0)
else:
cache_name = os.sep.join(splitted)
break
wipe_queue = collections.deque([os.path.abspath(p) for p in os.listdir(os.getcwd())])
print("ctree looking for relative cache directories named {}, checking directories under this one".format(
cache_name))
while wipe_queue:
directory = wipe_queue.popleft()
if not os.path.isdir(directory):
continue
if os.path.split(directory)[-1] == cache_name:
shutil.rmtree(directory)
else:
print("{} ".format(directory))
for sub_item in os.listdir(directory):
wipe_queue.append(os.path.join(directory, sub_item))
print()
if __name__ == '__main__':
main(sys.argv[1:])
| mbdriscoll/ctree | ctree/tools/runner.py | Python | bsd-2-clause | 5,409 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateConnection
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-connection
# [START bigqueryconnection_v1_generated_ConnectionService_CreateConnection_async]
from google.cloud import bigquery_connection_v1
async def sample_create_connection():
# Create a client
client = bigquery_connection_v1.ConnectionServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_connection_v1.CreateConnectionRequest(
parent="parent_value",
)
# Make the request
response = await client.create_connection(request=request)
# Handle the response
print(response)
# [END bigqueryconnection_v1_generated_ConnectionService_CreateConnection_async]
| googleapis/python-bigquery-connection | samples/generated_samples/bigqueryconnection_v1_generated_connection_service_create_connection_async.py | Python | apache-2.0 | 1,563 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for create_brain_handler."""
import copy
import tempfile
from unittest import mock
from absl.testing import absltest
from api import create_brain_handler
from api import data_cache
from api import proto_conversion
from api import test_constants
from api import unique_id
# pylint: disable=g-bad-import-order
import common.generate_protos # pylint: disable=unused-import
import brain_pb2
import data_store_pb2
import falken_service_pb2
from google.protobuf import text_format
from google.protobuf import timestamp_pb2
from google.rpc import code_pb2
class CreateBrainHandlerTest(absltest.TestCase):
def setUp(self):
"""Create a file system object that uses a temporary directory."""
super().setUp()
self._temp_dir = tempfile.TemporaryDirectory()
self._ds = mock.Mock()
def tearDown(self):
"""Clean up the temporary directory for flatbuffers generation."""
super().tearDown()
self._temp_dir.cleanup()
@mock.patch.object(proto_conversion.ProtoConverter,
'convert_proto')
@mock.patch.object(unique_id, 'generate_unique_id')
@mock.patch.object(data_cache, 'get_brain')
def test_valid_request(self, get_brain, generate_unique_id, convert_proto):
generate_unique_id.return_value = 'test_brain_uuid'
request = falken_service_pb2.CreateBrainRequest(
display_name='test_brain',
brain_spec=text_format.Parse(test_constants.TEST_BRAIN_SPEC,
brain_pb2.BrainSpec()),
project_id='test_project')
context = mock.Mock()
write_brain = data_store_pb2.Brain(
project_id=request.project_id,
brain_id='test_brain_uuid',
name=request.display_name,
brain_spec=text_format.Parse(test_constants.TEST_BRAIN_SPEC,
brain_pb2.BrainSpec()))
data_store_brain = copy.copy(write_brain)
data_store_brain.created_micros = 1619726720852543
expected_brain = brain_pb2.Brain(
project_id=request.project_id,
brain_id='test_brain_uuid',
display_name=request.display_name,
brain_spec=text_format.Parse(test_constants.TEST_BRAIN_SPEC,
brain_pb2.BrainSpec()),
create_time=timestamp_pb2.Timestamp(seconds=1619726720))
get_brain.return_value = (data_store_brain)
convert_proto.return_value = expected_brain
self.assertEqual(
create_brain_handler.create_brain(request, context, self._ds),
expected_brain)
generate_unique_id.assert_called_once_with()
self._ds.write.assert_called_once_with(write_brain)
get_brain.assert_called_once_with(
self._ds, 'test_project', 'test_brain_uuid')
convert_proto.assert_called_once_with(data_store_brain)
def test_missing_brain_spec(self):
request = falken_service_pb2.CreateBrainRequest(
display_name='test_brain', brain_spec=None, project_id='test_project')
context = mock.Mock()
self.assertIsNone(
create_brain_handler.create_brain(request, context, self._ds))
context.abort.assert_called_with(
code_pb2.INVALID_ARGUMENT,
'Unable to create Brain. BrainSpec spec invalid. Error: '
'InvalidSpecError(\'BrainSpec must have an observation spec and action '
'spec.\')')
if __name__ == '__main__':
absltest.main()
| google-research/falken | service/api/create_brain_handler_test.py | Python | apache-2.0 | 3,929 |
from collections import OrderedDict
import ntpath
import StringIO
from ..base import Processor
class Append(Processor):
def __init__(self, pipeline, additional_files):
super(Append, self).__init__(pipeline)
self.additional_files = additional_files
def process(self, inputs):
"""
Concatenates the inputs into a single file
"""
output = StringIO.StringIO()
for filename, contents in inputs.items():
output.write(contents.read())
output.write("\n")
for f in self.additional_files:
output.write(open(f).read())
output.write("\n")
output.seek(0) #Rewind to the beginning
return OrderedDict([(filename, output)])
def prepare(self, inputs):
result = []
for f in self.additional_files:
result.append((f, None))
for k, v in inputs.items():
result.append((k, v))
return OrderedDict(result)
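# Rough usage sketch (hypothetical file names; the pipeline argument is whatever
# the surrounding assetpipe framework passes in): given
#   inputs = OrderedDict([('app.js', <stream>)]) and additional_files=['vendor.js'],
# process() returns OrderedDict([('app.js', <stream>)]) where the stream holds the
# contents of app.js followed by vendor.js, each terminated by a newline.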
| potatolondon/assetpipe | assetpipe/processors/append.py | Python | bsd-2-clause | 991 |
import coloreffect, inkex
class C(coloreffect.ColorEffect):
def colmod(self,r,g,b):
hsl = self.rgb_to_hsl(r/255.0, g/255.0, b/255.0)
#inkex.debug("hsl: " + str(hsl[0]) + ", " + str(hsl[1]) + ", " + str(hsl[2]))
hsl[1] = hsl[1] + 0.05
if hsl[1] > 1.0:
hsl[1] = 1.0
rgb = self.hsl_to_rgb(hsl[0], hsl[1], hsl[2])
return '%02x%02x%02x' % (rgb[0]*255, rgb[1]*255, rgb[2]*255)
c = C()
c.affect()
| NirBenTalLab/proorigami-cde-package | cde-root/usr/local/apps/inkscape/share/inkscape/extensions/color_moresaturation.py | Python | mit | 429 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from indico.modules.events.contributions.lists import ContributionListGenerator
from indico.modules.events.contributions.models.persons import AuthorType, ContributionPersonLink
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.registrations import Registration, RegistrationState
@pytest.fixture
def create_registration(dummy_event):
"""Return a callable that lets you create a contribution."""
def _create_registration(user, regform, **kwargs):
return Registration(
first_name='Guinea',
last_name='Pig',
checked_in=True,
state=RegistrationState.complete,
currency='USD',
email=user.email,
user=user,
registration_form=regform,
**kwargs
)
return _create_registration
def test_filter_contrib_entries(app, db, dummy_event, create_user, create_contribution, create_registration):
registered_user = create_user(1)
registered_speaker = create_user(2)
unregistered_user = create_user(3)
dummy_regform = RegistrationForm(event=dummy_event, title='Registration Form', currency='USD')
dummy_event.registrations.append(create_registration(registered_user, dummy_regform))
dummy_event.registrations.append(create_registration(registered_speaker, dummy_regform))
registered_speaker_contribution = create_contribution(dummy_event, 'Registered Speaker', person_links=[
ContributionPersonLink(person=EventPerson.create_from_user(registered_speaker, dummy_event),
is_speaker=True)
])
registered_speaker_author_contribution = create_contribution(
dummy_event, 'Registered Speaker Author', person_links=[
ContributionPersonLink(person=EventPerson.for_user(registered_speaker, dummy_event),
is_speaker=True, author_type=AuthorType.primary)
])
unregistered_speaker_registered_author_contribution = create_contribution(
dummy_event, 'Unregistered Speaker, Registered Author', person_links=[
ContributionPersonLink(person=EventPerson.for_user(unregistered_user, dummy_event),
is_speaker=True),
ContributionPersonLink(person=EventPerson.for_user(registered_user, dummy_event),
author_type=AuthorType.primary)
])
registered_speaker_unregistered_author_contribution = create_contribution(
dummy_event, 'Registered Speaker, Unregistered Author', person_links=[
ContributionPersonLink(person=EventPerson.for_user(registered_user, dummy_event), is_speaker=True),
ContributionPersonLink(person=EventPerson.for_user(unregistered_user, dummy_event),
author_type=AuthorType.primary)
])
# Filter contributions with registered users
with app.test_request_context():
list_gen = ContributionListGenerator(dummy_event)
list_gen.list_config['filters'] = {'items': {'people': {'registered'}}}
result = list_gen.get_list_kwargs()
assert result['contribs'] == [
registered_speaker_contribution,
registered_speaker_author_contribution,
unregistered_speaker_registered_author_contribution,
registered_speaker_unregistered_author_contribution
]
# Filter contributions with registered speakers
list_gen.list_config['filters'] = {'items': {'speakers': {'registered'}}}
with app.test_request_context():
result = list_gen.get_list_kwargs()
assert result['contribs'] == [
registered_speaker_contribution,
registered_speaker_author_contribution,
registered_speaker_unregistered_author_contribution
]
# Filter contributions with unregistered speakers and registered users
list_gen.list_config['filters'] = {'items': {'speakers': {'not_registered'}, 'people': {'registered'}}}
with app.test_request_context():
result = list_gen.get_list_kwargs()
assert result['contribs'] == [
unregistered_speaker_registered_author_contribution
]
| indico/indico | indico/modules/events/contributions/lists_test.py | Python | mit | 4,479 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-15 21:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('helpdesk', '0011_admin_related_improvements'),
]
operations = [
migrations.AddField(
model_name='queue',
name='default_owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='default_owner', to=settings.AUTH_USER_MODEL, verbose_name='Default owner'),
),
]
| mrkiwi-nz/django-helpdesk | helpdesk/migrations/0012_queue_default_owner.py | Python | bsd-3-clause | 739 |
# PENMAN MONTEITH ETP
from math import exp, log
def ETp(T, Z, u2, Rn, night, Rh, hc):
    """
    Potential Evapotranspiration Calculation with hourly/daily Penman-Monteith
    T = Temperature raster map [°C]
    Z = DEM raster map [m a.s.l.]
    u2 = Wind Speed raster map [m/s]
    Rn = Net Solar Radiation raster map [MJ/m2/h]
    Rh = Relative Humidity raster map [%]
    hc = Crop height raster map [m]
    OUTPUT = output Reference Potential Evapotranspiration layer [mm/h]
    ETp(T, Z, u2, Rn, night, Rh, hc)
    """
    cp = 1.013       # [kJ/kg*degC] specific heat of moist air
    epsilon = 0.622  # [-] ratio of molecular weight of water to dry air
    Po = 101.3       # [kPa] atmospheric pressure at sea level
    Tko = 293.16     # [K] reference temperature at sea level
    eta = 0.0065     # [K/m] constant lapse rate
    Ao = 0           # [m] altitude at sea level
    g = 9.81         # [m/s^2] gravitational acceleration
    R = 287          # [J/kg*K] specific gas constant
    Zw = 2           # [m] height of wind measurements
    Zh = 2           # [m] height of humidity measurements
    k = 0.41         # [-] von Karman constant
    # calculus: mean saturation vapour pressure [kPa]
    ea = 0.61078 * exp((17.27 * T) / (T + 237.3))
    # calculus: slope of vapour pressure curve [kPa/degC]
    delta = (4098 * ea) / pow((237.3 + T), 2)
    # calculus: latent heat of vapourisation [MJ/kg]
    # ("lambda" is a reserved word in Python, hence "lambda_")
    lambda_ = 2.501 - (0.002361 * T)
    # calculus: atmospheric pressure [kPa]
    P = Po * pow(((Tko - eta * (Z - Ao)) / Tko), (g / (eta * R)))
    # calculus: psychrometric constant [kPa/degC]
    gamma = ((cp * P) / (epsilon * lambda_)) * 0.001
    # calculus: aerodynamic resistance [s/m]
    if hc < 2:
        d = (2.0 / 3) * hc
        Zom = 0.123 * hc
        Zoh = 0.1 * Zom
        ra = (log((Zw - d) / Zom) * log((Zh - d) / Zoh)) / (k * k * u2)
    else:
        u10 = u2 * (log((67.8 * 10) - 5.42)) / 4.87
        ra = 94 / u10
    # calculus: surface resistance [s/m]
    rs = 100 / (0.5 * 24 * hc)
    # calculus: modified psychrometric constant [kPa/degC]
    gstar = gamma * (1 + (rs / ra))
    # net radiation Rn [MJ/m2*h] is derived from r.sun
    # calculus: soil heat flux [MJ/m2*h]
    if not night:
        G = 0.1 * Rn
    else:
        G = 0.5 * Rn
    # calculus: radiation term [mm/h]
    # ETrad = (delta/(delta+gstar))*((Rn-G)/(lambda_*1000000))
    ETrad = (delta / (delta + gstar)) * ((Rn - G) / lambda_)  # consistent by dimensional analysis
    # calculus: actual vapour pressure [kPa]
    ed = Rh * ea / 100
    # calculus: virtual temperature [K]
    Tkv = (T + 273.15) / (1 - (0.378 * ed / P))
    # calculus: atmospheric density [kg/m^3]
    rho = P / (Tkv * R / 100)
    # calculus: aerodynamic term [mm/h]
    # ETaero = (0.001/lambda_)*(1/(delta+gstar))*(rho*cp/ra)*(ea-ed)
    ETaero = (3.6 / lambda_) * (1 / (delta + gstar)) * (rho * cp / ra) * (ea - ed)  # consistent by dimensional analysis
    # calculus: potential evapotranspiration [mm/h]
    ETp = ETrad + ETaero
    return ETp
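# Illustrative call (placeholder values, not measured data): hourly ETp for a
# short crop (hc = 0.12 m) at 25 degC, 100 m a.s.l., 2 m/s wind,
# Rn = 1.5 MJ/m2/h, 60% relative humidity, during daytime (night=False):
#   etp_mm_per_hour = ETp(25.0, 100.0, 2.0, 1.5, False, 60.0, 0.12)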
def openwaterETp(T, Z, u2, Rn, day, Rh, hc):
    """
    Open Water Potential Evapotranspiration Calculation with hourly Penman-Monteith
    T = Temperature raster map [°C]
    Z = DEM raster map [m a.s.l.]
    u2 = Wind Speed raster map [m/s]
    Rn = Net Solar Radiation raster map [MJ/m2/h]
    Rh = Relative Humidity raster map [%]
    hc = Crop height raster map [m]
    OUTPUT = output Reference Potential Evapotranspiration layer [mm/h]
    openwaterETp(T, Z, u2, Rn, day, Rh, hc)
    """
    cp = 1.013       # [kJ/kg*degC] specific heat of moist air
    epsilon = 0.622  # [-] ratio of molecular weight of water to dry air
    Po = 101.3       # [kPa] atmospheric pressure at sea level
    Tko = 293.16     # [K] reference temperature at sea level
    eta = 0.0065     # [K/m] constant lapse rate
    Ao = 0           # [m] altitude at sea level
    g = 9.81         # [m/s^2] gravitational acceleration
    R = 287          # [J/kg*K] specific gas constant
    Zw = 2           # [m] height of wind measurements
    Zh = 2           # [m] height of humidity measurements
    k = 0.41         # [-] von Karman constant
    # calculus: mean saturation vapour pressure [kPa]
    ea = 0.61078 * exp((17.27 * T) / (T + 237.3))
    # calculus: slope of vapour pressure curve [kPa/degC]
    delta = (4098 * ea) / pow((237.3 + T), 2)
    # calculus: latent heat of vapourisation [MJ/kg]
    lambda_ = 2.501 - (0.002361 * T)
    # calculus: atmospheric pressure [kPa]
    P = Po * pow(((Tko - eta * (Z - Ao)) / Tko), (g / (eta * R)))
    # calculus: psychrometric constant [kPa/degC]
    gamma = ((cp * P) / (epsilon * lambda_)) * 0.001
    # net radiation Rn [MJ/m2*h] is derived from r.sun
    # calculus: actual vapour pressure [kPa]
    ed = Rh * ea / 100
    # calculus: aerodynamic term, originally expressed in [mm/d]:
    # ETaero = 0.35*(0.5+(0.621375*u2/100))*7.500638*(ea-ed)
    # converted from mm/d to mm/h:
    ETaero = (0.35 / 24) * (0.5 + (0.621375 * u2 / 100)) * 7.500638 * (ea - ed)
    # calculus: potential evapotranspiration [mm/h]
    ETp = (((Rn * delta) / lambda_) + (gamma * ETaero)) / (delta + gamma)
    return ETp
# END OF PENMAN MONTEITH ETP
| YannChemin/wxGIPE | RS_functions/evapo_pm.py | Python | unlicense | 5,290 |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsinceblock RPC."""
from test_framework.address import key_to_p2wpkh
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.key import ECKey
from test_framework.test_framework import FujicoinTestFramework
from test_framework.messages import BIP125_SEQUENCE_NUMBER
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif
from decimal import Decimal
class ListSinceBlockTest(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have
# only one connection. (See fPreferredDownload in net_processing)
self.connect_nodes(1, 2)
self.nodes[2].generate(COINBASE_MATURITY + 1)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
self.double_spends_filtered()
self.test_targetconfirmations()
def test_no_blockhash(self):
self.log.info("Test no blockhash")
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"blockheight": blockheight,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
def test_invalid_blockhash(self):
self.log.info("Test invalid blockhash")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock,
"invalid-hex")
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock,
"Z000000000000000000000000000000000000000000000000000000000000000")
def test_targetconfirmations(self):
'''
This tests when the value of target_confirmations exceeds the number of
blocks in the main chain. In this case, the genesis block hash should be
given for the `lastblock` property. If target_confirmations is < 1, then
a -8 invalid parameter error is thrown.
'''
self.log.info("Test target_confirmations")
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
self.sync_all()
assert_equal(
self.nodes[0].getblockhash(0),
self.nodes[0].listsinceblock(blockhash, blockheight + 1)['lastblock'])
assert_equal(
self.nodes[0].getblockhash(0),
self.nodes[0].listsinceblock(blockhash, blockheight + 1000)['lastblock'])
assert_raises_rpc_error(-8, "Invalid parameter",
self.nodes[0].listsinceblock, blockhash, 0)
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
ab0
/ \
aa1 [tx0] bb1
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
self.log.info("Test reorg")
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
nodes1_last_blockhash = self.nodes[1].generate(6)[-1]
nodes2_first_blockhash = self.nodes[2].generate(7)[0]
self.log.debug("nodes[1] last blockhash = {}".format(nodes1_last_blockhash))
self.log.debug("nodes[2] first blockhash = {}".format(nodes2_first_blockhash))
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
self.join_network()
# listsinceblock(nodes1_last_blockhash) should now include tx as seen from nodes[0]
# and return the block height which listsinceblock now exposes since a5e7795.
transactions = self.nodes[0].listsinceblock(nodes1_last_blockhash)['transactions']
found = next(tx for tx in transactions if tx['txid'] == senttx)
assert_equal(found['blockheight'], self.nodes[0].getblockheader(nodes2_first_blockhash)['height'])
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
ab0
/ \
aa1 [tx1] bb1 [tx2]
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Problematic case:
1. User 1 receives FJC in tx1 from utxo1 in block aa1.
2. User 2 receives FJC in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
self.log.info("Test double spend")
self.sync_all()
# share utxo between nodes[1] and nodes[2]
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
address = key_to_p2wpkh(eckey.get_pubkey().get_bytes())
self.nodes[2].sendtoaddress(address, 10)
self.nodes[2].generate(6)
self.sync_all()
self.nodes[2].importprivkey(privkey)
utxos = self.nodes[2].listunspent()
utxo = [u for u in utxos if u["address"] == address][0]
self.nodes[1].importprivkey(privkey)
# Split network into two
self.split_network()
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransactionwithwallet(
self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipient_dict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
ab0
/ \
aa1 [tx1] bb1
| |
aa2 bb2
| |
aa3 bb3 [tx1]
|
bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmation count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
self.log.info("Test double send")
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
tx1 = self.nodes[0].gettransaction(txid1)
assert_equal(tx1['blockheight'], self.nodes[0].getblockheader(tx1['blockhash'])['height'])
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
def double_spends_filtered(self):
'''
`listsinceblock` was returning conflicted transactions even if they
occurred before the specified cutoff blockhash
'''
self.log.info("Test spends filtered")
spending_node = self.nodes[2]
dest_address = spending_node.getnewaddress()
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in spending_node.listunspent()))
rawtx = spending_node.createrawtransaction(
[tx_input], {dest_address: tx_input["amount"] - Decimal("0.00051000"),
spending_node.getrawchangeaddress(): Decimal("0.00050000")})
signedtx = spending_node.signrawtransactionwithwallet(rawtx)
orig_tx_id = spending_node.sendrawtransaction(signedtx["hex"])
original_tx = spending_node.gettransaction(orig_tx_id)
double_tx = spending_node.bumpfee(orig_tx_id)
# check that both transactions exist
block_hash = spending_node.listsinceblock(
spending_node.getblockhash(spending_node.getblockcount()))
original_found = False
double_found = False
for tx in block_hash['transactions']:
if tx['txid'] == original_tx['txid']:
original_found = True
if tx['txid'] == double_tx['txid']:
double_found = True
assert_equal(original_found, True)
assert_equal(double_found, True)
lastblockhash = spending_node.generate(1)[0]
# check that neither transaction exists
block_hash = spending_node.listsinceblock(lastblockhash)
original_found = False
double_found = False
for tx in block_hash['transactions']:
if tx['txid'] == original_tx['txid']:
original_found = True
if tx['txid'] == double_tx['txid']:
double_found = True
assert_equal(original_found, False)
assert_equal(double_found, False)
if __name__ == '__main__':
ListSinceBlockTest().main()
| fujicoin/fujicoin | test/functional/wallet_listsinceblock.py | Python | mit | 14,650 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
By opposition, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts', copy=True),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
move_obj = self.pool.get('stock.move')
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
qty2 = production.product_uos and production.product_uos_qty or False
product_uos_factor = 0.0
if qty2 and production.bom_id.product_uos.id:
product_uos_factor = product_uom_obj._compute_qty(cr, uid, production.product_uos.id, production.product_uos_qty, production.bom_id.product_uos.id)
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
if production.product_uos_qty:
qty2 *= product_uos_factor / (production.bom_id.product_uos_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'product_uos_qty': qty2,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'production_id': production.id
}
move_id = move_obj.create(cr, uid, data, context=context)
move_obj.action_confirm(cr, uid, [move_id], context=context)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
else:
for sub_product_line in prod.bom_id.sub_products:
if sub_product_line.product_id.id == m.product_id.id:
factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
| addition-it-solutions/project-all | addons/mrp_byproduct/mrp_byproduct.py | Python | agpl-3.0 | 8,865 |
import csv
import cStringIO
from io import BytesIO
from datetime import datetime, timedelta
import logging
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer, \
Table, TableStyle
from django.conf import settings
from django.core.mail import EmailMessage
from django.db.models import Avg, Min, Max
from core import utils as core_utils
from .models import Glucose
logger = logging.getLogger(__name__)
DATE_FORMAT = '%m/%d/%Y'
FILENAME_DATE_FORMAT = '%b%d%Y'
TIME_FORMAT = '%I:%M %p'
class UserStats(object):
def __init__(self, user):
self.user = user
self.data = Glucose.objects.by_user(self.user)
self.glucose_unit_name = user.settings.glucose_unit.name
def glucose_by_unit_setting(self, value):
return core_utils.glucose_by_unit_setting(self.user, value)
@property
def user_settings(self):
user_settings = self.user.settings
low = user_settings.glucose_low
high = user_settings.glucose_high
target_min = user_settings.glucose_target_min
target_max = user_settings.glucose_target_max
return {
'low': low,
'high': high,
'target_min': target_min,
'target_max': target_max
}
@property
def user_stats(self):
stats = {
'latest_entry': self.latest_entry,
'num_records': self.data.count(),
'hba1c': self.hba1c,
'breakdown': self.get_breakdown(),
}
return stats
@property
def latest_entry(self):
latest_entry = self.data.order_by('-record_date', '-record_time')[0] \
if self.data else None
latest_entry_value = 'None'
latest_entry_time = latest_entry_notes = ''
css_class = self.get_css_class(None)
if latest_entry:
latest_entry_value = '%s %s' % \
(self.glucose_by_unit_setting(latest_entry.value),
self.glucose_unit_name)
latest_entry_time = latest_entry.record_time.strftime(TIME_FORMAT)
latest_entry_notes = latest_entry.notes
css_class = self.get_css_class(latest_entry.value)
return {
'value': latest_entry_value,
'record_time': latest_entry_time,
'notes': latest_entry_notes,
'css_class': css_class,
}
@property
def hba1c(self):
"""
The HbA1c is calculated using the average blood glucose from the last
90 days.
Less than 7 = Excellent
Between 7 and 8 = Average
Greater than 8 = Bad
"""
now = datetime.now(tz=self.user.settings.time_zone).date()
subset = self.by_date(now - timedelta(days=90), now)
average = core_utils.round_value(
subset.aggregate(Avg('value'))['value__avg'])
hba1c = core_utils.round_value(core_utils.calc_hba1c(average))
css_class = 'text-default'
if hba1c:
if hba1c < 7:
css_class = 'text-success'
elif hba1c > 8:
css_class = 'text-danger'
else:
css_class = 'text-primary'
value_html = '%s%%<br><small>(%s %s)</small>' % \
(hba1c, self.glucose_by_unit_setting(average),
self.glucose_unit_name) \
if hba1c else 'None<br><small>(None)</small>'
return {
'value': value_html,
'css_class': css_class
}
def get_breakdown(self, days=14):
now = datetime.now(tz=self.user.settings.time_zone).date()
subset = self.by_date(now - timedelta(days=days), now)
total = subset.count()
lowest = subset.aggregate(Min('value'))['value__min']
highest = subset.aggregate(Max('value'))['value__max']
average = core_utils.round_value(
subset.aggregate(Avg('value'))['value__avg'])
highs = subset.filter(value__gt=self.user_settings['high']).count()
lows = subset.filter(value__lt=self.user_settings['low']).count()
within_target = subset.filter(
value__gte=self.user_settings['target_min'],
value__lte=self.user_settings['target_max']
).count()
other = total - (highs + lows + within_target)
return {
'total': total,
'lowest': {
'value': '%s %s' % (self.glucose_by_unit_setting(lowest),
self.glucose_unit_name) \
if lowest else 'None',
'css_class': self.get_css_class(lowest),
},
'highest': {
'value': '%s %s' % (self.glucose_by_unit_setting(highest),
self.glucose_unit_name) \
if highest else 'None',
'css_class': self.get_css_class(highest),
},
'average': {
'value': '%s %s' % (self.glucose_by_unit_setting(average),
self.glucose_unit_name) \
if average else 'None',
'css_class': self.get_css_class(average)
},
'highs': '%s (%s%%)' % (highs, core_utils.percent(highs, total)),
'lows': '%s (%s%%)' % (lows, core_utils.percent(lows, total)),
'within_target': '%s (%s%%)' % (
within_target, core_utils.percent(within_target, total)),
'other': '%s (%s%%)' % (other, core_utils.percent(other, total)),
}
def by_date(self, start, end):
return self.data.filter(record_date__gte=start, record_date__lte=end)
def get_css_class(self, value):
css_class = 'text-default'
low = self.user_settings['low']
high = self.user_settings['high']
target_min = self.user_settings['target_min']
target_max = self.user_settings['target_max']
# Only change the css_class if a value exists.
if value:
if value < low or value > high:
css_class = 'text-danger'
elif value >= target_min and value <= target_max:
css_class = 'text-success'
else:
css_class = 'text-primary'
return css_class
class ChartData(object):
@classmethod
def get_count_by_category(cls, user, days):
now = datetime.now(tz=user.settings.time_zone).date()
category_count = Glucose.objects.by_category(
(now - timedelta(days=days)), now, user)
data = [[c['category__name'], c['count']] for c in category_count]
return data
@classmethod
def get_level_breakdown(cls, user, days):
now = datetime.now(tz=user.settings.time_zone).date()
glucose_level = Glucose.objects.level_breakdown(
(now - timedelta(days=days)), now, user)
chart_colors = {
'Low': 'orange',
'High': 'red',
'Within Target': 'green',
'Other': 'blue'
}
data = []
keyorder = ['Low', 'High', 'Within Target', 'Other']
for k, v in sorted(glucose_level.items(),
key=lambda i: keyorder.index(i[0])):
data.append({'name': k, 'y': v, 'color': chart_colors[k]})
return data
@classmethod
def get_avg_by_category(cls, user, days):
now = datetime.now(tz=user.settings.time_zone).date()
glucose_averages = Glucose.objects.avg_by_category(
(now - timedelta(days=days)), now, user)
data = {'categories': [], 'values': []}
for avg in glucose_averages:
rounded_value = core_utils.round_value(avg['avg_value'])
data['values'].append(
core_utils.glucose_by_unit_setting(user, rounded_value))
data['categories'].append(avg['category__name'])
return data
@classmethod
def get_avg_by_day(cls, user, days):
now = datetime.now(tz=user.settings.time_zone).date()
glucose_averages = Glucose.objects.avg_by_day(
(now - timedelta(days=days)), now, user)
data = {'dates': [], 'values': []}
for avg in glucose_averages:
rounded_value = core_utils.round_value(avg['avg_value'])
data['values'].append(
core_utils.glucose_by_unit_setting(user, rounded_value))
data['dates'].append(avg['record_date'].strftime('%m/%d'))
return data
class GlucoseBaseReport(object):
def __init__(self, start_date, end_date, user, include_notes=True,
include_tags=True):
self.start_date = start_date
self.end_date = end_date
self.user = user
self.include_notes = include_notes
self.include_tags = include_tags
self.email_footer = '----------\nSent from https://%s' % \
settings.SITE_DOMAIN
def glucose_by_unit_setting(self, value):
return core_utils.glucose_by_unit_setting(self.user, value)
class GlucoseCsvReport(GlucoseBaseReport):
def generate(self):
data = Glucose.objects.by_date(
self.start_date, self.end_date, self.user)
data = data.order_by('-record_date', '-record_time')
csv_data = cStringIO.StringIO()
try:
headers = ['Value', 'Category', 'Date', 'Time']
if self.include_notes:
headers.append('Notes')
if self.include_tags:
headers.append('Tags')
writer = csv.writer(csv_data)
writer.writerow(headers)
for item in data:
row = [
self.glucose_by_unit_setting(item.value),
item.category,
item.record_date.strftime(DATE_FORMAT),
item.record_time.strftime(TIME_FORMAT),
]
if self.include_notes:
row.append(item.notes)
if self.include_tags:
tag_list = ', '.join([t.name for t in item.tags.all()])
row.append(tag_list)
writer.writerow(row)
logging.info('CSV report generated for %s', self.user)
return csv_data.getvalue()
finally:
csv_data.close()
def email(self, recipient, subject='', message=''):
message = '%s\n\n\n%s' % (message, self.email_footer)
email = EmailMessage(
from_email=settings.CONTACTS['info_email'],
subject=subject,
body=message,
to=[recipient],
headers={'Reply-To': self.user.email},
)
attachment_filename = 'GlucoseData_%sto%s.csv' % \
(self.start_date.strftime(FILENAME_DATE_FORMAT),
self.end_date.strftime(FILENAME_DATE_FORMAT))
email.attach(attachment_filename, self.generate(), 'text/csv')
email.send()
class GlucosePdfReport(GlucoseBaseReport):
def __init__(self, *args, **kwargs):
super(GlucosePdfReport, self).__init__(*args, **kwargs)
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER))
self.styles.add(ParagraphStyle(name='Left', alignment=TA_LEFT))
self.styles.add(ParagraphStyle(name='Right', alignment=TA_RIGHT))
self.styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
# Width of a letter size paper
self.max_width = 8.5 * inch
self.left_margin = 0.7 * inch
self.right_margin = 0.75 * inch
self.top_margin = 0.7 * inch
self.bottom_margin = 0.7 * inch
self.fields = [
('value', 'Value'),
('category', 'Category'),
('date', 'Date'),
('time', 'Time'),
]
if self.include_notes:
self.fields.append(('notes', 'Notes'))
if self.include_tags:
self.fields.append(('tags', 'Tags'))
def generate(self):
qs = Glucose.objects.by_date(
self.start_date, self.end_date, self.user)
qs = qs.order_by('-record_date', '-record_time')
data = []
for i in qs:
value = i.value
value_by_unit_setting = self.glucose_by_unit_setting(value)
# Bold the text if the value is high or low based on the user's
# settings
low = self.user.settings.glucose_low
high = self.user.settings.glucose_high
if value < low or value > high:
value_by_unit_setting = '<b>%s</b>' % value_by_unit_setting
data_dict = {
'value': self.to_paragraph(value_by_unit_setting),
'category': i.category,
'date': i.record_date.strftime(DATE_FORMAT),
'time': i.record_time.strftime(TIME_FORMAT),
}
if self.include_notes:
data_dict['notes'] = self.to_paragraph(i.notes)
if self.include_tags:
tag_list = ', '.join([t.name for t in i.tags.all()])
data_dict['tags'] = self.to_paragraph(tag_list)
data.append(data_dict)
buffer = BytesIO()
doc = SimpleDocTemplate(buffer,
pagesize=letter,
leftMargin=self.left_margin,
rightMargin=self.right_margin,
topMargin=self.top_margin,
bottomMargin=self.bottom_margin)
styles = getSampleStyleSheet()
styleH = styles['Heading1']
story = []
story.append(Paragraph('Glucose Data', styleH))
story.append(Spacer(1, 0.25 * inch))
converted_data = self.__convert_data(data)
table = Table(converted_data, hAlign='LEFT')
table.setStyle(TableStyle([
('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
('BACKGROUND', (0, 0), (-1, 0), colors.lightgrey),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('ALIGN', (1, 0), (0, -1), 'LEFT'),
('INNERGRID', (0, 0), (-1, -1), 0.50, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
]))
story.append(table)
doc.build(story)
pdf = buffer.getvalue()
buffer.close()
logging.info('PDF report generated for %s', self.user)
return pdf
def email(self, recipient, subject='', message=''):
message = '%s\n\n\n%s' % (message, self.email_footer)
email = EmailMessage(
from_email=settings.CONTACTS['info_email'],
subject=subject,
body=message,
to=[recipient],
headers={'Reply-To': self.user.email},
)
attachment_filename = 'GlucoseData_%sto%s.pdf' % \
(self.start_date.strftime(FILENAME_DATE_FORMAT),
self.end_date.strftime(FILENAME_DATE_FORMAT))
email.attach(attachment_filename, self.generate(), 'application/pdf')
email.send()
    def get_width_from_percent(self, values=(), max_width=None, indent=0):
"""
Return the width values from the given percent values.
"""
if not max_width:
max_width = self.max_width
width_diff = (max_width) - (indent + self.left_margin +
self.right_margin)
widths = [((width_diff * v) / 100) for v in values]
return widths
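    # Worked example (illustrative, not in the original source): with the
    # default max_width of 8.5 * inch, no indent, and the margins set in
    # __init__, the usable width is 8.5*inch - (0.7 + 0.75)*inch = 7.05*inch,
    # so get_width_from_percent([50, 50]) yields two widths of 3.525*inch.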
def to_paragraph(self, data):
"""
Convert the data to a Paragraph object.
Paragraph objects can be easily formatted using HTML-like tags
and automatically wrap inside a table.
"""
return Paragraph(unicode(data), self.styles['Left'])
def __convert_data(self, data):
"""
Convert the list of dictionaries to a list of list to create
the PDF table.
"""
# Create 2 separate lists in the same order: one for the
# list of keys and the other for the names to display in the
# table header.
keys, names = zip(*[[k, n] for k, n in self.fields])
new_data = [names]
for d in data:
new_data.append([d[k] for k in keys])
return new_data
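# Illustrative usage sketch (added for clarity; not part of the original
# module). Assuming a Django `user` with the expected settings and two
# hypothetical report dates, a CSV report could be generated and mailed as:
#
#     report = GlucoseCsvReport(start_date, end_date, user)
#     csv_text = report.generate()
#     report.email('recipient@example.com', subject='Glucose data')
#
# GlucosePdfReport exposes the same generate()/email() interface but returns
# PDF bytes and attaches a .pdf file instead.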
| jcalazan/glucose-tracker | glucosetracker/glucoses/reports.py | Python | mit | 16,741 |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.string import format_repr, return_ascii
class LegacyRegistrationMapping(db.Model):
"""Legacy registration id/token mapping
Legacy registrations had tokens which are not compatible with the
new UUID-based ones.
"""
__tablename__ = 'legacy_registration_map'
__table_args__ = {'schema': 'event_registration'}
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
autoincrement=False
)
legacy_registrant_id = db.Column(
db.Integer,
primary_key=True,
autoincrement=False
)
legacy_registrant_key = db.Column(
db.String,
nullable=False
)
registration_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.registrations.id'),
index=True,
nullable=False
)
registration = db.relationship(
'Registration',
lazy=False,
backref=db.backref(
'legacy_mapping',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
@return_ascii
def __repr__(self):
return format_repr(self, 'event_id', 'legacy_registrant_id', 'legacy_registrant_key', 'registration_id')
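# Illustrative sketch (added for clarity; not part of the original model).
# Assuming the usual Flask-SQLAlchemy query interface exposed by `db.Model`,
# a legacy registrant id/key pair could be resolved to its registration as:
#
#     mapping = (LegacyRegistrationMapping.query
#                .filter_by(event_id=event_id,
#                           legacy_registrant_id=registrant_id,
#                           legacy_registrant_key=registrant_key)
#                .first())
#     registration = mapping.registration if mapping else None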
| mic4ael/indico | indico/modules/events/registration/models/legacy_mapping.py | Python | mit | 1,557 |
#! /usr/bin/python2
# -*- coding: utf-8; -*-
#
# (c) 2013 booya (http://booya.at)
#
# This file is part of the OpenGlider project.
#
# OpenGlider is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OpenGlider is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenGlider. If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
import os
from PyQt4 import QtGui
from openglider.gui import ApplicationWindow
try:
import openglider
except ImportError:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
import openglider
from openglider.input import ControlPoint, MplWidget
from openglider.input.ballooning import input_ballooning
from openglider.input.shape import shapeinput, MplSymmetricBezier
qApp = QtGui.QApplication(sys.argv)
testfolder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
importpath = testfolder + '/demokite.ods'
class GliderTestClass(unittest.TestCase):
def setUp(self, complete=True):
self.glider = openglider.glider.Glider.import_geometry(path=importpath)
def test_spline_input(self):
points = [[.1, .2], [.2, .2], [.3, .6], [.6, .0]]
controlpoints = [ControlPoint(p, locked=[0, 0]) for p in points]
# print(mpl1)
line1 = MplSymmetricBezier(controlpoints) #, mplwidget=mpl1)
mplwidget = MplWidget(dpi=100)
line1.insert_mpl(mplwidget)
aw = ApplicationWindow([mplwidget])
mplwidget.redraw()
aw.show()
qApp.exec_()
def test_shape_input(self):
window = shapeinput(self.glider)
window.show()
qApp.exec_()
def test_ballooning_input(self):
ballooning = self.glider.ribs[0].ballooning
window = input_ballooning(ballooning)
window.show()
qApp.exec_()
if __name__ == '__main__':
unittest.main() | hiaselhans/OpenGlider | tests/input/visual_test_input.py | Python | gpl-3.0 | 2,297 |
"""
Tracks devices by sending a ICMP ping.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.ping/
device_tracker:
- platform: ping
count: 2
hosts:
host_one: pc.local
host_two: 192.168.2.25
"""
import logging
import subprocess
import sys
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, DEFAULT_SCAN_INTERVAL, SOURCE_TYPE_ROUTER)
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant import util
from homeassistant import const
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = []
_LOGGER = logging.getLogger(__name__)
CONF_PING_COUNT = 'count'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(const.CONF_HOSTS): {cv.string: cv.string},
vol.Optional(CONF_PING_COUNT, default=1): cv.positive_int,
})
class Host:
"""Host object with ping detection."""
def __init__(self, ip_address, dev_id, hass, config):
"""Initialize the Host pinger."""
self.hass = hass
self.ip_address = ip_address
self.dev_id = dev_id
self._count = config[CONF_PING_COUNT]
if sys.platform == "win32":
self._ping_cmd = ['ping', '-n 1', '-w 1000', self.ip_address]
else:
self._ping_cmd = ['ping', '-n', '-q', '-c1', '-W1',
self.ip_address]
def ping(self):
"""Send ICMP ping and return True if success."""
pinger = subprocess.Popen(self._ping_cmd, stdout=subprocess.PIPE)
try:
pinger.communicate()
return pinger.returncode == 0
except subprocess.CalledProcessError:
return False
def update(self, see):
"""Update device state by sending one or more ping messages."""
failed = 0
        while failed < self._count:  # check more times if the host is unreachable
if self.ping():
see(dev_id=self.dev_id, source_type=SOURCE_TYPE_ROUTER)
return True
failed += 1
_LOGGER.debug("ping KO on ip=%s failed=%d", self.ip_address, failed)
def setup_scanner(hass, config, see, discovery_info=None):
"""Setup the Host objects and return the update function."""
hosts = [Host(ip, dev_id, hass, config) for (dev_id, ip) in
config[const.CONF_HOSTS].items()]
interval = timedelta(seconds=len(hosts) * config[CONF_PING_COUNT]) + \
DEFAULT_SCAN_INTERVAL
_LOGGER.info("Started ping tracker with interval=%s on hosts: %s",
interval, ",".join([host.ip_address for host in hosts]))
def update(now):
"""Update all the hosts on every interval time."""
for host in hosts:
host.update(see)
track_point_in_utc_time(hass, update, now + interval)
return True
return update(util.dt.utcnow())
| kyvinh/home-assistant | homeassistant/components/device_tracker/ping.py | Python | apache-2.0 | 2,962 |
# Copyright (c) 2013 Red Hat, Inc.
#
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# module for updating product branding info
# on subscription
import logging
log = logging.getLogger(__name__)
class BrandsInstaller(object):
def __init__(self, ent_certs=None):
self.ent_certs = ent_certs
# find brand installers
self.brand_installers = self._get_brand_installers()
def _get_brand_installers(self):
"""returns a list or iterable of BrandInstaller(s)"""
return []
def install(self):
for brand_installer in self.brand_installers:
brand_installer.install()
class BrandInstaller(object):
"""Install branding info for a set of entititlement certs."""
def __init__(self, ent_certs=None):
self.ent_certs = ent_certs
log.debug("BrandInstaller ent_certs: %s" % [x.serial for x in ent_certs or []])
def install(self):
"""Create a Brand object if needed, and save it."""
brand_picker = self._get_brand_picker()
new_brand = brand_picker.get_brand()
# no branded name info to install
if not new_brand:
return
current_brand = self._get_current_brand()
log.debug("Current branded name info, if any: %s" % current_brand.name)
log.debug("Fresh ent cert has branded product info: %s" % new_brand.name)
if current_brand.is_outdated_by(new_brand):
self._install(new_brand)
else:
log.debug("Product branding info does not need to be updated")
def _get_brand_picker(self):
raise NotImplementedError
def _get_current_brand(self):
raise NotImplementedError
def _install(self, brand):
raise NotImplementedError
class BrandPicker(object):
"""Returns the branded name to install.
Check installed product certs, and the list of entitlement certs
passed in, and find the correct branded name, if any."""
def __init__(self, ent_certs=None):
self.ent_certs = ent_certs
def get_brand(self):
raise NotImplementedError
class Brand(object):
"""Base class for Brand objects."""
name = None
    # could potentially be a __lt__ etc., though there is some
    # oddness in that the compares are not symmetric for the empty
    # cases (i.e., we update nothing with something, etc.)
def is_outdated_by(self, new_brand):
"""If a Brand should be replaced with new_brand."""
if not self.name:
return True
# prevent empty branded_name
if not new_brand.name:
return False
# Don't install new branded_name if it's the same to prevent
# churn
return new_brand.name != self.name
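    # Illustrative truth table for is_outdated_by (added for clarity):
    # current name empty  -> True for any new brand;
    # new name empty      -> False;
    # otherwise           -> True only when the names differ, e.g.
    # None -> "Awesome OS" installs, "Awesome OS" -> "Awesome OS" does not.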
class ProductBrand(Brand):
"""A brand for a branded product"""
def __init__(self, name):
self.brand_file = self._get_brand_file()
self.name = name
def _get_brand_file(self):
return BrandFile()
def save(self):
brand = self.format_brand(self.name)
self.brand_file.write(brand)
@classmethod
def from_product(cls, product):
return cls(product.brand_name)
@staticmethod
def format_brand(brand):
if not brand.endswith('\n'):
brand += '\n'
return brand
class CurrentBrand(Brand):
"""The currently installed brand"""
def __init__(self):
self.brand_file = self._get_brand_file()
self.load()
def _get_brand_file(self):
return BrandFile()
def load(self):
try:
brand_info = self.brand_file.read()
except IOError:
log.error("No brand info file found (%s) " % self.brand_file)
return
self.name = self.unformat_brand(brand_info)
@staticmethod
def unformat_brand(brand):
if brand:
return brand.strip()
return None
class BrandFile(object):
"""The file used for storing product branding info.
Default is "/var/lib/rhsm/branded_name
"""
path = "/var/lib/rhsm/branded_name"
def write(self, brand_info):
with open(self.path, 'w') as brand_file:
brand_file.write(brand_info)
def read(self):
with open(self.path, 'r') as brand_file:
return brand_file.read()
def __str__(self):
return "<BrandFile path=%s>" % self.path
| candlepin/subscription-manager | src/subscription_manager/entbranding.py | Python | gpl-2.0 | 4,891 |
# encoding: utf-8
from __future__ import unicode_literals
import re
from hashlib import sha1
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
unified_strdate,
)
class ProSiebenSat1IE(InfoExtractor):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
_VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at)|ran\.de|fem\.com)/(?P<id>.+)'
_TESTS = [
{
# Tests changes introduced in https://github.com/rg3/youtube-dl/pull/6242
# in response to fixing https://github.com/rg3/youtube-dl/issues/6215:
# - malformed f4m manifest support
# - proper handling of URLs starting with `https?://` in 2.0 manifests
# - recursive child f4m manifests extraction
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
'ext': 'mp4',
'title': 'Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
'info_dict': {
'id': '2570327',
'ext': 'mp4',
'title': 'Lady-Umstyling für Audrina',
'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
'upload_date': '20131014',
'duration': 606.76,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Seems to be broken',
},
{
'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
'info_dict': {
'id': '2429369',
'ext': 'mp4',
'title': 'Countdown für die Autowerkstatt',
'description': 'md5:809fc051a457b5d8666013bc40698817',
'upload_date': '20140223',
'duration': 2595.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
'info_dict': {
'id': '2904997',
'ext': 'mp4',
'title': 'Sexy laufen in Ugg Boots',
'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
'upload_date': '20140122',
'duration': 245.32,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
'info_dict': {
'id': '2906572',
'ext': 'mp4',
'title': 'Im Interview: Kai Wiesinger',
'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
'upload_date': '20140203',
'duration': 522.56,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
'info_dict': {
'id': '2992323',
'ext': 'mp4',
'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
'description': 'md5:2669cde3febe9bce13904f701e774eb6',
'upload_date': '20141014',
'duration': 2410.44,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
'info_dict': {
'id': '3004256',
'ext': 'mp4',
'title': 'Schalke: Tönnies möchte Raul zurück',
'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
'upload_date': '20140226',
'duration': 228.96,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
'info_dict': {
'id': '2572814',
'ext': 'mp4',
'title': 'Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
'upload_date': '20131017',
'duration': 469.88,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
'info_dict': {
'id': '2156342',
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
'info_dict': {
'id': '439664',
'title': 'Episode 8 - Ganze Folge - Playlist',
'description': 'md5:63b8963e71f481782aeea877658dec84',
},
'playlist_count': 2,
},
]
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
r'clip[iI]d=(\d+)',
r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
]
_TITLE_REGEXES = [
r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
r'<header class="clearfix">\s*<h3>(.+?)</h3>',
r'<!-- start video -->\s*<h1>(.+?)</h1>',
r'<h1 class="att-name">\s*(.+?)</h1>',
r'<header class="module_header">\s*<h2>([^<]+)</h2>\s*</header>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
r'<p class="att-description">\s*(.+?)\s*</p>',
]
_UPLOAD_DATE_REGEXES = [
r'<meta property="og:published_time" content="(.+?)">',
r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
]
_PAGE_TYPE_REGEXES = [
r'<meta name="page_type" content="([^"]+)">',
r"'itemType'\s*:\s*'([^']*)'",
]
_PLAYLIST_ID_REGEXES = [
r'content[iI]d=(\d+)',
r"'itemId'\s*:\s*'([^']*)'",
]
_PLAYLIST_CLIP_REGEXES = [
r'(?s)data-qvt=.+?<a href="([^"]+)"',
]
def _extract_clip(self, url, webpage):
clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
access_token = 'prosieben'
client_name = 'kolibri-2.0.19-splec4'
client_location = url
videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
'access_token': access_token,
'client_location': client_location,
'client_name': client_name,
'ids': clip_id,
})
video = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')[0]
if video.get('is_protected') is True:
raise ExtractorError('This video is DRM protected.', expected=True)
duration = float_or_none(video.get('duration'))
source_ids = [source['id'] for source in video['sources']]
source_ids_str = ','.join(map(str, source_ids))
g = '01!8d8F_)r9]4s[qeuXfP%'
client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
.encode('utf-8')).hexdigest()
sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
'client_name': client_name,
}))
sources = self._download_json(sources_api_url, clip_id, 'Downloading sources JSON')
server_id = sources['server_id']
client_id = g[:2] + sha1(''.join([g, clip_id, access_token, server_id,
client_location, source_ids_str, g, client_name])
.encode('utf-8')).hexdigest()
url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
'client_name': client_name,
'server_id': server_id,
'source_ids': source_ids_str,
}))
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._html_search_regex(
self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
formats = []
urls_sources = urls['sources']
if isinstance(urls_sources, dict):
urls_sources = urls_sources.values()
def fix_bitrate(bitrate):
bitrate = int_or_none(bitrate)
if not bitrate:
return None
return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
for source in urls_sources:
protocol = source['protocol']
source_url = source['url']
if protocol == 'rtmp' or protocol == 'rtmpe':
mobj = re.search(r'^(?P<url>rtmpe?://[^/]+)/(?P<path>.+)$', source_url)
if not mobj:
continue
path = mobj.group('path')
mp4colon_index = path.rfind('mp4:')
app = path[:mp4colon_index]
play_path = path[mp4colon_index:]
formats.append({
'url': '%s/%s' % (mobj.group('url'), app),
'app': app,
'play_path': play_path,
'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
'page_url': 'http://www.prosieben.de',
'vbr': fix_bitrate(source['bitrate']),
'ext': 'mp4',
'format_id': '%s_%s' % (source['cdn'], source['bitrate']),
})
elif 'f4mgenerator' in source_url or determine_ext(source_url) == 'f4m':
formats.extend(self._extract_f4m_formats(source_url, clip_id))
else:
formats.append({
'url': source_url,
'vbr': fix_bitrate(source['bitrate']),
})
self._sort_formats(formats)
return {
'id': clip_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}
def _extract_playlist(self, url, webpage):
playlist_id = self._html_search_regex(
self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
for regex in self._PLAYLIST_CLIP_REGEXES:
playlist_clips = re.findall(regex, webpage)
if playlist_clips:
title = self._html_search_regex(
self._TITLE_REGEXES, webpage, 'title')
description = self._html_search_regex(
self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
entries = [
self.url_result(
re.match('(.+?//.+?)/', url).group(1) + clip_path,
'ProSiebenSat1')
for clip_path in playlist_clips]
return self.playlist_result(entries, playlist_id, title, description)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
page_type = self._search_regex(
self._PAGE_TYPE_REGEXES, webpage,
'page type', default='clip').lower()
if page_type == 'clip':
return self._extract_clip(url, webpage)
elif page_type == 'playlist':
return self._extract_playlist(url, webpage)
| miminus/youtube-dl | youtube_dl/extractor/prosiebensat1.py | Python | unlicense | 13,995 |
import os, sys
if os.path.isfile("/proc/device-tree/hat/product"):
file = open("/proc/device-tree/hat/product","r")
hat = file.readline()
if hat == "Sense HAT\x00":
print('Sense HAT detected')
mypath = os.path.dirname(os.path.abspath(__file__))
file.close()
os.system("/usr/bin/env python3 " + mypath+"/8x8grid-sense.py")
elif hat == "Unicorn HAT\x00":
print('Unicorn HAT detected')
mypath = os.path.dirname(os.path.abspath(__file__))
file.close()
os.system("/usr/bin/env python3 " + mypath+"/8x8grid-unicorn.py")
else:
print("Unknown HAT : " + str(hat))
file.close()
sys.exit()
else:
print('No HAT detected')
answer = input('Do you have a Unicorn Phat (y/n)?')
if answer == 'y':
print('Configuring for Unicorn Phat')
mypath = os.path.dirname(os.path.abspath(__file__))
os.system("/usr/bin/env python3 " + mypath+"/8x8grid-unicornphat.py")
else:
sys.exit()
| topshed/RPi_8x8GridDraw | 8x8grid.py | Python | mit | 898 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetIamPolicy
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-securitycenter
# [START securitycenter_v1_generated_SecurityCenter_GetIamPolicy_sync]
from google.cloud import securitycenter_v1
def sample_get_iam_policy():
# Create a client
client = securitycenter_v1.SecurityCenterClient()
# Initialize request argument(s)
request = securitycenter_v1.GetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.get_iam_policy(request=request)
# Handle the response
print(response)
# [END securitycenter_v1_generated_SecurityCenter_GetIamPolicy_sync]
| googleapis/python-securitycenter | samples/generated_samples/securitycenter_v1_generated_security_center_get_iam_policy_sync.py | Python | apache-2.0 | 1,489 |
# Copyright 2009 Shikhar Bhushan <shikhar@schmizz.net>
#
# This file is part of the Sweetmail activity for Sugar.
#
# Sweetmail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sweetmail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sweetmail. If not, see <http://www.gnu.org/licenses/>.
from tags import HARDCODED, FLAGS
import sugar.util
import email.utils
import logging
from gettext import gettext as _
class MsgInfo(object):
def __init__(self, mailstore, key, flags, hdr_from, hdr_to, hdr_subj, hdr_date):
self._ms = mailstore
self._key = key
self._flags = flags
self._hdr_from = hdr_from
self._hdr_to = hdr_to
self._hdr_subj = hdr_subj
self._hdr_date = hdr_date
def destroy(self):
attrs = (self._ms,
self._key,
self._flags,
self._hdr_from,
self._hdr_subj,
self._hdr_date)
for attr in attrs:
del attr
def mark(self, flag):
flag = FLAGS.get(flag, None)
if flag is not None:
self._ms.flag(self._key, flag)
self._flags = self._flags|flag
def unmark(self, flag):
flag = FLAGS.get(flag, None)
if flag is not None:
self._ms.unflag(self._key, flag)
self._flags = self._flags&(~flag)
def mark_sent(self):
        self._ms.unflag(self._key, FLAGS['outbound'])
self._ms.flag(self._key, FLAGS['sent'])
def mark_has_attachment(self):
        self._ms.flag(self._key, FLAGS['has_attachment'])
def _whoify(self, hdr):
if hdr=='undefined':
return _('Unknown')
else:
(name, addr) = email.utils.parseaddr(hdr)
return name if not name=='' else addr
@property
def msg_id(self):
return self._key
@property
def who(self):
internal = FLAGS['draft'] | FLAGS['outbound'] | FLAGS['sent']
if self._flags & internal:
logging.debug('self._hdr_to %s' % self._hdr_to)
return self._whoify(self._hdr_to)
else:
logging.debug('self._hdr_from %s' % self._hdr_from)
return self._whoify(self._hdr_from)
@property
def what(self):
if self._hdr_subj=='undefined':
return _('No subject')
else:
return self._hdr_subj
@property
def timestamp(self):
ti = email.utils.parsedate_tz(self._hdr_date)
return email.utils.mktime_tz(ti)
@property
def when(self):
return sugar.util.timestamp_to_elapsed_string(self.timestamp)
seen = property(lambda self: bool(self._flags & FLAGS['seen']))
unseen = property(lambda self: not self.seen)
starred = property(lambda self: bool(self._flags & FLAGS['starred']))
has_attachment = property(lambda self: bool(self._flags & FLAGS['has_attachment'])) | FOSSRIT/sweetermail | msginfo.py | Python | gpl-3.0 | 3,348 |
"""Provide CudaNdarrayType
"""
from __future__ import print_function
import os
import six.moves.copyreg as copyreg
import warnings
import numpy
import theano
from theano import Type, Variable
from theano import tensor, config
from theano import scalar as scal
from six import StringIO
try:
    # We must do these imports to be able to create the full doc when nvcc
    # is not available.
import cuda_ndarray.cuda_ndarray as cuda
from theano.sandbox.cuda.nvcc_compiler import NVCC_compiler
import cuda_ndarray
except ImportError:
# Used to know that `cuda` could not be properly imported.
cuda = None
class CudaNdarrayType(Type):
typenum = 11 # Until hardware improves, this class deals with floats.
dtype = 'float32'
Variable = None
""" This will be set to the Variable type corresponding to this class.
That variable type is `CudaNdarrayVariable` defined in the
``var.py`` file beside this one.
:note: The var file depends on the file basic_ops.py, which
depends on this file. A cyclic dependency is avoided by not
hardcoding ``Variable = CudaNdarrayVariable``.
"""
Constant = None
""" This will be set to `CudaNdarrayConstant` defined in ``var.py``
:note:
The var file depends on the file basic_ops.py, which depends on this file.
A cyclic dependency is avoided by not hardcoding this class.
"""
SharedVariable = None
""" This will be set to `CudaNdarraySharedVariable` defined in ``var.py``
:note:
The var file depends on the file basic_ops.py, which depends on this file.
A cyclic dependency is avoided by not hardcoding this class.
"""
if cuda is not None:
value_zeros = staticmethod(cuda.CudaNdarray.zeros)
"""
        Create a CudaNdarray full of 0 values.
"""
def __init__(self, broadcastable, name=None, dtype=None):
if dtype is not None and dtype != 'float32':
raise TypeError('%s only supports dtype float32 for now. Tried '
'using dtype %s for variable %s' %
(self.__class__.__name__, dtype, name))
self.broadcastable = tuple(broadcastable)
self.name = name
self.dtype_specs() # error checking is done there
def clone(self, dtype=None, broadcastable=None):
if broadcastable is None:
broadcastable = self.broadcastable
return self.__class__(broadcastable, name=self.name, dtype=dtype)
def filter(self, data, strict=False, allow_downcast=None):
return self.filter_inplace(data, None, strict=strict,
allow_downcast=allow_downcast)
def filter_inplace(self, data, old_data, strict=False,
allow_downcast=None):
if strict or allow_downcast or isinstance(data, cuda.CudaNdarray):
return cuda.filter(data, self.broadcastable, strict, old_data)
else: # (not strict) and (not allow_downcast)
# Check if data.dtype can be accurately cast to self.dtype
if isinstance(data, numpy.ndarray):
up_dtype = scal.upcast(self.dtype, data.dtype)
if up_dtype == self.dtype:
return cuda.filter(data, self.broadcastable,
strict, old_data)
else:
raise TypeError(
'%s, with dtype %s, cannot store a value of '
'dtype %s without risking loss of precision.'
'If you do not mind, please cast your data to %s.'
% (self, self.dtype, data.dtype, self.dtype),
data)
else:
converted_data = theano._asarray(data, self.dtype)
if (allow_downcast is None and
type(data) is float and
self.dtype == theano.config.floatX):
return cuda.filter(converted_data, self.broadcastable,
strict, old_data)
elif numpy.all(data == converted_data):
return cuda.filter(converted_data, self.broadcastable,
strict, old_data)
else:
raise TypeError(
'%s, with dtype %s, cannot store accurately value %s, '
'it would be represented as %s. If you do not mind, '
'you can cast your data to %s.'
% (self, self.dtype, data, converted_data, self.dtype),
data)
def filter_variable(self, other):
"""Convert a Variable into a CudaNdarrayType, if compatible.
This Variable should either already be a CudaNdarrayType, or be
a TensorType. It has to have the right number of dimensions,
broadcastable pattern, and dtype.
"""
if hasattr(other, '_as_CudaNdarrayVariable'):
other = other._as_CudaNdarrayVariable()
if not isinstance(other, Variable):
# The value is not a Variable: we cast it into
# a Constant of the appropriate Type.
other = self.Constant(type=self, data=other)
if other.type == self:
return other
if not isinstance(other.type, (tensor.TensorType, CudaNdarrayType)):
raise TypeError('Incompatible type', (self, other.type))
if (other.type.dtype != self.dtype):
raise TypeError('Incompatible dtype', (self.dtype,
other.type.dtype))
if other.type.ndim != self.ndim:
raise TypeError('Incompatible number of dimensions.'
' Expected %d, got %d.' % (self.ndim, other.ndim))
if other.type.broadcastable != self.broadcastable:
raise TypeError('Incompatible broadcastable dimensions.'
' Expected %s, got %s.' %
(str(other.type.broadcastable),
str(self.broadcastable)))
return theano.sandbox.cuda.basic_ops.GpuFromHost()(other)
@staticmethod
def bound(a):
high = a.gpudata
low = a.gpudata
        # strides are in numbers of elements;
        # we must convert them to bytes in case we
        # view the elements as a different type.
elem_size = numpy.zeros(0, dtype=a.dtype).dtype.itemsize
for stri, shp in zip(a._strides, a.shape):
if stri < 0:
low += (stri * elem_size) * (shp - 1)
else:
high += (stri * elem_size) * (shp - 1)
return low, high
@staticmethod
def may_share_memory(a, b):
        # when this is called with a as an ndarray and b
        # as a sparse matrix, numpy.may_share_memory fails.
if a is b:
return True
if a.__class__ is b.__class__:
a_l, a_h = CudaNdarrayType.bound(a)
b_l, b_h = CudaNdarrayType.bound(b)
if b_l >= a_h or a_l >= b_h:
return False
return True
else:
return False
@staticmethod
def values_eq(a, b):
        # TODO: make the comparison without a transfer.
return tensor.TensorType.values_eq(numpy.asarray(a), numpy.asarray(b))
@staticmethod
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
rtol=None, atol=None):
        # TODO: make the comparison without a transfer.
return tensor.TensorType.values_eq_approx(
numpy.asarray(a),
numpy.asarray(b),
allow_remove_inf=allow_remove_inf,
allow_remove_nan=allow_remove_nan,
rtol=rtol, atol=atol
)
def dtype_specs(self):
"""Return a tuple (python type, c type, numpy typenum) that
corresponds to self.dtype.
This function is used internally as part of C code generation.
"""
        # TODO: add more type correspondences for e.g. int32, int64, float32,
# complex64, etc.
try:
return {'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
'int8': (int, 'npy_int8', 'NPY_INT8'),
'uint16': (int, 'npy_uint16', 'NPY_UINT16'),
'int16': (int, 'npy_int16', 'NPY_INT16'),
'uint32': (int, 'npy_uint32', 'NPY_UINT32'),
'int32': (int, 'npy_int32', 'NPY_INT32'),
'uint64': (int, 'npy_uint64', 'NPY_UINT64'),
'int64': (int, 'npy_int64', 'NPY_INT64'),
'complex128': (complex, 'theano_complex128',
'NPY_COMPLEX128'),
'complex64': (complex, 'theano_complex64',
'NPY_COMPLEX64')}[self.dtype]
except KeyError:
raise TypeError("Unsupported dtype for %s: %s" % (
self.__class__.__name__, self.dtype))
def __eq__(self, other):
"""Compare True iff other is the same kind of CudaNdarrayType"""
return (type(self) == type(other) and
other.broadcastable == self.broadcastable)
def convert_variable(self, var):
if (type(self) == type(var.type) and
self.ndim == var.type.ndim and
all(sb == ob or ob for sb, ob in zip(self.broadcastable,
var.type.broadcastable))):
return theano.tensor.patternbroadcast(var, self.broadcastable)
def __hash__(self):
"""Hash equal for same kinds of CudaNdarrayType"""
return hash(type(self)) ^ hash(self.broadcastable)
ndim = property(lambda self: len(self.broadcastable),
doc="number of dimensions")
"""Number of dimensions
This read-only property is the preferred way to get the number of
dimensions of a `CudaNdarrayType`.
"""
def make_variable(self, name=None):
"""Return a `TensorVariable` of this type
:Parameters:
- `name`: str
A pretty name to identify this `Variable` when printing and
debugging
"""
return self.Variable(self, name=name)
def __str__(self):
if self.name:
return self.name
else:
b = self.broadcastable
#bcast = str(self.broadcastable)
if not numpy.any(b):
s = "%iD" % len(b)
else:
s = str(b)
bcast = {(): 'scalar',
(False,): 'vector',
(False, True): 'col',
(True, False): 'row',
(False, False): 'matrix'}.get(b, s)
return "CudaNdarrayType(%s, %s)" % (str(self.dtype), bcast)
def __repr__(self):
return str(self)
#"CudaNdarrayType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub, check_input=True):
return """ CudaNdarray * %(name)s;""" % locals()
def c_init(self, name, sub):
return "%(name)s = NULL;" % locals()
def c_extract(self, name, sub, check_input=True,
check_broadcast=True):
sio = StringIO()
fail = sub['fail']
nd = self.ndim
print("""
assert(py_%(name)s->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_%(name)s))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %%p %%i\\n", py_%(name)s, (py_%(name)s->ob_refcnt));
%(name)s = (CudaNdarray*)py_%(name)s;
//std::cerr << "c_extract " << %(name)s << '\\n';
""" % locals(), file=sio)
if(check_input):
print("""
if (%(name)s->nd != %(nd)s)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %%i, it was supposed to have rank %(nd)s",
%(name)s->nd);
%(name)s = NULL;
%(fail)s;
}
//std::cerr << "c_extract " << %(name)s << " nd check passed\\n";
""" % locals(), file=sio)
for i, b in enumerate(self.broadcastable):
if b and check_broadcast:
print("""
if (CudaNdarray_HOST_DIMS(%(name)s)[%(i)s] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %%i on broadcastable dimension %%i",
CudaNdarray_HOST_DIMS(%(name)s)[%(i)s], %(i)s);
%(name)s = NULL;
%(fail)s;
}
//std::cerr << "c_extract " << %(name)s << "dim check %(i)s passed\\n";
//std::cerr << "c_extract " << %(name)s << "checking bcast %(i)s <" << %(name)s->str<< ">\\n";
//std::cerr << "c_extract " << %(name)s->str[%(i)s] << "\\n";
if (CudaNdarray_HOST_STRIDES(%(name)s)[%(i)s])
{
//std::cerr << "c_extract bad stride detected...\\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %%i on a broadcastable dimension %%i",
CudaNdarray_HOST_STRIDES(%(name)s)[%(i)s], %(i)s);
%(name)s = NULL;
%(fail)s;
}
//std::cerr << "c_extract " << %(name)s << "bcast check %(i)s passed\\n";
""" % locals(), file=sio)
print("""
assert(%(name)s);
Py_INCREF(py_%(name)s);
}
else if (py_%(name)s == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
%(name)s = NULL;
%(fail)s;
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %%p %%i\\n", py_%(name)s, (py_%(name)s->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
%(name)s = NULL;
%(fail)s;
}
//std::cerr << "c_extract done " << %(name)s << '\\n';
""" % locals(), file=sio)
else:
print("""
assert(%(name)s);
Py_INCREF(py_%(name)s);
}
""" % locals(), file=sio)
# print sio.getvalue()
return sio.getvalue()
def c_extract_out(self, name, sub, check_input=True, check_broadcast=True):
""" To allow the hack to skip check_broadcast.
"""
return """
if (py_%(name)s == Py_None)
{
%(c_init_code)s
}
else
{
%(c_extract_code)s
}
""" % dict(
name=name,
c_init_code=self.c_init(name, sub),
c_extract_code=self.c_extract(name, sub, check_input,
check_broadcast))
def c_cleanup(self, name, sub):
return """
//std::cerr << "cleanup " << py_%(name)s << " " << %(name)s << "\\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %%p %%i\\n", py_%(name)s, (py_%(name)s->ob_refcnt));
if (%(name)s)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %%p %%i\\n", %(name)s, (%(name)s->ob_refcnt));
Py_XDECREF(%(name)s);
}
//std::cerr << "cleanup done" << py_%(name)s << "\\n";
""" % locals()
def c_sync(self, name, sub):
"""Override `CLinkerOp.c_sync` """
return """
//std::cerr << "sync\\n";
if (NULL == %(name)s) {
// failure: sync None to storage
Py_XDECREF(py_%(name)s);
py_%(name)s = Py_None;
Py_INCREF(py_%(name)s);
}
else
{
if (py_%(name)s != (PyObject*)%(name)s)
{
Py_XDECREF(py_%(name)s);
py_%(name)s = (PyObject*)%(name)s;
Py_INCREF(py_%(name)s);
}
assert(py_%(name)s->ob_refcnt);
}
""" % locals()
def c_headers(self):
"""Override `CLinkerOp.c_headers` """
return ['cuda_ndarray.cuh']
def c_header_dirs(self):
"""Override `CLinkerOp.c_headers` """
ret = [os.path.dirname(cuda_ndarray.__file__)]
cuda_root = config.cuda.root
if cuda_root:
ret.append(os.path.join(cuda_root, 'include'))
return ret
def c_lib_dirs(self):
ret = [os.path.dirname(cuda_ndarray.__file__)]
return ret
def c_libraries(self):
# returning cublas because the cuda_ndarray.cuh header
# includes calls to SetVector and cublasGetError
return ['cudart', config.cublas.lib, 'cuda_ndarray']
def c_support_code(cls):
return ""
def c_code_cache_version(self):
# return ()
# no need to put nvcc.fastmath in the tuple as the
# c_compile_args is put in the key.
return (3,) # cublas v2 changes
def c_compiler(self):
return NVCC_compiler
def c_compile_args(self):
return []
def get_shape_info(self, obj):
return obj.shape
def get_size(self, shape_info):
if shape_info:
return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize
else: # a scalar
return numpy.dtype(self.dtype).itemsize
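# Illustrative sketch (added for clarity): the broadcastable pattern fully
# determines a CudaNdarrayType, and __str__ maps the common patterns to
# familiar names, e.g.
#
#     mat = CudaNdarrayType(broadcastable=(False, False))
#     str(mat)    # -> "CudaNdarrayType(float32, matrix)"
#     mat.ndim    # -> 2
#     mat == CudaNdarrayType((False, False))    # -> True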
theano.compile.ops.expandable_types += (CudaNdarrayType,)
# Register C code for ViewOp on CudaNdarrayType
theano.compile.register_view_op_c_code(
CudaNdarrayType,
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1)
theano.compile.register_shape_i_c_code(
CudaNdarrayType,
"""
if(!%(oname)s)
%(oname)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(oname)s))[0] =
CudaNdarray_HOST_DIMS(%(iname)s)[%(i)s];
""",
"""
if (%(i)s>=CudaNdarray_NDIM(%(iname)s)){
PyErr_SetString(PyExc_TypeError,
"Number of dimensions lower than expected");
%(fail)s
}
""",
version=(1,))
# Register CudaNdarrayType to the DeepCopyOp list of types with c code.
theano.compile.register_deep_copy_op_c_code(
CudaNdarrayType,
"""
int alloc = %(oname)s == NULL;
for(int i=0; !alloc && i<CudaNdarray_NDIM(%(oname)s); i++) {
if(CudaNdarray_HOST_DIMS(%(iname)s)[i] !=
CudaNdarray_HOST_DIMS(%(oname)s)[i]) {
alloc = true;
break;
}
}
if(alloc) {
Py_XDECREF(%(oname)s);
%(oname)s = (CudaNdarray*)CudaNdarray_Copy(%(iname)s);
if (!%(oname)s)
{
PyErr_SetString(PyExc_ValueError,
"DeepCopyOp: the copy failed!");
%(fail)s;
}
} else {
if(CudaNdarray_CopyFromCudaNdarray(%(oname)s, %(iname)s)) {
PyErr_SetString(PyExc_ValueError,
"DeepCopyOp: the copy failed into already allocated space!");
%(fail)s;
}
}
""",
version=3)
# THIS WORKS, but CudaNdarray instances don't compare equal to one
# another, and what about __hash__ ? So the unpickled version doesn't
# equal the pickled version, and the cmodule cache is not happy with
# the situation.
def CudaNdarray_unpickler(npa):
if (config.experimental.unpickle_gpu_on_cpu and config.device == 'cpu'):
# directly return numpy array
warnings.warn("config.experimental.unpickle_gpu_on_cpu is set to True. Unpickling CudaNdarray as numpy.ndarray")
return npa
elif cuda:
return cuda.CudaNdarray(npa)
else:
raise ImportError("Cuda not found. Cannot unpickle CudaNdarray")
copyreg.constructor(CudaNdarray_unpickler)
def CudaNdarray_pickler(cnda):
return (CudaNdarray_unpickler, (numpy.asarray(cnda),))
# In case cuda is not imported.
if cuda is not None:
copyreg.pickle(cuda.CudaNdarray, CudaNdarray_pickler,
CudaNdarray_unpickler)
| nke001/attention-lvcsr | libs/Theano/theano/sandbox/cuda/type.py | Python | mit | 20,739 |
''' Some tests for filters '''
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_raises,
assert_array_equal, TestCase, run_module_suite)
import scipy.ndimage as sndi
def test_ticket_701():
# Test generic filter sizes
arr = np.arange(4).reshape((2,2))
func = lambda x: np.min(x)
res = sndi.generic_filter(arr, func, size=(1,1))
# The following raises an error unless ticket 701 is fixed
res2 = sndi.generic_filter(arr, func, size=1)
assert_equal(res, res2)
def test_orders_gauss():
# Check order inputs to Gaussians
arr = np.zeros((1,))
yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0)
yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3)
yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1
yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4
yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)
yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)
yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1
yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4
def test_valid_origins():
"""Regression test for #1311."""
func = lambda x: np.mean(x)
data = np.array([1,2,3,4,5], dtype=np.float64)
assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
origin=2)
func2 = lambda x, y: np.mean(x + y)
assert_raises(ValueError, sndi.generic_filter1d, data, func,
filter_size=3, origin=2)
assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
origin=2)
for filter in [sndi.uniform_filter, sndi.minimum_filter,
sndi.maximum_filter, sndi.maximum_filter1d,
sndi.median_filter, sndi.minimum_filter1d]:
# This should work, since for size == 3, the valid range for origin is
# -1 to 1.
list(filter(data, 3, origin=-1))
list(filter(data, 3, origin=1))
# Just check this raises an error instead of silently accepting or
# segfaulting.
assert_raises(ValueError, filter, data, 3, origin=2)
def test_gaussian_truncate():
# Test that Gaussian filters can be truncated at different widths.
# These tests only check that the result has the expected number
# of nonzero elements.
arr = np.zeros((100, 100), np.float)
arr[50, 50] = 1
num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
assert_equal(num_nonzeros_2, 21**2)
num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
assert_equal(num_nonzeros_5, 51**2)
# Test truncate when sigma is a sequence.
f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
fpos = f > 0
n0 = fpos.any(axis=0).sum()
# n0 should be 2*int(2.5*3.5 + 0.5) + 1
assert_equal(n0, 19)
n1 = fpos.any(axis=1).sum()
# n1 should be 2*int(0.5*3.5 + 0.5) + 1
assert_equal(n1, 5)
# Test gaussian_filter1d.
x = np.zeros(51)
x[25] = 1
f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
n = (f > 0).sum()
assert_equal(n, 15)
# Test gaussian_laplace
y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
nonzero_indices = np.where(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
# Test gaussian_gradient_magnitude
y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
nonzero_indices = np.where(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
class TestThreading(TestCase):
def check_func_thread(self, n, fun, args, out):
from threading import Thread
thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)]
[t.start() for t in thrds]
[t.join() for t in thrds]
def check_func_serial(self, n, fun, args, out):
for i in range(n):
fun(*args, output=out[i])
def test_correlate1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os)
self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot)
assert_array_equal(os, ot)
def test_correlate(self):
d = np.random.randn(500, 500)
k = np.random.randn(10, 10)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate, (d, k), os)
self.check_func_thread(4, sndi.correlate, (d, k), ot)
assert_array_equal(os, ot)
def test_median_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.median_filter, (d, 3), os)
self.check_func_thread(4, sndi.median_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_uniform_filter1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os)
self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot)
assert_array_equal(os, ot)
def test_minmax_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.maximum_filter, (d, 3), os)
self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot)
assert_array_equal(os, ot)
self.check_func_serial(4, sndi.minimum_filter, (d, 3), os)
self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_minmaximum_filter1d():
# Regression gh-3898
in_ = np.arange(10)
out = sndi.minimum_filter1d(in_, 1)
assert_equal(in_, out)
out = sndi.maximum_filter1d(in_, 1)
assert_equal(in_, out)
# Test reflect
out = sndi.minimum_filter1d(in_, 5, mode='reflect')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='reflect')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
#Test constant
out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
# Test nearest
out = sndi.minimum_filter1d(in_, 5, mode='nearest')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='nearest')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
# Test wrap
out = sndi.minimum_filter1d(in_, 5, mode='wrap')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
out = sndi.maximum_filter1d(in_, 5, mode='wrap')
assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
if __name__ == "__main__":
run_module_suite(argv=sys.argv)
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/ndimage/tests/test_filters.py | Python | gpl-2.0 | 7,092 |
import codecs
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with codecs.open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='zenbu',
version='1.0.5',
description='Jinja2 + YAML based config templater.',
long_description=long_description,
url='https://github.com/metakirby5/zenbu',
author='Ethan Chan',
author_email='metakirby5@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='zenbu config templater jinja2 yaml',
py_modules=['zenbu'],
install_requires=[
'argcomplete',
'colorlog',
'Jinja2',
'PyYAML',
'termcolor',
'watchdog',
],
entry_points={
'console_scripts': [
'zenbu=zenbu:main',
],
},
)
| metakirby5/zenbu | setup.py | Python | mit | 1,288 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import json
import os
import re
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.request import urlopen
import validators
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.api.rest import Resource, getApiUrl, setResponseHeader
from girder.constants import AccessType
from girder.exceptions import RestException
from girder.models.folder import Folder
from girder.models.item import Item
from girder.plugins.ythub.constants import PluginSettings
_DOI_REGEX = re.compile(r'(10.\d{4,9}/[-._;()/:A-Z0-9]+)', re.IGNORECASE)
_QUOTES_REGEX = re.compile(r'"(.*)"')
_CNTDISP_REGEX = re.compile(r'filename="(.*)"')
class DataverseImportProvider(object):
@staticmethod
def query_dataverse(search_url):
resp = urlopen(search_url).read()
data = json.loads(resp.decode('utf-8'))['data']
if data['count_in_response'] != 1:
raise ValueError
item = data['items'][0]
doi = None
doi_search = _DOI_REGEX.search(item['dataset_citation'])
if doi_search is not None:
doi = "doi:" + doi_search.group() # TODO: get a proper protocol
return doi
@staticmethod
def parse_dataset(url):
"""Extract title, file, doi from Dataverse resource.
Handles: {siteURL}/dataset.xhtml?persistentId={persistentId}
Handles: {siteURL}/api/datasets/{:id}
"""
if "persistentId" in url.query:
dataset_url = urlunparse(
url._replace(path='/api/datasets/:persistentId')
)
else:
dataset_url = urlunparse(url)
resp = urlopen(dataset_url).read()
data = json.loads(resp.decode('utf-8'))
doi = '{protocol}:{authority}/{identifier}'.format(**data['data'])
return doi
def parse_file_url(self, url):
"""Extract title, file, doi from Dataverse resource.
Handles:
{siteURL}/file.xhtml?persistentId={persistentId}&...
{siteURL}/api/access/datafile/:persistentId/?persistentId={persistentId}
"""
qs = parse_qs(url.query)
try:
full_doi = qs['persistentId'][0]
except (KeyError, ValueError):
# fail here in a meaningful way...
raise
return os.path.dirname(full_doi)
def parse_access_url(self, url):
"""Extract title, file, doi from Dataverse resource.
Handles: {siteURL}/api/access/datafile/{fileId}
"""
fileId = os.path.basename(url.path)
search_url = urlunparse(
url._replace(path='/api/search', query='q=entityId:' + fileId)
)
return self.query_dataverse(search_url)
@staticmethod
def dataset_full_url(site, doi):
return "{scheme}://{netloc}/dataset.xhtml?persistentId={doi}".format(
scheme=site.scheme, netloc=site.netloc, doi=doi
)
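# Illustrative sketch (added for clarity; the site below is hypothetical):
# parse_file_url() drops the file-level suffix of a persistent id to obtain
# the dataset DOI, e.g.
#
#     provider = DataverseImportProvider()
#     url = urlparse('https://demo.dataverse.org/file.xhtml'
#                    '?persistentId=doi:10.7910/DVN/TJCLKP/3VSTKY')
#     provider.parse_file_url(url)    # -> 'doi:10.7910/DVN/TJCLKP'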
class ytHub(Resource):
"""Meta resource for yt Hub."""
def __init__(self):
super(ytHub, self).__init__()
self.resourceName = "ythub"
self.route("GET", (), self.get_ythub_url)
self.route("GET", (":id", "examples"), self.generateExamples)
self.route("GET", (":id", "registry"), self.generate_pooch_registry)
self.route("POST", ("genkey",), self.generateRSAKey)
self.route("GET", ("dataverse",), self.dataverseExternalTools)
@access.admin
@autoDescribeRoute(Description("Generate ythub's RSA key"))
def generateRSAKey(self, params):
rsa_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
pubkey_pem = (
rsa_key.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
.decode("utf8")
)
privkey_pem = rsa_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
).decode("utf8")
self.model("setting").set(PluginSettings.HUB_PUB_KEY, pubkey_pem)
self.model("setting").set(PluginSettings.HUB_PRIV_KEY, privkey_pem)
return {
PluginSettings.HUB_PUB_KEY: pubkey_pem,
PluginSettings.HUB_PRIV_KEY: privkey_pem,
}
@access.public
@autoDescribeRoute(Description("Return url for tmpnb hub."))
def get_ythub_url(self, params):
setting = self.model("setting")
url = setting.get(PluginSettings.REDIRECT_URL)
if not url:
url = setting.get(PluginSettings.TMPNB_URL)
return {"url": url, "pubkey": setting.get(PluginSettings.HUB_PUB_KEY)}
@access.public
@autoDescribeRoute(
Description("Generate example data page.").modelParam(
"id", model="folder", level=AccessType.READ
)
)
def generateExamples(self, folder, params):
def get_code(resource):
try:
return resource["meta"]["code"]
except KeyError:
return "unknown"
def sizeof_fmt(num, suffix="B"):
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def download_path(_id, resource):
return "{}/{}/{}/download".format(getApiUrl(), resource, _id)
def get_meta(item):
try:
frontend = "{} frontend".format(item["meta"]["frontend"])
fname, fobj = next(Item().fileList(item, data=False))
entry = {
"code": get_code(item),
"description": item["meta"]["description"],
"filename": fname.rsplit(".", 2)[0],
"size": sizeof_fmt(fobj["size"]),
"url": download_path(fobj["_id"], "file"),
}
return frontend, entry
            except Exception:
                # Items with missing or malformed metadata yield None and are
                # skipped by the caller below.
                return None
        result = {}
        for ds in Folder().childItems(folder):
            meta = get_meta(ds)
            if meta is None:
                continue
            frontend, entry = meta
            if frontend not in result:
                result[frontend] = []
            result[frontend].append(entry)
return result
@access.public
@autoDescribeRoute(
Description("Generate pooch registry for yt data").modelParam(
"id", model="folder", level=AccessType.READ
)
)
def generate_pooch_registry(self, folder):
def download_path(_id, resource):
return "{}/{}/{}/download".format(getApiUrl(), resource, _id)
result = {}
for item in Folder().childItems(folder):
fname, fobj = next(Item().fileList(item, data=False))
result[item["name"]] = {
"hash": "sha512:{}".format(fobj["sha512"]),
"load_kwargs": item["meta"].get("load_kwargs", {}),
"load_name": item["meta"].get("load_name"),
"url": download_path(fobj["_id"], "file"),
}
return result
@access.public
@autoDescribeRoute(
Description("Convert external tools request and bounce it to the BinderHub.")
.param(
"siteUrl",
"The URL of the Dataverse installation that hosts the file "
"with the fileId above",
required=True,
)
.param(
"fileId",
"The database ID of a file the user clicks 'Explore' on. "
"For example, 42. This reserved word is required for file level tools "
"unless you use {filePid} instead.",
required=False,
)
.param(
"filePid",
"The Persistent ID (DOI or Handle) of a file the user clicks 'Explore' on. "
"For example, doi:10.7910/DVN/TJCLKP/3VSTKY. Note that not all installations "
"of Dataverse have Persistent IDs (PIDs) enabled at the file level. "
"This reserved word is required for file level tools unless "
"you use {fileId} instead.",
required=False,
)
.param(
"apiToken",
"The Dataverse API token of the user launching the external "
"tool, if available. Please note that API tokens should be treated with "
"the same care as a password. For example, "
"f3465b0c-f830-4bc7-879f-06c0745a5a5c.",
required=False,
)
.param(
"datasetId",
"The database ID of the dataset. For example, 42. This reseved word is "
"required for dataset level tools unless you use {datasetPid} instead.",
required=False,
)
.param(
"datasetPid",
"The Persistent ID (DOI or Handle) of the dataset. "
"For example, doi:10.7910/DVN/TJCLKP. This reseved word is "
"required for dataset level tools unless you use {datasetId} instead.",
required=False,
)
.param(
"datasetVersion",
"The friendly version number ( or :draft ) of the dataset version "
"the tool is being launched from. For example, 1.0 or :draft.",
required=False,
)
.param(
"fullDataset",
"If True, imports the full dataset that "
"contains the file defined by fileId.",
dataType="boolean",
default=True,
required=False,
)
.notes("apiToken is currently ignored.")
)
def dataverseExternalTools(
self,
siteUrl,
fileId,
filePid,
apiToken,
datasetId,
datasetPid,
datasetVersion,
fullDataset,
):
if not validators.url(siteUrl):
raise RestException("Not a valid URL: siteUrl")
if all(arg is None for arg in (fileId, filePid, datasetId, datasetPid)):
raise RestException("No data Id provided")
provider = DataverseImportProvider()
site = urlparse(siteUrl)
if fileId:
try:
fileId = int(fileId)
except (TypeError, ValueError):
raise RestException("Invalid fileId (should be integer)")
url = "{scheme}://{netloc}/api/access/datafile/{fileId}".format(
scheme=site.scheme, netloc=site.netloc, fileId=fileId
)
doi = provider.parse_access_url(urlparse(url))
elif datasetId:
try:
datasetId = int(datasetId)
except (TypeError, ValueError):
raise RestException("Invalid datasetId (should be integer)")
url = "{scheme}://{netloc}/api/datasets/{_id}".format(
scheme=site.scheme, netloc=site.netloc, _id=datasetId
)
doi = provider.parse_dataset(urlparse(url))
url = provider.dataset_full_url(site, doi)
elif filePid:
url = "{scheme}://{netloc}/file.xhtml?persistentId={doi}".format(
scheme=site.scheme, netloc=site.netloc, doi=filePid
)
doi = provider.parse_file_url(urlparse(url))
elif datasetPid:
url = provider.dataset_full_url(site, datasetPid)
doi = provider.parse_dataset(urlparse(url))
binder_url = os.environ.get("BINDER_URL", "https://mybinder.org/v2/dataverse/")
location = os.path.join(binder_url, doi.rsplit(":")[-1])
setResponseHeader("Location", location)
cherrypy.response.status = 303
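    # Illustrative example (not part of the original module) of the redirect
    # this endpoint performs for a dataset PID. The Dataverse site is a made-up
    # example and the route prefix assumes Girder's default /api/v1 mount:
    #
    #   GET /api/v1/ythub/dataverse?siteUrl=https://demo.dataverse.org
    #           &datasetPid=doi:10.7910/DVN/TJCLKP
    #   -> 303, Location: https://mybinder.org/v2/dataverse/10.7910/DVN/TJCLKP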
| data-exp-lab/girder_ythub | server/rest/ythub.py | Python | bsd-3-clause | 12,034 |
from tests.base_case import ChatBotTestCase
from chatterbot.logic import MathematicalEvaluation
from chatterbot.conversation import Statement
class MathematicalEvaluationTests(ChatBotTestCase):
def setUp(self):
super().setUp()
self.adapter = MathematicalEvaluation(self.chatbot)
def test_can_process(self):
statement = Statement(text='What is 10 + 10 + 10?')
self.assertTrue(self.adapter.can_process(statement))
def test_can_not_process(self):
statement = Statement(text='What is your favorite song?')
self.assertFalse(self.adapter.can_process(statement))
def test_addition_operator(self):
statement = Statement(text='What is 100 + 54?')
response = self.adapter.process(statement)
self.assertEqual(response.text, '100 + 54 = 154')
self.assertEqual(response.confidence, 1)
def test_subtraction_operator(self):
statement = Statement(text='What is 100 - 58?')
response = self.adapter.process(statement)
self.assertEqual(response.text, '100 - 58 = 42')
self.assertEqual(response.confidence, 1)
def test_multiplication_operator(self):
statement = Statement(text='What is 100 * 20')
response = self.adapter.process(statement)
self.assertEqual(response.text, '100 * 20 = 2000')
self.assertEqual(response.confidence, 1)
def test_division_operator(self):
statement = Statement(text='What is 100 / 20')
response = self.adapter.process(statement)
self.assertEqual(response.text, '100 / 20 = 5')
self.assertEqual(response.confidence, 1)
def test_exponent_operator(self):
statement = Statement(text='What is 2 ^ 10')
response = self.adapter.process(statement)
self.assertEqual(response.text, '2 ^ 10 = 1024')
self.assertEqual(response.confidence, 1)
def test_parenthesized_multiplication_and_addition(self):
statement = Statement(text='What is 100 + ( 1000 * 2 )?')
response = self.adapter.process(statement)
self.assertEqual(response.text, '100 + ( 1000 * 2 ) = 2100')
self.assertEqual(response.confidence, 1)
def test_parenthesized_with_words(self):
statement = Statement(text='What is four plus 100 + ( 100 * 2 )?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'four plus 100 + ( 100 * 2 ) = 304')
self.assertEqual(response.confidence, 1)
def test_word_numbers_addition(self):
statement = Statement(text='What is one hundred + four hundred?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'one hundred + four hundred = 500')
self.assertEqual(response.confidence, 1)
def test_word_division_operator(self):
statement = Statement(text='What is 100 divided by 100?')
response = self.adapter.process(statement)
self.assertEqual(response.text, '100 divided by 100 = 1')
self.assertEqual(response.confidence, 1)
def test_large_word_division_operator(self):
statement = Statement(text='What is one thousand two hundred four divided by one hundred?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'one thousand two hundred four divided by one hundred = 12.04')
self.assertEqual(response.confidence, 1)
def test_negative_multiplication(self):
statement = Statement(text='What is -105 * 5')
response = self.adapter.process(statement)
self.assertEqual(response.text, '-105 * 5 = -525')
self.assertEqual(response.confidence, 1)
def test_negative_decimal_multiplication(self):
statement = Statement(text='What is -100.5 * 20?')
response = self.adapter.process(statement)
self.assertEqual(response.text, '-100.5 * 20 = -2010.0')
self.assertEqual(response.confidence, 1)
def test_pi_constant(self):
statement = Statement(text='What is pi plus one ?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'pi plus one = 4.141693')
self.assertEqual(response.confidence, 1)
def test_e_constant(self):
statement = Statement(text='What is e plus one ?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'e plus one = 3.718281')
self.assertEqual(response.confidence, 1)
def test_log_function(self):
statement = Statement(text='What is log 100 ?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'log 100 = 2.0')
self.assertEqual(response.confidence, 1)
def test_square_root_function(self):
statement = Statement(text='What is the sqrt 144 ?')
response = self.adapter.process(statement)
self.assertEqual(response.text, 'sqrt 144 = 12.0')
self.assertEqual(response.confidence, 1)
| vkosuri/ChatterBot | tests/logic/test_mathematical_evaluation.py | Python | bsd-3-clause | 4,970 |
from __future__ import absolute_import
import logging
from django.shortcuts import render_to_response, render, get_object_or_404, redirect
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.generic.list import ListView
from replica.contrib.blip.models import Timeline, Blip
from replica.contrib.blip.forms import TimelineModelForm, BlipModelForm
class LatestBlipsListViewMobile(ListView):
paginate_by = 25
template_name = 'replica/dashboard/blip/blip_list.html'
def get_queryset(self):
return Blip.objects.filter(user=self.request.user).order_by('-pub_date')
def get_context_data(self, **kwargs):
context = super(LatestBlipsListViewMobile, self).get_context_data(**kwargs)
context.update({'hide_timeline': True, 'nav_title': 'All Blips',})
return context
class TimelinesListView(ListView):
paginate_by = 25
template_name = 'replica/dashboard/blip/timeline_list.html'
def get_queryset(self):
return Timeline.objects.filter(user=self.request.user).order_by('-pub_date')
def get_context_data(self, **kwargs):
context = super(TimelinesListView, self).get_context_data(**kwargs)
context.update({ 'nav_title': 'Timelines',})
return context
class TimelineBlipListView(ListView):
paginate_by = 100
template_name = 'replica/dashboard/blip/blip_list.html'
def get_queryset(self):
self.timeline = get_object_or_404(Timeline, slug=self.kwargs.pop('timeline_slug'))
b = Blip.objects.filter(user=self.request.user).filter(timeline=self.timeline)
if self.timeline.rev_order == True:
return b.order_by('-pub_date')
else:
return b.order_by('pub_date')
def get_context_data(self, **kwargs):
context = super(TimelineBlipListView, self).get_context_data(**kwargs)
context.update({'timeline': self.timeline, 'nav_title': self.timeline.name,})
return context
def AddTimeline(request):
#add a timeline.
instance = Timeline(user=request.user)
f = TimelineModelForm(request.POST or None, instance=instance)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'New list created.')
return redirect('Replica:Blip-Timelines')
ctx = {'form': f, 'adding': True}
return render(request, 'replica/dashboard/blip/edit_timeline.html', ctx)
def EditTimeline(request, timeline_slug):
    # Lets a user edit a timeline they've previously created.
timeline = get_object_or_404(Timeline, slug=timeline_slug)
f = TimelineModelForm(request.POST or None, instance=timeline)
if f.is_valid():
f.save()
return redirect('Replica:Blip-Add-To-Timeline', timeline_slug=timeline_slug)
ctx = {'form': f, 'timeline': timeline, 'adding': False}
return render(request, 'replica/dashboard/blip/edit_timeline.html', ctx)
def SingleBlip(request, blip_guid):
#Shows a single blip.
blip = get_object_or_404(Blip, guid=blip_guid)
if blip.timeline:
recent_blips = Blip.objects.filter(timeline__id=blip.timeline.id, is_private=False)[:5]
ctx = {'blip': blip, 'recent_blips': recent_blips}
else:
ctx = {'blip': blip}
return render(request, 'replica/dashboard/blip/single_blip.html', ctx)
def AddBlip(request, timeline_slug=None):
object_list = Blip.objects.filter(user=request.user).order_by('-pub_date')[:10]
instance = Blip(user=request.user)
f = BlipModelForm(request.POST or None, instance=instance)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'Blip Added.')
return redirect('Replica:Blip:Index')
ctx = {'form': f, 'object_list': object_list, 'adding': True, 'blip_submit': True, 'hide_timeline': True, 'nav_title': 'All Blips', }
return render(request, 'replica/dashboard/blip/blip_list.html', ctx)
def AddBlipToTimeline(request, timeline_slug):
ft = get_object_or_404(Timeline, slug=timeline_slug)
if ft.rev_order == True:
b = Blip.objects.filter(user=request.user).filter(timeline=ft).order_by('-pub_date')[:10]
else:
b = Blip.objects.filter(user=request.user).filter(timeline=ft).order_by('pub_date')[:10]
instance = Blip(user=request.user, timeline=ft)
f = BlipModelForm(request.POST or None, instance=instance)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'Blip Added.')
return redirect('Replica:Blip:Timeline', timeline_slug=timeline_slug)
ctx = {'form': f, 'timeline': ft, 'adding': True, 'blip_submit': True, 'nav_title': ft.name, 'object_list': b, }
return render(request, 'replica/dashboard/blip/blip_list.html', ctx)
def EditBlip(request, blip_guid):
#Lets a user edit a blip they've previously added.
blip = get_object_or_404(Blip, guid=blip_guid, user=request.user)
f = BlipModelForm(request.POST or None, instance=blip)
if f.is_valid():
f.save()
return redirect('Replica:Blip:Blip', blip_guid=blip_guid)
ctx = {'form': f, 'blip': blip, 'adding': False}
return render(request, 'replica/dashboard/blip/edit_blip.html', ctx)
def DeleteBlip(request, blip_guid):
blip = get_object_or_404(Blip, guid=blip_guid, user=request.user)
if request.method == 'POST':
blip.delete()
return redirect('Replica:Blip:Index')
return render(request, 'replica/dashboard/delete-confirm.html', {'object': blip, 'content_type': 'Blip'})
| underlost/Replica | replica/contrib/blip/dashboard/views.py | Python | mit | 5,663 |
from __future__ import division
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
import networkx as nx
from networkx.utils import pairwise
def validate_path(G, s, t, soln_len, path):
assert_equal(path[0], s)
assert_equal(path[-1], t)
if not G.is_multigraph():
computed = sum(G[u][v].get('weight', 1) for u, v in pairwise(path))
assert_equal(soln_len, computed)
else:
computed = sum(min(e.get('weight', 1) for e in G[u][v].values())
for u, v in pairwise(path))
assert_equal(soln_len, computed)
def validate_length_path(G, s, t, soln_len, length, path):
assert_equal(soln_len, length)
validate_path(G, s, t, length, path)
class WeightedTestBase(object):
"""Base class for test classes that test functions for computing
shortest paths in weighted graphs.
"""
def setup(self):
"""Creates some graphs for use in the unit tests."""
cnlti = nx.convert_node_labels_to_integers
self.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1,
ordering="sorted")
self.cycle = nx.cycle_graph(7)
self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
self.XG = nx.DiGraph()
self.XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
self.MXG = nx.MultiDiGraph(self.XG)
self.MXG.add_edge('s', 'u', weight=15)
self.XG2 = nx.DiGraph()
self.XG2.add_weighted_edges_from([[1, 4, 1], [4, 5, 1],
[5, 6, 1], [6, 3, 1],
[1, 3, 50], [1, 2, 100],
[2, 3, 100]])
self.XG3 = nx.Graph()
self.XG3.add_weighted_edges_from([[0, 1, 2], [1, 2, 12],
[2, 3, 1], [3, 4, 5],
[4, 5, 1], [5, 0, 10]])
self.XG4 = nx.Graph()
self.XG4.add_weighted_edges_from([[0, 1, 2], [1, 2, 2],
[2, 3, 1], [3, 4, 1],
[4, 5, 1], [5, 6, 1],
[6, 7, 1], [7, 0, 1]])
self.MXG4 = nx.MultiGraph(self.XG4)
self.MXG4.add_edge(0, 1, weight=3)
self.G = nx.DiGraph() # no weights
self.G.add_edges_from([('s', 'u'), ('s', 'x'),
('u', 'v'), ('u', 'x'),
('v', 'y'), ('x', 'u'),
('x', 'v'), ('x', 'y'),
('y', 's'), ('y', 'v')])
class TestWeightedPath(WeightedTestBase):
def test_dijkstra(self):
(D, P) = nx.single_source_dijkstra(self.XG, 's')
validate_path(self.XG, 's', 'v', 9, P['v'])
assert_equal(D['v'], 9)
validate_path(
self.XG, 's', 'v', 9, nx.single_source_dijkstra_path(self.XG, 's')['v'])
assert_equal(dict(
nx.single_source_dijkstra_path_length(self.XG, 's'))['v'], 9)
validate_path(
self.XG, 's', 'v', 9, nx.single_source_dijkstra(self.XG, 's')[1]['v'])
validate_path(
self.MXG, 's', 'v', 9, nx.single_source_dijkstra_path(self.MXG, 's')['v'])
GG = self.XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
(D, P) = nx.single_source_dijkstra(GG, 's')
validate_path(GG, 's', 'v', 8, P['v'])
assert_equal(D['v'], 8) # uses lower weight of 2 on u<->x edge
validate_path(GG, 's', 'v', 8, nx.dijkstra_path(GG, 's', 'v'))
assert_equal(nx.dijkstra_path_length(GG, 's', 'v'), 8)
validate_path(self.XG2, 1, 3, 4, nx.dijkstra_path(self.XG2, 1, 3))
validate_path(self.XG3, 0, 3, 15, nx.dijkstra_path(self.XG3, 0, 3))
assert_equal(nx.dijkstra_path_length(self.XG3, 0, 3), 15)
validate_path(self.XG4, 0, 2, 4, nx.dijkstra_path(self.XG4, 0, 2))
assert_equal(nx.dijkstra_path_length(self.XG4, 0, 2), 4)
validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2))
validate_path(
self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's', 'v')[1]['v'])
validate_path(
self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's')[1]['v'])
validate_path(self.G, 's', 'v', 2, nx.dijkstra_path(self.G, 's', 'v'))
assert_equal(nx.dijkstra_path_length(self.G, 's', 'v'), 2)
# NetworkXError: node s not reachable from moon
assert_raises(nx.NetworkXNoPath, nx.dijkstra_path, self.G, 's', 'moon')
assert_raises(
nx.NetworkXNoPath, nx.dijkstra_path_length, self.G, 's', 'moon')
validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3))
validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4))
assert_equal(
nx.single_source_dijkstra(self.cycle, 0, 0), ({0: 0}, {0: [0]}))
def test_bidirectional_dijkstra(self):
validate_length_path(
self.XG, 's', 'v', 9, *nx.bidirectional_dijkstra(self.XG, 's', 'v'))
validate_length_path(
self.G, 's', 'v', 2, *nx.bidirectional_dijkstra(self.G, 's', 'v'))
validate_length_path(
self.cycle, 0, 3, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 3))
validate_length_path(
self.cycle, 0, 4, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 4))
validate_length_path(
self.XG3, 0, 3, 15, *nx.bidirectional_dijkstra(self.XG3, 0, 3))
validate_length_path(
self.XG4, 0, 2, 4, *nx.bidirectional_dijkstra(self.XG4, 0, 2))
# need more tests here
P = nx.single_source_dijkstra_path(self.XG, 's')['v']
validate_path(self.XG, 's', 'v', sum(self.XG[u][v]['weight'] for u, v in zip(
P[:-1], P[1:])), nx.dijkstra_path(self.XG, 's', 'v'))
@raises(nx.NetworkXNoPath)
def test_bidirectional_dijkstra_no_path(self):
G = nx.Graph()
nx.add_path(G, [1, 2, 3])
nx.add_path(G, [4, 5, 6])
path = nx.bidirectional_dijkstra(G, 1, 6)
def test_dijkstra_predecessor(self):
G = nx.path_graph(4)
assert_equal(nx.dijkstra_predecessor_and_distance(G, 0),
({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
G = nx.grid_2d_graph(2, 2)
pred, dist = nx.dijkstra_predecessor_and_distance(G, (0, 0))
assert_equal(sorted(pred.items()),
[((0, 0), []), ((0, 1), [(0, 0)]),
((1, 0), [(0, 0)]), ((1, 1), [(0, 1), (1, 0)])])
assert_equal(sorted(dist.items()),
[((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 2)])
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
(P, D) = nx.dijkstra_predecessor_and_distance(XG, 's')
assert_equal(P['v'], ['u'])
assert_equal(D['v'], 9)
(P, D) = nx.dijkstra_predecessor_and_distance(XG, 's', cutoff=8)
assert_false('v' in D)
def test_single_source_dijkstra_path_length(self):
pl = nx.single_source_dijkstra_path_length
assert_equal(dict(pl(self.MXG4, 0))[2], 4)
spl = pl(self.MXG4, 0, cutoff=2)
assert_false(2 in spl)
def test_bidirectional_dijkstra_multigraph(self):
G = nx.MultiGraph()
G.add_edge('a', 'b', weight=10)
G.add_edge('a', 'b', weight=100)
dp = nx.bidirectional_dijkstra(G, 'a', 'b')
assert_equal(dp, (10, ['a', 'b']))
def test_dijkstra_pred_distance_multigraph(self):
G = nx.MultiGraph()
G.add_edge('a', 'b', key='short', foo=5, weight=100)
G.add_edge('a', 'b', key='long', bar=1, weight=110)
p, d = nx.dijkstra_predecessor_and_distance(G, 'a')
assert_equal(p, {'a': [], 'b': ['a']})
assert_equal(d, {'a': 0, 'b': 100})
def test_negative_edge_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
assert_equal(nx.negative_edge_cycle(G), False)
G.add_edge(8, 9, weight=-7)
G.add_edge(9, 8, weight=3)
graph_size = len(G)
assert_equal(nx.negative_edge_cycle(G), True)
assert_equal(graph_size, len(G))
assert_raises(ValueError, nx.single_source_dijkstra_path_length, G, 8)
assert_raises(ValueError, nx.single_source_dijkstra, G, 8)
assert_raises(ValueError, nx.dijkstra_predecessor_and_distance, G, 8)
G.add_edge(9, 10)
assert_raises(ValueError, nx.bidirectional_dijkstra, G, 8, 10)
def test_weight_function(self):
"""Tests that a callable weight is interpreted as a weight
function instead of an edge attribute.
"""
# Create a triangle in which the edge from node 0 to node 2 has
# a large weight and the other two edges have a small weight.
G = nx.complete_graph(3)
G.edge[0][2]['weight'] = 10
G.edge[0][1]['weight'] = 1
G.edge[1][2]['weight'] = 1
# The weight function will take the multiplicative inverse of
# the weights on the edges. This way, weights that were large
# before now become small and vice versa.
weight = lambda u, v, d: 1 / d['weight']
# The shortest path from 0 to 2 using the actual weights on the
# edges should be [0, 1, 2].
distances, paths = nx.single_source_dijkstra(G, 0, 2)
assert_equal(distances[2], 2)
assert_equal(paths[2], [0, 1, 2])
# However, with the above weight function, the shortest path
# should be [0, 2], since that has a very small weight.
distances, paths = nx.single_source_dijkstra(G, 0, 2, weight=weight)
assert_equal(distances[2], 1 / 10)
assert_equal(paths[2], [0, 2])
class TestDijkstraPathLength(object):
"""Unit tests for the :func:`networkx.dijkstra_path_length`
function.
"""
def test_weight_function(self):
"""Tests for computing the length of the shortest path using
Dijkstra's algorithm with a user-defined weight function.
"""
# Create a triangle in which the edge from node 0 to node 2 has
# a large weight and the other two edges have a small weight.
G = nx.complete_graph(3)
G.edge[0][2]['weight'] = 10
G.edge[0][1]['weight'] = 1
G.edge[1][2]['weight'] = 1
# The weight function will take the multiplicative inverse of
# the weights on the edges. This way, weights that were large
# before now become small and vice versa.
weight = lambda u, v, d: 1 / d['weight']
# The shortest path from 0 to 2 using the actual weights on the
# edges should be [0, 1, 2]. However, with the above weight
# function, the shortest path should be [0, 2], since that has a
# very small weight.
length = nx.dijkstra_path_length(G, 0, 2, weight=weight)
assert_equal(length, 1 / 10)
class TestMultiSourceDijkstra(object):
"""Unit tests for the multi-source dialect of Dijkstra's shortest
path algorithms.
"""
@raises(ValueError)
def test_no_sources(self):
nx.multi_source_dijkstra(nx.Graph(), {})
@raises(ValueError)
def test_path_no_sources(self):
nx.multi_source_dijkstra_path(nx.Graph(), {})
@raises(ValueError)
def test_path_length_no_sources(self):
nx.multi_source_dijkstra_path_length(nx.Graph(), {})
def test_two_sources(self):
edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)]
G = nx.Graph()
G.add_weighted_edges_from(edges)
sources = {0, 4}
distances, paths = nx.multi_source_dijkstra(G, sources)
expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}
expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]}
assert_equal(distances, expected_distances)
assert_equal(paths, expected_paths)
class TestBellmanFordAndGoldbergRadzik(WeightedTestBase):
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
assert_equal(nx.single_source_bellman_ford_path(G, 0), {0: [0]})
assert_equal(dict(nx.single_source_bellman_ford_path_length(G, 0)), {0: 0})
assert_equal(nx.single_source_bellman_ford(G, 0), ({0: 0}, {0: [0]}))
assert_equal(nx.bellman_ford_predecessor_and_distance(G, 0), ({0: [None]}, {0: 0}))
assert_equal(nx.goldberg_radzik(G, 0), ({0: None}, {0: 0}))
assert_raises(nx.NodeNotFound, nx.bellman_ford_predecessor_and_distance, G, 1)
assert_raises(nx.NodeNotFound, nx.goldberg_radzik, G, 1)
def test_negative_weight_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-7)
for i in range(5):
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i)
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i)
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
assert_raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i)
assert_raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.cycle_graph(5) # undirected Graph
G.add_edge(1, 2, weight=-3)
for i in range(5):
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i)
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i)
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
assert_raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i)
assert_raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.DiGraph([(1, 1, {'weight': -1})])
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1)
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1)
assert_raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1)
assert_raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1)
assert_raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1)
# no negative cycle but negative weight
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-3)
assert_equal(nx.single_source_bellman_ford_path(G, 0),
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]})
assert_equal(dict(nx.single_source_bellman_ford_path_length(G, 0)),
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0})
assert_equal(nx.single_source_bellman_ford(G, 0),
({0: 0, 1: 1, 2: -2, 3: -1, 4: 0},
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]}))
assert_equal(nx.bellman_ford_predecessor_and_distance(G, 0),
({0: [None], 1: [0], 2: [1], 3: [2], 4: [3]},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
assert_equal(nx.goldberg_radzik(G, 0),
({0: None, 1: 0, 2: 1, 3: 2, 4: 3},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
def test_not_connected(self):
G = nx.complete_graph(6)
G.add_edge(10, 11)
G.add_edge(10, 12)
assert_equal(nx.single_source_bellman_ford_path(G, 0),
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]})
assert_equal(dict(nx.single_source_bellman_ford_path_length(G, 0)),
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
assert_equal(nx.single_source_bellman_ford(G, 0),
({0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}))
assert_equal(nx.bellman_ford_predecessor_and_distance(G, 0),
({0: [None], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
assert_equal(nx.goldberg_radzik(G, 0),
({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
# not connected, with a component not containing the source that
# contains a negative cost cycle.
G = nx.complete_graph(6)
G.add_edges_from([('A', 'B', {'load': 3}),
('B', 'C', {'load': -10}),
('C', 'A', {'load': 2})])
assert_equal(nx.single_source_bellman_ford_path(G, 0, weight='load'),
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]})
assert_equal(dict(nx.single_source_bellman_ford_path_length(G, 0, weight='load')),
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
assert_equal(nx.single_source_bellman_ford(G, 0, weight='load'),
({0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}))
assert_equal(nx.bellman_ford_predecessor_and_distance(G, 0, weight='load'),
({0: [None], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
assert_equal(nx.goldberg_radzik(G, 0, weight='load'),
({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
def test_multigraph(self):
assert_equal(nx.bellman_ford_path(self.MXG, 's', 'v'), ['s', 'x', 'u', 'v'])
assert_equal(nx.bellman_ford_path_length(self.MXG, 's', 'v'), 9)
assert_equal(nx.single_source_bellman_ford_path(self.MXG, 's')['v'], ['s', 'x', 'u', 'v'])
assert_equal(dict(nx.single_source_bellman_ford_path_length(self.MXG, 's'))['v'], 9)
D, P = nx.single_source_bellman_ford(self.MXG, 's', target='v')
assert_equal(D['v'], 9)
assert_equal(P['v'], ['s', 'x', 'u', 'v'])
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, 's')
assert_equal(P['v'], ['u'])
assert_equal(D['v'], 9)
P, D = nx.goldberg_radzik(self.MXG, 's')
assert_equal(P['v'], 'u')
assert_equal(D['v'], 9)
assert_equal(nx.bellman_ford_path(self.MXG4, 0, 2), [0, 1, 2])
assert_equal(nx.bellman_ford_path_length(self.MXG4, 0, 2), 4)
assert_equal(nx.single_source_bellman_ford_path(self.MXG4, 0)[2], [0, 1, 2])
assert_equal(dict(nx.single_source_bellman_ford_path_length(self.MXG4, 0))[2], 4)
D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2)
assert_equal(D[2], 4)
assert_equal(P[2], [0, 1, 2])
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0)
assert_equal(P[2], [1])
assert_equal(D[2], 4)
P, D = nx.goldberg_radzik(self.MXG4, 0)
assert_equal(P[2], 1)
assert_equal(D[2], 4)
def test_others(self):
assert_equal(nx.bellman_ford_path(self.XG, 's', 'v'), ['s', 'x', 'u', 'v'])
assert_equal(nx.bellman_ford_path_length(self.XG, 's', 'v'), 9)
assert_equal(nx.single_source_bellman_ford_path(self.XG, 's')['v'], ['s', 'x', 'u', 'v'])
assert_equal(dict(nx.single_source_bellman_ford_path_length(self.XG, 's'))['v'], 9)
D, P = nx.single_source_bellman_ford(self.XG, 's', target='v')
assert_equal(D['v'], 9)
assert_equal(P['v'], ['s', 'x', 'u', 'v'])
(P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, 's')
assert_equal(P['v'], ['u'])
assert_equal(D['v'], 9)
(P, D) = nx.goldberg_radzik(self.XG, 's')
assert_equal(P['v'], 'u')
assert_equal(D['v'], 9)
G = nx.path_graph(4)
assert_equal(nx.single_source_bellman_ford_path(G, 0),
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]})
assert_equal(dict(nx.single_source_bellman_ford_path_length(G, 0)),
{0: 0, 1: 1, 2: 2, 3: 3})
assert_equal(nx.single_source_bellman_ford(G, 0),
({0: 0, 1: 1, 2: 2, 3: 3}, {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]}))
assert_equal(nx.bellman_ford_predecessor_and_distance(G, 0),
({0: [None], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
assert_equal(nx.goldberg_radzik(G, 0),
({0: None, 1: 0, 2: 1, 3: 2}, {0: 0, 1: 1, 2: 2, 3: 3}))
assert_equal(nx.single_source_bellman_ford_path(G, 3),
{0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]})
assert_equal(dict(nx.single_source_bellman_ford_path_length(G, 3)),
{0: 3, 1: 2, 2: 1, 3: 0})
assert_equal(nx.single_source_bellman_ford(G, 3),
({0: 3, 1: 2, 2: 1, 3: 0}, {0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]}))
assert_equal(nx.bellman_ford_predecessor_and_distance(G, 3),
({0: [1], 1: [2], 2: [3], 3: [None]}, {0: 3, 1: 2, 2: 1, 3: 0}))
assert_equal(nx.goldberg_radzik(G, 3),
({0: 1, 1: 2, 2: 3, 3: None}, {0: 3, 1: 2, 2: 1, 3: 0}))
G = nx.grid_2d_graph(2, 2)
dist, path = nx.single_source_bellman_ford(G, (0, 0))
assert_equal(sorted(dist.items()),
[((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 2)])
assert_equal(sorted(path.items()),
[((0, 0), [(0, 0)]), ((0, 1), [(0, 0), (0, 1)]),
((1, 0), [(0, 0), (1, 0)]), ((1, 1), [(0, 0), (0, 1), (1, 1)])])
pred, dist = nx.bellman_ford_predecessor_and_distance(G, (0, 0))
assert_equal(sorted(pred.items()),
[((0, 0), [None]), ((0, 1), [(0, 0)]),
((1, 0), [(0, 0)]), ((1, 1), [(0, 1), (1, 0)])])
assert_equal(sorted(dist.items()),
[((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 2)])
pred, dist = nx.goldberg_radzik(G, (0, 0))
assert_equal(sorted(pred.items()),
[((0, 0), None), ((0, 1), (0, 0)),
((1, 0), (0, 0)), ((1, 1), (0, 1))])
assert_equal(sorted(dist.items()),
[((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 2)])
class TestJohnsonAlgorithm(WeightedTestBase):
@raises(nx.NetworkXError)
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
nx.johnson(G)
def test_negative_cycle(self):
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
assert_raises(nx.NetworkXUnbounded, nx.johnson, G)
G = nx.Graph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
assert_raises(nx.NetworkXUnbounded, nx.johnson, G)
def test_negative_weights(self):
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
paths = nx.johnson(G)
assert_equal(paths, {'1': {'1': ['1'], '3': ['1', '2', '3'],
'2': ['1', '2']}, '0': {'1': ['0', '1'],
'0': ['0'], '3': ['0', '1', '2', '3'],
'2': ['0', '1', '2']}, '3': {'3': ['3']},
'2': {'3': ['2', '3'], '2': ['2']}})
@raises(nx.NetworkXError)
def test_unweighted_graph(self):
G = nx.path_graph(5)
nx.johnson(G)
def test_graphs(self):
validate_path(self.XG, 's', 'v', 9, nx.johnson(self.XG)['s']['v'])
validate_path(self.MXG, 's', 'v', 9, nx.johnson(self.MXG)['s']['v'])
validate_path(self.XG2, 1, 3, 4, nx.johnson(self.XG2)[1][3])
validate_path(self.XG3, 0, 3, 15, nx.johnson(self.XG3)[0][3])
validate_path(self.XG4, 0, 2, 4, nx.johnson(self.XG4)[0][2])
validate_path(self.MXG4, 0, 2, 4, nx.johnson(self.MXG4)[0][2])
| cmtm/networkx | networkx/algorithms/shortest_paths/tests/test_weighted.py | Python | bsd-3-clause | 25,025 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-01-29 02:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('human_feedback_api', '0003_auto_20170129_0220'),
]
operations = [
migrations.AlterField(
model_name='feedback',
name='metadata_json',
field=models.TextField(blank=True, null=True, verbose_name=b'json encoded string with metadata'),
),
migrations.AlterField(
model_name='feedback',
name='responded_at',
field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name=b'date created'),
),
migrations.AlterField(
model_name='feedback',
name='response',
field=models.TextField(blank=True, db_index=True, null=True, verbose_name=b'the response from the tasker'),
),
migrations.AlterField(
model_name='feedback',
name='shown_to_tasker_at',
field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name=b'date created'),
),
migrations.AlterField(
model_name='feedback',
name='tasker_slack_name',
field=models.TextField(blank=True, db_index=True, null=True, verbose_name=b'The slack name of the tasker (without the @)'),
),
]
| nottombrown/rl-teacher | human-feedback-api/human_feedback_api/migrations/0004_auto_20170129_0223.py | Python | mit | 1,448 |
from C4CApplication.page_objects.FixedPage import FixedPage
from selenium.common.exceptions import NoSuchElementException
class MemberDetailsPage(FixedPage):
def __init__(self, driver):
super().__init__(driver)
self.favorite_button = None
self.log_as_button = None
try:
self.favorite_button = self.driver.find_element_by_xpath('//input[@id="AddRelation"]')
except NoSuchElementException:
pass
try:
self.log_as_button = self.driver.find_element_by_link_text("Log as member")
except NoSuchElementException:
pass
def click_on_add_friend(self):
if self.favorite_button is not None :
self.favorite_button.click()
return self
def click_on_log_as_member(self):
if self.log_as_button is not None :
self.log_as_button.click()
return self
| dsarkozi/care4care-sdp-grp4 | Care4Care/C4CApplication/page_objects/MemberDetailsPage.py | Python | agpl-3.0 | 968 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# 66 0f 38 22 /r
        # PMOVSXBQ xmm1, xmm2/m64
Buffer = bytes.fromhex('660f38229000000000')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x0f3822)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'pmovsxbq')
assert_equal(myDisasm.repr(), 'pmovsxbq xmm2, qword ptr [rax+00000000h]')
# VEX.128.66.0F38.WIG 22 /r
# vpmovsxbq xmm1, xmm2/m64
myVEX = VEX('VEX.128.66.0F38.WIG')
Buffer = bytes.fromhex('{}229000000000'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsxbq')
assert_equal(myDisasm.repr(), 'vpmovsxbq xmm10, qword ptr [r8+00000000h]')
# VEX.256.66.0F38.WIG 22 /r
# vpmovsxbq ymm1, xmm2/m128
myVEX = VEX('VEX.256.66.0F38.WIG')
Buffer = bytes.fromhex('{}229000000000'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsxbq')
assert_equal(myDisasm.repr(), 'vpmovsxbq ymm10, xmmword ptr [r8+00000000h]')
# EVEX.128.66.0F38.WIG 22 /r
# vpmovsxbq xmm1 {k1}{z}, xmm2/m64
myEVEX = EVEX('EVEX.128.66.0F38.WIG')
Buffer = bytes.fromhex('{}229000000000'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsxbq')
assert_equal(myDisasm.repr(), 'vpmovsxbq xmm26, qword ptr [r8+00000000h]')
# EVEX.256.66.0F38.WIG 22 /r
# vpmovsxbq ymm1 {k1}{z}, xmm2/m128
myEVEX = EVEX('EVEX.256.66.0F38.WIG')
Buffer = bytes.fromhex('{}229000000000'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsxbq')
assert_equal(myDisasm.repr(), 'vpmovsxbq ymm26, xmmword ptr [r8+00000000h]')
# EVEX.512.66.0F38.WIG 22 /r
# vpmovsxbq zmm1 {k1}{z}, ymm2/m256
myEVEX = EVEX('EVEX.512.66.0F38.WIG')
Buffer = bytes.fromhex('{}229000000000'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsxbq')
assert_equal(myDisasm.repr(), 'vpmovsxbq zmm26, ymmword ptr [r8+00000000h]')
# EVEX.128.F3.0F38.W0 22 /r
# vpmovsqb xmm1/m64 {k1}{z},xmm2
myEVEX = EVEX('EVEX.128.F3.0F38.W0')
Buffer = bytes.fromhex('{}229000000000'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsqb')
assert_equal(myDisasm.repr(), 'vpmovsqb qword ptr [r8+00000000h], xmm26')
# EVEX.256.F3.0F38.W0 22 /r
# vpmovsqb xmm1/m128 {k1}{z},ymm2
myEVEX = EVEX('EVEX.256.F3.0F38.W0')
Buffer = bytes.fromhex('{}229000000000'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsqb')
assert_equal(myDisasm.repr(), 'vpmovsqb xmmword ptr [r8+00000000h], ymm26')
# EVEX.512.F3.0F38.W0 22 /r
# vpmovsqb ymm1/m256 {k1}{z},zmm2
myEVEX = EVEX('EVEX.512.F3.0F38.W0')
Buffer = bytes.fromhex('{}229000000000'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x22)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmovsqb')
assert_equal(myDisasm.repr(), 'vpmovsqb ymmword ptr [r8+00000000h], zmm26')
| 0vercl0k/rp | src/third_party/beaengine/tests/0f3822.py | Python | mit | 5,003 |
# -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace
""" rTorrent web apps.
Copyright (c) 2013 The PyroScope Project <pyroscope.project@gmail.com>
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
import os
import re
import json
import time
import socket
#import mimetypes
import psutil
from webob import exc, static, Request, Response
from webob.dec import wsgify
#from webob.response import Response
from pyrobase.parts import Bunch
from pyrocore import config, error
from pyrocore.util import pymagic, xmlrpc, stats
class StaticFolders(object):
""" An application that serves up the files in a list of given directories.
Non-existent paths are ignored.
Pass a `fileapp` factory to change the default file serving app.
"""
def __init__(self, paths, fileapp=None, **kw):
self.LOG = pymagic.get_class_logger(self)
self.paths = []
self.fileapp = fileapp or static.FileApp
self.fileapp_kw = kw
for path in paths:
path = os.path.abspath(path).rstrip(os.path.sep) + os.path.sep
if os.path.isdir(path):
self.paths.append(path)
else:
self.LOG.warn("Static HTTP directory %r not found, ignoring it" % path)
@wsgify
def __call__(self, req):
urlpath = req.urlvars.filepath.strip('/').replace("..", "!FORBIDDEN!")
for basepath in self.paths:
path = os.path.abspath(os.path.realpath(os.path.join(basepath, urlpath)))
if not os.path.isfile(path):
continue
elif not path.startswith(basepath):
return exc.HTTPForbidden(comment="Won't follow symlink to %r" % urlpath)
else:
return self.fileapp(path, **self.fileapp_kw)
return exc.HTTPNotFound(comment=urlpath)
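# Illustrative sketch (not part of the original module): directories that do
# not exist are skipped with a warning, so user and system locations can be
# listed unconditionally (the paths below are made up).
#
#   static_app = StaticFolders(['/etc/pyroscope/htdocs',
#                               '/usr/share/pyroscope/htdocs'])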
class JsonController(object):
""" Controller for generating JSON data.
"""
ERRORS_LOGGED = set()
def __init__(self, **kwargs):
self.LOG = pymagic.get_class_logger(self)
self.cfg = Bunch(kwargs)
@wsgify
def __call__(self, req):
action = req.urlvars.get("action")
try:
try:
method = getattr(self, "json_" + action)
except AttributeError:
raise exc.HTTPNotFound("No action '%s'" % action)
resp = method(req)
if isinstance(resp, (dict, list)):
try:
resp = json.dumps(resp, sort_keys=True)
except (TypeError, ValueError, IndexError, AttributeError) as json_exc:
raise exc.HTTPInternalServerError("JSON serialization error (%s)" % json_exc)
if isinstance(resp, basestring):
resp = Response(body=resp, content_type="application/json")
except exc.HTTPException as http_exc:
resp = http_exc
return resp
def guarded(self, func, *args, **kwargs):
""" Call a function, return None on errors.
"""
try:
return func(*args, **kwargs)
except (EnvironmentError, error.LoggableError, xmlrpc.ERRORS) as g_exc:
if func.__name__ not in self.ERRORS_LOGGED:
self.LOG.warn("While calling '%s': %s" % (func.__name__, g_exc))
self.ERRORS_LOGGED.add(func.__name__)
return None
def json_engine(self, req): # pylint: disable=R0201,W0613
""" Return torrent engine data.
"""
try:
return stats.engine_data(config.engine)
except (error.LoggableError, xmlrpc.ERRORS) as torrent_exc:
raise exc.HTTPInternalServerError(str(torrent_exc))
def json_charts(self, req):
""" Return charting data.
"""
disk_used, disk_total, disk_detail = 0, 0, []
for disk_usage_path in self.cfg.disk_usage_path.split(os.pathsep):
disk_usage = self.guarded(psutil.disk_usage, os.path.expanduser(disk_usage_path.strip()))
if disk_usage:
disk_used += disk_usage.used
disk_total += disk_usage.total
disk_detail.append((disk_usage.used, disk_usage.total))
data = dict(
engine = self.json_engine(req),
uptime = time.time() - psutil.BOOT_TIME, # pylint: disable=no-member
fqdn = self.guarded(socket.getfqdn),
cpu_usage = self.guarded(psutil.cpu_percent, 0),
ram_usage = self.guarded(psutil.virtual_memory),
swap_usage = self.guarded(psutil.swap_memory),
disk_usage = (disk_used, disk_total, disk_detail) if disk_total else None,
disk_io = self.guarded(psutil.disk_io_counters),
net_io = self.guarded(psutil.net_io_counters),
)
return data
class Router(object):
""" URL router middleware.
See http://docs.webob.org/en/latest/do-it-yourself.html
"""
ROUTES_RE = re.compile(r'''
\{ # The exact character "{"
(\w+) # The variable name (restricted to a-z, 0-9, _)
(?::([^}]+))? # The optional :regex part
\} # The exact character "}"
''', re.VERBOSE)
@classmethod
def parse_route(cls, template):
""" Parse a route definition, and return the compiled regex that matches it.
"""
regex = ''
last_pos = 0
for match in cls.ROUTES_RE.finditer(template):
regex += re.escape(template[last_pos:match.start()])
var_name = match.group(1)
expr = match.group(2) or '[^/]+'
expr = '(?P<%s>%s)' % (var_name, expr)
regex += expr
last_pos = match.end()
regex += re.escape(template[last_pos:])
regex = '^%s$' % regex
return re.compile(regex)
def __init__(self):
self.LOG = pymagic.get_class_logger(self)
self.routes = []
def add_route(self, template, controller, **kwargs):
""" Add a route definition
`controller` can be either a controller instance,
or the name of a callable that will be imported.
"""
if isinstance(controller, basestring):
controller = pymagic.import_name(controller)
self.routes.append((self.parse_route(template), controller, kwargs))
return self
def __call__(self, environ, start_response):
req = Request(environ)
self.LOG.debug("Incoming request at %r" % (req.path_info,))
for regex, controller, kwargs in self.routes:
match = regex.match(req.path_info)
if match:
req.urlvars = Bunch(kwargs)
req.urlvars.update(match.groupdict())
self.LOG.debug("controller=%r; vars=%r; req=%r; env=%r" % (controller, req.urlvars, req, environ))
return controller(environ, start_response)
return exc.HTTPNotFound()(environ, start_response)
@wsgify
def redirect(req, _log=pymagic.get_lazy_logger("redirect")):
""" Redirect controller to emit a HTTP 301.
"""
log = req.environ.get("wsgilog.logger", _log)
target = req.relative_url(req.urlvars.to)
log.info("Redirecting '%s' to '%s'" % (req.url, target))
return exc.HTTPMovedPermanently(location=target)
def make_app(httpd_config):
""" Factory for the monitoring webapp.
"""
#mimetypes.add_type('image/vnd.microsoft.icon', '.ico')
# Default paths to serve static file from
htdocs_paths = [
os.path.realpath(os.path.join(config.config_dir, "htdocs")),
os.path.join(os.path.dirname(config.__file__), "data", "htdocs"),
]
return (Router()
.add_route("/", controller=redirect, to="/static/index.html")
.add_route("/favicon.ico", controller=redirect, to="/static/favicon.ico")
.add_route("/static/{filepath:.+}", controller=StaticFolders(htdocs_paths))
.add_route("/json/{action}", controller=JsonController(**httpd_config.json))
)
def module_test():
""" Quick test using…
python -m pyrocore.daemon.webapp
"""
import pprint
from pyrocore import connect
try:
engine = connect()
print("%s - %s" % (engine.engine_id, engine.open()))
pprint.pprint(stats.engine_data(engine))
print("%s - %s" % (engine.engine_id, engine.open()))
except (error.LoggableError, xmlrpc.ERRORS) as torrent_exc:
print("ERROR: %s" % torrent_exc)
if __name__ == "__main__":
module_test()
| pyroscope/pyrocore | src/pyrocore/daemon/webapp.py | Python | gpl-2.0 | 9,247 |
# example2.py
#
# Singleton
class Singleton(type):
def __init__(self, *args, **kwargs):
self.__instance = None
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
if self.__instance is None:
self.__instance = super().__call__(*args, **kwargs)
return self.__instance
else:
return self.__instance
class Spam(metaclass=Singleton):
def __init__(self):
print('Creating Spam')
if __name__ == '__main__':
a = Spam()
b = Spam()
print(a is b)
| tuanavu/python-cookbook-3rd | src/9/using_metaclasses_to_control_instance_creation/example2.py | Python | mit | 558 |
# Copyright (c) 2015 Tencent Inc.
# All rights reserved.
#
# Author: Li Wenting <wentingli@tencent.com>
# Date: August 28, 2015
"""
This is the maven module which manages jar files downloaded
from a Maven repository.
"""
import os
import shutil
import subprocess
import time
import configparse
import console
def is_valid_id(id):
"""Check if id is valid. """
parts = id.split(':')
if len(parts) == 3:
group, artifact, version = parts
if group and artifact and version:
return True
return False
class MavenArtifact(object):
"""
MavenArtifact represents a jar artifact and its transitive dependencies
separated by colon in maven cache.
"""
def __init__(self, path):
self.path = path
self.deps = None
class MavenCache(object):
"""MavenCache. Manages maven jar files. """
__instance = None
@staticmethod
def instance(log_dir):
if not MavenCache.__instance:
MavenCache.__instance = MavenCache(log_dir)
return MavenCache.__instance
def __init__(self, log_dir):
"""Init method. """
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.__log_dir = log_dir
# key: (id, classifier)
# id: jar id in the format group:artifact:version
# value: an instance of MavenArtifact
self.__jar_database = {}
java_config = configparse.blade_config.get_config('java_config')
self.__maven = java_config.get('maven')
self.__central_repository = java_config.get('maven_central')
# Local repository is set to the maven default directory
# and could not be configured currently
local_repository = '~/.m2/repository'
self.__local_repository = os.path.expanduser(local_repository)
self.__need_check_config = True
# Download the snapshot artifact daily
self.__build_time = time.time()
self.__one_day_interval = 86400
def _generate_jar_path(self, id):
"""Generate jar path within local repository. """
group, artifact, version = id.split(':')
return os.path.join(self.__local_repository,
group.replace('.', '/'), artifact, version)
def _check_config(self):
"""Check whether maven is configured correctly. """
if not self.__need_check_config:
return
if not self.__maven:
console.error_exit('MavenCache was not configured')
self.__need_check_config = False
def _check_id(self, id):
"""Check if id is valid. """
if not is_valid_id(id):
console.error_exit('Invalid id %s: Id should be group:artifact:version, '
'such as jaxen:jaxen:1.1.6' % id)
def _is_log_expired(self, log):
"""Check if the modification time of log file is expired relative to build time. """
return self.__build_time - os.path.getmtime(log) > self.__one_day_interval
def _download_jar(self, id, classifier):
group, artifact, version = id.split(':')
pom = artifact + '-' + version + '.pom'
jar = artifact + '-' + version + '.jar'
log = artifact + '__download.log'
if classifier:
jar = artifact + '-' + version + '-' + classifier + '.jar'
log = artifact + '-' + classifier + '__download.log'
log_path = os.path.join(self.__log_dir, log)
target_path = self._generate_jar_path(id)
target_log = os.path.join(target_path, log)
if (os.path.isfile(os.path.join(target_path, jar)) and
os.path.isfile(os.path.join(target_path, pom))):
if not version.endswith('-SNAPSHOT'):
return True
if os.path.isfile(target_log) and not self._is_log_expired(target_log):
return True
if classifier:
id = '%s:%s' % (id, classifier)
console.info('Downloading %s from central repository...' % id)
cmd = ' '.join([self.__maven,
'dependency:get',
'-DgroupId=%s' % group,
'-DartifactId=%s' % artifact,
'-Dversion=%s' % version])
if classifier:
cmd += ' -Dclassifier=%s' % classifier
if subprocess.call('%s > %s' % (cmd, log_path), shell=True):
console.warning('Error occurred when downloading %s from central '
'repository. Check %s for more details.' % (
id, log_path))
cmd += ' -Dtransitive=false'
if subprocess.call('%s > %s' % (cmd, log_path + '.transitive'),
shell=True):
return False
console.warning('Download standalone artifact %s successfully, but '
'its transitive dependencies are unavailable.' % id)
shutil.copy(log_path, target_log)
return True
def _download_dependency(self, id, classifier):
group, artifact, version = id.split(':')
target_path = self._generate_jar_path(id)
log, classpath = artifact + '__classpath.log', 'classpath.txt'
if classifier:
log = artifact + '-' + classifier + '__classpath.log'
classpath = 'classpath-%s.txt' % classifier
log = os.path.join(target_path, log)
if os.path.isfile(os.path.join(target_path, classpath)):
if not version.endswith('-SNAPSHOT'):
return True
if os.path.isfile(log) and not self._is_log_expired(log):
return True
if classifier:
id = '%s:%s' % (id, classifier)
# Currently analyzing dependencies of classifier jar
# usually fails. Here when there is no classpath.txt
# file but classpath.log exists, that means the failure
# of analyzing dependencies last time
if (not os.path.exists(os.path.join(target_path, classpath))
and os.path.exists(log)):
return False
console.info('Downloading %s dependencies...' % id)
pom = os.path.join(target_path, artifact + '-' + version + '.pom')
cmd = ' '.join([self.__maven,
'dependency:build-classpath',
'-DincludeScope=runtime',
'-Dmdep.outputFile=%s' % classpath])
if classifier:
cmd += ' -Dclassifier=%s' % classifier
cmd += ' -f %s > %s' % (pom, log)
if subprocess.call(cmd, shell=True):
console.warning('Error occurred when resolving %s dependencies. '
'Check %s for more details.' % (id, log))
return False
return True
def _download_artifact(self, id, classifier):
"""Download the specified jar and its transitive dependencies. """
if not self._download_jar(id, classifier):
return False
group, artifact, version = id.split(':')
path = self._generate_jar_path(id)
jar = artifact + '-' + version + '.jar'
if classifier:
jar = artifact + '-' + version + '-' + classifier + '.jar'
self.__jar_database[(id, classifier)] = MavenArtifact(os.path.join(path, jar))
return True
def _get_artifact_from_database(self, id, classifier):
"""get_artifact_from_database. """
self._check_config()
self._check_id(id)
if (id, classifier) not in self.__jar_database:
if not self._download_artifact(id, classifier):
console.error_exit('Download %s failed' % id)
return self.__jar_database[(id, classifier)]
def get_jar_path(self, id, classifier):
"""get_jar_path
Return local jar path corresponding to the id specified in the
format group:artifact:version.
Download jar files and its transitive dependencies if needed.
"""
artifact = self._get_artifact_from_database(id, classifier)
return artifact.path
def get_jar_deps_path(self, id, classifier):
"""get_jar_deps_path
Return a string of the dependencies path separated by colon.
This string can be used in java -cp later.
"""
artifact = self._get_artifact_from_database(id, classifier)
if artifact.deps is None:
if not self._download_dependency(id, classifier):
# Ignore dependency download error
artifact.deps = ''
else:
path = self._generate_jar_path(id)
classpath = os.path.join(path, 'classpath.txt')
if classifier:
classpath = os.path.join(path, 'classpath-%s.txt' % classifier)
with open(classpath) as f:
# Read the first line
artifact.deps = f.readline()
return artifact.deps
| project-zerus/blade | src/blade/maven.py | Python | bsd-3-clause | 8,974 |
# Copyright (C) 2005 Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
# Copyright (C) 2005 XenSource Ltd
# This file is subject to the terms and conditions of the GNU General
# Public License. See the file "COPYING" in the main directory of
# this archive for more details.
import errno
import threading
from xen.xend.xenstore.xsutil import xshandle
class xswatch:
##
# Create a watch on the given path in the store. The watch will fire
# immediately, then subsequently each time the watched path is changed,
# until the watch is deregistered, either by the return value from the
# watch callback being False, or by an explicit call to unwatch.
#
# @param fn The function to be called when the watch fires. This function
# should take the path that has changed as its first argument, followed by
# the extra arguments given to this constructor, if any. It should return
# True if the watch is to remain registered, or False if it is to be
# deregistered.
#
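    # A minimal usage sketch (the path and callback below are hypothetical):
    #
    #   def domain_changed(path, extra):
    #       print 'changed:', path, extra
    #       return True          # keep the watch registered
    #
    #   w = xswatch("/local/domain/0/name", domain_changed, "extra-arg")
    #   ...
    #   w.unwatch()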
def __init__(self, path, fn, *args, **kwargs):
self.path = path
self.fn = fn
self.args = args
self.kwargs = kwargs
watchStart()
xs.watch(path, self)
def unwatch(self):
xs.unwatch(self.path, self)
watchThread = None
xs = None
xslock = threading.Lock()
def watchStart():
global watchThread
global xs
xslock.acquire()
try:
if watchThread:
return
xs = xshandle()
watchThread = threading.Thread(name="Watcher", target=watchMain)
watchThread.setDaemon(True)
watchThread.start()
finally:
xslock.release()
def watchMain():
while True:
try:
we = xs.read_watch()
watch = we[1]
res = watch.fn(we[0], *watch.args, **watch.kwargs)
if not res:
try:
watch.unwatch()
except RuntimeError, exn:
if exn.args[0] == errno.ENOENT:
# The watch has already been unregistered -- that's
# fine.
pass
else:
raise
except:
pass
# Ignore this exception -- there's no point throwing it
# further on because that will just kill the watcher thread,
# which achieves nothing.
| kaustubh-kabra/modified-xen | tools/python/xen/xend/xenstore/xswatch.py | Python | gpl-2.0 | 2,418 |
import cv2
import numpy as np
img = cv2.imread('alpha_o.JPG',1)
scaledown = cv2.resize(img,(0,0),fx=0.25,fy=0.25)
color_space = cv2.cvtColor(scaledown,cv2.COLOR_RGB2HSV)
sift = cv2.SIFT(7)
kp = sift.detect(color_space,None)
scaledown = cv2.drawKeypoints(color_space,kp)
cv2.imshow('image',scaledown)
cv2.waitKey(0)
cv2.destroyAllWindows()
__author__ = 'ganesh'
| xgt001/uav_msr_ip14 | opencv_rnd/buff.py | Python | mit | 373 |
"""
Cluster definition
part of context, Cluster is used to save connection information.
"""
from teuthology.orchestra import run
class Cluster(object):
"""
Manage SSH connections to a cluster of machines.
"""
def __init__(self, remotes=None):
"""
:param remotes: A sequence of 2-tuples of this format:
(Remote, [role_1, role_2 ...])
"""
self.remotes = {}
if remotes is not None:
for remote, roles in remotes:
self.add(remote, roles)
def __repr__(self):
remotes = [(k, v) for k, v in self.remotes.items()]
remotes.sort(key=lambda tup: tup[0].name)
remotes = '[' + ', '.join('[{remote!r}, {roles!r}]'.format(
remote=k, roles=v) for k, v in remotes) + ']'
return '{classname}(remotes={remotes})'.format(
classname=self.__class__.__name__,
remotes=remotes,
)
def __str__(self):
remotes = list(self.remotes.items())
remotes.sort(key=lambda tup: tup[0].name)
remotes = ((k, ','.join(v)) for k, v in remotes)
remotes = ('{k}[{v}]'.format(k=k, v=v) for k, v in remotes)
return ' '.join(remotes)
def add(self, remote, roles):
"""
Add roles to the list of remotes.
"""
if remote in self.remotes:
raise RuntimeError(
'Remote {new!r} already found in remotes: {old!r}'.format(
new=remote,
old=self.remotes[remote],
),
)
self.remotes[remote] = list(roles)
def run(self, wait=True, parallel=False, **kwargs):
"""
Run a command on all the nodes in this cluster.
Goes through nodes in alphabetical order.
The default usage is when parallel=False and wait=True,
which is a sequential run for each node one by one.
If you specify parallel=True, it will be in parallel.
If you specify wait=False, it returns immediately.
        Since it is not possible to run sequentially without waiting
        for each command to finish, the parallel value is ignored and
        treated as True whenever wait=False.
Returns a list of `RemoteProcess`.
"""
# -+-------+----------+----------+------------+---------------
# | wait | parallel | run.wait | remote.run | comments
# -+-------+----------+----------+------------+---------------
# 1|*True |*False | no | wait=True | sequentially
# 2| True | True | yes | wait=False | parallel
# 3| False | True | no | wait=False | parallel
# 4| False | False | no | wait=False | same as above
# We always run in parallel if wait=False,
# that is why (4) is equivalent to (3).
# We wait from remote.run only if run sequentially.
_wait = (parallel == False and wait == True)
remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
procs = [remote.run(**kwargs, wait=_wait) for remote in remotes]
# We do run.wait only if parallel=True, because if parallel=False,
# we have run sequentially and all processes are complete.
if parallel and wait:
run.wait(procs)
return procs
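    # A usage sketch for run() (the command and roles are illustrative):
    #   ctx.cluster.only('osd.0').run(args=['uptime'])          # sequential, waits
    #   procs = ctx.cluster.run(args=['hostname'], wait=False)  # parallel, returns at once
    #   run.wait(procs)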
def sh(self, script, **kwargs):
"""
Run a command on all the nodes in this cluster.
Goes through nodes in alphabetical order.
Returns a list of the command outputs correspondingly.
"""
remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
return [remote.sh(script, **kwargs) for remote in remotes]
def write_file(self, file_name, content, sudo=False, perms=None, owner=None):
"""
Write text to a file on each node.
:param file_name: file name
:param content: file content
:param sudo: use sudo
:param perms: file permissions (passed to chmod) ONLY if sudo is True
"""
remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
for remote in remotes:
if sudo:
remote.write_file(file_name, content,
sudo=True, mode=perms, owner=owner)
else:
if perms is not None or owner is not None:
raise ValueError("To specify perms or owner, sudo must be True")
remote.write_file(file_name, content)
def only(self, *roles):
"""
Return a cluster with only the remotes that have all of given roles.
For roles given as strings, they are matched against the roles
on a remote, and the remote passes the check only if all the
roles listed are present.
Argument can be callable, and will act as a match on roles of
the remote. The matcher will be evaluated one role at a time,
but a match on any role is good enough. Note that this is
        subtly different from the behavior of string roles, but is
logical if you consider a callable to be similar to passing a
non-string object with an `__eq__` method.
For example::
web = mycluster.only(lambda role: role.startswith('web-'))
"""
c = self.__class__()
want = frozenset(r for r in roles if not callable(r))
matchers = [r for r in roles if callable(r)]
for remote, has_roles in self.remotes.items():
# strings given as roles must all match
if frozenset(has_roles) & want != want:
# not a match
continue
# every matcher given must match at least one role
if not all(
any(matcher(role) for role in has_roles)
for matcher in matchers
):
continue
c.add(remote, has_roles)
return c
def exclude(self, *roles):
"""
Return a cluster *without* remotes that have all of given roles.
This is the opposite of `only`.
"""
matches = self.only(*roles)
c = self.__class__()
for remote, has_roles in self.remotes.items():
if remote not in matches.remotes:
c.add(remote, has_roles)
return c
def filter(self, func):
"""
Return a cluster whose remotes are filtered by `func`.
Example::
cluster = ctx.cluster.filter(lambda r: r.is_online)
"""
result = self.__class__()
for rem, roles in self.remotes.items():
if func(rem):
result.add(rem, roles)
return result
| ceph/teuthology | teuthology/orchestra/cluster.py | Python | mit | 6,697 |
# Copyright (C) 2014 Red Hat, Inc.
#
# Author: Rich Megginson <rmeggins@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslotest import mockpatch
from requests.auth import AuthBase
from designate import tests
from designate import utils
from designate.tests.test_backend import BackendTestMixin
from designate.openstack.common import jsonutils as json
from designate.backend import impl_ipa
ipamethods = {"dnszone_add": {}, "dnszone_mod": {},
"dnszone_del": {}, "dnsrecord_add": {},
"dnsrecord_mod": {}, "dnsrecord_del": {},
}
class MockIPAAuth(AuthBase):
def __init__(self, hostname, keytab):
self.count = 0
def refresh_auth(self):
self.count += 1
class MockResponse(object):
def __init__(self, status_code, jsontext):
self.status_code = status_code
self.text = jsontext
class MockRequest(object):
def __init__(self, testcase):
self.headers = {}
self.myauth = MockIPAAuth("ignore", "ignore")
self.testcase = testcase
self.error = None
self.needauth = False
@property
def auth(self):
# always return the mock object
return self.myauth
@auth.setter
def auth(self, val):
# disallow setting
pass
def post(self, jsonurl, data):
# convert json data string to dict
ddict = json.loads(data)
# check basic parameters
self.testcase.assertIn('method', ddict)
meth = ddict['method']
self.testcase.assertIn(meth, ipamethods)
self.testcase.assertIn('params', ddict)
self.testcase.assertIsInstance(ddict['params'], list)
self.testcase.assertEqual(len(ddict['params']), 2)
self.testcase.assertIsInstance(ddict['params'][0], list)
self.testcase.assertIsInstance(ddict['params'][1], dict)
self.testcase.assertIn('version', ddict['params'][1])
# check method specific parameters
if meth.startswith('dnsrecord_'):
self.testcase.assertEqual(len(ddict['params'][0]), 2)
# domain params end with a .
param1 = ddict['params'][0][0]
self.testcase.assertEqual(param1[-1], ".")
elif meth.startswith('dnszone_'):
self.testcase.assertEqual(len(ddict['params'][0]), 1)
param1 = ddict['params'][0][0]
self.testcase.assertEqual(param1[-1], ".")
rc = {}
if self.needauth:
self.needauth = False # reset
return MockResponse(401, json.dumps(rc))
if self.error:
rc['error'] = {'code': self.error}
self.error = None # reset
else:
rc['error'] = None
return MockResponse(200, json.dumps(rc))
class IPABackendTestCase(tests.TestCase, BackendTestMixin):
def get_record_fixture(self, recordset_type, fixture=0, values=None):
"""override to ensure all records have a recordset_id"""
values = values or {}
return super(IPABackendTestCase, self).get_record_fixture(
recordset_type, fixture,
values={
'recordset_id': utils.generate_uuid()
}
)
def setUp(self):
super(IPABackendTestCase, self).setUp()
self.request = MockRequest(self)
# make requests return our mock object
def getSession():
return self.request
# replace requests.Session() with our mock version
self.useFixture(fixtures.MonkeyPatch('requests.Session', getSession))
self.config(backend_driver='ipa', group='service:agent')
self.backend = self.get_backend_driver()
self.CONF['backend:ipa'].ipa_auth_driver_class = \
"designate.tests.test_backend.test_ipa.MockIPAAuth"
self.backend.start()
        # Since some CRUD methods in impl_ipa call central's find_servers
        # and find_records methods, mock them up to return our fixtures.
self.useFixture(mockpatch.PatchObject(
self.backend.central_service,
'find_servers',
return_value=[self.get_server_fixture()]
))
self.useFixture(mockpatch.PatchObject(
self.backend.central_service,
'find_records',
return_value=[self.get_record_fixture('A')]
))
def test_create_server(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
def test_update_server(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
self.backend.update_server(context, server)
def test_delete_server(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
self.backend.delete_server(context, server)
def test_create_domain(self):
context = self.get_context()
server = self.get_server_fixture()
domain = self.get_domain_fixture()
self.backend.create_server(context, server)
self.backend.create_domain(context, domain)
def test_update_domain(self):
context = self.get_context()
server = self.get_server_fixture()
domain = self.get_domain_fixture()
self.backend.create_server(context, server)
self.backend.create_domain(context, domain)
domain['serial'] = 123456789
self.backend.update_domain(context, domain)
def test_delete_domain(self):
context = self.get_context()
server = self.get_server_fixture()
domain = self.get_domain_fixture()
self.backend.create_server(context, server)
self.backend.create_domain(context, domain)
self.backend.delete_domain(context, domain)
def test_create_domain_dup_domain(self):
context = self.get_context()
server = self.get_server_fixture()
domain = self.get_domain_fixture()
self.backend.create_server(context, server)
self.request.error = impl_ipa.IPA_DUPLICATE
self.assertRaises(impl_ipa.IPADuplicateDomain,
self.backend.create_domain,
context, domain)
self.assertIsNone(self.request.error)
def test_update_domain_error_no_domain(self):
context = self.get_context()
server = self.get_server_fixture()
domain = self.get_domain_fixture()
self.backend.create_server(context, server)
self.backend.create_domain(context, domain)
self.request.error = impl_ipa.IPA_NOT_FOUND
self.assertRaises(impl_ipa.IPADomainNotFound,
self.backend.update_domain,
context, domain)
def test_create_record(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.backend.create_record(context, domain, recordset, record)
self.backend.delete_domain(context, domain)
def test_create_record_error_no_changes(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.request.error = impl_ipa.IPA_NO_CHANGES
# backend should ignore this error
self.backend.create_record(context, domain, recordset, record)
self.assertIsNone(self.request.error)
self.backend.delete_domain(context, domain)
def test_create_domain_error_no_changes(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.request.error = impl_ipa.IPA_NO_CHANGES
# backend should ignore this error
self.backend.create_domain(context, domain)
self.assertIsNone(self.request.error)
self.backend.delete_domain(context, domain)
def test_create_record_error_dup_record(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.request.error = impl_ipa.IPA_DUPLICATE # causes request to raise
self.assertRaises(impl_ipa.IPADuplicateRecord,
self.backend.create_record,
context, domain, recordset, record)
self.assertIsNone(self.request.error)
self.backend.delete_domain(context, domain)
def test_update_record_error_no_record(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.request.error = impl_ipa.IPA_NOT_FOUND # causes request to raise
self.assertRaises(impl_ipa.IPARecordNotFound,
self.backend.update_record,
context, domain, recordset, record)
self.assertIsNone(self.request.error)
self.backend.delete_domain(context, domain)
def test_update_record_unknown_error(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.request.error = 1234 # causes request to raise
self.assertRaises(impl_ipa.IPAUnknownError, self.backend.update_record,
context, domain, recordset, record)
self.assertIsNone(self.request.error)
self.backend.delete_domain(context, domain)
def test_create_record_reauth(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.request.needauth = True # causes request to reauth
beforecount = self.request.myauth.count
self.backend.create_record(context, domain, recordset, record)
self.assertFalse(self.request.needauth)
self.assertEqual(self.request.myauth.count, (beforecount + 1))
self.backend.delete_domain(context, domain)
def test_create_record_reauth_fail(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
recordset = self.get_recordset_fixture(domain['name'], "A")
record = self.get_record_fixture("A")
self.request.needauth = True # causes request to reauth
self.backend.ntries = 0 # force exception upon retry
self.assertRaises(impl_ipa.IPACommunicationFailure,
self.backend.create_record, context, domain,
recordset, record)
self.assertFalse(self.request.needauth)
self.assertNotEqual(self.backend.ntries, 0)
self.backend.delete_domain(context, domain)
| melodous/designate | designate/tests/test_backend/test_ipa.py | Python | apache-2.0 | 12,721 |
import os
import random
import tweepy
import redis
CONSUMER_KEY = os.environ.get('TWITTER_CONSUMER_KEY')
CONSUMER_SECRET = os.environ.get('TWITTER_CONSUMER_SECRET')
ACCESS_TOKEN = os.environ.get('TWITTER_ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.environ.get('TWITTER_ACCESS_TOKEN_SECRET')
r = redis.from_url(os.environ.get("REDIS_URL"))
phrases = ['Here you go',
'A link coming right up',
'Done',
"I know what you're thinking. Zelda was the princess though"]
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
twitter = tweepy.API(auth)
def tweet_back(tweet):
username = tweet.user.screen_name
img = 'img/{}.png'.format(random.choice(range(10)))
message = '{}, @{}!'.format(random.choice(phrases), username)
print("Replying to {}'s tweet with ID {}".format(username, tweet.id))
twitter.update_with_media(filename=img, status=message, in_reply_to_status_id=tweet.id)
if __name__ == '__main__':
tweets = twitter.search('"link please"')
    random_tweet = next((tweet for tweet in tweets if not tweet.retweeted), None)
    if random_tweet is not None:
        tweet_back(random_tweet)
replies = twitter.search('@alinkplease link please', since_id=r.get('last'))
if replies:
r.set('last', replies[0].id)
for tweet in replies:
        tweet_back(tweet)
 | avyfain/a-link-please | tweet.py | Python | mit | 1,359 |
# -*- coding: utf-8 -*-
"""
DNSLabel/DNSBuffer - DNS label handling & encoding/decoding
"""
import fnmatch
from dnslib.bit import get_bits,set_bits
from dnslib.buffer import Buffer, BufferError
class DNSLabelError(Exception):
pass
class DNSLabel(object):
"""
Container for DNS label
Supports IDNA encoding for unicode domain names
>>> l1 = DNSLabel("aaa.bbb.ccc.")
>>> l2 = DNSLabel([b"aaa",b"bbb",b"ccc"])
>>> l1 == l2
True
>>> l3 = DNSLabel("AAA.BBB.CCC")
>>> l1 == l3
True
>>> l1 == 'AAA.BBB.CCC'
True
>>> x = { l1 : 1 }
>>> x[l1]
1
>>> l1
<DNSLabel: 'aaa.bbb.ccc.'>
>>> str(l1)
'aaa.bbb.ccc.'
>>> l3 = l1.add("xxx.yyy")
>>> l3
<DNSLabel: 'xxx.yyy.aaa.bbb.ccc.'>
>>> l3.matchSuffix(l1)
True
>>> l3.matchSuffix("xxx.yyy.")
False
>>> l3.stripSuffix("bbb.ccc.")
<DNSLabel: 'xxx.yyy.aaa.'>
>>> l3.matchGlob("*.[abc]aa.BBB.ccc")
True
>>> l3.matchGlob("*.[abc]xx.bbb.ccc")
False
# Too hard to get unicode doctests to work on Python 3.2
# (works on 3.3)
# >>> u1 = DNSLabel(u'\\u2295.com')
# >>> u1.__str__() == u'\\u2295.com.'
# True
# >>> u1.label == ( b"xn--keh", b"com" )
# True
"""
def __init__(self,label):
"""
Create DNS label instance
Label can be specified as:
- a list/tuple of byte strings
- a byte string (split into components separated by b'.')
- a unicode string which will be encoded according to RFC3490/IDNA
"""
if type(label) == DNSLabel:
self.label = label.label
elif type(label) in (list,tuple):
self.label = tuple(label)
else:
if not label or label in (b'.','.'):
self.label = ()
elif type(label) is not bytes:
self.label = tuple(label.encode("idna").\
rstrip(b".").split(b"."))
else:
self.label = tuple(label.rstrip(b".").split(b"."))
def add(self,name):
"""
Prepend name to label
"""
new = DNSLabel(name)
if self.label:
new.label += self.label
return new
def matchGlob(self,pattern):
if type(pattern) != DNSLabel:
pattern = DNSLabel(pattern)
return fnmatch.fnmatch(str(self).lower(),str(pattern).lower())
def matchSuffix(self,suffix):
"""
Return True if label suffix matches
"""
suffix = DNSLabel(suffix)
return self.label[-len(suffix.label):] == suffix.label
def stripSuffix(self,suffix):
"""
Strip suffix from label
"""
suffix = DNSLabel(suffix)
if self.label[-len(suffix.label):] == suffix.label:
return DNSLabel(self.label[:-len(suffix.label)])
else:
return self
def idna(self):
return ".".join([ s.decode("idna") for s in self.label ]) + "."
def __str__(self):
return ".".join([ s.decode() for s in self.label ]) + "."
def __repr__(self):
return "<DNSLabel: '%s'>" % str(self)
def __hash__(self):
return hash(self.label)
def __ne__(self,other):
return not self == other
def __eq__(self,other):
if type(other) != DNSLabel:
return self.__eq__(DNSLabel(other))
else:
return [ l.lower() for l in self.label ] == \
[ l.lower() for l in other.label ]
def __len__(self):
return len(b'.'.join(self.label))
class DNSBuffer(Buffer):
"""
Extends Buffer to provide DNS name encoding/decoding (with caching)
# Needed for Python 2/3 doctest compatibility
>>> def p(s):
... if not isinstance(s,str):
... return s.decode()
... return s
>>> b = DNSBuffer()
>>> b.encode_name(b'aaa.bbb.ccc.')
>>> len(b)
13
>>> b.encode_name(b'aaa.bbb.ccc.')
>>> len(b)
15
>>> b.encode_name(b'xxx.yyy.zzz')
>>> len(b)
28
>>> b.encode_name(b'zzz.xxx.bbb.ccc.')
>>> len(b)
38
>>> b.encode_name(b'aaa.xxx.bbb.ccc')
>>> len(b)
44
>>> b.offset = 0
>>> print(b.decode_name())
aaa.bbb.ccc.
>>> print(b.decode_name())
aaa.bbb.ccc.
>>> print(b.decode_name())
xxx.yyy.zzz.
>>> print(b.decode_name())
zzz.xxx.bbb.ccc.
>>> print(b.decode_name())
aaa.xxx.bbb.ccc.
>>> b = DNSBuffer()
>>> b.encode_name([b'a.aa',b'b.bb',b'c.cc'])
>>> b.offset = 0
>>> len(b.decode_name().label)
3
>>> b = DNSBuffer()
>>> b.encode_name_nocompress(b'aaa.bbb.ccc.')
>>> len(b)
13
>>> b.encode_name_nocompress(b'aaa.bbb.ccc.')
>>> len(b)
26
>>> b.offset = 0
>>> print(b.decode_name())
aaa.bbb.ccc.
>>> print(b.decode_name())
aaa.bbb.ccc.
"""
def __init__(self,data=b''):
"""
Add 'names' dict to cache stored labels
"""
super(DNSBuffer,self).__init__(data)
self.names = {}
def decode_name(self,last=-1):
"""
Decode label at current offset in buffer (following pointers
to cached elements where necessary)
"""
label = []
done = False
while not done:
(length,) = self.unpack("!B")
if get_bits(length,6,2) == 3:
# Pointer
self.offset -= 1
pointer = get_bits(self.unpack("!H")[0],0,14)
save = self.offset
if last == save:
raise BufferError("Recursive pointer in DNSLabel [offset=%d,pointer=%d,length=%d]" %
(self.offset,pointer,len(self.data)))
if pointer < self.offset:
self.offset = pointer
else:
# Pointer can't point forwards
raise BufferError("Invalid pointer in DNSLabel [offset=%d,pointer=%d,length=%d]" %
(self.offset,pointer,len(self.data)))
label.extend(self.decode_name(save).label)
self.offset = save
done = True
else:
if length > 0:
l = self.get(length)
try:
l.decode()
except UnicodeDecodeError:
raise BufferError("Invalid label <%s>" % l)
label.append(l)
else:
done = True
return DNSLabel(label)
def encode_name(self,name):
"""
Encode label and store at end of buffer (compressing
cached elements where needed) and store elements
in 'names' dict
"""
if not isinstance(name,DNSLabel):
name = DNSLabel(name)
if len(name) > 253:
raise DNSLabelError("Domain label too long: %r" % name)
name = list(name.label)
while name:
if tuple(name) in self.names:
# Cached - set pointer
pointer = self.names[tuple(name)]
pointer = set_bits(pointer,3,14,2)
self.pack("!H",pointer)
return
else:
self.names[tuple(name)] = self.offset
element = name.pop(0)
if len(element) > 63:
raise DNSLabelError("Label component too long: %r" % element)
self.pack("!B",len(element))
self.append(element)
self.append(b'\x00')
def encode_name_nocompress(self,name):
"""
Encode and store label with no compression
(needed for RRSIG)
"""
if not isinstance(name,DNSLabel):
name = DNSLabel(name)
if len(name) > 253:
raise DNSLabelError("Domain label too long: %r" % name)
name = list(name.label)
while name:
element = name.pop(0)
if len(element) > 63:
raise DNSLabelError("Label component too long: %r" % element)
self.pack("!B",len(element))
self.append(element)
self.append(b'\x00')
if __name__ == '__main__':
import doctest
doctest.testmod()
| xyuanmu/XX-Net | code/default/lib/noarch/dnslib/label.py | Python | bsd-2-clause | 8,345 |
import numpy as np
from r_support import *
def forest_aad_loss_linear(w, xi, yi, qval, in_constr_set=None, x_tau=None, Ca=1.0, Cn=1.0, Cx=1.0,
withprior=False, w_prior=None, sigma2=1.0):
"""
    Computes the AAD loss:
        score_loss + 1/(2*sigma2) * ||w - w_prior||^2
    where the prior term is added only when withprior is True. score_loss
    penalizes labeled anomalies whose score falls below the tau-th quantile
    value qval (and, if x_tau is given, below the score of the tau-th ranked
    instance), and labeled nominals whose score falls at or above it.
    :param w: numpy.array
        parameter (weight) vector
    :param xi: csr_matrix
    :param yi: numpy.array
    :param qval: float
        tau-th quantile value
    :param in_constr_set: list of int
        indicators 0/1 whether to include in constraint set or not
    :param x_tau: numpy.array
        feature vector of the tau-th ranked instance (optional)
    :param Ca: float
    :param Cn: float
    :param Cx: float
    :param withprior: boolean
    :param w_prior: numpy.array
    :param sigma2: float
    :return: float
    """
s = xi.dot(w)
loss_a = 0 # loss w.r.t w for anomalies
loss_n = 0 # loss w.r.t w for nominals
n_anom = 0
n_noml = 0
tau_rel_loss = None
if x_tau is not None:
tau_rel_loss = x_tau.dot(w)
for i in range(len(yi)):
lbl = yi[i]
if lbl == 1:
n_anom += 1
else:
n_noml += 1
if lbl == 1 and s[i] < qval:
loss_a += Ca * (qval - s[i])
elif lbl == 0 and s[i] >= qval:
loss_n += Cn * (s[i] - qval)
else:
# no loss
pass
if tau_rel_loss is not None and (in_constr_set is None or in_constr_set[i] == 1):
# TODO: Test this code.
# add loss relative to tau-th ranked instance
# loss =
# Cx * (x_tau - xi).w if yi = 1 and (x_tau - xi).w > 0
# Cx * (xi - x_tau).w if y1 = 0 and (xi - x_tau).w > 0
tau_val = tau_rel_loss[0]
if lbl == 1 and s[i] < tau_val:
loss_a += Cx * (tau_val - s[i])
elif lbl == 0 and s[i] >= tau_val:
loss_n += Cx * (s[i] - tau_val)
else:
# no loss
pass
loss = (loss_a / max(1, n_anom)) + (loss_n / max(1, n_noml))
if withprior and w_prior is not None:
w_diff = w - w_prior
loss += (1 / (2 * sigma2)) * (w_diff.dot(w_diff))
return loss
def forest_aad_loss_gradient_linear(w, xi, yi, qval, in_constr_set=None, x_tau=None, Ca=1.0, Cn=1.0, Cx=1.0,
withprior=False, w_prior=None, sigma2=1.0):
"""
    Computes the jacobian of the AAD loss:
        jacobian( score_loss + 1/(2*sigma2) * ||w - w_prior||^2 )
    where the prior term is included only when withprior is True.
"""
m = ncol(xi)
grad = np.zeros(m, dtype=float)
s = xi.dot(w)
loss_a = rep(0, m) # the derivative of loss w.r.t w for anomalies
loss_n = rep(0, m) # the derivative of loss w.r.t w for nominals
n_anom = 0
n_noml = 0
anom_idxs = list()
noml_idxs = list()
anom_tau_idxs = list()
noml_tau_idxs = list()
tau_score = None
if x_tau is not None:
tau_score = x_tau.dot(w)
for i in range(len(yi)):
lbl = yi[i]
if lbl == 1:
n_anom += 1
else:
n_noml += 1
if lbl == 1 and s[i] < qval:
# loss_a[:] = loss_a - Ca * xi[i, :]
anom_idxs.append(i)
elif lbl == 0 and s[i] >= qval:
# loss_n[:] = loss_n + Cn * xi[i, :]
noml_idxs.append(i)
else:
# no loss
pass
# add loss-gradient relative to tau-th ranked instance
if x_tau is not None and (in_constr_set is None or in_constr_set[i] == 1):
# add loss-gradient relative to tau-th ranked instance
# loss =
# Cx * (x_tau - xi).w if yi = 1 and (x_tau - xi).w > 0
# Cx * (xi - x_tau).w if y1 = 0 and (xi - x_tau).w > 0
# loss_gradient =
# Cx * (x_tau - xi) if yi = 1 and (x_tau - xi).w > 0
# Cx * (xi - x_tau) if y1 = 0 and (xi - x_tau).w > 0
tau_val = tau_score[0]
if lbl == 1 and s[i] < tau_val:
# loss_a[:] = loss_a + Cx * (x_tau - xi[i, :])
anom_tau_idxs.append(i)
elif lbl == 0 and s[i] >= tau_val:
# loss_n[:] = loss_n + Cx * (xi[i, :] - x_tau)
noml_tau_idxs.append(i)
else:
# no loss
pass
anom_idxs = np.array(anom_idxs, dtype=int)
noml_idxs = np.array(noml_idxs, dtype=int)
anom_tau_idxs = np.array(anom_tau_idxs, dtype=int)
noml_tau_idxs = np.array(noml_tau_idxs, dtype=int)
if len(anom_idxs) > 0:
loss_a[:] = -Ca * np.sum(xi[anom_idxs], axis=0)
if len(anom_tau_idxs) > 0:
loss_a[:] = loss_a + Cx * (len(anom_tau_idxs) * x_tau - np.sum(xi[anom_tau_idxs], axis=0))
if len(noml_idxs) > 0:
loss_n[:] = Cn * np.sum(xi[noml_idxs], axis=0)
if len(noml_tau_idxs) > 0:
loss_n[:] = loss_n + Cx * (np.sum(xi[noml_tau_idxs], axis=0) - len(noml_tau_idxs) * x_tau)
grad[0:m] = (loss_a / max(1, n_anom)) + (loss_n / max(1, n_noml))
if withprior and w_prior is not None:
w_diff = w - w_prior
grad[0:m] += (1 / sigma2) * w_diff
return grad
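# A minimal finite-difference sanity check of the gradient above (sketch only;
# the variables are assumed to be set up by the caller, and because the loss
# is piecewise-linear the check only holds away from the hinge points):
#
#   eps = 1e-6
#   g = forest_aad_loss_gradient_linear(w, xi, yi, qval)
#   for j in range(len(w)):
#       e = np.zeros(len(w)); e[j] = eps
#       num = (forest_aad_loss_linear(w + e, xi, yi, qval) -
#              forest_aad_loss_linear(w - e, xi, yi, qval)) / (2 * eps)
#       assert np.abs(num - g[j]) < 1e-4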
def forest_aad_loss_exp(w, xi, yi, qval, in_constr_set=None, x_tau=None, Ca=1.0, Cn=1.0, Cx=1.0,
withprior=False, w_prior=None, sigma2=1.0):
"""
    Computes the AAD loss with exponential penalties:
        score_loss + 1/(2*sigma2) * ||w - w_prior||^2
    where the prior term is added only when withprior is True. score_loss
    penalizes labeled anomalies scoring below the tau-th quantile value qval
    with Ca*exp(qval - score), labeled nominals scoring at or above it with
    Cn*exp(score - qval), and (if x_tau is given) adds linear penalties
    relative to the score of the tau-th ranked instance.
    :param w: numpy.array
        parameter (weight) vector
    :param xi: csr_matrix
    :param yi: numpy.array
    :param qval: float
        tau-th quantile value
    :param in_constr_set: list of int
        indicators 0/1 whether to include in constraint set or not
    :param x_tau: numpy.array
        feature vector of the tau-th ranked instance (optional)
    :param Ca: float
    :param Cn: float
    :param Cx: float
    :param withprior: boolean
    :param w_prior: numpy.array
    :param sigma2: float
    :return: float
    """
loss_a = 0 # loss w.r.t w for anomalies
loss_n = 0 # loss w.r.t w for nominals
n_anom = 0
n_noml = 0
vals = xi.dot(w)
tau_rel_loss = None
if x_tau is not None:
tau_rel_loss = x_tau.dot(w)
for i in range(len(yi)):
if yi[i] == 1:
n_anom += 1
else:
n_noml += 1
if yi[i] == 1 and vals[i] < qval:
loss_a = loss_a + Ca * np.exp(qval - vals[i])
elif yi[i] == 0 and vals[i] >= qval:
loss_n = loss_n + Cn * np.exp(vals[i] - qval)
else:
# no loss
pass
if tau_rel_loss is not None and (in_constr_set is None or in_constr_set[i] == 1):
# add loss relative to tau-th ranked instance
# loss =
# Cx * (x_tau - xi).w if yi = 1 and (x_tau - xi).w > 0
# Cx * (xi - x_tau).w if y1 = 0 and (xi - x_tau).w > 0
tau_val = tau_rel_loss[0]
if yi[i] == 1 and vals[i] < tau_val:
loss_a += Cx * (tau_val - vals[i])
elif yi[i] == 0 and vals[i] >= tau_val:
loss_n += Cx * (vals[i] - tau_val)
else:
# no loss
pass
loss = (loss_a / max(1, n_anom)) + (loss_n / max(1, n_noml))
if withprior and w_prior is not None:
w_diff = w - w_prior
loss += (1 / (2 * sigma2)) * (w_diff.dot(w_diff))
return loss
def forest_aad_loss_gradient_exp(w, xi, yi, qval, in_constr_set=None, x_tau=None, Ca=1.0, Cn=1.0, Cx=1.0,
withprior=False, w_prior=None, sigma2=1.0):
"""
    Computes the jacobian of the AAD loss with exponential penalties:
        jacobian( score_loss + 1/(2*sigma2) * ||w - w_prior||^2 )
    where the prior term is included only when withprior is True.
"""
vals = xi.dot(w)
m = ncol(xi)
loss_a = rep(0, m) # the derivative of loss w.r.t w for anomalies
loss_n = rep(0, m) # the derivative of loss w.r.t w for nominals
n_anom = 0
n_noml = 0
tau_score = None
if x_tau is not None:
tau_score = x_tau.dot(w)
for i in range(len(yi)):
lbl = yi[i]
if lbl == 1:
n_anom += 1
else:
n_noml += 1
if lbl == 1 and vals[i] < qval:
exp_diff = np.minimum(np.exp(qval - vals[i]), 1000) # element-wise
# exp_diff = np.exp(qval - vals[i])
loss_a[:] = loss_a - Ca * exp_diff * xi[i, :]
elif lbl == 0 and vals[i] >= qval:
exp_diff = np.minimum(np.exp(vals[i] - qval), 1000) # element-wise
# exp_diff = np.exp(vals[i] - qval)
loss_n[:] = loss_n + Cn * exp_diff * xi[i, :]
else:
# no loss
pass
# add loss-gradient relative to tau-th ranked instance
if x_tau is not None and (in_constr_set is None or in_constr_set[i] == 1):
# add loss-gradient relative to tau-th ranked instance
# loss =
# Cx * (x_tau - xi).w if yi = 1 and (x_tau - xi).w > 0
# Cx * (xi - x_tau).w if y1 = 0 and (xi - x_tau).w > 0
# loss_gradient =
# Cx * (x_tau - xi) if yi = 1 and (x_tau - xi).w > 0
# Cx * (xi - x_tau) if y1 = 0 and (xi - x_tau).w > 0
tau_val = tau_score[0]
if lbl == 1 and vals[i] < tau_val:
loss_a[:] = loss_a + Cx * (x_tau - xi[i, :])
elif lbl == 0 and vals[i] >= tau_val:
loss_n[:] = loss_n + Cx * (xi[i, :] - x_tau)
else:
# no loss
pass
dl_dw = (loss_a / max(1, n_anom)) + (loss_n / max(1, n_noml))
if withprior and w_prior is not None:
w_diff = w - w_prior
dl_dw[0:m] += (1 / sigma2) * w_diff
return dl_dw
| shubhomoydas/pyaad | pyalad/forest_aad_loss.py | Python | mit | 10,027 |
# -*- test-case-name: twisted.test.test_udp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various asynchronous UDP classes.
Please do not use this module directly.
@var _sockErrReadIgnore: list of symbolic error constants (from the C{errno}
module) representing socket errors where the error is temporary and can be
ignored.
@var _sockErrReadRefuse: list of symbolic error constants (from the C{errno}
module) representing socket errors that indicate connection refused.
"""
from __future__ import division, absolute_import
# System Imports
import socket
import operator
import struct
import warnings
from zope.interface import implementer
from twisted.python.runtime import platformType
if platformType == 'win32':
from errno import WSAEWOULDBLOCK
from errno import WSAEINTR, WSAEMSGSIZE, WSAETIMEDOUT
from errno import WSAECONNREFUSED, WSAECONNRESET, WSAENETRESET
from errno import WSAEINPROGRESS
# Classify read and write errors
_sockErrReadIgnore = [WSAEINTR, WSAEWOULDBLOCK, WSAEMSGSIZE, WSAEINPROGRESS]
_sockErrReadRefuse = [WSAECONNREFUSED, WSAECONNRESET, WSAENETRESET,
WSAETIMEDOUT]
# POSIX-compatible write errors
EMSGSIZE = WSAEMSGSIZE
ECONNREFUSED = WSAECONNREFUSED
EAGAIN = WSAEWOULDBLOCK
EINTR = WSAEINTR
else:
from errno import EWOULDBLOCK, EINTR, EMSGSIZE, ECONNREFUSED, EAGAIN
_sockErrReadIgnore = [EAGAIN, EINTR, EWOULDBLOCK]
_sockErrReadRefuse = [ECONNREFUSED]
# Twisted Imports
from twisted.internet import base, defer, address
from twisted.python import log, failure
from twisted.internet import abstract, error, interfaces
@implementer(
interfaces.IListeningPort, interfaces.IUDPTransport,
interfaces.ISystemHandle)
class Port(base.BasePort):
"""
UDP port, listening for packets.
"""
addressFamily = socket.AF_INET
socketType = socket.SOCK_DGRAM
maxThroughput = 256 * 1024 # max bytes we read in one eventloop iteration
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
def __init__(self, port, proto, interface='', maxPacketSize=8192, reactor=None):
"""
Initialize with a numeric port to listen on.
"""
base.BasePort.__init__(self, reactor)
self.port = port
self.protocol = proto
self.maxPacketSize = maxPacketSize
self.interface = interface
self.setLogStr()
self._connectedAddr = None
def __repr__(self):
if self._realPortNumber is not None:
return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
else:
return "<%s not connected>" % (self.protocol.__class__,)
def getHandle(self):
"""
Return a socket object.
"""
return self.socket
def startListening(self):
"""
Create and bind my socket, and begin listening on it.
This is called on unserialization, and must be called after creating a
server to begin listening on the specified port.
"""
self._bindSocket()
self._connectToProtocol()
def _bindSocket(self):
try:
skt = self.createInternetSocket()
skt.bind((self.interface, self.port))
except socket.error as le:
raise error.CannotListenError(self.interface, self.port, le)
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s" % (
self._getLogPrefix(self.protocol), self._realPortNumber))
self.connected = 1
self.socket = skt
self.fileno = self.socket.fileno
def _connectToProtocol(self):
self.protocol.makeConnection(self)
self.startReading()
def doRead(self):
"""
Called when my socket is ready for reading.
"""
read = 0
while read < self.maxThroughput:
try:
data, addr = self.socket.recvfrom(self.maxPacketSize)
except socket.error as se:
no = se.args[0]
if no in _sockErrReadIgnore:
return
if no in _sockErrReadRefuse:
if self._connectedAddr:
self.protocol.connectionRefused()
return
raise
else:
read += len(data)
try:
self.protocol.datagramReceived(data, addr)
except:
log.err()
def write(self, datagram, addr=None):
"""
Write a datagram.
@type datagram: C{str}
@param datagram: The datagram to be sent.
@type addr: C{tuple} containing C{str} as first element and C{int} as
second element, or C{None}
@param addr: A tuple of (I{stringified dotted-quad IP address},
I{integer port number}); can be C{None} in connected mode.
"""
if self._connectedAddr:
assert addr in (None, self._connectedAddr)
try:
return self.socket.send(datagram)
except socket.error as se:
no = se.args[0]
if no == EINTR:
return self.write(datagram)
elif no == EMSGSIZE:
raise error.MessageLengthError("message too long")
elif no == ECONNREFUSED:
self.protocol.connectionRefused()
else:
raise
else:
assert addr != None
if not addr[0].replace(".", "").isdigit() and addr[0] != "<broadcast>":
warnings.warn("Please only pass IPs to write(), not hostnames",
DeprecationWarning, stacklevel=2)
try:
return self.socket.sendto(datagram, addr)
except socket.error as se:
no = se.args[0]
if no == EINTR:
return self.write(datagram, addr)
elif no == EMSGSIZE:
raise error.MessageLengthError("message too long")
elif no == ECONNREFUSED:
# in non-connected UDP ECONNREFUSED is platform dependent, I
# think and the info is not necessarily useful. Nevertheless
# maybe we should call connectionRefused? XXX
return
else:
raise
def writeSequence(self, seq, addr):
self.write("".join(seq), addr)
def connect(self, host, port):
"""
'Connect' to remote server.
"""
if self._connectedAddr:
raise RuntimeError("already connected, reconnecting is not currently supported")
if not abstract.isIPAddress(host):
raise ValueError("please pass only IP addresses, not domain names")
self._connectedAddr = (host, port)
self.socket.connect((host, port))
def _loseConnection(self):
self.stopReading()
if self.connected: # actually means if we are *listening*
self.reactor.callLater(0, self.connectionLost)
def stopListening(self):
if self.connected:
result = self.d = defer.Deferred()
else:
result = None
self._loseConnection()
return result
def loseConnection(self):
warnings.warn("Please use stopListening() to disconnect port", DeprecationWarning, stacklevel=2)
self.stopListening()
def connectionLost(self, reason=None):
"""
Cleans up my socket.
"""
log.msg('(UDP Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
base.BasePort.connectionLost(self, reason)
self.protocol.doStop()
self.socket.close()
del self.socket
del self.fileno
if hasattr(self, "d"):
self.d.callback(None)
del self.d
def setLogStr(self):
"""
Initialize the C{logstr} attribute to be used by C{logPrefix}.
"""
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = "%s (UDP)" % logPrefix
def logPrefix(self):
"""
Return the prefix to log with.
"""
return self.logstr
def getHost(self):
"""
Returns an IPv4Address.
This indicates the address from which I am connecting.
"""
return address.IPv4Address('UDP', *self.socket.getsockname())
class MulticastMixin:
"""
Implement multicast functionality.
"""
def getOutgoingInterface(self):
i = self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF)
return socket.inet_ntoa(struct.pack("@i", i))
def setOutgoingInterface(self, addr):
"""Returns Deferred of success."""
return self.reactor.resolve(addr).addCallback(self._setInterface)
def _setInterface(self, addr):
i = socket.inet_aton(addr)
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
return 1
def getLoopbackMode(self):
return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP)
def setLoopbackMode(self, mode):
mode = struct.pack("b", operator.truth(mode))
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, mode)
def getTTL(self):
return self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL)
def setTTL(self, ttl):
ttl = struct.pack("B", ttl)
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
def joinGroup(self, addr, interface=""):
"""Join a multicast group. Returns Deferred of success."""
return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 1)
def _joinAddr1(self, addr, interface, join):
return self.reactor.resolve(interface).addCallback(self._joinAddr2, addr, join)
def _joinAddr2(self, interface, addr, join):
addr = socket.inet_aton(addr)
interface = socket.inet_aton(interface)
if join:
cmd = socket.IP_ADD_MEMBERSHIP
else:
cmd = socket.IP_DROP_MEMBERSHIP
try:
self.socket.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
except socket.error as e:
return failure.Failure(error.MulticastJoinError(addr, interface, *e.args))
def leaveGroup(self, addr, interface=""):
"""Leave multicast group, return Deferred of success."""
return self.reactor.resolve(addr).addCallback(self._joinAddr1, interface, 0)
@implementer(interfaces.IMulticastTransport)
class MulticastPort(MulticastMixin, Port):
"""
UDP Port that supports multicasting.
"""
def __init__(self, port, proto, interface='', maxPacketSize=8192, reactor=None, listenMultiple=False):
"""
@see: L{twisted.internet.interfaces.IReactorMulticast.listenMulticast}
"""
Port.__init__(self, port, proto, interface, maxPacketSize, reactor)
self.listenMultiple = listenMultiple
def createInternetSocket(self):
skt = Port.createInternetSocket(self)
if self.listenMultiple:
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
return skt
| ecolitan/fatics | venv/lib/python2.7/site-packages/twisted/internet/udp.py | Python | agpl-3.0 | 11,644 |