text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34)
---|---|---|---|---|---|---|
# Copyright 2020 Red Hat, Inc. Jake Hunsaker <jhunsake@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.cleaner.mappings import SoSMap
class SoSUsernameMap(SoSMap):
"""Mapping to store usernames matched from ``lastlog`` output.
Usernames are obfuscated as ``obfuscateduserX`` where ``X`` is a counter
that gets incremented for every new username found.
    Note that this specifically obfuscates usernames and not UIDs.
"""
name_count = 0
def load_names_from_options(self, opt_names):
for name in opt_names:
if name not in self.dataset.keys():
self.add(name)
def sanitize_item(self, username):
"""Obfuscate a new username not currently found in the map
"""
ob_name = "obfuscateduser%s" % self.name_count
self.name_count += 1
if ob_name in self.dataset.values():
return self.sanitize_item(username)
return ob_name
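# Illustrative sketch (not part of the original module): because sanitize_item
# draws from a single incrementing counter, loading two unseen names such as
# ['alice', 'bob'] via load_names_from_options is expected to produce the
# mappings 'alice' -> 'obfuscateduser0' and 'bob' -> 'obfuscateduser1',
# assuming the inherited SoSMap.add() routes new items through sanitize_item().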
| slashdd/sos | sos/cleaner/mappings/username_map.py | Python | gpl-2.0 | 1,265 | 0 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIlmbase(AutotoolsPackage):
"""The PyIlmBase libraries provides python bindings for the IlmBase libraries."""
homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase"
url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz"
version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425')
depends_on('ilmbase')
depends_on('boost+python')
# https://github.com/AcademySoftwareFoundation/openexr/issues/336
parallel = False
def configure_args(self):
spec = self.spec
args = [
'--with-boost-python-libname=boost_python{0}'.format(
spec['python'].version.up_to(2).joined)
]
return args
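    # Illustrative example (assumption: a python@3.8 dependency):
    # spec['python'].version.up_to(2).joined evaluates to '38', so the
    # configure flag becomes '--with-boost-python-libname=boost_python38'.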
| LLNL/spack | var/spack/repos/builtin/packages/py-ilmbase/package.py | Python | lgpl-2.1 | 1,026 | 0.004873 |
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
db = Database()
class Product(db.Entity):
id = PrimaryKey(int)
name = Required(str)
comments = Set('Comment')
@property
def sum_01(self):
return coalesce(select(c.points for c in self.comments).sum(), 0)
@property
def sum_02(self):
return coalesce(select(c.points for c in self.comments).sum(), 0.0)
@property
def sum_03(self):
return coalesce(select(sum(c.points) for c in self.comments), 0)
@property
def sum_04(self):
return coalesce(select(sum(c.points) for c in self.comments), 0.0)
@property
def sum_05(self):
return sum(c.points for c in self.comments)
@property
def sum_06(self):
return coalesce(sum(c.points for c in self.comments), 0)
@property
def sum_07(self):
return coalesce(sum(c.points for c in self.comments), 0.0)
@property
def sum_08(self):
return select(sum(c.points) for c in self.comments)
@property
def sum_09(self):
return select(coalesce(sum(c.points), 0) for c in self.comments)
@property
def sum_10(self):
return select(coalesce(sum(c.points), 0.0) for c in self.comments)
@property
def sum_11(self):
return select(sum(c.points) for c in self.comments)
@property
def sum_12(self):
return sum(self.comments.points)
@property
def sum_13(self):
return coalesce(sum(self.comments.points), 0)
@property
def sum_14(self):
return coalesce(sum(self.comments.points), 0.0)
class Comment(db.Entity):
id = PrimaryKey(int)
points = Required(int)
product = Optional('Product')
class TestQuerySetMonad(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_database(db)
with db_session:
p1 = Product(id=1, name='P1')
p2 = Product(id=2, name='P1', comments=[
Comment(id=201, points=5)
])
p3 = Product(id=3, name='P1', comments=[
Comment(id=301, points=1), Comment(id=302, points=2)
])
p4 = Product(id=4, name='P1', comments=[
Comment(id=401, points=1), Comment(id=402, points=5), Comment(id=403, points=1)
])
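            # Per-product point totals: p1 -> none (coalesced to 0), p2 -> 5,
            # p3 -> 1 + 2 = 3, p4 -> 1 + 5 + 1 = 7; sorting ascending by the
            # sum gives ids [1, 3, 2, 4], the order asserted in every test.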
@classmethod
def tearDownClass(cls):
teardown_database(db)
def setUp(self):
rollback()
db_session.__enter__()
def tearDown(self):
rollback()
db_session.__exit__()
def test_sum_01(self):
q = list(Product.select().sort_by(lambda p: p.sum_01))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_02(self):
q = list(Product.select().sort_by(lambda p: p.sum_02))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_03(self):
q = list(Product.select().sort_by(lambda p: p.sum_03))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_04(self):
q = list(Product.select().sort_by(lambda p: p.sum_04))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_05(self):
q = list(Product.select().sort_by(lambda p: p.sum_05))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_06(self):
q = list(Product.select().sort_by(lambda p: p.sum_06))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_07(self):
q = list(Product.select().sort_by(lambda p: p.sum_07))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_08(self):
q = list(Product.select().sort_by(lambda p: p.sum_08))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_09(self):
q = list(Product.select().sort_by(lambda p: p.sum_09))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_10(self):
q = list(Product.select().sort_by(lambda p: p.sum_10))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_11(self):
q = list(Product.select().sort_by(lambda p: p.sum_11))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_12(self):
q = list(Product.select().sort_by(lambda p: p.sum_12))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_13(self):
q = list(Product.select().sort_by(lambda p: p.sum_13))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_14(self):
q = list(Product.select().sort_by(lambda p: p.sum_14))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
if __name__ == "__main__":
unittest.main()
| ponyorm/pony | pony/orm/tests/test_prop_sum_orderby.py | Python | apache-2.0 | 5,191 | 0.000385 |
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
result.stdout.fnmatch_lines([
'pytest-random-order options:',
'*--random-order-bucket={global,package,module,class,parent,grandparent,none}*',
'*--random-order-seed=*',
])
def test_markers_message(testdir):
result = testdir.runpytest(
'--markers',
)
result.stdout.fnmatch_lines([
'*@pytest.mark.random_order(disabled=True): disable reordering*',
])
| jbasko/pytest-random-order | tests/test_cli.py | Python | mit | 503 | 0.001988 |
"core.Pixmap tests"
from unittest import SkipTest
from testutils import *
from gfxprim.core import Pixmap
from gfxprim import gfx, core
def test_gfx_submodule_loads():
"gfx is present in a Pixmap"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx
def test_gfx_submodule_has_C():
"gfx contains C"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx.C
assert gfx.C
# These set the param types of the functions in GFX
gfx_params = {
'arc_segment': 'IIIIIFFP',
'circle': 'IIIP',
'ellipse': 'IIIIP',
'fill': 'P',
'fill_circle': 'IIIP',
'fill_ellipse': 'IIIIP',
'fill_polygon': ([(0,0),(1,1),(1,0)], 0, {}),
'fill_rect': 'IIIIP',
'fill_ring': 'IIIIP',
'fill_tetragon': 'IIIIIIIIP',
'fill_triangle': 'IIIIIIP',
'hline': 'IIIP',
'hline_aa': 'IIIP', # Fixpoint, originally 'FFFP'
'line': 'IIIIP',
'line_aa': 'IIIIP', # Fixpoint, originally 'FFFFP'
'polygon': ([(0,0),(1,1),(1,0)], 0, {}),
'putpixel_aa': 'IIP', # Fixpoint, originally 'FFP'
'rect': 'IIIIP',
'ring': 'IIIIP',
'tetragon': 'IIIIIIIIP',
'triangle': 'IIIIIIP',
'vline': 'IIIP',
'vline_aa': 'IIIP', # Fixpoint, originally 'FFFP'
}
def test_all_methods_are_known():
"All methods of gfx submodule have known param types in this test"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
for name in dir(c.gfx):
if name[0] != '_' and name not in ['C', 'ctx']:
assert name in gfx_params
def gen_dummy_args(params):
"""
Generate dummy parameter tuple according to characters in the given string.
0 - 0
S - String ("")
I - Int (1)
F - Float (0.5)
P - Pixel (0)
"""
args = []
for t in params:
if t == '0':
args.append(0)
elif t == 'I':
args.append(1)
elif t == 'P':
args.append(0)
elif t == 'F':
args.append(0.5)
elif t == 'S':
args.append("")
else:
assert False
return tuple(args)
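# Example (illustrative only): gen_dummy_args('IIIP') returns (1, 1, 1, 0),
# which matches the 'circle' entry of gfx_params above (x, y, radius, pixel).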
@for_each_case(gfx_params)
def test_method_call(n, params):
"Calling with dummy parameters:"
c = PixmapRand(10, 10, core.C.PIXEL_RGB888)
if isinstance(params, str):
c.gfx.__getattribute__(n)(*gen_dummy_args(params))
else:
assert isinstance(params, tuple) and isinstance(params[-1], dict)
c.gfx.__getattribute__(n)(*params[:-1], **params[-1])
def test_Polygon():
"Polygon() works"
c0 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
c1 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
c2 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
assert c1 == c0
c1.gfx.polygon([1,2,0,4,7,9,5,4,3,2], 43)
c2.gfx.polygon([(1,2),(0,4),(7,9),(5,4),(3,2)], 43)
assert c1 == c2
assert c1 != c0
def test_FillPolygon():
"FillPolygon() works"
c0 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
c1 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
c2 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
assert c1 == c0
c1.gfx.fill_polygon([1,2,0,4,7,9,5,4,3,2], 0)
c2.gfx.fill_polygon([(1,2),(0,4),(7,9),(5,4),(3,2)], 0)
assert c1 == c2
assert c1 != c0
| gfxprim/gfxprim | tests/pylib/test_gfx.py | Python | lgpl-2.1 | 3,030 | 0.032013 |
"""Clean up notifications schema, some other parts.
Revision ID: 2979a1322381
Revises: 2b478162b2b7
Create Date: 2020-03-03 07:32:54.113550
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "2979a1322381"
down_revision = "2b478162b2b7"
def upgrade():
op.drop_index("ix_notification_channels", table_name="notification")
op.drop_table("notification")
op.drop_column("diagram", "data")
op.drop_constraint("document_parent_id_fkey", "document", type_="foreignkey")
# op.alter_column('role', 'is_muted',
# existing_type=sa.BOOLEAN(),
# nullable=False)
op.drop_column("role", "notified_at")
def downgrade():
pass
| pudo/aleph | aleph/migrate/versions/2979a1322381_cleanup.py | Python | mit | 712 | 0.001404 |
# Natural Language Toolkit: PanLex Corpus Reader
#
# Copyright (C) 2001-2016 NLTK Project
# Author: David Kamholz <kamholz@panlex.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for PanLex Lite, a stripped down version of PanLex distributed
as an SQLite database. See the README.txt in the panlex_lite corpus directory
for more information on PanLex Lite.
"""
import os
import sqlite3
from nltk.corpus.reader.api import CorpusReader
class PanLexLiteCorpusReader(CorpusReader):
MEANING_Q = """
SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv
FROM dnx
JOIN ex ON (ex.ex = dnx.ex)
JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
JOIN ex ex2 ON (ex2.ex = dnx2.ex)
WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ?
ORDER BY dnx2.uq DESC
"""
TRANSLATION_Q = """
SELECT s.tt, sum(s.uq) AS trq FROM (
SELECT ex2.tt, max(dnx.uq) AS uq
FROM dnx
JOIN ex ON (ex.ex = dnx.ex)
JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
JOIN ex ex2 ON (ex2.ex = dnx2.ex)
WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ?
GROUP BY ex2.tt, dnx.ui
) s
GROUP BY s.tt
ORDER BY trq DESC, s.tt
"""
def __init__(self, root):
self._c = sqlite3.connect(os.path.join(root, 'db.sqlite')).cursor()
self._uid_lv = {}
self._lv_uid = {}
for row in self._c.execute('SELECT uid, lv FROM lv'):
self._uid_lv[row[0]] = row[1]
self._lv_uid[row[1]] = row[0]
def language_varieties(self, lc=None):
"""
Return a list of PanLex language varieties.
:param lc: ISO 639 alpha-3 code. If specified, filters returned varieties
by this code. If unspecified, all varieties are returned.
:return: the specified language varieties as a list of tuples. The first
element is the language variety's seven-character uniform identifier,
and the second element is its default name.
:rtype: list(tuple)
"""
if lc == None:
return self._c.execute('SELECT uid, tt FROM lv ORDER BY uid').fetchall()
else:
return self._c.execute('SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid', (lc,)).fetchall()
def meanings(self, expr_uid, expr_tt):
"""
Return a list of meanings for an expression.
:param expr_uid: the expression's language variety, as a seven-character
uniform identifier.
:param expr_tt: the expression's text.
:return: a list of Meaning objects.
:rtype: list(Meaning)
"""
expr_lv = self._uid_lv[expr_uid]
mn_info = {}
for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)):
mn = i[0]
uid = self._lv_uid[i[5]]
if not mn in mn_info:
mn_info[mn] = { 'uq': i[1], 'ap': i[2], 'ui': i[3], 'ex': { expr_uid: [expr_tt] } }
if not uid in mn_info[mn]['ex']:
mn_info[mn]['ex'][uid] = []
mn_info[mn]['ex'][uid].append(i[4])
return [ Meaning(mn, mn_info[mn]) for mn in mn_info ]
def translations(self, from_uid, from_tt, to_uid):
"""
Return a list of translations for an expression into a single language
variety.
:param from_uid: the source expression's language variety, as a
seven-character uniform identifier.
:param from_tt: the source expression's text.
:param to_uid: the target language variety, as a seven-character
uniform identifier.
:return a list of translation tuples. The first element is the expression
text and the second element is the translation quality.
:rtype: list(tuple)
"""
from_lv = self._uid_lv[from_uid]
to_lv = self._uid_lv[to_uid]
return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall()
class Meaning(dict):
"""
Represents a single PanLex meaning. A meaning is a translation set derived
from a single source.
"""
def __init__(self, mn, attr):
super(Meaning, self).__init__(**attr)
self['mn'] = mn
def id(self):
"""
:return: the meaning's id.
:rtype: int
"""
return self['mn']
def quality(self):
"""
:return: the meaning's source's quality (0=worst, 9=best).
:rtype: int
"""
return self['uq']
def source(self):
"""
:return: the meaning's source id.
:rtype: int
"""
return self['ap']
def source_group(self):
"""
:return: the meaning's source group id.
:rtype: int
"""
return self['ui']
def expressions(self):
"""
:return: the meaning's expressions as a dictionary whose keys are language
variety uniform identifiers and whose values are lists of expression
texts.
:rtype: dict
"""
return self['ex']
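# Minimal usage sketch, not part of the original module. It assumes the
# panlex_lite corpus has been unpacked locally; the root path and the uniform
# identifiers below are illustrative only.
if __name__ == '__main__':
    reader = PanLexLiteCorpusReader('/path/to/panlex_lite')
    print(reader.language_varieties('eng'))
    for meaning in reader.meanings('eng-000', 'book'):
        print(meaning.id(), meaning.quality(), meaning.expressions())
    print(reader.translations('eng-000', 'book', 'spa-000')[:10])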
| JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/corpus/reader/panlex_lite.py | Python | gpl-3.0 | 5,153 | 0.004463 |
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
from gnuradio import gr, gr_unittest, filter, blocks
import math
def sig_source_c(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [math.cos(2.*math.pi*freq*x) + \
1j*math.sin(2.*math.pi*freq*x) for x in t]
return y
def sig_source_f(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [math.sin(2.*math.pi*freq*x) for x in t]
return y
class test_pfb_arb_resampler(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_fff_000(self):
N = 500 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 2.3421 # resampling rate
nfilts = 32
taps = filter.firdes.low_pass_2(nfilts, nfilts*fs, fs / 2, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 121.213
data = sig_source_f(fs, freq, 1, N)
signal = blocks.vector_source_f(data)
pfb = filter.pfb_arb_resampler_fff(rrate, taps, nfilts)
snk = blocks.vector_sink_f()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertFloatTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccf_000(self):
N = 5000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 2.4321 # resampling rate
nfilts = 32
taps = filter.firdes.low_pass_2(nfilts, nfilts*fs, fs / 2, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccf(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccf_001(self):
N = 50000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 0.75 # resampling rate
nfilts = 32
taps = filter.firdes.low_pass_2(nfilts, nfilts*fs, fs / 4, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccf(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccc_000(self):
N = 5000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 3.4321 # resampling rate
nfilts = 32
taps = filter.firdes.complex_band_pass_2(nfilts, nfilts*fs, 50, 400, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccc(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccc_001(self):
N = 50000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 0.715 # resampling rate
nfilts = 32
taps = filter.firdes.complex_band_pass_2(nfilts, nfilts*fs, 50, 400, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccc(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
if __name__ == '__main__':
gr_unittest.run(test_pfb_arb_resampler, "test_pfb_arb_resampler.xml")
| iohannez/gnuradio | gr-filter/python/filter/qa_pfb_arb_resampler.py | Python | gpl-3.0 | 8,395 | 0.004169 |
#! /usr/bin/python3
import pygame
from colors import Colors
class Heart(pygame.sprite.Sprite):
def __init__(self, pos):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = self.load_image("heart.png")
self.rect.x = pos[0]
self.rect.y = pos[1]
def load_image(self, name, colorkey=None):
image = pygame.image.load(name)
image = image.convert_alpha()
if colorkey is not None:
            if colorkey == -1:
colorkey = image.get_at((0,0))
            image.set_colorkey(colorkey, pygame.RLEACCEL)
return image, image.get_rect()
| joalcava/space_invaders | heart.py | Python | gpl-3.0 | 626 | 0.004808 |
__author__ = 'panzer'
from swampy.TurtleWorld import *
from math import radians, sin
def initTurtle(delay = 0.01):
"""
Initializes a turtle object
:param delay: Delay before each action of the turtle. Lower it is the faster the turtle moves.
:return: turtle object
"""
TurtleWorld()
t = Turtle()
t.delay = delay
return t
def isosceles(t, eq_side, ineq_side, angle):
"""
Draws an isosceles triangle
:param t: Turtle object
  :param eq_side: Length of the two equal sides
  :param ineq_side: Length of the unequal side
  :param angle: Turn angle applied at each end of the unequal side
  :return: None; draws the isosceles triangle with the given turtle
"""
fd(t, eq_side)
lt(t, angle)
fd(t, ineq_side)
lt(t, angle)
fd(t, eq_side)
def pie(t, n, length):
"""
Draws a pie
:param t: Turtle object
:param n: number of sides
:param length: length of each side
  :return: None; draws a pie (spiked polygon) with the given turtle
"""
angle = float(360.0/n)
eq_side = length/2.0/sin(radians(angle/2.0))
for _ in range(n):
isosceles(t, eq_side, length, 180 - (180 - angle)/2.0)
lt(t, 180)
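# Worked example of the geometry above (illustration): for n = 6 and
# length = 100, angle = 360/6 = 60 degrees and eq_side = 100/2/sin(30 deg)
# = 100, i.e. in a regular hexagon the spoke length equals the side length.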
if __name__ == '__main__':
# Figure 4.2 a
pie(initTurtle(), 5, 100)
  # Figure 4.2 b
pie(initTurtle(), 6, 100)
  # Figure 4.2 c
pie(initTurtle(), 7, 100)
wait_for_user() | BigFatNoob-NCSU/x9115george2 | hw/code/2/4_3.py | Python | mit | 1,206 | 0.024876 |
__version__ = "0.3.9"
| nschloe/pyfvm | src/pyfvm/__about__.py | Python | gpl-3.0 | 22 | 0 |
"""
Notes:
- Brugia protein sequences: https://www.ncbi.nlm.nih.gov/bioproject/PRJNA10729
- wBm protein sequences: https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=292805
- BLASTP against Reference proteins (refseq protein) from Human, using BLOSUM45 matrix.
- BLASTP against nr proteins from O. volvulus and wOv, using BLOSUM45 matrix.
- Caution about the Oncho results; I'm not sure how many protein sequences have been annotated.
- The ChEMBL searches were performed under the "Target Search" tab on their website; results were downloaded as a tab-delimited file.
"""
import os, cPickle, pandas, re
from molecbio import sequ
from cobra.flux_analysis import single_reaction_deletion, double_reaction_deletion
from model_tools import load_model, id_bottleneck_metabolites
import xml.etree.ElementTree as ET
def get_rxns_to_delete(model):
rxn_to_genes = {}
for rxn in model.reactions:
if not rxn.gene_names or not rxn.id.startswith(('R', 'ACYLCOA', 'N00001')):
continue
rxn_to_genes[rxn.id] = [g.strip() for g in rxn.gene_names.split(';')]
return rxn_to_genes
def do_deletions(rxn_data, model, rxn_to_genes, do_double_ko=False, obj_fraction=0.0):
fraction_epsilon = 0.0001
orig_f = float(model.optimize().f)
s_rates, s_stats = single_reaction_deletion(model, list(rxn_to_genes.keys()))
print('Original objective %.1f; %i reactions knocked out.' % (orig_f, len(s_stats)))
print('Calculating model deficiencies for each knockout...')
for r_id, new_f in s_rates.items():
if abs(new_f) < fraction_epsilon:
new_f = 0.0
stat = s_stats[r_id]
if new_f/orig_f <= obj_fraction+fraction_epsilon:
if stat == 'optimal':
deficiencies = find_model_deficiencies(model, orig_f, new_f, r_id)
else:
deficiencies = 'infeasible'
rxn_data[r_id] = {'objective':round(new_f/orig_f*100, 1), 'deficiencies':deficiencies, 'genes':rxn_to_genes[r_id]}
if do_double_ko:
double_rxn_ids = [r for r in list(rxn_to_genes.keys()) if r not in rxn_data]
print('Performing double knockouts on %i candidates...' % len(double_rxn_ids))
double_ko_data = double_reaction_deletion(model, double_rxn_ids[:5], number_of_processes=3)
d_r1, d_r2, d_rates = double_ko_data['y'], double_ko_data['x'], double_ko_data['data']
def find_model_deficiencies(model, orig_f, new_f, r_id):
deficiencies = []
ob = model.reactions.get_by_id(r_id).bounds
model.reactions.get_by_id(r_id).bounds = (0,0)
diffs = id_bottleneck_metabolites(model, new_f, 'BIOMASS', threshold=1.0)
for recovered_f, mtb_id in diffs:
def_str = '%s (%.1f)' % (mtb_id, recovered_f/orig_f*100)
sub_defs = []
for sub_f, sub_mtb_id in id_bottleneck_metabolites(model, new_f, mtb_id.upper(), threshold=1.0):
sub_defs.append('%s(%.1f)' % (sub_mtb_id, sub_f/orig_f*100))
if sub_defs:
def_str += ' [%s]' % ', '.join(sub_defs)
deficiencies.append(def_str)
model.reactions.get_by_id(r_id).bounds = ob
if not deficiencies:
return 'unrecoverable'
else:
return ', '.join(deficiencies)
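# Example of the deficiency string assembled above (metabolite ids are
# hypothetical): "trp_l (12.3) [chor(10.1), anth(2.0)]" means that restoring
# trp_l recovers 12.3% of the original objective value, with chor and anth as
# the sub-bottlenecks behind it.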
def process_gene_data(rxn_data):
gene_data = {}
for r_id, data in rxn_data.items():
for gene in set(data['genes']):
g_entry = generate_gene_entry(data, r_id, gene)
gene_data.setdefault(gene, []).append(g_entry)
for gene, entries in gene_data.items():
rs_per_g = len(entries)
if rs_per_g > 1:
for e in entries:
e['num_reactions'] = rs_per_g
return gene_data
def generate_gene_entry(r_data, r_id, gene):
g_data = {}
if len(set(r_data['genes'])) == 1:
g_data['other_genes'] = ''
else:
g_data['other_genes'] = ','.join(sorted(list(set(r_data['genes']) - set([gene]))))
g_data['reaction'] = r_id
g_data['objective'] = r_data['objective']
g_data['deficiencies'] = r_data['deficiencies']
g_data['num_reactions'] = 1
return g_data
# # # Save/load functions
def save_data_object(data_obj, file_path):
with open(file_path, 'wb') as f:
cPickle.dump(data_obj, f, protocol=0)
print('Saved data to %s' % file_path)
def load_data_object(file_path):
with open(file_path, 'rb') as f:
data_obj = cPickle.load(f)
print('Loaded data from %s' % file_path)
return data_obj
def save_data_to_excel(gene_data, gene_data_out_file, expression_headings):
min_column_width = 10
header_bg = '#DEEDED'
sheet_name = 'Single knockouts'
gene_header = 'Gene ID'
headers_atts = [('# Reactions','num_reactions'), ('Reaction','reaction'), ('Associated genes','other_genes'), ('Objective %','objective'), ('Biomass deficiencies','deficiencies')]
ortho_headers = ['Human homologs\n(#|% identity|% coverage)', 'O. volvulus homologs\n(#|% identity|% coverage)']
chembl_headers = ['# ChEMBL hits', 'ChEMBL hits\n(% identity|species)']
data = {h[0]:[] for h in headers_atts+expression_headings}
for h in [gene_header] + ortho_headers + chembl_headers:
data[h] = []
gene_order = sorted(list(gene_data.keys()))
gene_order.sort(key=lambda g:gene_data[g][0]['deficiencies'])
for gene in gene_order:
for g_data in gene_data[gene]:
data[gene_header].append(gene)
for h, att in headers_atts:
data[h].append(g_data.get(att, 'NOT FOUND'))
human_hlogs = '%i | %.1f | %.1f' % (g_data['num_human_prots'], g_data['human_prot_identity'],g_data['human_prot_coverage']) if g_data['num_human_prots'] else ' '
data[ortho_headers[0]].append(human_hlogs)
oncho_hlogs = '%i | %.1f | %.1f' % (g_data['num_oncho_prots'], g_data['oncho_prot_identity'],g_data['oncho_prot_coverage']) if g_data['num_oncho_prots'] else ' '
data[ortho_headers[1]].append(oncho_hlogs)
data[chembl_headers[0]].append(g_data.get('num_chembl_hits', 0))
data[chembl_headers[1]].append(g_data.get('chembl_hits', ''))
if '_max_observed_expression' in g_data['expression_levels']:
max_expression = round(g_data['expression_levels']['_max_observed_expression'], 1)
else:
max_expression = " "
data[expression_headings[0][0]].append(max_expression)
for h, ls in expression_headings[1:]:
exp_levels = [g_data['expression_levels'].get(l) for l in ls]
data[h].append(' | '.join(exp_levels))
col_headers = [gene_header] + [h[0] for h in headers_atts] + [i for i in ortho_headers+chembl_headers] + [j[0] for j in expression_headings]
writer = pandas.ExcelWriter(gene_data_out_file, engine='xlsxwriter')
df = pandas.DataFrame(data)[col_headers] # The [] specifies the order of the columns.
df.to_excel(writer, sheet_name=sheet_name, index=False, startrow=1, header=False)
worksheet = writer.sheets[sheet_name]
header_format = writer.book.add_format({'bold': True, 'text_wrap': True, 'align': 'center', 'valign': 'top', 'bg_color': header_bg, 'border': 1})
for i, h in enumerate(col_headers):
col_w = max(len(line.strip()) for line in h.splitlines())
col_width = max(col_w+1, min_column_width)
if i in (0, 2, 3, 5, 9):
col_format = writer.book.add_format({'align': 'left'})
elif i == 10:
col_format = writer.book.add_format({'align': 'center'})
else:
col_format = writer.book.add_format({'align': 'center'})
worksheet.set_column(i, i, col_width, col_format)
worksheet.write(0, i, h, header_format) # Header added manually.
worksheet.freeze_panes(1, 0) # Freezes header row.
writer.save()
print('Data saved to %s' % gene_data_out_file)
# # # Getting protein names and sequences
def save_prot_names_list(gene_data):
prot_list_file = 'utility/b_mal_4.5-wip_single_ko_prot_names.txt'
prot_list = sorted(gene_data.keys())
with open(prot_list_file, 'w') as f:
f.write('\n'.join(prot_list))
print('Saved protein list to %s' % prot_list_file)
def get_prot_name_translations(gene_data, gen_pept_file):
print('Parsing %s...' % gen_pept_file)
prot_to_std, found_names = {}, set()
with open(gen_pept_file, 'r') as f:
prot_name, std_name = None, None
for line in f:
if prot_name == None and line.startswith('VERSION'):
prot_name = line.strip().split()[1]
elif prot_name and "/standard_name=" in line:
std_name = line.partition('=')[2].strip()[1:-1]
if std_name in gene_data:
prot_to_std[prot_name] = std_name
found_names.add(std_name)
prot_name, std_name = None, None
for gene in gene_data:
if gene not in found_names:
prot_to_std['%s.1' % gene] = gene
return prot_to_std
def save_prot_sequences(gene_data, prot_to_std, prot_sequences_file):
prots_fasta_file = 'utility/b_malayi_and_wBm_prots.fasta'
all_seqs = sequ.loadfasta(prots_fasta_file)
prots, found_genes = [], set()
for seq in all_seqs:
gene = prot_to_std.get(seq.name)
if not gene: continue
if gene in found_genes:
print('Error: multiple sequences were found matching "%s".' % seq.name)
exit()
prots.append(sequ.Sequence(name=gene, sequence=seq.seq))
found_genes.add(gene)
if len(prots) != len(gene_data):
print('Warning: only found sequences for %i of %i genes. Missing genes:' % (len(prots), len(gene_data)))
for g in set(gene_data) - found_genes:
print(g)
exit()
sequ.savefasta(prots, prot_sequences_file, spaces=False, numbers=False)
print('Saved %i sequences to %s' % (len(prots), prot_sequences_file))
return prots
# # # Parsing BLAST output
def parse_blast_xml(gene_data, blast_xml_file, taxon_name, spc_str):
"""taxon_name is used to name the properties saved in gene_data."""
min_e_val = 1E-30
property_strs = ['num_%s_prots', '%s_prot_id', '%s_prot_identity', '%s_prot_coverage']
gi_split_regex = re.compile('\s?>gi\|\S+\|\S+\|\S+\|\s?')
gene_spc_regex = re.compile('(.+) \[(.+)\]$')
isoform_regex = re.compile('(.+) (isoform \S+)(.*)$')
tree = ET.parse(blast_xml_file)
root = tree.getroot()
iterations = root.find('BlastOutput_iterations')
for q_hit in iterations:
gene = q_hit.find('Iteration_query-def').text
if gene not in gene_data:
continue
prot_len = float(q_hit.find('Iteration_query-len').text)
s_hits = q_hit.find('Iteration_hits')
hit_names, top_hit_id, top_e_val, top_identity, top_coverage = get_good_hits(s_hits, min_e_val, spc_str.lower(), gi_split_regex, gene_spc_regex, isoform_regex)
num_hits = len(hit_names)
top_coverage = round(top_coverage/prot_len*100.0, 1)
for g_data in gene_data[gene]:
for p_str, val in zip(property_strs, [num_hits, top_hit_id, top_identity, top_coverage]):
g_data[p_str % taxon_name] = val
def get_good_hits(s_hits, min_e_val, spc_str, gi_split_regex, gene_spc_regex, isoform_regex):
"""Counts based on the 'Hit_def' field in the subject hits, which is the name. Attempts to remove isoforms and predicted proteins from the count.
"""
best_hit_id, best_e_val, best_ident, best_coverage = None, min_e_val + 1, 0, 0
hit_names = set()
for s_hit in s_hits:
hit_e_val, hit_ident, hit_coverage = min_e_val + 1, 0, 0
for hsp in s_hit.find('Hit_hsps'):
e_val = float(hsp.find('Hsp_evalue').text)
if e_val < hit_e_val:
hit_e_val = e_val
hit_ident = round(float(hsp.find('Hsp_identity').text)/float(hsp.find('Hsp_align-len').text)*100, 1)
hit_coverage = int(hsp.find('Hsp_query-to').text) - int(hsp.find('Hsp_query-from').text)
if hit_e_val < min_e_val:
name = parse_name_from_hit(s_hit, spc_str, gi_split_regex, gene_spc_regex, isoform_regex)
if not name:
continue # A hit was found, but it did not match the spc_str
hit_names.add(name)
if hit_e_val < best_e_val:
best_hit_id = s_hit.find('Hit_accession').text.strip()
best_ident = hit_ident
best_e_val, best_coverage = hit_e_val, hit_coverage
if not hit_names:
return hit_names, None, None, 0, 0
return hit_names, best_hit_id, best_e_val, best_ident, best_coverage
def parse_name_from_hit(s_hit, spc_str, gi_split_regex, gene_spc_regex, isoform_regex):
name = find_gene_from_species(s_hit, spc_str, gi_split_regex, gene_spc_regex)
if not name:
return False
if 'isoform' in name:
nm, iso, rem = isoform_regex.match(name).groups()
name = nm + rem
if name.lower().startswith('predicted: '):
name = name[11:]
return name
def find_gene_from_species(s_hit, spc_str, gi_split_regex, gene_spc_regex):
for hit in gi_split_regex.split( s_hit.find('Hit_def').text ):
m = gene_spc_regex.match(hit)
if not m:
continue
name, spc = m.groups()
if spc_str in spc.lower():
return name
return False
# # # Getting expression data
def get_expression_data(gene_data, expression_file, sheetnames, conditions):
for sheetname in sheetnames:
parse_expression_sheet(gene_data, expression_file, sheetname, conditions)
null_exp = {c:'--' for c in conditions}
for gene, entries in gene_data.items():
for e in entries:
if 'expression_levels' not in e:
e['expression_levels'] = null_exp
def parse_expression_sheet(gene_data, filename, sheetname, conditions):
seq_name_key = 'Sequence Name'
replicate_inds = ['a', 'b', 'c']
frame = pandas.read_excel(filename, sheetname)
if len(frame.columns) != len(set(frame.columns)):
print('Error: at least one column header was not unique in sheet %s.' % sheetname)
exit()
cond_keys = [[cond+ind for ind in replicate_inds if cond+ind in frame.columns] for cond in conditions]
for i in frame.index:
row = frame.ix[i]
seq_name = row[seq_name_key]
if seq_name not in gene_data:
continue
avgs = [sum(row[k] for k in ck)/float(len(ck)) for ck in cond_keys]
max_expression = max(avgs)
exp = {c:'%i'%(round(a/max_expression*100.0, 0) if max_expression else 0) for a,c in zip(avgs, conditions)}
exp['_max_observed_expression'] = max_expression
for entry in gene_data[seq_name]:
entry['expression_levels'] = exp
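# Illustrative example of the normalization above: replicate averages of 10,
# 20 and 40 FPKM across three conditions give max_expression = 40, so the
# stored strings are '25', '50' and '100' and '_max_observed_expression' is 40.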
# # # Parse ChEMBL search file
def parse_chembl_results(gene_data, chembl_results_file):
max_e_val = 1E-30
chembl_data = {}
total_hits, sig_hits = 0, 0
with open(chembl_results_file) as f:
f.readline() # Header
for line in f:
if not line.strip():
continue
total_hits += 1
gene, chembl_id, tid, description, uniprot_id, target_type, species, _, _, identity, blast_score, e_value = line.split('\t')
identity, e_value = float(identity), float(e_value)
if e_value > max_e_val:
continue
sig_hits += 1
hit_data = {'chembl_id':chembl_id, 'species':species, 'identity':identity, 'e_value':e_value}
chembl_data.setdefault(gene, []).append(hit_data)
print('%i of the %i ChEMBL hits were below the E-value threshold of %.1e' % (sig_hits, total_hits, max_e_val))
for gene, data_list in chembl_data.items():
if gene not in gene_data:
continue
data_list.sort(key=lambda d: d['e_value'])
chembl_hits = ', '.join('%s (%i | %s)' % (d['chembl_id'], round(d['identity'], 0), d['species']) for d in data_list)
for g_data in gene_data[gene]:
g_data['num_chembl_hits'] = len(data_list)
g_data['chembl_hits'] = chembl_hits
# # # Misc functions
def print_deficiencies(rxn_data):
r_list = sorted(list(rxn_data.keys()))
r_list.sort(key=lambda r:rxn_data[r]['deficiencies'])
print('%i reactions with significant impact:' % len(r_list))
for r_id in r_list:
print('%s %.1f%% of objective value.' % (r_id, rxn_data[r_id]['objective']))
print('\t%s' % rxn_data[r_id]['deficiencies'])
print('\t%s' % ', '.join(rxn_data[r_id]['genes']))
# # # Main paths
files_dir = '/mnt/hgfs/win_projects/brugia_project'
utility_dir = '/home/dave/Desktop/projects/brugia_project/utility'
# # # Main run options
model_file = 'model_b_mal_4.5-wip.xlsx'
run_str = 'bm_4.5-lo_ox-lo_glu'
wolbachia_ratio = 0.1
objective_threshold_fraction = 0.25 # Considered significant if resulting objective function is less than 0.25 (25%) of the original.
do_double_ko = False
expression_conditions = ['L3', 'L3D6', 'L3D9', 'L4', 'F30', 'M30', 'F42', 'M42', 'F120', 'M120']
expression_headings = [('Max\nexpression',), ('Larval expression\n(L3|L3D6|L3D9|L4)', ('L3','L3D6','L3D9','L4')), ('Adult female expression\n(F30|F42|F120)', ('F30','F42','F120')), ('Adult male expression\n(M30|M42|M120)', ('M30','M42','M120'))]
gene_data_out_file = os.path.join(files_dir, '%s_gene_info.xlsx'%(run_str))
# # # Required files
expression_file = os.path.join(files_dir, 'All_Stages_Brugia_Wolbachia_FPKMs.xlsx')
expression_sheets = ('Brugia_FPKMs', 'Wolbachia_FPKMs')
gen_pept_file = os.path.join(utility_dir, 'b_malayi_genpept.gp')
human_blast_xml_file = os.path.join(utility_dir, '%s_human_blast.xml'%(run_str))
oncho_blast_xml_file = os.path.join(utility_dir, '%s_oncho_blast.xml'%(run_str))
chembl_results_file = os.path.join(utility_dir, '%s_chembl.txt'%(run_str))
# # # Intermediate files created
prot_sequences_file = os.path.join(utility_dir, '%s_prots.fa'%(run_str))
rxn_ko_data_file = os.path.join(utility_dir, '%s_rxns.pkl'%(run_str))
gene_ko_data_file = os.path.join(utility_dir, '%s_genes.pkl'%(run_str))
# # # Run steps
if not os.path.isfile(rxn_ko_data_file):
rxn_data = {}
model_path = os.path.join(files_dir, model_file)
model = load_model(model_path, wolbachia_ratio)
rxn_to_genes = get_rxns_to_delete(model)
do_deletions(rxn_data, model, rxn_to_genes, do_double_ko, objective_threshold_fraction) # Fills out 'objective', 'deficiencies', and 'genes' of reactions in rxn_data.
save_data_object(rxn_data, rxn_ko_data_file)
else:
rxn_data = load_data_object(rxn_ko_data_file)
#print_deficiencies(rxn_data)
if not os.path.isfile(gene_ko_data_file):
gene_data = process_gene_data(rxn_data)
get_expression_data(gene_data, expression_file, expression_sheets, expression_conditions) # Fills out 'expression_levels'
if not os.path.isfile(prot_sequences_file):
prot_to_std = get_prot_name_translations(gene_data, gen_pept_file)
prots = save_prot_sequences(gene_data, prot_to_std, prot_sequences_file)
else:
prots = sequ.loadfasta(prot_sequences_file)
for blast_file in [human_blast_xml_file, oncho_blast_xml_file]:
if not os.path.isfile(blast_file):
print('Error: no BLAST results found at %s' % blast_file)
exit()
parse_blast_xml(gene_data, human_blast_xml_file, 'human', 'homo sapiens')
parse_blast_xml(gene_data, oncho_blast_xml_file, 'oncho', 'onchocerca volvulus')
if not os.path.isfile(chembl_results_file):
print('Error: no ChEMBL results found at %s' % chembl_results_file)
exit()
# parse_chembl_results(gene_data, chembl_results_file) # Where it should be called.
save_data_object(gene_data, gene_ko_data_file)
else:
gene_data = load_data_object(gene_ko_data_file)
parse_chembl_results(gene_data, chembl_results_file) # # # Temp place to be called from.
save_data_to_excel(gene_data, gene_data_out_file, expression_headings)
| dave-the-scientist/brugia_project | get_knockout_info.py | Python | gpl-3.0 | 20,007 | 0.006998 |
#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
"""Module for generating lexicon using Rao and Ravichandran's method (2009).
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from blair_goldensohn import build_mtx, seeds2seedpos
from common import POSITIVE, NEGATIVE, NEUTRAL
from graph import Graph
from itertools import chain
from scipy import sparse
import numpy as np
import sys
##################################################################
# Constants
POS_IDX = 0
NEG_IDX = 1
NEUT_IDX = 2
POL_IDX = 1
SCORE_IDX = 2
MAX_I = 300
IDX2CLS = {POS_IDX: POSITIVE, NEG_IDX: NEGATIVE, NEUT_IDX: NEUTRAL}
##################################################################
# Methods
def _eq_sparse(a_M1, a_M2):
"""Compare two sparse matrices.
@param a_M1 - first sparse matrix to compare
@param a_M2 - second sparse matrix to compare
@return True if both matrices are equal, non-False otherwise
"""
if type(a_M1) != type(a_M2):
return False
    if not np.allclose(a_M1.get_shape(), a_M2.get_shape()):
return False
X, Y = a_M1.nonzero()
IDX1 = set([(x, y) for x, y in zip(X, Y)])
X, Y = a_M2.nonzero()
IDX2 = [(x, y) for x, y in zip(X, Y) if (x, y) not in IDX1]
IDX = list(IDX1)
IDX.extend(IDX2)
IDX.sort()
for x_i, y_i in IDX:
# print("a_M1[{:d}, {:d}] = {:f}".format(x_i, y_i, a_M1[x_i, y_i]))
# print("a_M2[{:d}, {:d}] = {:f}".format(x_i, y_i, a_M2[x_i, y_i]))
# print("is_close", np.isclose(a_M1[x_i, y_i], a_M2[x_i, y_i]))
if not np.isclose(a_M1[x_i, y_i], a_M2[x_i, y_i]):
return False
return True
def _mtx2tlist(a_Y, a_term2idx):
"""Convert matrix to a list of polar terms.
@param a_Y - matrix of polar terms
@param a_terms2idx - mapping from terms to their matrix indices
@return list of 3-tuples (word, polarity, score)
"""
ret = []
iscore = 0.
irow = None
lex2lidx = {}
ipol = lidx = 0
for (iword, ipos), idx in a_term2idx.iteritems():
# obtain matrix row for that term
irow = a_Y.getrow(idx).toarray()
# print("irow =", repr(irow))
ipol = irow.argmax(axis=1)[0]
iscore = irow[0, ipol]
# print("ipol =", repr(ipol))
# print("iscore =", repr(iscore))
if ipol != NEUT_IDX:
ipol = IDX2CLS[ipol]
if iword in lex2lidx:
lidx = lex2lidx[iword]
if abs(iscore) > abs(ret[lidx][SCORE_IDX]):
ret[lidx][POL_IDX] = ipol
ret[lidx][SCORE_IDX] = iscore
else:
lex2lidx[iword] = len(ret)
ret.append([iword, ipol, iscore])
return ret
def _sign_normalize(a_Y, a_terms2idx, a_pos, a_neg, a_neut,
a_set_dflt=None):
"""Fix seed values and row-normalize the class matrix.
@param a_Y - class matrix to be changed
@param a_terms2idx - mapping from terms to their matrix indices
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_set_dflt - function to set the default value of an unkown term
@return void
@note modifies the input matrix in place
"""
seed_found = False
for iterm, i in a_terms2idx.iteritems():
if iterm in a_pos:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, POS_IDX] = 1.
elif iterm in a_neg:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, NEG_IDX] = 1.
elif iterm in a_neut:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, NEUT_IDX] = 1.
elif a_set_dflt is not None:
a_set_dflt(a_Y, i)
assert seed_found, "No seed term found in matrix."
# normalize class scores
Z = a_Y.sum(1)
x, y = a_Y.nonzero()
for i, j in zip(x, y):
# print("a_Y[{:d}, {:d}] =".format(i, j), repr(a_Y[i, j]))
# print("Z[{:d}, 0] =".format(i), repr(Z[i, 0]))
a_Y[i, j] /= float(Z[i, 0]) or 1.
# print("*a_Y[{:d}, {:d}] =".format(i, j), repr(a_Y[i, j]))
def prune_normalize(a_M):
"""Make each of the adjacency matrix sum up to one.
Args:
a_M (scipy.sparse.csr): matrix to be normalized
Returns:
void:
Note:
modifies the input matrix in place
"""
# remove negative transitions
nonzero_xy = a_M.nonzero()
for i, j in zip(*nonzero_xy):
if a_M[i, j] < 0.:
a_M[i, j] = 0.
a_M.prune()
# normalize all outgoing transitions
Z = a_M.sum(0)
nonzero_xy = a_M.nonzero()
for i, j in zip(*nonzero_xy):
a_M[i, j] /= float(Z[0, j]) or 1.
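# Illustrative sketch (assumption: a_M is a scipy.sparse matrix supporting item
# assignment and .prune(), as used above): a column holding weights [2., 2.]
# is rescaled to [0.5, 0.5], a negative entry such as -1. is dropped before
# normalization, and an all-zero column is left unchanged thanks to the
# `or 1.` guard in the division.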
def rao_min_cut(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
a_ext_syn_rels):
"""Extend sentiment lexicons using the min-cut method of Rao (2009).
@param a_germanet - GermaNet instance
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_seed_pos - part-of-speech class of seed synsets ("none" for no
restriction)
@param a_ext_syn_rels - use extended set of synonymous relations
@return list of polar terms, their polarities, and scores
"""
sgraph = Graph(a_germanet, a_ext_syn_rels)
# partition the graph into subjective and objective terms
mcs, cut_edges, _, _ = sgraph.min_cut(a_pos | a_neg, a_neut, a_seed_pos)
print("min_cut_score (subj. vs. obj.) = {:d}".format(mcs),
file=sys.stderr)
# remove edges belonging to the min cut (i.e., cut the graph)
for isrc, itrg in cut_edges:
if isrc in sgraph.nodes:
sgraph.nodes[isrc].pop(itrg, None)
# separate the graph into positive and negative terms
mcs, _, pos, neg = sgraph.min_cut(a_pos, a_neg, a_seed_pos)
print("min_cut_score (pos. vs. neg.) = {:d}".format(mcs),
file=sys.stderr)
ret = [(inode[0], POSITIVE, 1.) for inode in pos]
ret.extend((inode[0], NEGATIVE, -1.) for inode in neg)
return ret
def rao_lbl_prop(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
a_ext_syn_rels):
"""Extend sentiment lexicons using the lbl-prop method of Rao (2009).
@param a_germanet - GermaNet instance
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_seed_pos - part-of-speech class of seed synsets ("none" for no
restriction)
@param a_ext_syn_rels - use extended set of synonymous relations
@return list of polar terms, their polarities, and scores
"""
if a_seed_pos is None:
a_seed_pos = ["adj", "nomen", "verben"]
else:
a_seed_pos = [a_seed_pos]
a_pos = seeds2seedpos(a_pos, a_seed_pos)
a_neg = seeds2seedpos(a_neg, a_seed_pos)
a_neut = seeds2seedpos(a_neut, a_seed_pos)
# obtain and row-normalize the adjacency matrix
terms = set((ilex, ipos)
for isynid, ipos in a_germanet.synid2pos.iteritems()
for ilexid in a_germanet.synid2lexids[isynid]
for ilex in a_germanet.lexid2lex[ilexid]
)
terms2idx = {iterm: i for i, iterm in enumerate(terms)}
M = build_mtx(a_germanet, terms2idx, set(),
a_ext_syn_rels, len(terms))
prune_normalize(M)
# no need to transpose M[i, j] is the link going from node j to the node i;
# and, in Y, the Y[j, k] cell is the polarity score of the class k for the
# term j
# M = M.transpose()
# check that the matrix is column normalized
    assert all(i == 0 or np.isclose(i, 1.)
               for i in np.asarray(M.sum(0)).ravel())
# initialize label matrix
Y = sparse.lil_matrix((len(terms), len(IDX2CLS)), dtype=np.float32)
def _set_neut_one(X, i):
X[i, NEUT_IDX] = 1.
_sign_normalize(Y, terms2idx, a_pos, a_neg, a_neut,
_set_neut_one)
# Y = Y.tocsr()
# output first M row and Y column
# for i in xrange(len(terms)):
# if M[0, i] != 0:
# print("M[0, {:d}] =".format(i), M[0, i], file=sys.stderr)
# if Y[i, 0] != 0:
# print("Y[i, 0] =", Y[i, 0], file=sys.stderr)
# B = M.dot(Y)
# print("B[0, 0] =", B[0, 0], file=sys.stderr)
# perform multiplication until convergence
i = 0
prev_Y = None
while not _eq_sparse(prev_Y, Y) and i < MAX_I:
prev_Y = Y.copy()
Y = Y.tocsc()
Y = M.dot(Y)
Y = Y.tolil()
_sign_normalize(Y, terms2idx, a_pos, a_neg, a_neut)
i += 1
ret = _mtx2tlist(Y, terms2idx)
ret.sort(key=lambda el: abs(el[-1]), reverse=True)
return ret
| WladimirSidorenko/SentiLex | scripts/rao.py | Python | mit | 8,962 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013, 2014 CERN
# Author: Pawel Szostek (pawel.szostek@cern.ch)
# Multi-tool support by Javier D. Garcia-Lasheras (javier@garcialasheras.com)
#
# This file is part of Hdlmake.
#
# Hdlmake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hdlmake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hdlmake. If not, see <http://www.gnu.org/licenses/>.
#
import string
from string import Template
import fetch
from makefile_writer import MakefileWriter
import logging
class ToolControls(MakefileWriter):
def detect_version(self, path):
pass
def get_keys(self):
tool_info = {
'name': 'Aldec Active-HDL',
'id': 'aldec',
'windows_bin': 'vsimsa',
'linux_bin': None
}
return tool_info
def get_standard_libraries(self):
ALDEC_STANDARD_LIBS = ['ieee', 'std']
return ALDEC_STANDARD_LIBS
def generate_simulation_makefile(self, fileset, top_module):
# TODO: ??
from srcfile import VHDLFile, VerilogFile, SVFile
makefile_tmplt_1 = string.Template("""TOP_MODULE := ${top_module}
ALDEC_CRAP := \
run.command \
library.cfg
#target for performing local simulation
sim: sim_pre_cmd
""")
makefile_text_1 = makefile_tmplt_1.substitute(
top_module=top_module.top_module
)
self.write(makefile_text_1)
self.writeln("\t\techo \"# Active-HDL command file, generated by HDLMake\" > run.command")
self.writeln()
self.writeln("\t\techo \"# Create library and set as default target\" >> run.command")
self.writeln("\t\techo \"alib work\" >> run.command")
self.writeln("\t\techo \"set worklib work\" >> run.command")
self.writeln()
self.writeln("\t\techo \"# Compiling HDL source files\" >> run.command")
for vl in fileset.filter(VerilogFile):
self.writeln("\t\techo \"alog " + vl.rel_path() + "\" >> run.command")
for sv in fileset.filter(SVFile):
self.writeln("\t\techo \"alog " + sv.rel_path() + "\" >> run.command")
for vhdl in fileset.filter(VHDLFile):
self.writeln("\t\techo \"acom " + vhdl.rel_path() + "\" >> run.command")
self.writeln()
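        # Illustrative result (file names are hypothetical): for one Verilog
        # and one VHDL source, the echoed run.command script would contain
        #   alib work
        #   set worklib work
        #   alog core/top.v
        #   acom core/top_pkg.vhd
        # which vsimsa then executes via the Makefile rule below.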
makefile_tmplt_2 = string.Template("""
\t\tvsimsa -do run.command
sim_pre_cmd:
\t\t${sim_pre_cmd}
sim_post_cmd: sim
\t\t${sim_post_cmd}
#target for cleaning all intermediate stuff
clean:
\t\trm -rf $$(ALDEC_CRAP) work
#target for cleaning final files
mrproper: clean
\t\trm -f *.vcd *.asdb
.PHONY: mrproper clean sim sim_pre_cmd sim_post_cmd
""")
if top_module.sim_pre_cmd:
sim_pre_cmd = top_module.sim_pre_cmd
else:
sim_pre_cmd = ''
if top_module.sim_post_cmd:
sim_post_cmd = top_module.sim_post_cmd
else:
sim_post_cmd = ''
makefile_text_2 = makefile_tmplt_2.substitute(
sim_pre_cmd=sim_pre_cmd,
sim_post_cmd=sim_post_cmd,
)
self.write(makefile_text_2)
| JamesHyunKim/myhdl | hdlmake/tools/aldec/aldec.py | Python | gpl-3.0 | 3,541 | 0.003106 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import decorators
from telemetry.internal.actions import loop
from telemetry.testing import tab_test_case
import py_utils
AUDIO_1_LOOP_CHECK = 'window.__hasEventCompleted("#audio_1", "loop");'
VIDEO_1_LOOP_CHECK = 'window.__hasEventCompleted("#video_1", "loop");'
class LoopActionTest(tab_test_case.TabTestCase):
def setUp(self):
tab_test_case.TabTestCase.setUp(self)
self.Navigate('video_test.html')
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWithNoSelector(self):
"""Tests that with no selector Loop action loops first media element."""
action = loop.LoopAction(loop_count=2, selector='#video_1',
timeout_in_seconds=10)
action.WillRunAction(self._tab)
action.RunAction(self._tab)
# Assert only first video has played.
self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWithAllSelector(self):
"""Tests that Loop action loops all video elements with selector='all'."""
action = loop.LoopAction(loop_count=2, selector='all',
timeout_in_seconds=10)
action.WillRunAction(self._tab)
# Both videos not playing before running action.
self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
action.RunAction(self._tab)
# Assert all media elements played.
self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWaitForLoopTimeout(self):
"""Tests that wait_for_loop timeout_in_secondss if video does not loop."""
action = loop.LoopAction(loop_count=2, selector='#video_1',
timeout_in_seconds=1)
action.WillRunAction(self._tab)
self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertRaises(py_utils.TimeoutException, action.RunAction, self._tab)
| benschmaus/catapult | telemetry/telemetry/internal/actions/loop_unittest.py | Python | bsd-3-clause | 2,356 | 0.002971 |
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="sankey.link.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/sankey/link/hoverlabel/_font.py | Python | mit | 1,877 | 0.000533 |
#!/usr/bin/env python
"""beanstalkc - A beanstalkd Client Library for Python"""
import logging
import socket
import sys
__license__ = '''
Copyright (C) 2008-2016 Andreas Bolka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__version__ = '0.4.0'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 11300
DEFAULT_PRIORITY = 2 ** 31
DEFAULT_TTR = 120
DEFAULT_TUBE_NAME = 'default'
class BeanstalkcException(Exception): pass
class UnexpectedResponse(BeanstalkcException): pass
class CommandFailed(BeanstalkcException): pass
class DeadlineSoon(BeanstalkcException): pass
class SocketError(BeanstalkcException):
@staticmethod
def wrap(wrapped_function, *args, **kwargs):
try:
return wrapped_function(*args, **kwargs)
except socket.error:
err = sys.exc_info()[1]
raise SocketError(err)
class Connection(object):
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, parse_yaml=True,
connect_timeout=socket.getdefaulttimeout()):
if parse_yaml is True:
try:
parse_yaml = __import__('yaml').load
except ImportError:
logging.error('Failed to load PyYAML, will not parse YAML')
parse_yaml = False
self._connect_timeout = connect_timeout
self._parse_yaml = parse_yaml or (lambda x: x)
self.host = host
self.port = port
self.connect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def connect(self):
"""Connect to beanstalkd server."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._connect_timeout)
SocketError.wrap(self._socket.connect, (self.host, self.port))
self._socket.settimeout(None)
self._socket_file = self._socket.makefile('rb')
def close(self):
"""Close connection to server."""
try:
self._socket.sendall('quit\r\n')
except socket.error:
pass
try:
self._socket.close()
except socket.error:
pass
def reconnect(self):
"""Re-connect to server."""
self.close()
self.connect()
def _interact(self, command, expected_ok, expected_err=[]):
SocketError.wrap(self._socket.sendall, command)
status, results = self._read_response()
if status in expected_ok:
return results
elif status in expected_err:
raise CommandFailed(command.split()[0], status, results)
else:
raise UnexpectedResponse(command.split()[0], status, results)
def _read_response(self):
line = SocketError.wrap(self._socket_file.readline)
if not line:
raise SocketError()
response = line.split()
return response[0], response[1:]
def _read_body(self, size):
body = SocketError.wrap(self._socket_file.read, size)
SocketError.wrap(self._socket_file.read, 2) # trailing crlf
if size > 0 and not body:
raise SocketError()
return body
def _interact_value(self, command, expected_ok, expected_err=[]):
return self._interact(command, expected_ok, expected_err)[0]
def _interact_job(self, command, expected_ok, expected_err, reserved=True):
jid, size = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return Job(self, int(jid), body, reserved)
def _interact_yaml(self, command, expected_ok, expected_err=[]):
size, = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return self._parse_yaml(body)
def _interact_peek(self, command):
try:
return self._interact_job(command, ['FOUND'], ['NOT_FOUND'], False)
except CommandFailed:
return None
# -- public interface --
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
"""Put a job into the current tube. Returns job id."""
assert isinstance(body, str), 'Job body must be a str instance'
jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
priority, delay, ttr, len(body), body),
['INSERTED'],
['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
return int(jid)
def reserve(self, timeout=None):
"""Reserve a job from one of the watched tubes, with optional timeout
in seconds. Returns a Job object, or None if the request times out."""
if timeout is not None:
command = 'reserve-with-timeout %d\r\n' % timeout
else:
command = 'reserve\r\n'
try:
return self._interact_job(command,
['RESERVED'],
['DEADLINE_SOON', 'TIMED_OUT'])
except CommandFailed:
exc = sys.exc_info()[1]
_, status, results = exc.args
if status == 'TIMED_OUT':
return None
elif status == 'DEADLINE_SOON':
raise DeadlineSoon(results)
def kick(self, bound=1):
"""Kick at most bound jobs into the ready queue."""
return int(self._interact_value('kick %d\r\n' % bound, ['KICKED']))
def kick_job(self, jid):
"""Kick a specific job into the ready queue."""
self._interact('kick-job %d\r\n' % jid, ['KICKED'], ['NOT_FOUND'])
def peek(self, jid):
"""Peek at a job. Returns a Job, or None."""
return self._interact_peek('peek %d\r\n' % jid)
def peek_ready(self):
"""Peek at next ready job. Returns a Job, or None."""
return self._interact_peek('peek-ready\r\n')
def peek_delayed(self):
"""Peek at next delayed job. Returns a Job, or None."""
return self._interact_peek('peek-delayed\r\n')
def peek_buried(self):
"""Peek at next buried job. Returns a Job, or None."""
return self._interact_peek('peek-buried\r\n')
def tubes(self):
"""Return a list of all existing tubes."""
return self._interact_yaml('list-tubes\r\n', ['OK'])
def using(self):
"""Return the tube currently being used."""
return self._interact_value('list-tube-used\r\n', ['USING'])
def use(self, name):
"""Use a given tube."""
return self._interact_value('use %s\r\n' % name, ['USING'])
def watching(self):
"""Return a list of all tubes being watched."""
return self._interact_yaml('list-tubes-watched\r\n', ['OK'])
def watch(self, name):
"""Watch a given tube."""
return int(self._interact_value('watch %s\r\n' % name, ['WATCHING']))
def ignore(self, name):
"""Stop watching a given tube."""
try:
return int(self._interact_value('ignore %s\r\n' % name,
['WATCHING'],
['NOT_IGNORED']))
except CommandFailed:
# Tried to ignore the only tube in the watchlist, which failed.
return 0
def stats(self):
"""Return a dict of beanstalkd statistics."""
return self._interact_yaml('stats\r\n', ['OK'])
def stats_tube(self, name):
"""Return a dict of stats about a given tube."""
return self._interact_yaml('stats-tube %s\r\n' % name,
['OK'],
['NOT_FOUND'])
def pause_tube(self, name, delay):
"""Pause a tube for a given delay time, in seconds."""
self._interact('pause-tube %s %d\r\n' % (name, delay),
['PAUSED'],
['NOT_FOUND'])
# -- job interactors --
def delete(self, jid):
"""Delete a job, by job id."""
self._interact('delete %d\r\n' % jid, ['DELETED'], ['NOT_FOUND'])
def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
"""Release a reserved job back into the ready queue."""
self._interact('release %d %d %d\r\n' % (jid, priority, delay),
['RELEASED', 'BURIED'],
['NOT_FOUND'])
def bury(self, jid, priority=DEFAULT_PRIORITY):
"""Bury a job, by job id."""
self._interact('bury %d %d\r\n' % (jid, priority),
['BURIED'],
['NOT_FOUND'])
def touch(self, jid):
"""Touch a job, by job id, requesting more time to work on a reserved
job before it expires."""
self._interact('touch %d\r\n' % jid, ['TOUCHED'], ['NOT_FOUND'])
def stats_job(self, jid):
"""Return a dict of stats about a job, by job id."""
return self._interact_yaml('stats-job %d\r\n' % jid,
['OK'],
['NOT_FOUND'])
class Job(object):
def __init__(self, conn, jid, body, reserved=True):
self.conn = conn
self.jid = jid
self.body = body
self.reserved = reserved
def _priority(self):
stats = self.stats()
if isinstance(stats, dict):
return stats['pri']
return DEFAULT_PRIORITY
# -- public interface --
def delete(self):
"""Delete this job."""
self.conn.delete(self.jid)
self.reserved = False
def release(self, priority=None, delay=0):
"""Release this job back into the ready queue."""
if self.reserved:
self.conn.release(self.jid, priority or self._priority(), delay)
self.reserved = False
def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False
def kick(self):
"""Kick this job alive."""
self.conn.kick_job(self.jid)
def touch(self):
"""Touch this reserved job, requesting more time to work on it before
it expires."""
if self.reserved:
self.conn.touch(self.jid)
def stats(self):
"""Return a dict of stats about this job."""
return self.conn.stats_job(self.jid)
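# Illustrative usage sketch (not part of the library): the host, port, tube name
# and job body below are assumptions; the calls mirror the public interface above.
def _usage_example():
    conn = Connection(host='localhost', port=11300)
    conn.use('emails')             # produce into the 'emails' tube
    conn.put('hello world')
    conn.watch('emails')           # consume from the same tube
    job = conn.reserve(timeout=5)  # returns None if nothing arrives in 5 seconds
    if job is not None:
        print(job.body)
        job.delete()
    conn.close()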
if __name__ == '__main__':
import nose
nose.main(argv=['nosetests', '-c', '.nose.cfg'])
| earl/beanstalkc | beanstalkc.py | Python | apache-2.0 | 10,921 | 0.000458 |
"""
Imports all submodules
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from deepchem.dock.pose_generation import PoseGenerator
from deepchem.dock.pose_generation import VinaPoseGenerator
from deepchem.dock.pose_scoring import PoseScorer
from deepchem.dock.pose_scoring import GridPoseScorer
from deepchem.dock.docking import Docker
from deepchem.dock.docking import VinaGridRFDocker
from deepchem.dock.docking import VinaGridDNNDocker
from deepchem.dock.binding_pocket import ConvexHullPocketFinder
from deepchem.dock.binding_pocket import RFConvexHullPocketFinder
| bowenliu16/deepchem | deepchem/dock/__init__.py | Python | gpl-3.0 | 637 | 0.00157 |
import findspark #lets pyspark be imported when this file lives outside the Spark installation folder
findspark.init('/home/jake/spark/spark-2.2.0-bin-hadoop2.7')
## 1) SPARK DATAFRAME
#--------------------------------------------------------
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
spark = SparkSession.builder.appName("Basics").getOrCreate() #appName can be anything
## READING
#--------------------------------------------------------
df = spark.read.json('people.json') #json
df = spark.read.csv('appl_stock.csv', inferSchema=True, header=True) #csv
df = spark.read.csv(r'/home/jake/Desktop/test3.txt') #text
df.show()
df.show(20, False) #show non-truncated results
df.head(2) #returns the first n rows as a list of Row objects
# [Row(age=None, name='Michael'), Row(age=30, name='Andy')]
## WRITING
#--------------------------------------------------------
# csv
df.toPandas().to_csv("sample.csv", header=True)
# will auto write to hdfs
#best to check & define column data types first
df.write.option('path','jake/foldername/operator_lookup.parquet').partitionBy("datestring").format("parquet").saveAsTable("operator_lookup")
## BASICS
#--------------------------------------------------------
df.limit(10).collect() #collect the result instead of showing
row.asDict() #produce as dictionary
df.show() #print the results
df.count() # print row count
len(df.columns) #print column count
df.printSchema() #print schema, datatypes, nullable
## SCHEMA & DATATYPES
#--------------------------------------------------------
#changing the schema
from pyspark.sql.types import StructField,StringType,IntegerType,StructType
# true = nullable, false = non-nullable
schema = StructType([StructField("age", IntegerType(), True),
StructField("name", StringType(), True)])
df = spark.read.json('people.json', schema)
df.printSchema()
# FORMAT DECIMAL PLACES
sales_std.select(format_number('std',2)).show()
# CONVERT DATATYPE
df = df.withColumn("Acct-Session-Time", df["Acct-Session-Time"].cast("integer"))
## CREATE DATAFRAME
#--------------------------------------------------------
# value, column name
df = sqlContext.createDataFrame([('cat \n\n elephant rat \n rat cat', )], ['word'])
# empty dataframe
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
schema = StructType([StructField("k", StringType(), True), StructField("v", IntegerType(), False)])
# or df = sc.parallelize([]).toDF(schema)
df = spark.createDataFrame([], schema)
# from pandas
df = pd.DataFrame([("foo", 1), ("bar", 2)], columns=("k", "v"))
sqlCtx = SQLContext(sc)
sqlCtx.createDataFrame(df).show()
# from dictionary
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkContext
d = {'a': 'test', 'b': 'test2'}
sc = SparkContext()
spark = SparkSession(sc)
rdd = sc.parallelize([d])
#toDF() needs a SparkSession; rows must be Row objects before conversion to df
rdd.map(lambda x: Row(**x)).toDF().show() # **x unpacks the dict into a Row
# OR using createDataFrame
rdd = rdd.map(lambda x: Row(**x))
spark.createDataFrame(rdd).show()
## APPENDING NEW DATA
#--------------------------------------------------------
firstDF = spark.range(3).toDF("myCol")
newRow = spark.createDataFrame([[20]])
appended = firstDF.union(newRow)
display(appended)
## EXPLORATORY
#--------------------------------------------------------
df.describe() #returns a summary DataFrame (count, mean, stddev, min, max)
df.describe().show() #display the summary statistics
## COLUMNS
#--------------------------------------------------------
df.columns #show column names
df.select('age').show() #have to use select to choose entire column
df.select(['age','name']).show() #multiple columns
# NEW COLUMNS
# Adding a new column with a simple copy
df.withColumn('newage',df['age']).show()
df.withColumn('add_one_age',df['age']+1).show() #with calculation
# RENAME COLUMN
df = df.withColumnRenamed('age','supernewage')
# DROP COLUMNS
df.drop('columnName')
## SQL
#--------------------------------------------------------
# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")
spark.sql("SELECT * FROM people WHERE age=30").show()
# ORDER BY
df.orderBy("Sales").show() #ascending
df.orderBy(df["Sales"].desc()).show() #descending
# REPLACE
from pyspark.sql.functions import *
df = df.withColumn('address', regexp_replace('address', 'lane', 'ln')) #in column address, replace lane with ln
## UDF (User-Defined Function)
#--------------------------------------------------------
from pyspark.sql.functions import udf
# using normal function
def CountryCode(value):
    #placeholder transformation - replace with real lookup logic
    return value[:2]
udf_CountryCode = udf(CountryCode)
df = df.select("*", udf_CountryCode(df['target_column']).alias('new_column'))
# using udf lambda
udf_UserName = udf(lambda x: x.split('@')[0])
df = df.select("*", df('target_column').alias('new_column'))
## NULL VALUES
#--------------------------------------------------------
# DROP NAN
# Drop any row that contains missing data
df.na.drop().show()
# Has to have at least 2 NON-null values in a row
df.na.drop(thresh=2).show()
# rows in Sales that have null
df.na.drop(subset=["Sales"]).show()
# rows that have any nulls
df.na.drop(how='any').show()
# rows that have all nulls
df.na.drop(how='all').show()
# FILL NAN
# Spark is actually smart enough to match up & fill the data types.
# only fill in strings
df.na.fill('NEW VALUE').show()
# only fill in numeric
df.na.fill(0).show()
# fill in specific column
df.na.fill('No Name',subset=['Name']).show()
# fill in values with mean
df.na.fill(df.select(mean(df['Sales'])).collect()[0][0],['Sales']).show()
## FILTERING
#--------------------------------------------------------
df.filter("Close < 500").show() #SQL synatx
df.filter(df["Close"] < 500).show() #Python synatx
df.filter("Close<500").select(['Open','Close']).show()
#Multiple conditions
df.filter( (df["Close"] < 200) & (df['Open'] > 200) ).show() #AND &
df.filter( (df["Close"] < 200) | (df['Open'] > 200) ).show() #OR |
df.filter( (df["Close"] < 200) & ~(df['Open'] < 200) ).show() #NOT ~
df.filter(df["Low"] == 197.16).show()
## AGGREGATE
#--------------------------------------------------------
df.groupBy("Company").mean().show() #Mean
df.groupBy("Company").count().show() #Count
df.groupBy("Company").max().show() #Max
df.groupBy("Company").min().show() #Min
df.groupBy("Company").sum().show() #Sum
df.agg({'Sales':'max'}).show() #aggregate across all rows to get one result
from pyspark.sql.functions import countDistinct, avg, stddev
df.select(countDistinct("Sales")).show() #count distinct
df.select(countDistinct("Sales").alias("Distinct Sales")).show() #change alias name
df.select(avg('Sales')).show() #average
df.select(stddev("Sales")).show() #stdev
## DATETIME
#--------------------------------------------------------
from pyspark.sql.functions import (format_number, dayofmonth, hour,
dayofyear, month, year,
weekofyear, date_format)
df.select(dayofmonth(df['Date'])).show() #day of month
df.select(hour(df['Date'])).show() #hour
df.select(dayofyear(df['Date'])).show() #day of year
df.select(month(df['Date'])).show() #month
df.select(year(df['Date'])).show() #year
## 2) USING RDD (Resilient Distributed Dataset)
# Spark is slowly transitioning to DataFrames, but it is still good to learn the original RDD parsing,
# especially when the data is not already in a dataframe-friendly format
#--------------------------------------------------------
from pyspark import SparkConf, SparkContext
# set configuration & spark context object
conf = SparkConf().setMaster("local").setAppName("MinTemperatures")
conf = SparkConf().setMaster("local[*]").setAppName("MovieSimilarities") #[*] use all cores in local computer
sc = SparkContext(conf = conf)
#--------------------------------------------------------
# call the data from file and create RDD (Resilient Distributed Dataset)
rdd = sc.textFile("file:///Users/Spark/1800.csv")
rdd.collect() #print results
rdd.take(n) #print n results
#--------------
### RDD MAPPING
parsedLines = rdd.map(parseLine) #map transforms each entry; output has the same number of entries as the input
words = input.flatMap(lambda x: x.split()) #use flat map function,flattens output, thus will have more entries than input
# difference illustrated here: https://www.linkedin.com/pulse/difference-between-map-flatmap-transformations-spark-pyspark-pandey
dicts = sc.parallelize([{"foo": 1, "bar": 2}, {"foo": 3, "baz": -1, "bar": 5}])
dicts.flatMap(lambda x: x.items()).collect()
>>> [('bar', 2), ('foo', 1), ('bar', 5), ('foo', 3), ('baz', -1)]
#--------------
### RDD REDUCE
# key/value functions
# reduce by key, x & y represent values of same key
total = parsedLines.reduceByKey(lambda x, y: x + y)
totalsByAge = rdd.mapValues(lambda x: (x, 1)).reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))
# swap key with value; and sort result by key
swap = total.map(lambda x: (x[1],x[0])).sortByKey() #or .sortByKey(ascending = False)
# look up from another RDD
mostPopularName = namesRdd.lookup(mostPopular[1])[0]
#--------------
# collect the results
results = words.collect()
#--------------
# broadcasting; send data to every node ahead of time
nameDict = sc.broadcast(loadMovieNames())
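# illustrative sketch (movieCounts is an assumed RDD of (movieID, count) pairs);
# workers read the broadcast dict through .value instead of re-shipping it with every task
namedCounts = movieCounts.map(lambda x: (nameDict.value[x[0]], x[1]))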
#--------------
# partitioning; explicitly control how key/value RDDs are split so repeated key-based operations avoid re-shuffling
# for reduceByKey(), join(), lookup(), groupByKey(), etc.
.partitionBy(100)
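# illustrative sketch (ratings is an assumed RDD of (userID, rating) pairs);
# partition once, then repeated key-based operations reuse the same partitioning
partitionedRatings = ratings.partitionBy(100)
joined = partitionedRatings.join(partitionedRatings)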
#--------------
# SUBMIT IN CMD TO RUN SCRIPT
spark-submit script_name.py
spark-submit --executor-memory 1g MovieSimilarities1M.py 260 #change executor memory from default 512Mb to 1G
spark-submit --version #check spark version
# troubleshooting UI
# open localhost:4040 in a browser while the script is running to view the Spark troubleshooting UI
| mapattacker/cheatsheets | python/pyspark.py | Python | mit | 10,361 | 0.02326 |
from generate_chain_system import *
class BezierConstraintSolverOdd( BezierConstraintSolver ):
'''
Free direction, magnitude fixed (for G1 or A).
'''
def update_system_with_result_of_previous_iteration( self, solution ):
### Iterate only over the parts of the matrix that will change,
### such as the lagrange multipliers across G1 or A edges and the right-hand-side.
solution = asarray(solution)
num = len(self.bundles)
assert solution.shape == (num, 4, 2)
for i in range(num):
dir1 = dir_allow_zero( solution[i][1]-solution[i][0] )
dir2 = dir_allow_zero( solution[i][2]-solution[i][3] )
self.bundles[i].directions[0] = dir1
self.bundles[i].directions[1] = dir2
mag1 = mag( solution[i][1]-solution[i][0] )
mag2 = mag( solution[i][2]-solution[i][3] )
self.bundles[i].magnitudes[0] = mag1
self.bundles[i].magnitudes[1] = mag2
## The lagrange multipliers changed, but not the locations of the zeros.
self._update_bundles( lagrange_only = True )
self.system_factored = None
## UPDATE: Actually, if fixed angles are parallel or perpendicular,
## then the lagrange multiplier systems may gain
## or lose zeros. So, reset the symbolic factorization.
## UPDATE 2: If we could update_bundles once with all fixed angles
## not parallel or perpendicular, and then compute the symbolic
## factorization, we could keep it.
## UPDATE 3: Let's try it assuming that the first time through there are no zeros.
## UPDATE 4: I tried it and it makes no difference to performance at all
## up to alec's alligator. So, we'll reset the symbolic factorization
## in case the initial configuration has zeros.
self.system_symbolic_factored = None
def solve( self ):
dim = 2
num = len(self.bundles)
#print 'rhs:'
#print self.rhs.tolist()
if self.system_symbolic_factored is None:
#print 'odd symbolic factoring'
system = self.to_system_solve_t( self.system )
self.system_symbolic_factored = self.compute_symbolic_factorization( system )
self.system_factored = self.system_symbolic_factored( system )
elif self.system_factored is None:
#print 'odd numeric factoring'
system = self.to_system_solve_t( self.system )
self.system_factored = self.system_symbolic_factored( system )
#print 'odd solve'
x = self.system_factored( self.rhs )
# x = linalg.solve( self.system, self.rhs )
# x = scipy.sparse.linalg.spsolve( self.system, self.rhs )
### Return a nicely formatted chain of bezier curves.
x = array( x[:self.total_dofs] ).reshape(-1,4).T
solution = []
for i in range(num):
P = x[:, i*dim:(i+1)*dim ]
solution.append( P )
if parameters.kClampOn == True: solution = clamp_solution( self.bundles, solution )
return solution
def lagrange_equations_for_fixed_opening( self, bundle, is_head ):
## handle the case of open end path.
dofs = self.compute_dofs_per_curve(bundle)
dim = 2
R = zeros( ( sum(dofs), dim ) )
rhs = zeros(R.shape[1])
if is_head:
# assert bundle.constraints[0][1] == True
fixed_positions = bundle.control_points[0][:2]
fixed_positions = asarray(fixed_positions)
'''
Boundary Conditions are as follows:
lambda1 * ( P1x' - constraint_X' ) = 0
lambda2 * ( P1y' - constraint_Y' ) = 0
'''
for i in range( dim ):
R[i*4, i] = 1
rhs = fixed_positions
else:
# assert bundle.constraints[-1][1] == True
fixed_positions = bundle.control_points[-1][:2]
fixed_positions = asarray(fixed_positions)
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - constraint_X' ) = 0
lambda2 * ( P4y' - constraint_Y' ) = 0
'''
for i in range( dim ):
R[i*4+3, i] = 1
rhs = fixed_positions
return R.T, rhs
def lagrange_equations_for_curve_constraints( self, bundle0, bundle1, angle ):
mag0, mag1 = bundle0.magnitudes[1], bundle1.magnitudes[0]
cos_theta = angle[0]
sin_theta = angle[1]
dim = 2
dofs0 = self.compute_dofs_per_curve(bundle0)
dofs1 = self.compute_dofs_per_curve(bundle1)
dofs = sum(dofs0) + sum(dofs1)
smoothness = bundle0.constraints[1][0]
if smoothness == 'C0': ## C0
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - Q1x' ) = 0
lambda2 * ( P4y' - Q1y' ) = 0
'''
R = zeros( ( dofs, dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0) + i*4, i] = -1
elif smoothness == 'A': ## fixed angle
'''
Boundary Conditions are as follows:
lambda1 * ( P4x - Q1x ) = 0
lambda2 * ( P4y - Q1y ) = 0
lambda3 * ( mag1(P4x-P3x) + mag0[cos_theta(Q2x-Q1x)-sin_theta(Q2y-Q1y)] ) = 0
lambda4 * ( mag1(P4y-P3y) + mag0[sin_theta(Q2x-Q1x)+cos_theta(Q2y-Q1y)] ) = 0
'''
R = zeros( ( dofs, 2*dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0)+i*4, i] = -1
R[i*4+3, i+dim] = 1
R[i*4+2, i+dim] = -1
R[sum(dofs0):sum(dofs0)+dim, dim:] = asarray([[-cos_theta, sin_theta], [cos_theta, -sin_theta]])
R[-dim*2:-dim, dim:] = asarray([[-sin_theta, -cos_theta], [sin_theta, cos_theta]])
## add weights to lambda
R[ :sum(dofs0), dim: ] *= mag1
R[ sum(dofs0):, dim: ] *= mag0
elif smoothness == 'C1': ## C1
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - Q1x' ) = 0
lambda2 * ( P4y' - Q1y' ) = 0
lambda3 * ( w_q(P4x' - P3x') + w_p(Q1x' - Q2x')) = 0
lambda4 * ( w_q(P4y' - P3y') + w_p(Q1y' - Q2y')) = 0
'''
R = zeros( ( dofs, 2*dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0)+i*4, i] = -1
R[i*4+3, i+dim] = 1
R[i*4+2, i+dim] = -1
R[sum(dofs0)+i*4+1, i+dim] = -1
R[sum(dofs0)+i*4, i+dim] = 1
## add weights to lambda
R[ :sum(dofs0), dim: ] *= mag1
R[ sum(dofs0):, dim: ] *= mag0
elif smoothness == 'G1': ## G1
R = zeros( ( dofs, 2*dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0)+i*4, i] = -1
R[i*4+3, i+dim] = 1
R[i*4+2, i+dim] = -1
R[sum(dofs0)+i*4+1, i+dim] = -1
R[sum(dofs0)+i*4, i+dim] = 1
## add weights to lambda
R[ :sum(dofs0), dim: ] *= mag1
R[ sum(dofs0):, dim: ] *= mag0
else:
R = zeros( ( dofs, 0 ) )
rhs = zeros(R.shape[1])
fixed_positions = bundle0.control_points[-1][:2]
is_fixed = bundle0.constraints[1][1]
assert type( is_fixed ) == bool
if is_fixed:
fixed_positions = asarray(fixed_positions)
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - constraint_X' ) = 0
lambda2 * ( P4y' - constraint_Y' ) = 0
'''
R2 = zeros( ( dofs, dim ) )
for i in range( dim ):
R2[i*4+3, i] = 1
R = concatenate((R, R2), axis=1)
rhs = concatenate((rhs, fixed_positions))
return R.T, rhs
def system_for_curve( self, bundle ):
'''
## A is computed using Sage, integral of (tbar.T * tbar) with respect to t.
# A = asarray( [[ 1./7, 1./6, 1./5, 1./4], [ 1./6, 1./5, 1./4, 1./3],
# [ 1./5, 1./4, 1./3, 1./2], [1./4, 1./3, 1./2, 1.]] )
## MAM is computed using Sage. MAM = M * A * M
'''
length = bundle.length
MAM = asarray( self.MAM )
dim = 2
Left = zeros((8, 8))
for i in range(dim):
Left[ i*4:(i+1)*4, i*4:(i+1)*4 ] = MAM[:,:]
return Left*length
def system_for_curve_with_arc_length( self, bundle ):
'''
        ## Solve the same integral as system_for_curve, only with dt replaced by ds
'''
length = bundle.length
ts = bundle.ts
dts = bundle.dts
dim = 2
Left = zeros( ( 8, 8 ) )
tbar = ones( ( 4, 1 ) )
MAM = zeros( ( 4, 4 ) )
for i in range(len(dts)):
t = (ts[i] + ts[i+1])/2
ds = dts[i]
tbar[0] = t*t*t
tbar[1] = t*t
tbar[2] = t
Mtbar = dot( M.T, tbar )
MAM += dot( Mtbar, Mtbar.T )*ds
for i in range( dim ):
Left[ i*4:( i+1 )*4, i*4:( i+1 )*4 ] = MAM[:,:]
return Left*length
def compute_dofs_per_curve( self, bundle ):
dofs = zeros( 2, dtype = int )
'''
assume open end points can only emerge at the endpoints
'''
for i, (smoothness, is_fixed) in enumerate(bundle.constraints):
if smoothness == 'C0': dofs[i] += 4 ## C0
elif smoothness == 'A': dofs[i] += 4 ## fixed angle
elif smoothness == 'C1': dofs[i] += 4 ## C1
elif smoothness == 'G1': dofs[i] += 4 ## G1
elif smoothness == 'None': dofs[i] += 4 ## Free of constraint
return dofs
def constraint_number_per_joint(self, constraint ):
assert len(constraint) == 2
smoothness = constraint[0]
is_fixed = constraint[1]
num = 0
if smoothness == 'C0': num = 2 ## C0
elif smoothness == 'A': num = 4 ## fixed angle
elif smoothness == 'C1': num = 4 ## C1
elif smoothness == 'G1': num = 4 ## G1
assert type( is_fixed ) == bool
if is_fixed:
num += 2
return num
def rhs_for_curve( self, bundle, transforms ):
'''
The rhs is computed according to the formula:
rhs = sum(Ti * P.T * M.T * W_i * M)
'''
length = bundle.length
# W_matrices = bundle.W_matrices
# controls = bundle.control_points
#
# Right = zeros( (3, 4) )
# for i in range( len( transforms ) ):
#
# T_i = mat( asarray(transforms[i]).reshape(3,3) )
# W_i = W_matrices[i,0]
#
# Right = Right + T_i * (controls.T) * M * mat( W_i ) * M
#
# Right = asarray(Right).reshape(-1)
# Right = Right[:8]
W_matrices = bundle.W_matrices
controls = bundle.control_points
Right = zeros( 8 )
temp = zeros( (3, 4) )
for i in range( len( transforms ) ):
T_i = mat( asarray(transforms[i]).reshape(3, 3) )
W_i = asarray(W_matrices[i])
temp = temp + dot(asarray(T_i*(controls.T)*M), W_i)
R = temp[:2,:]
Right[:] = concatenate((R[0, :], R[1, :]))
return Right*length
| songrun/VectorSkinning | src/bezier_constraint_odd_solver.py | Python | apache-2.0 | 9,730 | 0.063412 |
from typing import TypedDict
class Point(TypedDict):
x: int
y: int
def is_even(x: Point) -> bool:
pass
is_even({'<caret>'})
| jwren/intellij-community | python/testData/completion/dictLiteralCompletion/EmptyLiteralsInCallExpressionsWithQuotes/main.py | Python | apache-2.0 | 142 | 0 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Accuvant, Inc. (bspengler@accuvant.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Authenticode(Signature):
name = "static_authenticode"
description = "提供一个Authenticode数字签名"
severity = 1
weight = -1
confidence = 30
categories = ["static"]
authors = ["Accuvant"]
minimum = "1.2"
def run(self):
found_sig = False
if "static" in self.results:
if "digital_signers" in self.results["static"] and self.results["static"]["digital_signers"]:
for sign in self.results["static"]["digital_signers"]:
self.data.append(sign)
found_sig = True
return found_sig
| lixiangning888/whole_project | modules/signatures_merge_tmp/static_authenticode.py | Python | lgpl-3.0 | 876 | 0.002326 |
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='historic_cadastre',
version='0.1',
description='SITN, a sitn project',
author='sitn',
author_email='sitn@ne.ch',
url='http://www.ne.ch/sitn',
install_requires=[
'pyramid',
'SQLAlchemy',
'transaction',
'pyramid_tm',
'pyramid_debugtoolbar',
'pyramid-mako',
'zope.sqlalchemy',
'waitress',
'sqlahelper',
'JSTools',
'httplib2',
'simplejson'
],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
zip_safe=False,
entry_points={
'paste.app_factory': [
'main = historic_cadastre:main',
],
'console_scripts': [
'print_tpl = historic_cadastre.scripts.print_tpl:main',
],
},
) | kalbermattenm/historic_cadastre | setup.py | Python | gpl-2.0 | 1,011 | 0.000989 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
import nova.network
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
try:
os_network_opts = [
cfg.BoolOpt("enable_network_quota",
default=False,
help="Enables or disables quotaing of tenant networks"),
cfg.StrOpt('use_neutron_default_nets',
default="False",
deprecated_name='use_quantum_default_nets',
help=('Control for checking for default networks')),
cfg.StrOpt('neutron_default_tenant_id',
default="default",
deprecated_name='quantum_default_tenant_id',
help=('Default tenant id when creating neutron '
'networks'))
]
CONF.register_opts(os_network_opts)
except cfg.DuplicateOptError:
# NOTE(jkoelker) These options are verbatim elsewhere this is here
# to make sure they are registered for our use.
pass
if CONF.enable_network_quota:
opts = [
cfg.IntOpt('quota_networks',
default=3,
help='number of private networks allowed per project'),
]
CONF.register_opts(opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
def network_dict(network):
return {"id": network.get("uuid") or network.get("id"),
"cidr": network.get("cidr"),
"label": network.get("label")}
class NetworkController(object):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception("Failed to get default networks")
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = self.network_api.get_all(context)
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
return {'network': network_dict(network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_("Failed to update usages deallocating "
"network."))
LOG.info(_("Deleting network with id %s") % id)
try:
self.network_api.delete(context, id)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
response = exc.HTTPAccepted()
except exception.NetworkNotFound:
response = exc.HTTPNotFound(_("Network not found"))
return response
def create(self, req, body):
if not body:
raise exc.HTTPUnprocessableEntity()
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = dict((k, network.get(k)) for k in keys)
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class Os_tenant_networks(extensions.ExtensionDescriptor):
"""Tenant-based Network Management Extension."""
name = "OSTenantNetworks"
alias = "os-tenant-networks"
namespace = ("http://docs.openstack.org/compute/"
"ext/os-tenant-networks/api/v2")
updated = "2012-03-07T09:46:43-05:00"
def get_resources(self):
ext = extensions.ResourceExtension('os-tenant-networks',
NetworkController())
return [ext]
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
| plumgrid/plumgrid-nova | nova/api/openstack/compute/contrib/os_tenant_networks.py | Python | apache-2.0 | 7,948 | 0.000881 |
"""Axis switch platform tests."""
from unittest.mock import Mock, call as mock_call
from homeassistant.components import axis
import homeassistant.components.switch as switch
from homeassistant.setup import async_setup_component
from .test_device import NAME, setup_axis_integration
EVENTS = [
{
"operation": "Initialized",
"topic": "tns1:Device/Trigger/Relay",
"source": "RelayToken",
"source_idx": "0",
"type": "LogicalState",
"value": "inactive",
},
{
"operation": "Initialized",
"topic": "tns1:Device/Trigger/Relay",
"source": "RelayToken",
"source_idx": "1",
"type": "LogicalState",
"value": "active",
},
]
async def test_platform_manually_configured(hass):
"""Test that nothing happens when platform is manually configured."""
assert await async_setup_component(
hass, switch.DOMAIN, {"switch": {"platform": axis.DOMAIN}}
)
assert axis.DOMAIN not in hass.data
async def test_no_switches(hass):
"""Test that no output events in Axis results in no switch entities."""
await setup_axis_integration(hass)
assert not hass.states.async_entity_ids("switch")
async def test_switches(hass):
"""Test that switches are loaded properly."""
device = await setup_axis_integration(hass)
device.api.vapix.ports = {"0": Mock(), "1": Mock()}
device.api.vapix.ports["0"].name = "Doorbell"
device.api.vapix.ports["1"].name = ""
for event in EVENTS:
device.api.stream.event.manage_event(event)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("switch")) == 2
relay_0 = hass.states.get(f"switch.{NAME}_doorbell")
assert relay_0.state == "off"
assert relay_0.name == f"{NAME} Doorbell"
relay_1 = hass.states.get(f"switch.{NAME}_relay_1")
assert relay_1.state == "on"
assert relay_1.name == f"{NAME} Relay 1"
device.api.vapix.ports["0"].action = Mock()
await hass.services.async_call(
"switch", "turn_on", {"entity_id": f"switch.{NAME}_doorbell"}, blocking=True
)
await hass.services.async_call(
"switch", "turn_off", {"entity_id": f"switch.{NAME}_doorbell"}, blocking=True
)
assert device.api.vapix.ports["0"].action.call_args_list == [
mock_call("/"),
mock_call("\\"),
]
| postlund/home-assistant | tests/components/axis/test_switch.py | Python | apache-2.0 | 2,374 | 0.000842 |
# -*- coding: utf-8 -*-
import pytest
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import (EPSG_WEB_MERCATOR,
EPSG3857_EXTENTS)
from gdal2mbtiles.gdal import SpatialReference
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4('+init=epsg:3857')
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
| ecometrica/gdal2mbtiles | tests/test_spatial_reference.py | Python | apache-2.0 | 1,178 | 0 |
# coding=utf-8
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import hashlib
import json
import logging as std_logging
import os
import urllib
from eventlet import greenthread
from time import strftime
from time import time
from requests import HTTPError
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip.cluster_manager import \
ClusterManager
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as f5const
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdTagProcessor
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_builder import \
LBaaSBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_driver import \
LBaaSBaseDriver
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.network_service import \
NetworkServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.tenants import \
BigipTenantManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import serialized
from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \
VirtualAddress
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
__VERSION__ = '0.1.1'
# configuration objects specific to iControl driver
# XXX see /etc/neutron/services/f5/f5-openstack-agent.ini
OPTS = [ # XXX maybe we should make this a dictionary
cfg.StrOpt(
'bigiq_hostname',
help='The hostname (name or IP address) to use for the BIG-IQ host'
),
cfg.StrOpt(
'bigiq_admin_username',
default='admin',
help='The admin username to use for BIG-IQ authentication',
),
cfg.StrOpt(
'bigiq_admin_password',
default='[Provide password in config file]',
secret=True,
help='The admin password to use for BIG-IQ authentication'
),
cfg.StrOpt(
'openstack_keystone_uri',
default='http://192.0.2.248:5000/',
        help='The Keystone authentication URI to use for OpenStack services'
),
cfg.StrOpt(
'openstack_admin_username',
default='admin',
help='The admin username to use for authentication '
'with the Keystone service'
),
cfg.StrOpt(
'openstack_admin_password',
default='[Provide password in config file]',
secret=True,
help='The admin password to use for authentication'
' with the Keystone service'
),
cfg.StrOpt(
'bigip_management_username',
default='admin',
help='The admin username that the BIG-IQ will use to manage '
'discovered BIG-IPs'
),
cfg.StrOpt(
'bigip_management_password',
default='[Provide password in config file]',
secret=True,
help='The admin password that the BIG-IQ will use to manage '
'discovered BIG-IPs'
),
cfg.StrOpt(
'f5_device_type', default='external',
help='What type of device onboarding'
),
cfg.StrOpt(
'f5_ha_type', default='pair',
help='Are we standalone, pair(active/standby), or scalen'
),
cfg.ListOpt(
'f5_external_physical_mappings', default=['default:1.1:True'],
help='Mapping between Neutron physical_network to interfaces'
),
cfg.StrOpt(
'f5_vtep_folder', default='Common',
help='Folder for the VTEP SelfIP'
),
cfg.StrOpt(
'f5_vtep_selfip_name', default=None,
help='Name of the VTEP SelfIP'
),
cfg.ListOpt(
'advertised_tunnel_types', default=['vxlan'],
help='tunnel types which are advertised to other VTEPs'
),
cfg.BoolOpt(
'f5_populate_static_arp', default=False,
help='create static arp entries based on service entries'
),
cfg.StrOpt(
'vlan_binding_driver',
default=None,
help='driver class for binding vlans to device ports'
),
cfg.StrOpt(
'interface_port_static_mappings',
default=None,
help='JSON encoded static mapping of'
'devices to list of '
'interface and port_id'
),
cfg.StrOpt(
'l3_binding_driver',
default=None,
help='driver class for binding l3 address to l2 ports'
),
cfg.StrOpt(
'l3_binding_static_mappings', default=None,
help='JSON encoded static mapping of'
'subnet_id to list of '
'port_id, device_id list.'
),
cfg.BoolOpt(
'f5_route_domain_strictness', default=False,
help='Strict route domain isolation'
),
cfg.BoolOpt(
'f5_common_networks', default=False,
help='All networks defined under Common partition'
),
cfg.BoolOpt(
'f5_common_external_networks', default=True,
help='Treat external networks as common'
),
cfg.BoolOpt(
'external_gateway_mode', default=False,
help='All subnets have an external l3 route on gateway'
),
cfg.StrOpt(
'icontrol_vcmp_hostname',
help='The hostname (name or IP address) to use for vCMP Host '
'iControl access'
),
cfg.StrOpt(
'icontrol_hostname',
default="10.190.5.7",
help='The hostname (name or IP address) to use for iControl access'
),
cfg.StrOpt(
'icontrol_username', default='admin',
help='The username to use for iControl access'
),
cfg.StrOpt(
'icontrol_password', default='admin', secret=True,
help='The password to use for iControl access'
),
cfg.IntOpt(
'icontrol_connection_timeout', default=30,
help='How many seconds to timeout a connection to BIG-IP'
),
cfg.IntOpt(
'icontrol_connection_retry_interval', default=10,
help='How many seconds to wait between retry connection attempts'
),
cfg.DictOpt(
'common_network_ids', default={},
help='network uuid to existing Common networks mapping'
),
cfg.StrOpt(
'icontrol_config_mode', default='objects',
help='Whether to use iapp or objects for bigip configuration'
),
cfg.IntOpt(
'max_namespaces_per_tenant', default=1,
help='How many routing tables the BIG-IP will allocate per tenant'
' in order to accommodate overlapping IP subnets'
),
cfg.StrOpt(
'cert_manager',
default=None,
        help='Class name of the certificate manager used for retrieving '
'certificates and keys.'
),
cfg.StrOpt(
'auth_version',
default=None,
help='Keystone authentication version (v2 or v3) for Barbican client.'
),
cfg.StrOpt(
'os_project_id',
default='service',
help='OpenStack project ID.'
),
cfg.StrOpt(
'os_auth_url',
default=None,
help='OpenStack authentication URL.'
),
cfg.StrOpt(
'os_username',
default=None,
help='OpenStack user name for Keystone authentication.'
),
cfg.StrOpt(
'os_user_domain_name',
default=None,
help='OpenStack user domain name for Keystone authentication.'
),
cfg.StrOpt(
'os_project_name',
default=None,
help='OpenStack project name for Keystone authentication.'
),
cfg.StrOpt(
'os_project_domain_name',
default=None,
help='OpenStack domain name for Keystone authentication.'
),
cfg.StrOpt(
'os_password',
default=None,
help='OpenStack user password for Keystone authentication.'
),
cfg.StrOpt(
'f5_network_segment_physical_network', default=None,
help='Name of physical network to use for discovery of segment ID'
),
cfg.StrOpt(
'unlegacy_setting_placeholder', default=None,
help='use this setting to separate legacy with hw/etc on agent side'
),
cfg.IntOpt(
'f5_network_segment_polling_interval', default=10,
help='Seconds between periodic scans for disconnected virtual servers'
),
cfg.IntOpt(
'f5_network_segment_gross_timeout', default=300,
help='Seconds to wait for a virtual server to become connected'
),
cfg.StrOpt(
'f5_parent_ssl_profile',
default='clientssl',
help='Parent profile used when creating client SSL profiles '
'for listeners with TERMINATED_HTTPS protocols.'
),
cfg.StrOpt(
'os_tenant_name',
default=None,
help='OpenStack tenant name for Keystone authentication (v2 only).'
),
cfg.BoolOpt(
'trace_service_requests',
default=False,
help='Log service object.'
),
cfg.BoolOpt(
'report_esd_names_in_agent',
default=False,
help='whether or not to add valid esd names during report.'
)
]
def is_operational(method):
# Decorator to check we are operational before provisioning.
def wrapper(*args, **kwargs):
instance = args[0]
if instance.operational:
try:
return method(*args, **kwargs)
except IOError as ioe:
LOG.error('IO Error detected: %s' % method.__name__)
LOG.error(str(ioe))
raise ioe
else:
LOG.error('Cannot execute %s. Not operational. Re-initializing.'
% method.__name__)
instance._init_bigips()
return wrapper
class iControlDriver(LBaaSBaseDriver):
"""Control service deployment."""
    # pzhang (NOTE): only sync and CRUD objects in the statuses below
positive_plugin_const_state = \
tuple([f5const.F5_PENDING_CREATE,
f5const.F5_PENDING_UPDATE])
def __init__(self, conf, registerOpts=True):
# The registerOpts parameter allows a test to
# turn off config option handling so that it can
# set the options manually instead.
super(iControlDriver, self).__init__(conf)
self.conf = conf
if registerOpts:
self.conf.register_opts(OPTS)
self.initialized = False
self.hostnames = None
self.device_type = conf.f5_device_type
self.plugin_rpc = None # overrides base, same value
self.agent_report_state = None # overrides base, same value
self.operational = False # overrides base, same value
self.driver_name = 'f5-lbaasv2-icontrol'
#
# BIG-IP containers
#
        # BIG-IPs which are currently active
self.__bigips = {}
self.__last_connect_attempt = None
# HA and traffic group validation
self.ha_validated = False
self.tg_initialized = False
# traffic groups discovered from BIG-IPs for service placement
self.__traffic_groups = []
# base configurations to report to Neutron agent state reports
self.agent_configurations = {} # overrides base, same value
self.agent_configurations['device_drivers'] = [self.driver_name]
self.agent_configurations['icontrol_endpoints'] = {}
# to store the verified esd names
self.esd_names = []
# service component managers
self.tenant_manager = None
self.cluster_manager = None
self.system_helper = None
self.lbaas_builder = None
self.service_adapter = None
self.vlan_binding = None
self.l3_binding = None
self.cert_manager = None # overrides register_OPTS
# server helpers
self.stat_helper = stat_helper.StatHelper()
self.network_helper = network_helper.NetworkHelper()
# f5-sdk helpers
self.vs_manager = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual)
self.pool_manager = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool)
try:
            # debug logging of service requests received by the driver
if self.conf.trace_service_requests:
path = '/var/log/neutron/service/'
if not os.path.exists(path):
os.makedirs(path)
self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
with open(self.file_name, 'w') as fp:
fp.write('[{}] ')
# driver mode settings - GRM vs L2 adjacent
if self.conf.f5_global_routed_mode:
LOG.info('WARNING - f5_global_routed_mode enabled.'
' There will be no L2 or L3 orchestration'
' or tenant isolation provisioned. All vips'
' and pool members must be routable through'
' pre-provisioned SelfIPs.')
self.conf.use_namespaces = False
self.conf.f5_snat_mode = True
self.conf.f5_snat_addresses_per_subnet = 0
self.agent_configurations['tunnel_types'] = []
self.agent_configurations['bridge_mappings'] = {}
else:
self.agent_configurations['tunnel_types'] = \
self.conf.advertised_tunnel_types
for net_id in self.conf.common_network_ids:
LOG.debug('network %s will be mapped to /Common/%s'
% (net_id, self.conf.common_network_ids[net_id]))
self.agent_configurations['common_networks'] = \
self.conf.common_network_ids
LOG.debug('Setting static ARP population to %s'
% self.conf.f5_populate_static_arp)
self.agent_configurations['f5_common_external_networks'] = \
self.conf.f5_common_external_networks
f5const.FDB_POPULATE_STATIC_ARP = \
self.conf.f5_populate_static_arp
# parse the icontrol_hostname setting
self._init_bigip_hostnames()
# instantiate the managers
self._init_bigip_managers()
self.initialized = True
LOG.debug('iControlDriver loaded successfully')
except Exception as exc:
LOG.error("exception in intializing driver %s" % str(exc))
self._set_agent_status(False)
def connect(self):
        # initialize communications with BIG-IP via iControl
try:
self._init_bigips()
except Exception as exc:
LOG.error("exception in intializing communications to BIG-IPs %s"
% str(exc))
self._set_agent_status(False)
def get_valid_esd_names(self):
LOG.debug("verified esd names in get_valid_esd_names():")
LOG.debug(self.esd_names)
return self.esd_names
def _init_bigip_managers(self):
if self.conf.vlan_binding_driver:
try:
self.vlan_binding = importutils.import_object(
self.conf.vlan_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import VLAN binding driver: %s'
% self.conf.vlan_binding_driver)
if self.conf.l3_binding_driver:
try:
self.l3_binding = importutils.import_object(
self.conf.l3_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import L3 binding driver: %s'
% self.conf.l3_binding_driver)
else:
LOG.debug('No L3 binding driver configured.'
' No L3 binding will be done.')
if self.conf.cert_manager:
try:
self.cert_manager = importutils.import_object(
self.conf.cert_manager, self.conf)
except ImportError as import_err:
LOG.error('Failed to import CertManager: %s.' %
import_err.message)
raise
except Exception as err:
LOG.error('Failed to initialize CertManager. %s' % err.message)
# re-raise as ImportError to cause agent exit
raise ImportError(err.message)
self.service_adapter = ServiceModelAdapter(self.conf)
self.tenant_manager = BigipTenantManager(self.conf, self)
self.cluster_manager = ClusterManager()
self.system_helper = SystemHelper()
self.lbaas_builder = LBaaSBuilder(self.conf, self)
if self.conf.f5_global_routed_mode:
self.network_builder = None
else:
self.network_builder = NetworkServiceBuilder(
self.conf.f5_global_routed_mode,
self.conf,
self,
self.l3_binding)
def _init_bigip_hostnames(self):
# Validate and parse bigip credentials
if not self.conf.icontrol_hostname:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_hostname',
opt_value='valid hostname or IP address'
)
if not self.conf.icontrol_username:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_username',
opt_value='valid username'
)
if not self.conf.icontrol_password:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_password',
opt_value='valid password'
)
self.hostnames = self.conf.icontrol_hostname.split(',')
self.hostnames = [item.strip() for item in self.hostnames]
self.hostnames = sorted(self.hostnames)
# initialize per host agent_configurations
for hostname in self.hostnames:
self.__bigips[hostname] = bigip = type('', (), {})()
bigip.hostname = hostname
bigip.status = 'creating'
bigip.status_message = 'creating BIG-IP from iControl hostnames'
bigip.device_interfaces = dict()
self.agent_configurations[
'icontrol_endpoints'][hostname] = {}
self.agent_configurations[
'icontrol_endpoints'][hostname]['failover_state'] = \
'undiscovered'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status'] = 'unknown'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status_message'] = ''
def _init_bigips(self):
# Connect to all BIG-IPs
if self.operational:
LOG.debug('iControl driver reports connection is operational')
return
LOG.debug('initializing communications to BIG-IPs')
try:
# setup logging options
if not self.conf.debug:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.ERROR)
requests_log.propagate = False
else:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.DEBUG)
requests_log.propagate = True
self.__last_connect_attempt = datetime.datetime.now()
for hostname in self.hostnames:
# connect to each BIG-IP and set it status
bigip = self._open_bigip(hostname)
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('learned traffic groups from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = 'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s' % hostname)
bigip.status = 'active'
bigip.status_message = 'BIG-IP ready for provisioning'
self._post_init()
else:
LOG.debug('setting status to error for %s' % hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.error('error opening BIG-IP %s - %s:%s'
% (hostname, bigip.status, bigip.status_message))
self._set_agent_status(False)
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
self._set_agent_status(force_resync=True)
def _init_errored_bigips(self):
try:
errored_bigips = self.get_errored_bigips_hostnames()
if errored_bigips:
LOG.debug('attempting to recover %s BIG-IPs' %
len(errored_bigips))
for hostname in errored_bigips:
# try to connect and set status
bigip = self._open_bigip(hostname)
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
LOG.debug('proceeding to initialize %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
                            LOG.debug('known traffic groups initialized'
                                      ' from %s as %s' %
                                      (hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = \
'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s'
% hostname)
bigip.status = 'active'
bigip.status_message = \
'BIG-IP ready for provisioning'
self._post_init()
self._set_agent_status(True)
else:
LOG.debug('setting status to error for %s'
% hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.debug('there are no BIG-IPs with error status')
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
def _open_bigip(self, hostname):
# Open bigip connection
try:
bigip = self.__bigips[hostname]
if bigip.status not in ['creating', 'error']:
LOG.debug('BIG-IP %s status invalid %s to open a connection'
% (hostname, bigip.status))
return bigip
bigip.status = 'connecting'
bigip.status_message = 'requesting iControl endpoint'
LOG.info('opening iControl connection to %s @ %s' %
(self.conf.icontrol_username, hostname))
bigip = ManagementRoot(hostname,
self.conf.icontrol_username,
self.conf.icontrol_password,
timeout=f5const.DEVICE_CONNECTION_TIMEOUT,
debug=self.conf.debug)
bigip.status = 'connected'
bigip.status_message = 'connected to BIG-IP'
self.__bigips[hostname] = bigip
return bigip
except Exception as exc:
LOG.error('could not communicate with ' +
'iControl device: %s' % hostname)
# since no bigip object was created, create a dummy object
# so we can store the status and status_message attributes
errbigip = type('', (), {})()
errbigip.hostname = hostname
errbigip.status = 'error'
errbigip.status_message = str(exc)[:80]
self.__bigips[hostname] = errbigip
return errbigip
def _init_bigip(self, bigip, hostname, check_group_name=None):
# Prepare a bigip for usage
try:
major_version, minor_version = self._validate_bigip_version(
bigip, hostname)
device_group_name = None
extramb = self.system_helper.get_provision_extramb(bigip)
if int(extramb) < f5const.MIN_EXTRA_MB:
raise f5ex.ProvisioningExtraMBValidateFailed(
'Device %s BIG-IP not provisioned for '
'management LARGE.' % hostname)
if self.conf.f5_ha_type == 'pair' and \
self.cluster_manager.get_sync_status(bigip) == \
'Standalone':
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and bigip %s in standalone mode'
% hostname)
if self.conf.f5_ha_type == 'scalen' and \
self.cluster_manager.get_sync_status(bigip) == \
'Standalone':
raise f5ex.BigIPClusterInvalidHA(
'HA mode is scalen and bigip %s in standalone mode'
% hostname)
if self.conf.f5_ha_type != 'standalone':
device_group_name = \
self.cluster_manager.get_device_group(bigip)
if not device_group_name:
raise f5ex.BigIPClusterInvalidHA(
'HA mode is %s and no sync failover '
'device group found for device %s.'
% (self.conf.f5_ha_type, hostname))
if check_group_name and device_group_name != check_group_name:
raise f5ex.BigIPClusterInvalidHA(
'Invalid HA. Device %s is in device group'
' %s but should be in %s.'
% (hostname, device_group_name, check_group_name))
bigip.device_group_name = device_group_name
if self.network_builder:
for network in self.conf.common_network_ids.values():
if not self.network_builder.vlan_exists(bigip,
network,
folder='Common'):
raise f5ex.MissingNetwork(
'Common network %s on %s does not exist'
% (network, bigip.hostname))
bigip.device_name = self.cluster_manager.get_device_name(bigip)
bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip)
LOG.debug("Initialized BIG-IP %s with MAC addresses %s" %
(bigip.device_name, ', '.join(bigip.mac_addresses)))
bigip.device_interfaces = \
self.system_helper.get_interface_macaddresses_dict(bigip)
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
if self.conf.f5_ha_type != 'standalone':
self.cluster_manager.disable_auto_sync(
device_group_name, bigip)
# validate VTEP SelfIPs
if not self.conf.f5_global_routed_mode:
self.network_builder.initialize_tunneling(bigip)
# Turn off tunnel syncing between BIG-IP
# as our VTEPs properly use only local SelfIPs
if self.system_helper.get_tunnel_sync(bigip) == 'enable':
self.system_helper.set_tunnel_sync(bigip, enabled=False)
LOG.debug('connected to iControl %s @ %s ver %s.%s'
% (self.conf.icontrol_username, hostname,
major_version, minor_version))
except Exception as exc:
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
raise
return bigip
def _post_init(self):
# After we have a connection to the BIG-IPs, initialize vCMP
# on all connected BIG-IPs
if self.network_builder:
self.network_builder.initialize_vcmp()
self.agent_configurations['network_segment_physical_network'] = \
self.conf.f5_network_segment_physical_network
LOG.info('iControlDriver initialized to %d bigips with username:%s'
% (len(self.get_active_bigips()),
self.conf.icontrol_username))
LOG.info('iControlDriver dynamic agent configurations:%s'
% self.agent_configurations)
if self.vlan_binding:
LOG.debug(
'getting BIG-IP device interface for VLAN Binding')
self.vlan_binding.register_bigip_interfaces()
if self.l3_binding:
LOG.debug('getting BIG-IP MAC Address for L3 Binding')
self.l3_binding.register_bigip_mac_addresses()
# endpoints = self.agent_configurations['icontrol_endpoints']
# for ic_host in endpoints.keys():
for hostbigip in self.get_all_bigips():
# hostbigip = self.__bigips[ic_host]
mac_addrs = [mac_addr for interface, mac_addr in
hostbigip.device_interfaces.items()
if interface != "mgmt"]
ports = self.plugin_rpc.get_ports_for_mac_addresses(
mac_addresses=mac_addrs)
if ports:
self.agent_configurations['nova_managed'] = True
else:
self.agent_configurations['nova_managed'] = False
if self.network_builder:
self.network_builder.post_init()
# read enhanced services definitions
esd_dir = os.path.join(self.get_config_dir(), 'esd')
esd = EsdTagProcessor(esd_dir)
try:
esd.process_esd(self.get_all_bigips())
self.lbaas_builder.init_esd(esd)
self.service_adapter.init_esd(esd)
LOG.debug('esd details here after process_esd(): ')
LOG.debug(esd)
self.esd_names = esd.esd_dict.keys() or []
            LOG.debug('##### self.esd_names obtained here:')
LOG.debug(self.esd_names)
except f5ex.esdJSONFileInvalidException as err:
LOG.error("unable to initialize ESD. Error: %s.", err.message)
self._set_agent_status(False)
def _validate_ha(self, bigip):
# if there was only one address supplied and
# this is not a standalone device, get the
# devices trusted by this device.
device_group_name = None
if self.conf.f5_ha_type == 'standalone':
if len(self.hostnames) != 1:
bigip.status = 'error'
bigip.status_message = \
'HA mode is standalone and %d hosts found.'\
% len(self.hostnames)
raise f5ex.BigIPClusterInvalidHA(
'HA mode is standalone and %d hosts found.'
% len(self.hostnames))
device_group_name = 'standalone'
elif self.conf.f5_ha_type == 'pair':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) != 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device))
self.hostnames = mgmt_addrs
if len(self.hostnames) != 2:
bigip.status = 'error'
bigip.status_message = 'HA mode is pair and %d hosts found.' \
% len(self.hostnames)
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and %d hosts found.'
% len(self.hostnames))
elif self.conf.f5_ha_type == 'scalen':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) < 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device)
)
self.hostnames = mgmt_addrs
if len(self.hostnames) < 2:
bigip.status = 'error'
                bigip.status_message = \
                    'HA mode is scalen and %d hosts found.' \
                    % len(self.hostnames)
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is scalen and %d hosts found.'
                    % len(self.hostnames))
return device_group_name
def _validate_ha_operational(self, bigip):
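        # A standalone deployment is always considered operational. For
        # pair/scalen deployments, the device must not be Disconnected or
        # in Sync Failure while other BIG-IPs are active, and it must be in
        # the same sync-failover device group as the other active BIG-IPs.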
if self.conf.f5_ha_type == 'standalone':
return True
else:
# how many active BIG-IPs are there?
active_bigips = self.get_active_bigips()
if active_bigips:
sync_status = self.cluster_manager.get_sync_status(bigip)
if sync_status in ['Disconnected', 'Sync Failure']:
if len(active_bigips) > 1:
# the device should not be in the disconnected state
return False
if len(active_bigips) > 1:
# it should be in the same sync-failover group
# as the rest of the active bigips
device_group_name = \
self.cluster_manager.get_device_group(bigip)
for active_bigip in active_bigips:
adgn = self.cluster_manager.get_device_group(
active_bigip)
if not adgn == device_group_name:
return False
return True
else:
return True
def _init_agent_config(self, bigip):
# Init agent config
ic_host = {}
ic_host['version'] = self.system_helper.get_version(bigip)
ic_host['device_name'] = bigip.device_name
ic_host['platform'] = self.system_helper.get_platform(bigip)
ic_host['serial_number'] = self.system_helper.get_serial_number(bigip)
ic_host['status'] = bigip.status
ic_host['status_message'] = bigip.status_message
ic_host['failover_state'] = self.get_failover_state(bigip)
if hasattr(bigip, 'local_ip') and bigip.local_ip:
ic_host['local_ip'] = bigip.local_ip
else:
ic_host['local_ip'] = 'VTEP disabled'
self.agent_configurations['tunnel_types'] = list()
self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \
ic_host
if self.network_builder:
self.agent_configurations['bridge_mappings'] = \
self.network_builder.interface_mapping
def _set_agent_status(self, force_resync=False):
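        # Push each BIG-IP's status and status_message into the per-host
        # agent configuration, mark the driver operational if at least one
        # BIG-IP is active, and report state back to the agent.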
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status'] = bigip.status
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status_message'] = bigip.status_message
if self.conf.report_esd_names_in_agent:
LOG.debug('adding names to report:')
self.agent_configurations['esd_name'] = \
self.get_valid_esd_names()
# Policy - if any BIG-IP are active we're operational
if self.get_active_bigips():
self.operational = True
else:
self.operational = False
if self.agent_report_state:
self.agent_report_state(force_resync=force_resync)
def get_failover_state(self, bigip):
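        # Read the failover.state sys db variable from the device and cache
        # it on the bigip object; any failure marks the device as errored.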
try:
if hasattr(bigip, 'tm'):
fs = bigip.tm.sys.dbs.db.load(name='failover.state')
bigip.failover_state = fs.value
return bigip.failover_state
else:
return 'error'
except Exception as exc:
LOG.exception('Error getting %s failover state' % bigip.hostname)
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
self._set_agent_status(False)
return 'error'
def get_agent_configurations(self):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
if bigip.status == 'active':
failover_state = self.get_failover_state(bigip)
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = failover_state
else:
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = 'unknown'
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status'] = bigip.status
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status_message'] = bigip.status_message
self.agent_configurations['operational'] = \
self.operational
LOG.debug('agent configurations are: %s' % self.agent_configurations)
return dict(self.agent_configurations)
def recover_errored_devices(self):
# trigger a retry on errored BIG-IPs
try:
self._init_errored_bigips()
except Exception as exc:
LOG.error('Could not recover devices: %s' % exc.message)
def backend_integrity(self):
if self.operational:
return True
return False
def generate_capacity_score(self, capacity_policy=None):
"""Generate the capacity score of connected devices."""
if capacity_policy:
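            # For each metric named in the capacity policy, call the
            # matching get_<metric>() helper on every active BIG-IP, take
            # the highest observed value and divide it by the configured
            # maximum. The largest ratio becomes the capacity score.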
highest_metric = 0.0
highest_metric_name = None
my_methods = dir(self)
bigips = self.get_all_bigips()
for metric in capacity_policy:
func_name = 'get_' + metric
if func_name in my_methods:
max_capacity = int(capacity_policy[metric])
metric_func = getattr(self, func_name)
metric_value = 0
for bigip in bigips:
if bigip.status == 'active':
global_stats = \
self.stat_helper.get_global_statistics(bigip)
value = int(
metric_func(bigip=bigip,
global_statistics=global_stats)
)
LOG.debug('calling capacity %s on %s returned: %s'
% (func_name, bigip.hostname, value))
else:
value = 0
if value > metric_value:
metric_value = value
metric_capacity = float(metric_value) / float(max_capacity)
if metric_capacity > highest_metric:
highest_metric = metric_capacity
highest_metric_name = metric
else:
LOG.warn('capacity policy has method '
'%s which is not implemented in this driver'
% metric)
LOG.debug('capacity score: %s based on %s'
% (highest_metric, highest_metric_name))
return highest_metric
return 0
def set_context(self, context):
# Context to keep for database access
if self.network_builder:
self.network_builder.set_context(context)
def set_plugin_rpc(self, plugin_rpc):
# Provide Plugin RPC access
self.plugin_rpc = plugin_rpc
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_l2pop_rpc(l2pop_rpc)
def set_agent_report_state(self, report_state_callback):
"""Set Agent Report State."""
self.agent_report_state = report_state_callback
def service_exists(self, service):
return self._service_exists(service)
def flush_cache(self):
# Remove cached objects so they can be created if necessary
for bigip in self.get_all_bigips():
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
@serialized('get_all_deployed_loadbalancers')
@is_operational
def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False):
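        # Scan every tenant folder (identified by the service adapter
        # prefix) on each BIG-IP for virtual addresses and map each
        # loadbalancer id to its tenant and the hostnames where it was
        # found. Folders holding no virtual addresses may optionally be
        # purged as orphans.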
LOG.debug('getting all deployed loadbalancers on BIG-IPs')
deployed_lb_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address)
deployed_lbs = resource.get_resources(bigip, folder)
if deployed_lbs:
for lb in deployed_lbs:
lb_id = lb.name[len(self.service_adapter.prefix):]
if lb_id in deployed_lb_dict:
deployed_lb_dict[lb_id][
'hostnames'].append(bigip.hostname)
else:
deployed_lb_dict[lb_id] = {
'id': lb_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
else:
# delay to assure we are not in the tenant creation
# process before a virtual address is created.
greenthread.sleep(10)
deployed_lbs = resource.get_resources(bigip, folder)
if deployed_lbs:
for lb in deployed_lbs:
lb_id = lb.name[
len(self.service_adapter.prefix):]
deployed_lb_dict[lb_id] = \
{'id': lb_id, 'tenant_id': tenant_id}
else:
# Orphaned folder!
if purge_orphaned_folders:
try:
self.system_helper.purge_folder_contents(
bigip, folder)
self.system_helper.purge_folder(
bigip, folder)
LOG.error('orphaned folder %s on %s' %
(folder, bigip.hostname))
except Exception as exc:
LOG.error('error purging folder %s: %s' %
(folder, str(exc)))
return deployed_lb_dict
@serialized('get_all_deployed_listeners')
@is_operational
def get_all_deployed_listeners(self, expand_subcollections=False):
LOG.debug('getting all deployed listeners on BIG-IPs')
deployed_virtual_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual)
deployed_listeners = resource.get_resources(
bigip, folder, expand_subcollections)
if deployed_listeners:
for virtual in deployed_listeners:
virtual_id = \
virtual.name[len(self.service_adapter.prefix):]
l7_policy = ''
if hasattr(virtual, 'policiesReference') and \
'items' in virtual.policiesReference:
l7_policy = \
virtual.policiesReference['items'][0]
l7_policy = l7_policy['fullPath']
if virtual_id in deployed_virtual_dict:
deployed_virtual_dict[virtual_id][
'hostnames'].append(bigip.hostname)
else:
deployed_virtual_dict[virtual_id] = {
'id': virtual_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname],
'l7_policy': l7_policy
}
return deployed_virtual_dict
@serialized('purge_orphaned_nodes')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_nodes(self, tenant_members):
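        # Collect the LTM nodes in each tenant partition, drop the ones
        # still referenced by known pool members (address%route-domain),
        # and delete whatever nodes remain as orphans.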
node_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.node)
node_dict = dict()
for bigip in self.get_all_bigips():
for tenant_id, members in tenant_members.iteritems():
partition = self.service_adapter.prefix + tenant_id
nodes = node_helper.get_resources(bigip, partition=partition)
for n in nodes:
node_dict[n.name] = n
for member in members:
rd = self.network_builder.find_subnet_route_domain(
tenant_id, member.get('subnet_id', None))
node_name = "{}%{}".format(member['address'], rd)
node_dict.pop(node_name, None)
for node_name, node in node_dict.iteritems():
try:
node_helper.delete(bigip, name=urllib.quote(node_name),
partition=partition)
except HTTPError as error:
if error.response.status_code == 400:
LOG.error(error.response)
@serialized('get_all_deployed_pools')
@is_operational
def get_all_deployed_pools(self):
LOG.debug('getting all deployed pools on BIG-IPs')
deployed_pool_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool)
deployed_pools = resource.get_resources(bigip, folder)
if deployed_pools:
for pool in deployed_pools:
pool_id = \
pool.name[len(self.service_adapter.prefix):]
monitor_id = ''
if hasattr(pool, 'monitor'):
monitor = pool.monitor.split('/')[2].strip()
monitor_id = \
monitor[len(self.service_adapter.prefix):]
LOG.debug(
'pool {} has monitor {}'.format(
pool.name, monitor))
else:
LOG.debug(
'pool {} has no healthmonitors'.format(
pool.name))
if pool_id in deployed_pool_dict:
deployed_pool_dict[pool_id][
'hostnames'].append(bigip.hostname)
else:
deployed_pool_dict[pool_id] = {
'id': pool_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname],
'monitors': monitor_id
}
return deployed_pool_dict
@serialized('purge_orphaned_pool')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_pool(self, tenant_id=None, pool_id=None,
hostnames=list()):
node_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.node)
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
pool_name = self.service_adapter.prefix + pool_id
partition = self.service_adapter.prefix + tenant_id
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, pool_name, partition)
members = pool.members_s.get_collection()
pool.delete()
for member in members:
node_name = member.address
try:
node_helper.delete(bigip,
name=urllib.quote(node_name),
partition=partition)
except HTTPError as e:
if e.response.status_code == 404:
pass
if e.response.status_code == 400:
LOG.warn("Failed to delete node -- in use")
else:
LOG.exception("Failed to delete node")
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('pool %s not on BIG-IP %s.'
% (pool_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging pool %s' % str(exc))
@serialized('get_all_deployed_monitors')
@is_operational
def get_all_deployed_health_monitors(self):
"""Retrieve a list of all Health Monitors deployed"""
        LOG.debug('getting all deployed monitors on BIG-IPs')
monitor_types = ['http_monitor', 'https_monitor', 'tcp_monitor',
'ping_monitor']
deployed_monitor_dict = {}
adapter_prefix = self.service_adapter.prefix
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(adapter_prefix):]
if str(folder).startswith(adapter_prefix):
resources = map(
lambda x: resource_helper.BigIPResourceHelper(
getattr(resource_helper.ResourceType, x)),
monitor_types)
for resource in resources:
deployed_monitors = resource.get_resources(
bigip, folder)
if deployed_monitors:
for monitor in deployed_monitors:
monitor_id = monitor.name[len(adapter_prefix):]
if monitor_id in deployed_monitor_dict:
deployed_monitor_dict[monitor_id][
'hostnames'].append(bigip.hostname)
else:
deployed_monitor_dict[monitor_id] = {
'id': monitor_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
return deployed_monitor_dict
@serialized('purge_orphaned_health_monitor')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None,
hostnames=list()):
"""Purge all monitors that exist on the BIG-IP but not in Neutron"""
resource_types = [
resource_helper.BigIPResourceHelper(x) for x in [
resource_helper.ResourceType.http_monitor,
resource_helper.ResourceType.https_monitor,
resource_helper.ResourceType.ping_monitor,
resource_helper.ResourceType.tcp_monitor]]
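        # The monitor type is not known here, so try to load the named
        # monitor as each supported type until one succeeds, then delete it.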
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
monitor_name = self.service_adapter.prefix + monitor_id
partition = self.service_adapter.prefix + tenant_id
monitor = None
for monitor_type in resource_types:
try:
monitor = monitor_type.load(bigip, monitor_name,
partition)
break
except HTTPError as err:
if err.response.status_code == 404:
continue
monitor.delete()
except TypeError as err:
                    if 'NoneType' in str(err):
LOG.exception("Could not find monitor {}".format(
monitor_name))
except Exception as exc:
LOG.exception('Exception purging monitor %s' % str(exc))
@serialized('get_all_deployed_l7_policys')
@is_operational
def get_all_deployed_l7_policys(self):
"""Retrieve a dict of all l7policies deployed
The dict returned will have the following format:
{policy_bigip_id_0: {'id': policy_id_0,
'tenant_id': tenant_id,
'hostnames': [hostnames_0]}
...
}
Where hostnames is the list of BIG-IP hostnames impacted, and the
policy_id is the policy_bigip_id without 'wrapper_policy_'
"""
        LOG.debug('getting all deployed l7_policys on BIG-IPs')
deployed_l7_policys_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.l7policy)
deployed_l7_policys = resource.get_resources(
bigip, folder)
if deployed_l7_policys:
for l7_policy in deployed_l7_policys:
l7_policy_id = l7_policy.name
if l7_policy_id in deployed_l7_policys_dict:
my_dict = \
deployed_l7_policys_dict[l7_policy_id]
my_dict['hostnames'].append(bigip.hostname)
else:
po_id = l7_policy_id.replace(
'wrapper_policy_', '')
deployed_l7_policys_dict[l7_policy_id] = {
'id': po_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
return deployed_l7_policys_dict
@serialized('purge_orphaned_l7_policy')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None,
hostnames=list(), listener_id=None):
"""Purge all l7_policys that exist on the BIG-IP but not in Neutron"""
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
error = None
try:
l7_policy_name = l7_policy_id
partition = self.service_adapter.prefix + tenant_id
if listener_id and partition:
if self.service_adapter.prefix not in listener_id:
listener_id = \
self.service_adapter.prefix + listener_id
li_resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).load(
bigip, listener_id, partition)
li_resource.update(policies=[])
l7_policy = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.l7policy).load(
bigip, l7_policy_name, partition)
l7_policy.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('l7_policy %s not on BIG-IP %s.'
% (l7_policy_id, bigip.hostname))
else:
error = err
except Exception as exc:
                    error = exc
if error:
kwargs = dict(
tenant_id=tenant_id, l7_policy_id=l7_policy_id,
hostname=bigip.hostname, listener_id=listener_id)
                    LOG.exception('Exception: purge_orphaned_l7_policy({}) '
                                  '"{}"'.format(kwargs, error))
@serialized('purge_orphaned_loadbalancer')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_loadbalancer(self, tenant_id=None,
loadbalancer_id=None, hostnames=list()):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
va_name = self.service_adapter.prefix + loadbalancer_id
partition = self.service_adapter.prefix + tenant_id
va = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).load(
bigip, va_name, partition)
# get virtual services (listeners)
# referencing this virtual address
vses = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).get_resources(
bigip, partition)
vs_dest_compare = '/' + partition + '/' + va.name
for vs in vses:
if str(vs.destination).startswith(vs_dest_compare):
if hasattr(vs, 'pool'):
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, os.path.basename(vs.pool),
partition)
vs.delete()
pool.delete()
else:
vs.delete()
resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).delete(
bigip, va_name, partition)
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('loadbalancer %s not on BIG-IP %s.'
% (loadbalancer_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging loadbalancer %s'
% str(exc))
@serialized('purge_orphaned_listener')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_listener(
self, tenant_id=None, listener_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
listener_name = self.service_adapter.prefix + listener_id
partition = self.service_adapter.prefix + tenant_id
listener = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).load(
bigip, listener_name, partition)
listener.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('listener %s not on BIG-IP %s.'
% (listener_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging listener %s' % str(exc))
@serialized('create_loadbalancer')
@is_operational
def create_loadbalancer(self, loadbalancer, service):
"""Create virtual server."""
self._common_service_handler(service)
return self._update_target(service)
@serialized('update_loadbalancer')
@is_operational
def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
"""Update virtual server."""
        # Note: old_loadbalancer and loadbalancer args are unused here.
self._common_service_handler(service)
return self._update_target(service)
@serialized('delete_loadbalancer')
@is_operational
def delete_loadbalancer(self, loadbalancer, service):
"""Delete loadbalancer."""
LOG.debug("Deleting loadbalancer")
self._common_service_handler(
service,
delete_partition=True,
delete_event=True)
return self._update_target(service)
@serialized('create_listener')
@is_operational
@log_helpers.log_method_call
def create_listener(self, listener, service):
"""Create virtual server."""
LOG.debug("Creating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('update_listener')
@is_operational
def update_listener(self, old_listener, listener, service):
"""Update virtual server."""
LOG.debug("Updating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('delete_listener')
@is_operational
def delete_listener(self, listener, service):
"""Delete virtual server."""
LOG.debug("Deleting listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('create_pool')
@is_operational
def create_pool(self, pool, service):
"""Create lb pool."""
LOG.debug("Creating pool")
# pzhang(NOTE): pool may not bound with a listener
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('update_pool')
@is_operational
def update_pool(self, old_pool, pool, service):
"""Update lb pool."""
LOG.debug("Updating pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('delete_pool')
@is_operational
def delete_pool(self, pool, service):
"""Delete lb pool."""
LOG.debug("Deleting pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('create_l7policy')
@is_operational
def create_l7policy(self, l7policy, service):
"""Create lb l7policy."""
LOG.debug("Creating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
@serialized('update_l7policy')
@is_operational
def update_l7policy(self, old_l7policy, l7policy, service):
"""Update lb l7policy."""
LOG.debug("Updating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
@serialized('delete_l7policy')
@is_operational
def delete_l7policy(self, l7policy, service):
"""Delete lb l7policy."""
LOG.debug("Deleting l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
# TODO(pzhang): test this
@serialized('create_l7rule')
@is_operational
def create_l7rule(self, l7rule, service):
"""Create lb l7rule."""
LOG.debug("Creating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
# TODO(pzhang): test this
@serialized('update_l7rule')
@is_operational
def update_l7rule(self, old_l7rule, l7rule, service):
"""Update lb l7rule."""
LOG.debug("Updating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
# TODO(pzhang): test this
@serialized('delete_l7rule')
@is_operational
def delete_l7rule(self, l7rule, service):
"""Delete lb l7rule."""
LOG.debug("Deleting l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
@serialized('create_member')
@is_operational
def create_member(self, member, service):
"""Create pool member."""
LOG.debug("Creating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('update_member')
@is_operational
def update_member(self, old_member, member, service):
"""Update pool member."""
LOG.debug("Updating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('delete_member')
@is_operational
def delete_member(self, member, service):
"""Delete pool member."""
LOG.debug("Deleting member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('create_health_monitor')
@is_operational
def create_health_monitor(self, health_monitor, service):
"""Create pool health monitor."""
LOG.debug("Creating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
@serialized('update_health_monitor')
@is_operational
def update_health_monitor(self, old_health_monitor,
health_monitor, service):
"""Update pool health monitor."""
LOG.debug("Updating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
@serialized('delete_health_monitor')
@is_operational
def delete_health_monitor(self, health_monitor, service):
"""Delete pool health monitor."""
LOG.debug("Deleting health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
def _update_target(self, service,
update_method=None, target=None):
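        # Optionally push per-object status updates plus the loadbalancer
        # status to Neutron, then report whether the loadbalancer is still
        # in a pending create/update state.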
if self.do_service_update:
if target is not None and update_method is not None:
update_method(target)
self._update_loadbalancer_status(service, timed_out=False)
loadbalancer = service.get('loadbalancer', {})
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
lb_pending = \
(lb_provisioning_status == f5const.F5_PENDING_CREATE or
lb_provisioning_status == f5const.F5_PENDING_UPDATE)
return lb_pending
@is_operational
def get_stats(self, service):
lb_stats = {}
stats = ['clientside.bitsIn',
'clientside.bitsOut',
'clientside.curConns',
'clientside.totConns']
loadbalancer = service['loadbalancer']
try:
# sum virtual server stats for all BIG-IPs
vs_stats = self.lbaas_builder.get_listener_stats(service, stats)
# convert to bytes
lb_stats[f5const.F5_STATS_IN_BYTES] = \
vs_stats['clientside.bitsIn']/8
lb_stats[f5const.F5_STATS_OUT_BYTES] = \
vs_stats['clientside.bitsOut']/8
lb_stats[f5const.F5_STATS_ACTIVE_CONNECTIONS] = \
vs_stats['clientside.curConns']
lb_stats[f5const.F5_STATS_TOTAL_CONNECTIONS] = \
vs_stats['clientside.totConns']
# update Neutron
self.plugin_rpc.update_loadbalancer_stats(
loadbalancer['id'], lb_stats)
except Exception as e:
LOG.error("Error getting loadbalancer stats: %s", e.message)
finally:
return lb_stats
def fdb_add(self, fdb):
# Add (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.add_bigip_fdb(bigip, fdb)
def fdb_remove(self, fdb):
# Remove (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.remove_bigip_fdb(bigip, fdb)
def fdb_update(self, fdb):
# Update (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.update_bigip_fdb(bigip, fdb)
def tunnel_update(self, **kwargs):
# Tunnel Update from Neutron Core RPC
pass
def tunnel_sync(self):
# Only sync when supported types are present
if not [i for i in self.agent_configurations['tunnel_types']
if i in ['gre', 'vxlan']]:
return False
tunnel_ips = []
for bigip in self.get_all_bigips():
if bigip.local_ip:
tunnel_ips.append(bigip.local_ip)
self.network_builder.tunnel_sync(tunnel_ips)
# Tunnel sync sent.
return False
@serialized('sync')
@is_operational
    def sync(self, service):
        """Sync service definition to device."""
# loadbalancer and plugin_rpc may not be set
lb_id = service.get('loadbalancer', dict()).get('id', '')
if hasattr(self, 'plugin_rpc') and self.plugin_rpc and lb_id:
# Get the latest service. It may have changed.
service = self.plugin_rpc.get_service_by_loadbalancer_id(lb_id)
if service.get('loadbalancer', None):
self.lbaas_builder.to_sync = True
self._common_service_handler(service)
self.lbaas_builder.to_sync = False
                # pzhang(NOTE): Neutron db update moved here for the lb tree
if self.do_service_update:
self.update_service_status(service)
loadbalancer = service.get('loadbalancer', {})
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
lb_pending = \
(lb_provisioning_status == f5const.F5_PENDING_CREATE or
lb_provisioning_status == f5const.F5_PENDING_UPDATE)
return lb_pending
else:
                LOG.debug("Attempted sync of deleted loadbalancer")
@serialized('backup_configuration')
@is_operational
def backup_configuration(self):
# Save Configuration on Devices
for bigip in self.get_all_bigips():
LOG.debug('_backup_configuration: saving device %s.'
% bigip.hostname)
self.cluster_manager.save_config(bigip)
def _get_monitor_endpoint(self, bigip, service):
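        # Map the monitor type from the service definition to the matching
        # LTM monitor collection on the device, defaulting to the HTTP
        # monitor when the type is missing or unrecognized.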
monitor_type = self.service_adapter.get_monitor_type(service)
if not monitor_type:
monitor_type = ""
if monitor_type == "HTTPS":
hm = bigip.tm.ltm.monitor.https_s.https
elif monitor_type == "TCP":
hm = bigip.tm.ltm.monitor.tcps.tcp
elif monitor_type == "PING":
hm = bigip.tm.ltm.monitor.gateway_icmps.gateway_icmp
else:
hm = bigip.tm.ltm.monitor.https.http
return hm
def service_rename_required(self, service):
rename_required = False
        # Returns whether the loadbalancer's virtual address needs renaming
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
# Does the correctly named virtual address exist?
for bigip in bigips:
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
rename_required = True
break
return rename_required
def service_object_teardown(self, service):
        # Tear down any virtuals, pools and monitors named after OS objects
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
folder_name = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
        # Iterate over the configured BIG-IPs
for bigip in bigips:
# Delete all virtuals
v = bigip.tm.ltm.virtuals.virtual
for listener in service['listeners']:
l_name = listener.get("name", "")
if not l_name:
svc = {"loadbalancer": loadbalancer,
"listener": listener}
vip = self.service_adapter.get_virtual(svc)
l_name = vip['name']
if v.exists(name=l_name, partition=folder_name):
# Found a virtual that is named by the OS object,
# delete it.
l_obj = v.load(name=l_name, partition=folder_name)
LOG.warn("Deleting listener: /%s/%s" %
(folder_name, l_name))
l_obj.delete(name=l_name, partition=folder_name)
# Delete all pools
p = bigip.tm.ltm.pools.pool
for os_pool in service['pools']:
p_name = os_pool.get('name', "")
if not p_name:
svc = {"loadbalancer": loadbalancer,
"pool": os_pool}
pool = self.service_adapter.get_pool(svc)
p_name = pool['name']
if p.exists(name=p_name, partition=folder_name):
p_obj = p.load(name=p_name, partition=folder_name)
LOG.warn("Deleting pool: /%s/%s" % (folder_name, p_name))
p_obj.delete(name=p_name, partition=folder_name)
# Delete all healthmonitors
for healthmonitor in service['healthmonitors']:
svc = {'loadbalancer': loadbalancer,
'healthmonitor': healthmonitor}
monitor_ep = self._get_monitor_endpoint(bigip, svc)
m_name = healthmonitor.get('name', "")
if not m_name:
hm = self.service_adapter.get_healthmonitor(svc)
m_name = hm['name']
if monitor_ep.exists(name=m_name, partition=folder_name):
m_obj = monitor_ep.load(name=m_name, partition=folder_name)
LOG.warn("Deleting monitor: /%s/%s" % (
folder_name, m_name))
m_obj.delete()
def _service_exists(self, service):
# Returns whether the bigip has the service defined
if not service['loadbalancer']:
return False
loadbalancer = service['loadbalancer']
folder_name = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
if self.network_builder:
# append route domain to member address
self.network_builder._annotate_service_route_domains(service)
# Foreach bigip in the cluster:
for bigip in self.get_config_bigips():
# Does the tenant folder exist?
if not self.system_helper.folder_exists(bigip, folder_name):
                LOG.error("Folder %s does not exist on bigip: %s" %
(folder_name, bigip.hostname))
return False
# Get the virtual address
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
                LOG.error("Virtual address %s(%s) does not "
                          "exist on bigip: %s" % (virtual_address.name,
                                                  virtual_address.address,
                                                  bigip.hostname))
return False
# Ensure that each virtual service exists.
for listener in service['listeners']:
svc = {"loadbalancer": loadbalancer,
"listener": listener}
virtual_server = self.service_adapter.get_virtual_name(svc)
if not self.vs_manager.exists(bigip,
name=virtual_server['name'],
partition=folder_name):
LOG.error("Virtual /%s/%s not found on bigip: %s" %
(virtual_server['name'], folder_name,
bigip.hostname))
return False
# Ensure that each pool exists.
for pool in service['pools']:
svc = {"loadbalancer": loadbalancer,
"pool": pool}
bigip_pool = self.service_adapter.get_pool(svc)
if not self.pool_manager.exists(
bigip,
name=bigip_pool['name'],
partition=folder_name):
LOG.error("Pool /%s/%s not found on bigip: %s" %
(folder_name, bigip_pool['name'],
bigip.hostname))
return False
else:
deployed_pool = self.pool_manager.load(
bigip,
name=bigip_pool['name'],
partition=folder_name)
deployed_members = \
deployed_pool.members_s.get_collection()
# First check that number of members deployed
# is equal to the number in the service.
if len(deployed_members) != len(pool['members']):
                        LOG.error("Pool %s member count mismatch: "
                                  "deployed %d != service %d" %
(bigip_pool['name'], len(deployed_members),
len(pool['members'])))
return False
# Ensure each pool member exists
for member in service['members']:
if member['pool_id'] == pool['id']:
lb = self.lbaas_builder
pool = lb.get_pool_by_id(
service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
if not lb.pool_builder.member_exists(svc, bigip):
LOG.error("Pool member not found: %s" %
svc['member'])
return False
# Ensure that each health monitor exists.
for healthmonitor in service['healthmonitors']:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": healthmonitor}
monitor = self.service_adapter.get_healthmonitor(svc)
monitor_ep = self._get_monitor_endpoint(bigip, svc)
if not monitor_ep.exists(name=monitor['name'],
partition=folder_name):
LOG.error("Monitor /%s/%s not found on bigip: %s" %
(monitor['name'], folder_name, bigip.hostname))
return False
return True
def get_loadbalancers_in_tenant(self, tenant_id):
loadbalancers = self.plugin_rpc.get_all_loadbalancers()
return [lb['lb_id'] for lb in loadbalancers
if lb['tenant_id'] == tenant_id]
def _common_service_handler(self, service,
delete_partition=False,
delete_event=False):
# Assure that the service is configured on bigip(s)
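        # Flow: make sure the tenant partition exists, prepare service
        # networking (unless deferred), apply the LBaaS configuration via
        # the lbaas builder, finish post-service networking, and clean up
        # the tenant partition when the loadbalancer is being deleted.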
start_time = time()
lb_pending = True
self.do_service_update = True
if self.conf.trace_service_requests:
self.trace_service_requests(service)
loadbalancer = service.get("loadbalancer", None)
if not loadbalancer:
LOG.error("_common_service_handler: Service loadbalancer is None")
return lb_pending
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
try:
try:
self.tenant_manager.assure_tenant_created(service)
except Exception as e:
LOG.error("Tenant folder creation exception: %s",
e.message)
if lb_provisioning_status != f5const.F5_PENDING_DELETE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
raise e
LOG.debug(" _assure_tenant_created took %.5f secs" %
(time() - start_time))
traffic_group = self.service_to_traffic_group(service)
loadbalancer['traffic_group'] = traffic_group
if self.network_builder:
start_time = time()
try:
self.network_builder.prep_service_networking(
service, traffic_group)
except f5ex.NetworkNotReady as error:
LOG.debug("Network creation deferred until network "
"definition is completed: %s",
error.message)
if not delete_event:
self.do_service_update = False
raise error
except Exception as error:
LOG.error("Prep-network exception: icontrol_driver: %s",
error.message)
if lb_provisioning_status != f5const.F5_PENDING_DELETE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
if not delete_event:
raise error
finally:
if time() - start_time > .001:
LOG.debug(" _prep_service_networking "
"took %.5f secs" % (time() - start_time))
all_subnet_hints = {}
for bigip in self.get_config_bigips():
# check_for_delete_subnets:
# keep track of which subnets we should check to delete
# for a deleted vip or member
# do_not_delete_subnets:
# If we add an IP to a subnet we must not delete the subnet
all_subnet_hints[bigip.device_name] = \
{'check_for_delete_subnets': {},
'do_not_delete_subnets': []}
LOG.debug("XXXXXXXXX: Pre assure service")
self.lbaas_builder.assure_service(service,
traffic_group,
all_subnet_hints)
LOG.debug("XXXXXXXXX: Post assure service")
if self.network_builder:
start_time = time()
try:
self.network_builder.post_service_networking(
service, all_subnet_hints)
except Exception as error:
LOG.error("Post-network exception: icontrol_driver: %s",
error.message)
if lb_provisioning_status != f5const.F5_PENDING_DELETE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
raise error
if time() - start_time > .001:
LOG.debug(" _post_service_networking "
"took %.5f secs" % (time() - start_time))
except f5ex.NetworkNotReady as error:
pass
except Exception as err:
LOG.exception(err)
finally:
# only delete partition if loadbalancer is being deleted
if lb_provisioning_status == f5const.F5_PENDING_DELETE:
self.tenant_manager.assure_tenant_cleanup(service,
all_subnet_hints)
def update_service_status(self, service, timed_out=False):
"""Update status of objects in controller."""
LOG.debug("_update_service_status")
if not self.plugin_rpc:
LOG.error("Cannot update status in Neutron without "
"RPC handler.")
return
if 'members' in service:
# Call update_members_status
self._update_member_status(service['members'], timed_out)
if 'healthmonitors' in service:
# Call update_monitor_status
self._update_health_monitor_status(
service['healthmonitors']
)
if 'pools' in service:
# Call update_pool_status
self._update_pool_status(
service['pools']
)
if 'listeners' in service:
# Call update_listener_status
self._update_listener_status(service)
if 'l7policy_rules' in service:
self._update_l7rule_status(service['l7policy_rules'])
if 'l7policies' in service:
self._update_l7policy_status(service['l7policies'])
self._update_loadbalancer_status(service, timed_out)
def _update_member_status(self, members, timed_out=False):
"""Update member status in OpenStack."""
for member in members:
if 'provisioning_status' in member:
provisioning_status = member['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
if timed_out and \
provisioning_status != f5const.F5_ACTIVE:
member['provisioning_status'] = f5const.F5_ERROR
operating_status = f5const.F5_OFFLINE
else:
member['provisioning_status'] = f5const.F5_ACTIVE
operating_status = f5const.F5_ONLINE
self.plugin_rpc.update_member_status(
member['id'],
member['provisioning_status'],
operating_status
)
elif provisioning_status == f5const.F5_PENDING_DELETE:
if not member.get('parent_pool_deleted', False):
self.plugin_rpc.member_destroyed(
member['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_member_status(
member['id'],
f5const.F5_ERROR,
f5const.F5_OFFLINE)
def _update_health_monitor_status(self, health_monitors):
"""Update pool monitor status in OpenStack."""
for health_monitor in health_monitors:
if 'provisioning_status' in health_monitor:
provisioning_status = health_monitor['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
health_monitor['provisioning_status'] = \
f5const.F5_ACTIVE
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.health_monitor_destroyed(
health_monitor['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'])
@log_helpers.log_method_call
def _update_pool_status(self, pools):
"""Update pool status in OpenStack."""
for pool in pools:
if 'provisioning_status' in pool:
provisioning_status = pool['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_pool_status(
pool['id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
pool['provisioning_status'] = f5const.F5_ACTIVE
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.pool_destroyed(
pool['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_pool_status(pool['id'])
@log_helpers.log_method_call
def _update_listener_status(self, service):
"""Update listener status in OpenStack."""
listeners = service['listeners']
for listener in listeners:
if 'provisioning_status' in listener:
provisioning_status = listener['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_listener_status(
listener['id'],
f5const.F5_ACTIVE,
listener['operating_status']
)
listener['provisioning_status'] = \
f5const.F5_ACTIVE
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.listener_destroyed(
listener['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_listener_status(
listener['id'],
provisioning_status,
f5const.F5_OFFLINE)
@log_helpers.log_method_call
def _update_l7rule_status(self, l7rules):
"""Update l7rule status in OpenStack."""
for l7rule in l7rules:
if 'provisioning_status' in l7rule:
provisioning_status = l7rule['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_l7rule_status(
l7rule['id'],
l7rule['policy_id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.l7rule_destroyed(
l7rule['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_l7rule_status(
l7rule['id'], l7rule['policy_id'])
@log_helpers.log_method_call
def _update_l7policy_status(self, l7policies):
        """Update l7policy status in OpenStack."""
        LOG.debug("_update_l7policy_status")
for l7policy in l7policies:
if 'provisioning_status' in l7policy:
provisioning_status = l7policy['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_l7policy_status(
l7policy['id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
elif provisioning_status == f5const.F5_PENDING_DELETE:
LOG.debug("calling l7policy_destroyed")
self.plugin_rpc.l7policy_destroyed(
l7policy['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_l7policy_status(l7policy['id'])
@log_helpers.log_method_call
def _update_loadbalancer_status(self, service, timed_out=False):
"""Update loadbalancer status in OpenStack."""
loadbalancer = service.get('loadbalancer', {})
provisioning_status = loadbalancer.get('provisioning_status',
f5const.F5_ERROR)
if provisioning_status in self.positive_plugin_const_state:
if timed_out:
operating_status = (f5const.F5_OFFLINE)
if provisioning_status == f5const.F5_PENDING_CREATE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
else:
loadbalancer['provisioning_status'] = \
f5const.F5_ACTIVE
else:
operating_status = (f5const.F5_ONLINE)
loadbalancer['provisioning_status'] = \
f5const.F5_ACTIVE
self.plugin_rpc.update_loadbalancer_status(
loadbalancer['id'],
loadbalancer['provisioning_status'],
operating_status)
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.loadbalancer_destroyed(
loadbalancer['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_loadbalancer_status(
loadbalancer['id'],
provisioning_status,
f5const.F5_OFFLINE)
elif provisioning_status == f5const.F5_ACTIVE:
LOG.debug('Loadbalancer provisioning status is active')
else:
LOG.error('Loadbalancer provisioning status is invalid')
@is_operational
def update_operating_status(self, service):
if 'members' in service:
if self.network_builder:
# append route domain to member address
try:
self.network_builder._annotate_service_route_domains(
service)
except f5ex.InvalidNetworkType as exc:
LOG.warning(exc.msg)
return
        # get current member status
        self.lbaas_builder.update_operating_status(service)
        # update Neutron
for member in service['members']:
if member['provisioning_status'] == f5const.F5_ACTIVE:
operating_status = member.get('operating_status', None)
self.plugin_rpc.update_member_status(
member['id'],
provisioning_status=None,
operating_status=operating_status)
def get_active_bigip(self):
bigips = self.get_all_bigips()
if len(bigips) == 1:
return bigips[0]
for bigip in bigips:
if hasattr(bigip, 'failover_state'):
if bigip.failover_state == 'active':
return bigip
# if can't determine active, default to first one
return bigips[0]
def service_to_traffic_group(self, service):
# Hash service tenant id to index of traffic group
# return which iControlDriver.__traffic_group that tenant is "in?"
return self.tenant_to_traffic_group(
service['loadbalancer']['tenant_id'])
def tenant_to_traffic_group(self, tenant_id):
# Hash tenant id to index of traffic group
hexhash = hashlib.md5(tenant_id).hexdigest()
tg_index = int(hexhash, 16) % len(self.__traffic_groups)
return self.__traffic_groups[tg_index]
    # These functions should return only active BIG-IPs,
    # not errored BIG-IPs.
def get_bigip(self):
hostnames = sorted(list(self.__bigips))
for host in hostnames:
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return self.__bigips[host]
def get_bigip_hosts(self):
return_hosts = []
for host in list(self.__bigips):
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return_hosts.append(host)
return sorted(return_hosts)
def get_all_bigips(self):
return_bigips = []
for host in list(self.__bigips):
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return_bigips.append(self.__bigips[host])
return return_bigips
def get_config_bigips(self):
return self.get_all_bigips()
# these are the refactored methods
def get_active_bigips(self):
return self.get_all_bigips()
def get_errored_bigips_hostnames(self):
return_hostnames = []
for host in list(self.__bigips):
bigip = self.__bigips[host]
if hasattr(bigip, 'status') and bigip.status == 'error':
return_hostnames.append(host)
return return_hostnames
def get_inbound_throughput(self, bigip, global_statistics=None):
return self.stat_helper.get_inbound_throughput(
bigip, global_stats=global_statistics)
def get_outbound_throughput(self, bigip, global_statistics=None):
return self.stat_helper.get_outbound_throughput(
bigip, global_stats=global_statistics)
def get_throughput(self, bigip=None, global_statistics=None):
return self.stat_helper.get_throughput(
bigip, global_stats=global_statistics)
def get_active_connections(self, bigip=None, global_statistics=None):
return self.stat_helper.get_active_connection_count(
bigip, global_stats=global_statistics)
def get_ssltps(self, bigip=None, global_statistics=None):
return self.stat_helper.get_active_SSL_TPS(
bigip, global_stats=global_statistics)
def get_node_count(self, bigip=None, global_statistics=None):
return len(bigip.tm.ltm.nodes.get_collection())
def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
def get_tenant_count(self, bigip=None, global_statistics=None):
return self.system_helper.get_tenant_folder_count(bigip)
def get_tunnel_count(self, bigip=None, global_statistics=None):
return self.network_helper.get_tunnel_count(bigip)
def get_vlan_count(self, bigip=None, global_statistics=None):
return self.network_helper.get_vlan_count(bigip)
def get_route_domain_count(self, bigip=None, global_statistics=None):
return self.network_helper.get_route_domain_count(bigip)
def _init_traffic_groups(self, bigip):
try:
LOG.debug('retrieving traffic groups from %s' % bigip.hostname)
self.__traffic_groups = \
self.cluster_manager.get_traffic_groups(bigip)
if 'traffic-group-local-only' in self.__traffic_groups:
LOG.debug('removing reference to non-floating traffic group')
self.__traffic_groups.remove('traffic-group-local-only')
self.__traffic_groups.sort()
            LOG.debug('service placement will be done on traffic group(s): %s'
% self.__traffic_groups)
except Exception:
bigip.status = 'error'
bigip.status_message = \
'could not determine traffic groups for service placement'
raise
def _validate_bigip_version(self, bigip, hostname):
# Ensure the BIG-IP has sufficient version
major_version = self.system_helper.get_major_version(bigip)
if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
raise f5ex.MajorVersionValidateFailed(
'Device %s must be at least TMOS %s.%s'
% (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
f5const.MIN_TMOS_MINOR_VERSION))
minor_version = self.system_helper.get_minor_version(bigip)
if minor_version < f5const.MIN_TMOS_MINOR_VERSION:
raise f5ex.MinorVersionValidateFailed(
'Device %s must be at least TMOS %s.%s'
% (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
f5const.MIN_TMOS_MINOR_VERSION))
return major_version, minor_version
def trace_service_requests(self, service):
"""Dump services to a file for debugging."""
with open(self.file_name, 'r+') as fp:
fp.seek(-1, 2)
fp.write(',')
json.dump(service, fp, sort_keys=True, indent=2)
fp.write(']')
def get_config_dir(self):
"""Determine F5 agent configuration directory.
Oslo cfg has a config_dir option, but F5 agent is not currently
started with this option. To be complete, the code will check if
config_dir is defined, and use that value as long as it is a single
        string (any other type is ignored). If not defined,
get the full dir path of the INI file, which is currently used when
starting F5 agent. If neither option is available,
use /etc/neutron/services/f5.
:return: str defining configuration directory.
"""
if self.conf.config_dir and isinstance(self.conf.config_dir, str):
# use config_dir parameter if defined, and is a string
return self.conf.config_dir
elif self.conf.config_file:
# multiple config files (neutron and agent) are usually defined
if isinstance(self.conf.config_file, list):
# find agent config (f5-openstack-agent.ini)
config_files = self.conf.config_file
for file_name in config_files:
if 'f5-openstack-agent.ini' in file_name:
return os.path.dirname(file_name)
elif isinstance(self.conf.config_file, str):
# not a list, just a single string
return os.path.dirname(self.conf.config_file)
# if all else fails
return '/etc/neutron/services/f5'
| F5Networks/f5-openstack-agent | f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py | Python | apache-2.0 | 112,266 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# SchoolTool - common information systems platform for school administration
# Copyright (c) 2003 Shuttleworth Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
SchoolTool test runner.
Syntax: test.py [options] [pathname-regexp [test-regexp]]
Test cases are located in the directory tree starting at the location of this
script, in subdirectories named 'tests' and in Python modules named
'test*.py'. They are then filtered according to pathname and test regexes.
Alternatively, packages may just have 'tests.py' instead of a subpackage
'tests'.
A leading "!" in a regexp is stripped and negates the regexp. Pathname
regexp is applied to the whole path (package/package/module.py). Test regexp
is applied to a full test id (package.package.module.class.test_method).
Options:
-h, --help print this help message
-v verbose (print dots for each test run)
-vv very verbose (print test names)
-q quiet (do not print anything on success)
-c colorize output
-d invoke pdb when an exception occurs
-1 report only the first failure in doctests
-p show progress bar (can be combined with -v or -vv)
--level n select only tests at level n or lower
--all-levels select all tests
--list-files list all selected test files
--list-tests list all selected test cases
--coverage create code coverage reports
--profile profile the unit tests
--search-in dir limit directory tree walk to dir (optimisation)
--immediate-errors show errors as soon as they happen (default)
--delayed-errors show errors after all tests were run
--resource name enable given resource
"""
#
# This script borrows ideas from Zope 3's test runner heavily. It is smaller
# and cleaner though, at the expense of more limited functionality.
#
import re
import os
import sys
import time
import types
import getopt
import unittest
import traceback
import linecache
import pdb
__metaclass__ = type
RCS_IGNORE = [
"SCCS",
"BitKeeper",
"CVS",
".pc",
".hg",
".svn",
".git",
]
class Options:
"""Configurable properties of the test runner."""
# test location
basedir = '.' # base directory for tests (defaults to
# basedir of argv[0]), must be absolute
search_in = () # list of subdirs to traverse (defaults to
# basedir)
follow_symlinks = True # should symlinks to subdirectories be
# followed? (hardcoded, may cause loops)
# available resources
resources = []
# test filtering
level = 1 # run only tests at this or lower level
# (if None, runs all tests)
pathname_regex = '' # regexp for filtering filenames
test_regex = '' # regexp for filtering test cases
# actions to take
list_files = False # --list-files
list_tests = False # --list-tests
run_tests = True # run tests (disabled by --list-foo)
postmortem = False # invoke pdb when an exception occurs
profile = False
# output verbosity
verbosity = 0 # verbosity level (-v)
quiet = 0 # do not print anything on success (-q)
first_doctest_failure = False # report first doctest failure (-1)
print_import_time = True # print time taken to import test modules
# (currently hardcoded)
progress = False # show running progress (-p)
colorize = False # colorize output (-c)
coverage = False # produce coverage reports (--coverage)
coverdir = 'coverage' # where to put them (currently hardcoded)
immediate_errors = True # show tracebacks twice (--immediate-errors,
# --delayed-errors)
screen_width = 80 # screen width (autodetected)
def compile_matcher(regex):
"""Return a function that takes one argument and returns True or False.
    Regex is a regular expression. Empty regex matches everything. There
    is one exception: if the regex starts with "!", the meaning of it is
    reversed.
"""
if not regex:
return lambda x: True
elif regex == '!':
return lambda x: False
elif regex.startswith('!'):
rx = re.compile(regex[1:])
return lambda x: rx.search(x) is None
else:
rx = re.compile(regex)
return lambda x: rx.search(x) is not None
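# Illustrative usage of compile_matcher (paths below are invented examples):
#   matcher = compile_matcher('tests/test_.*\\.py')
#   matcher('package/tests/test_foo.py')    # -> True
#   matcher('package/module.py')            # -> False
#   negated = compile_matcher('!functional')
#   negated('package/tests/test_unit.py')   # -> True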
def walk_with_symlinks(top, func, arg):
"""Like os.path.walk, but follows symlinks on POSIX systems.
If the symlinks create a loop, this function will never finish.
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = os.path.join(top, name)
if os.path.isdir(name):
walk_with_symlinks(name, func, arg)
def has_path_component(path, name):
_drive, path = os.path.splitdrive(path)
head, tail = os.path.split(path)
while head and tail:
if tail == name:
return True
head, tail = os.path.split(head)
return False
def get_test_files(cfg):
"""Return a list of test module filenames."""
matcher = compile_matcher(cfg.pathname_regex)
allresults = []
test_names = ['tests']
baselen = len(cfg.basedir) + 1
def visit(ignored, dir, files):
# Ignore files starting with a dot.
        # Do not descend into subdirs containing a dot.
# Ignore versioning system files
remove = []
for idx, file in enumerate(files):
if file.startswith('.'):
remove.append(idx)
elif '.' in file and os.path.isdir(os.path.join(dir, file)):
remove.append(idx)
elif file in RCS_IGNORE:
remove.append(idx)
remove.reverse()
for idx in remove:
del files[idx]
# Skip non-test directories, but look for tests.py
if not has_path_component(dir, test_name):
if test_name + '.py' in files:
path = os.path.join(dir, test_name + '.py')
if matcher(path[baselen:]):
results.append(path)
return
test_files = [f for f in files if \
f.startswith('test') and f.endswith(".py")]
if '__init__.py' not in files:
if test_files:
# Python test files found, but no __init__.py
print >> sys.stderr, "%s is not a package" % dir
return
for file in test_files:
path = os.path.join(dir, file)
if matcher(path[baselen:]):
results.append(path)
if cfg.follow_symlinks:
walker = walk_with_symlinks
else:
walker = os.path.walk
for test_name in test_names:
results = []
for dir in cfg.search_in:
walker(dir, visit, None)
results.sort()
allresults += results
return allresults
def import_module(filename, cfg, tracer=None):
"""Import and return a module."""
filename = os.path.splitext(filename)[0]
modname = filename[len(cfg.basedir):].replace(os.path.sep, '.')
if modname.startswith('.'):
modname = modname[1:]
if tracer is not None:
mod = tracer.runfunc(__import__, modname)
else:
mod = __import__(modname)
components = modname.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def filter_testsuite(suite, matcher, level=None):
"""Return a flattened list of test cases that match the given matcher."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = []
for test in suite._tests:
if level is not None and getattr(test, 'level', 0) > level:
continue
if isinstance(test, unittest.TestCase):
testname = test.id() # package.module.class.method
if matcher(testname):
results.append(test)
else:
filtered = filter_testsuite(test, matcher, level)
results.extend(filtered)
return results
def get_all_test_cases(module):
"""Return a list of all test case classes defined in a given module."""
results = []
for name in dir(module):
if not name.startswith('Test'):
continue
item = getattr(module, name)
if (isinstance(item, (type, types.ClassType)) and
issubclass(item, unittest.TestCase)):
results.append(item)
return results
def get_test_cases(test_files, cfg, tracer=None):
"""Return a list of test cases from a given list of test modules."""
matcher = compile_matcher(cfg.test_regex)
results = []
startTime = time.time()
for file in test_files:
module = import_module(file, cfg, tracer=tracer)
def get_test_suite():
suite = unittest.TestSuite()
for test_case in get_all_test_cases(module):
suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(test_case))
return suite
if tracer is not None:
test_suite = tracer.runfunc(get_test_suite)
else:
test_suite = get_test_suite()
if test_suite is None:
continue
if (cfg.level is not None and
getattr(test_suite, 'level', 0) > cfg.level):
continue
filtered = filter_testsuite(test_suite, matcher, cfg.level)
results.extend(filtered)
stopTime = time.time()
timeTaken = float(stopTime - startTime)
if cfg.print_import_time:
nmodules = len(test_files)
plural = (nmodules != 1) and 's' or ''
print "Imported %d module%s in %.3fs" % (nmodules, plural, timeTaken)
print
return results
def extract_tb(tb, limit=None):
"""Improved version of traceback.extract_tb.
Includes a dict with locals in every stack frame instead of the line.
"""
list = []
while tb is not None and (limit is None or len(list) < limit):
frame = tb.tb_frame
code = frame.f_code
name = code.co_name
filename = code.co_filename
lineno = tb.tb_lineno
locals = frame.f_locals
list.append((filename, lineno, name, locals))
tb = tb.tb_next
return list
colorcodes = {'gray': 0, 'red': 1, 'green': 2, 'yellow': 3,
'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}
colormap = {'fail': 'red',
'warn': 'yellow',
'pass': 'green',
'count': 'white',
'title': 'white',
'separator': 'dark white',
'longtestname': 'yellow',
'filename': 'dark green',
'lineno': 'green',
'testname': 'dark yellow',
'excname': 'red',
'excstring': 'yellow',
'tbheader': 'dark white',
'doctest_ignored': 'gray',
'doctest_title': 'dark white',
'doctest_code': 'yellow',
'doctest_expected': 'green',
'doctest_got': 'red'}
def colorize(texttype, text):
"""Colorize text by ANSI escape codes in a color provided in colormap."""
color = colormap[texttype]
if color.startswith('dark '):
light = 0
color = color[len('dark '):] # strip the 'dark' prefix
else:
light = 1
code = 30 + colorcodes[color]
return '\033[%d;%dm' % (light, code)+ text + '\033[0;0m'
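# Illustrative examples (color names taken from the colormap above):
#   colorize('fail', 'FAILED')          # -> '\033[1;31mFAILED\033[0;0m' (bright red)
#   colorize('filename', 'test_foo.py') # -> '\033[0;32mtest_foo.py\033[0;0m' (dark green)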
def colorize_exception_only(lines):
"""Colorize result of traceback.format_exception_only."""
if len(lines) > 1:
return lines # SyntaxError? We won't deal with that for now.
lines = lines[0].splitlines()
# First, colorize the first line, which usually contains the name
# and the string of the exception.
result = []
# A simple exception. Try to colorize the first row, leave others be.
excline = lines[0].split(': ', 1)
if len(excline) == 2:
excname = colorize('excname', excline[0])
excstring = colorize('excstring', excline[1])
result.append('%s: %s' % (excname, excstring))
else:
result.append(colorize('excstring', lines[0]))
result.extend(lines[1:])
return '\n'.join(result)
def format_exception(etype, value, tb, limit=None, basedir=None, color=False):
"""Improved version of traceback.format_exception.
Includes Zope-specific extra information in tracebacks.
If color is True, ANSI codes are used to colorize output.
"""
# Show stack trace.
list = []
if tb:
list = ['Traceback (most recent call last):\n']
if color:
list[0] = colorize('tbheader', list[0])
w = list.append
for filename, lineno, name, locals in extract_tb(tb, limit):
line = linecache.getline(filename, lineno)
if color:
filename = colorize('filename', filename)
lineno = colorize('lineno', str(lineno))
name = colorize('testname', name)
w(' File "%s", line %s, in %s\n' % (filename, lineno, name))
if line:
w(' %s\n' % line.strip())
else:
w(' File "%s", line %s, in %s\n' % (filename, lineno, name))
if line:
w(' %s\n' % line.strip())
tb_info = locals.get('__traceback_info__')
if tb_info is not None:
w(' Extra information: %s\n' % repr(tb_info))
tb_supplement = locals.get('__traceback_supplement__')
if tb_supplement is not None:
tb_supplement = tb_supplement[0](*tb_supplement[1:])
w(' __traceback_supplement__ = %r\n' % (tb_supplement, ))
# Add the representation of the exception itself.
lines = traceback.format_exception_only(etype, value)
if color:
lines = colorize_exception_only(lines)
list.extend(lines)
return list
class CustomTestResult(unittest._TextTestResult):
"""Customised TestResult.
It can show a progress bar, and displays tracebacks for errors and
failures as soon as they happen, in addition to listing them all at
the end.
Another added feature are configurable resources. Needed resources
from tests are checked and if denied the test will be skipped.
"""
__super = unittest._TextTestResult
__super_init = __super.__init__
__super_startTest = __super.startTest
__super_stopTest = __super.stopTest
__super_printErrors = __super.printErrors
__super_printErrorList = __super.printErrorList
def __init__(self, stream, descriptions, verbosity, count, cfg):
self.__super_init(stream, descriptions, verbosity)
self.skipped = []
self.count = count
self.cfg = cfg
if cfg.progress:
self.dots = False
self._lastWidth = 0
self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1
def startTest(self, test):
n = self.testsRun + 1
if self.cfg.progress:
# verbosity == 0: 'xxxx/xxxx (xxx.x%)'
# verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
# verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
self.stream.write("\r%4d" % n)
if self.count:
self.stream.write("/%d (%5.1f%%)"
% (self.count, n * 100.0 / self.count))
if self.showAll: # self.cfg.verbosity == 1
self.stream.write(": ")
elif self.cfg.verbosity:
name = self.getShortDescription(test)
width = len(name)
if width < self._lastWidth:
name += " " * (self._lastWidth - width)
self.stream.write(": %s" % name)
self._lastWidth = width
self.stream.flush()
self.__super_startTest(test) # increments testsRun by one and prints
self.testsRun = n # override the testsRun calculation
self.start_time = time.time()
def getDescription(self, test):
return test.id() # package.module.class.method
def getShortDescription(self, test):
s = test.id() # package.module.class.method
if len(s) > self._maxWidth:
namelen = len(s.split('.')[-1])
left = max(0, (self._maxWidth - namelen) / 2 - 1)
right = self._maxWidth - left - 3
s = "%s...%s" % (s[:left], s[-right:])
return s
def printErrors(self):
w = self.stream.writeln
if self.cfg.progress and not (self.dots or self.showAll):
w()
if self.cfg.immediate_errors and (self.errors or self.failures):
if self.cfg.colorize:
w(colorize('separator', self.separator1))
w(colorize('title', "Tests that failed"))
w(colorize('separator', self.separator2))
else:
w(self.separator1)
w("Tests that failed")
w(self.separator2)
self.__super_printErrors()
def printSkipped(self):
self.stream.writeln()
for test, msg in self.skipped:
self.printSkip(test, msg)
def printSkip(self, test, msg):
w = self.stream.writeln
if self.cfg.colorize:
c = colorize
else:
c = lambda texttype, text: text
kind = c('warn', "SKIPPED")
description = c('testname', self.getDescription(test))
w("%s: %s:" % (kind, description))
w(" %s" % msg)
def formatError(self, err):
return "".join(format_exception(basedir=self.cfg.basedir,
color=self.cfg.colorize, *err))
def printTraceback(self, kind, test, err):
w = self.stream.writeln
if self.cfg.colorize:
c = colorize
else:
c = lambda texttype, text: text
w()
w(c('separator', self.separator1))
kind = c('fail', kind)
description = c('longtestname', self.getDescription(test))
w("%s: %s" % (kind, description))
w(c('separator', self.separator2))
w(self.formatError(err))
w()
def addFailure(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("FAIL", test, err)
if self.cfg.postmortem:
pdb.post_mortem(sys.exc_info()[2])
self.failures.append((test, self.formatError(err)))
def addError(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("ERROR", test, err)
if self.cfg.postmortem:
pdb.post_mortem(sys.exc_info()[2])
self.errors.append((test, self.formatError(err)))
def addSkipped(self, test, msg):
if self.showAll:
self.stream.writeln("skip")
elif self.dots:
self.stream.write("S")
self.skipped.append((test, msg))
def addSuccess(self, test):
now = time.time()
unittest.TestResult.addSuccess(self, test)
if self.cfg.colorize:
c = colorize
else:
c = lambda texttype, text: text
if self.showAll:
time_taken = float(now - self.start_time)
time_str = c('count', '%.1f' % time_taken)
self.stream.writeln("ok (%ss)" % time_str)
elif self.dots:
self.stream.write('.')
def printErrorList(self, flavour, errors):
if self.cfg.immediate_errors:
for test, _err in errors:
description = self.getDescription(test)
self.stream.writeln("%s: %s" % (flavour, description))
else:
self.__super_printErrorList(flavour, errors)
def get_tc_priv(testcase, attr):
"""get mangled private variables of TestCase instances"""
if sys.version_info >= (2, 5, 0, 'alpha', 1):
return getattr(testcase, "_" + attr)
return getattr(testcase, "_TestCase__" + attr)
class CustomTestCase(unittest.TestCase):
"""A test case with improved inequality test and resource support."""
def denied_resources(self, cfg_resources):
resources = getattr(self, "needed_resources", [])
return [x for x in resources if x not in cfg_resources]
def run(self, result=None):
if not isinstance(result, CustomTestResult):
raise ValueError("Needed CustomTestResult object: %r" % result)
result.startTest(self)
testMethod = getattr(self, get_tc_priv(self, "testMethodName"))
try:
denied = self.denied_resources(result.cfg.resources)
if denied:
res = ",".join(["%r" % x for x in denied])
s = len(denied) != 1 and "s" or ""
msg = "missing resource%s %s" % (s, res)
result.addSkipped(self, msg)
return
try:
self.setUp()
except Exception:
result.addError(self, get_tc_priv(self, "exc_info")())
return
ok = False
try:
testMethod()
ok = True
except self.failureException:
result.addFailure(self, get_tc_priv(self, "exc_info")())
except Exception:
result.addError(self, get_tc_priv(self, "exc_info")())
try:
self.tearDown()
except Exception:
result.addError(self, get_tc_priv(self, "exc_info")())
ok = False
if ok: result.addSuccess(self)
finally:
result.stopTest(self)
def failUnlessEqual(self, first, second, msg=None):
"""
Define the first argument as the test value, and the second
        one as the expected value. Adjust the default error message
accordingly.
"""
if msg is None:
r1 = repr(first)
r2 = repr(second)
if len(r1) > 40 or len(r2) > 40:
sep = "\n"
else:
sep = ", "
msg = "got %s%sexpected %s" % (r1, sep, r2)
super(CustomTestCase, self).failUnlessEqual(first, second, msg=msg)
assertEqual = assertEquals = failUnlessEqual
unittest.TestCase = CustomTestCase
class CustomTestRunner(unittest.TextTestRunner):
"""Customised TestRunner.
See CustomisedTextResult for a list of extensions.
"""
__super = unittest.TextTestRunner
__super_init = __super.__init__
__super_run = __super.run
def __init__(self, cfg, stream=sys.stdout, count=None):
self.__super_init(verbosity=cfg.verbosity, stream=stream)
self.cfg = cfg
self.count = count
def run(self, test):
"""Run the given test case or test suite."""
if self.count is None:
self.count = test.countTestCases()
if self.cfg.colorize:
c = colorize
else:
c = lambda texttype, text: text
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = float(stopTime - startTime)
result.printSkipped()
result.printErrors()
run = result.testsRun
if not self.cfg.quiet:
self.stream.writeln(c('separator', result.separator2))
run_str = c('count', str(run))
time_str = c('count', '%.1f' % timeTaken)
self.stream.writeln("Ran %s test%s in %ss" %
(run_str, run != 1 and "s" or "", time_str))
self.stream.writeln()
if result.skipped:
self.stream.write(c('warn', "SKIPPED TESTS"))
count = c('count', str(len(result.skipped)))
self.stream.writeln(" (%s)" % count)
if not result.wasSuccessful():
self.stream.write(c('fail', "FAILED"))
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write(" (failures=%s" % c('count', str(failed)))
if errored:
if failed: self.stream.write(", ")
else: self.stream.write("(")
self.stream.write("errors=%s" % c('count', str(errored)))
self.stream.writeln(")")
elif not self.cfg.quiet:
self.stream.writeln(c('pass', "OK"))
return result
def _makeResult(self):
return CustomTestResult(self.stream, self.descriptions, self.verbosity,
cfg=self.cfg, count=self.count)
def run_tests(cfg, test_cases, tracer):
runner = CustomTestRunner(cfg, count=len(test_cases))
suite = unittest.TestSuite()
suite.addTests(test_cases)
if tracer is not None:
success = tracer.runfunc(runner.run, suite).wasSuccessful()
results = tracer.results()
results.write_results(show_missing=True, coverdir=cfg.coverdir)
else:
if cfg.profile:
import hotshot
prof = hotshot.Profile("unittesttest.prof")
prof.start()
success = runner.run(suite).wasSuccessful()
if cfg.profile:
prof.stop()
return success
def main(argv):
"""Main program."""
# Environment
if sys.version_info < (2, 3):
print >> sys.stderr, '%s: need Python 2.3 or later' % argv[0]
print >> sys.stderr, 'your python is %s' % sys.version
return 1
# Defaults
cfg = Options()
if not cfg.basedir:
cfg.basedir = os.path.abspath(os.path.dirname(argv[0]))
# Figure out terminal size
try:
import curses
except ImportError:
pass
else:
try:
curses.setupterm()
cols = curses.tigetnum('cols')
if cols > 0:
cfg.screen_width = cols
except curses.error:
pass
# Option processing
try:
opts, args = getopt.gnu_getopt(argv[1:], 'hvpcqwd1s:',
['list-files', 'list-tests',
'level=', 'all-levels', 'coverage',
'search-in=', 'immediate-errors',
'delayed-errors', 'help',
'resource=', 'profile',
])
except getopt.error, e:
print >> sys.stderr, '%s: %s' % (argv[0], e)
print >> sys.stderr, 'run %s -h for help' % argv[0]
return 1
for k, v in opts:
if k in ['-h', '--help']:
print __doc__
return 0
elif k == '-v':
cfg.verbosity += 1
cfg.quiet = False
elif k == '-p':
cfg.progress = True
cfg.quiet = False
elif k == '-c':
cfg.colorize = True
elif k == '-q':
cfg.verbosity = 0
cfg.progress = False
cfg.quiet = True
elif k == '-d':
cfg.postmortem = True
elif k == '-1':
cfg.first_doctest_failure = True
elif k == '--list-files':
cfg.list_files = True
cfg.run_tests = False
elif k == '--list-tests':
cfg.list_tests = True
cfg.run_tests = False
elif k == '--coverage':
cfg.coverage = True
elif k == '--profile':
cfg.profile = True
elif k == '--resource':
cfg.resources.append(v)
elif k == '--level':
try:
cfg.level = int(v)
except ValueError:
print >> sys.stderr, '%s: invalid level: %s' % (argv[0], v)
print >> sys.stderr, 'run %s -h for help' % argv[0]
return 1
elif k == '--all-levels':
cfg.level = None
elif k in ('-s', '--search-in'):
if not v.startswith(cfg.basedir):
print >> sys.stderr, ('%s: argument to --search-in (%s) must'
' be a subdir of %s'
% (argv[0], v, cfg.basedir))
return 1
cfg.search_in += (v, )
elif k == '--immediate-errors':
cfg.immediate_errors = True
elif k == '--delayed-errors':
cfg.immediate_errors = False
else:
print >> sys.stderr, '%s: invalid option: %s' % (argv[0], k)
print >> sys.stderr, 'run %s -h for help' % argv[0]
return 1
if args:
cfg.pathname_regex = args[0]
if len(args) > 1:
cfg.test_regex = args[1]
if len(args) > 2:
print >> sys.stderr, '%s: too many arguments: %s' % (argv[0], args[2])
print >> sys.stderr, 'run %s -h for help' % argv[0]
return 1
if not cfg.search_in:
cfg.search_in = (cfg.basedir, )
# Do not print "Imported %d modules in %.3fs" if --list-* was specified
# or if quiet mode is enabled.
if cfg.quiet or cfg.list_tests or cfg.list_files:
cfg.print_import_time = False
# Set up the python path
sys.path.insert(0, cfg.basedir)
# Set up tracing before we start importing things
tracer = None
if cfg.run_tests and cfg.coverage:
import trace
# trace.py in Python 2.3.1 is buggy:
# 1) Despite sys.prefix being in ignoredirs, a lot of system-wide
# modules are included in the coverage reports
# 2) Some module file names do not have the first two characters,
# and in general the prefix used seems to be arbitrary
# These bugs are fixed in src/trace.py which should be in PYTHONPATH
# before the official one.
ignoremods = ['test']
ignoredirs = [sys.prefix, sys.exec_prefix]
tracer = trace.Trace(count=True, trace=False,
ignoremods=ignoremods, ignoredirs=ignoredirs)
# Finding and importing
test_files = get_test_files(cfg)
if cfg.list_tests or cfg.run_tests:
test_cases = get_test_cases(test_files, cfg, tracer=tracer)
# Configure doctests
if cfg.first_doctest_failure:
import doctest
# The doctest module in Python 2.3 does not have this feature
if hasattr(doctest, 'REPORT_ONLY_FIRST_FAILURE'):
doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
# Configure the logging module
import logging
logging.basicConfig()
logging.root.setLevel(logging.CRITICAL)
# Running
success = True
if cfg.list_files:
baselen = len(cfg.basedir) + 1
print "\n".join([fn[baselen:] for fn in test_files])
if cfg.list_tests:
print "\n".join([test.id() for test in test_cases])
if cfg.run_tests:
        success = run_tests(cfg, test_cases, tracer)
# That's all
if success:
return 0
else:
return 1
if __name__ == '__main__':
exitcode = main(sys.argv)
sys.exit(exitcode)
| HomeRad/TorCleaner | test.py | Python | gpl-2.0 | 32,033 | 0.00103 |
# -*- coding: utf-8 -*-
import cPickle as pickle
GEO_FILES = './geo_files'
def gen_db():
    u'''Generate the pickle database for ipgeobase.ru data.
'''
res = []
tmp_list = []
cities_dict = {}
# cidr_optim.txt
for line in open('%s/cidr_optim.txt' % GEO_FILES, 'r'):
a = line.split('\t')
a[4] = a[4].strip()
if a[4] == '-':
a[4] = None
else:
a[4] = int(a[4])
tmp_list.append(a[0])
res.append((int(a[0]), int(a[1]), a[3], a[4]))
res = sorted(res, key=lambda i: i[0])
    # check for duplicates: keep only the first entry for each start address
    # (removing items from the list while iterating over it would skip entries)
    deduped = []
    prev_start = None
    for item in res:
        if item[0] != prev_start:
            deduped.append(item)
            prev_start = item[0]
    res = deduped
# cities.txt
cities_file = open('%s/cities.txt' % GEO_FILES, 'r').read()
lines = cities_file.decode('CP1251').split('\n')
for line in lines:
a = line.split('\t')
if len(a) > 3:
cities_dict.update({int(a[0]): (a[1], a[2])})
f = open('%s/cidr_pickle.db' % GEO_FILES, 'w')
pickle.dump((res, cities_dict), f)
f.close()
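# Illustrative lookup sketch (not part of this script; the IP is an invented
# example): the pickled tuple is (ranges, cities), with ranges sorted by the
# start of each block, so an address can be resolved with a bisect search.
#   import cPickle as pickle, socket, struct, bisect
#   ranges, cities = pickle.load(open('%s/cidr_pickle.db' % GEO_FILES))
#   ip = struct.unpack('!L', socket.inet_aton('93.158.134.3'))[0]
#   i = bisect.bisect_right([r[0] for r in ranges], ip) - 1
#   start, end, country, city_id = ranges[i]
#   if start <= ip <= end and city_id:
#       print cities[city_id]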
if __name__ == '__main__':
gen_db() | greggy/python-ipgeobase | cidr_create.py | Python | bsd-3-clause | 1,230 | 0.010906 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author Philip
import tarfile as tf
import zipfile as zf
import os, re, shutil, sys, platform
pyversion = platform.python_version()
islinux = platform.system().lower() == 'linux'
if pyversion[:3] in ['2.6', '2.7']:
import urllib as urllib_request
import codecs
open = codecs.open
_unichr = unichr
if sys.maxunicode < 0x10000:
def unichr(i):
if i < 0x10000:
return _unichr(i)
else:
return _unichr( 0xD7C0 + ( i>>10 ) ) + _unichr( 0xDC00 + ( i & 0x3FF ) )
elif pyversion[:2] == '3.':
import urllib.request as urllib_request
unichr = chr
def unichr2( *args ):
return [unichr( int( i.split('<')[0][2:], 16 ) ) for i in args]
def unichr3( *args ):
return [unichr( int( i[2:7], 16 ) ) for i in args if i[2:7]]
# DEFINE
SF_MIRROR = 'easynews'
SCIM_TABLES_VER = '0.5.9'
SCIM_PINYIN_VER = '0.5.91'
LIBTABE_VER = '0.2.3'
# END OF DEFINE
def download( url, dest ):
if os.path.isfile( dest ):
print( 'File %s up to date.' % dest )
return
global islinux
if islinux:
        # we use wget instead of urlretrieve under Linux,
# because wget could display details like download progress
os.system('wget %s' % url)
else:
print( 'Downloading from [%s] ...' % url )
urllib_request.urlretrieve( url, dest )
print( 'Download complete.\n' )
return
def uncompress( fp, member, encoding = 'U8' ):
name = member.rsplit( '/', 1 )[-1]
print( 'Extracting %s ...' % name )
fp.extract( member )
shutil.move( member, name )
if '/' in member:
shutil.rmtree( member.split( '/', 1 )[0] )
return open( name, 'rb', encoding, 'ignore' )
unzip = lambda path, member, encoding = 'U8': \
uncompress( zf.ZipFile( path ), member, encoding )
untargz = lambda path, member, encoding = 'U8': \
uncompress( tf.open( path, 'r:gz' ), member, encoding )
def parserCore( fp, pos, beginmark = None, endmark = None ):
if beginmark and endmark:
start = False
else: start = True
mlist = set()
for line in fp:
if beginmark and line.startswith( beginmark ):
start = True
continue
elif endmark and line.startswith( endmark ):
break
if start and not line.startswith( '#' ):
elems = line.split()
if len( elems ) < 2:
continue
elif len( elems[0] ) > 1:
mlist.add( elems[pos] )
return mlist
def tablesParser( path, name ):
""" Read file from scim-tables and parse it. """
global SCIM_TABLES_VER
src = 'scim-tables-%s/tables/zh/%s' % ( SCIM_TABLES_VER, name )
fp = untargz( path, src, 'U8' )
return parserCore( fp, 1, 'BEGIN_TABLE', 'END_TABLE' )
ezbigParser = lambda path: tablesParser( path, 'EZ-Big.txt.in' )
wubiParser = lambda path: tablesParser( path, 'Wubi.txt.in' )
zrmParser = lambda path: tablesParser( path, 'Ziranma.txt.in' )
def phraseParser( path ):
""" Read phrase_lib.txt and parse it. """
global SCIM_PINYIN_VER
src = 'scim-pinyin-%s/data/phrase_lib.txt' % SCIM_PINYIN_VER
dst = 'phrase_lib.txt'
fp = untargz( path, src, 'U8' )
return parserCore( fp, 0 )
def tsiParser( path ):
""" Read tsi.src and parse it. """
src = 'libtabe/tsi-src/tsi.src'
dst = 'tsi.src'
fp = untargz( path, src, 'big5hkscs' )
return parserCore( fp, 0 )
def unihanParser( path ):
""" Read Unihan_Variants.txt and parse it. """
fp = unzip( path, 'Unihan_Variants.txt', 'U8' )
t2s = dict()
s2t = dict()
for line in fp:
if line.startswith( '#' ):
continue
else:
elems = line.split()
if len( elems ) < 3:
continue
type = elems.pop( 1 )
elems = unichr2( *elems )
if type == 'kTraditionalVariant':
s2t[elems[0]] = elems[1:]
elif type == 'kSimplifiedVariant':
t2s[elems[0]] = elems[1:]
fp.close()
return ( t2s, s2t )
def applyExcludes( mlist, path ):
""" Apply exclude rules from path to mlist. """
excludes = open( path, 'rb', 'U8' ).read().split()
excludes = [word.split( '#' )[0].strip() for word in excludes]
excludes = '|'.join( excludes )
excptn = re.compile( '.*(?:%s).*' % excludes )
diff = [mword for mword in mlist if excptn.search( mword )]
mlist.difference_update( diff )
return mlist
def charManualTable( path ):
fp = open( path, 'rb', 'U8' )
ret = {}
for line in fp:
elems = line.split( '#' )[0].split( '|' )
elems = unichr3( *elems )
if len( elems ) > 1:
ret[elems[0]] = elems[1:]
return ret
def toManyRules( src_table ):
tomany = set()
for ( f, t ) in src_table.iteritems():
for i in range( 1, len( t ) ):
tomany.add( t[i] )
return tomany
def removeRules( path, table ):
fp = open( path, 'rb', 'U8' )
texc = list()
for line in fp:
elems = line.split( '=>' )
f = t = elems[0].strip()
if len( elems ) == 2:
t = elems[1].strip()
f = f.strip('"').strip("'")
t = t.strip('"').strip("'")
if f:
try:
table.pop( f )
except:
pass
if t:
texc.append( t )
texcptn = re.compile( '^(?:%s)$' % '|'.join( texc ) )
for (tmp_f, tmp_t) in table.copy().iteritems():
if texcptn.match( tmp_t ):
table.pop( tmp_f )
return table
def customRules( path ):
fp = open( path, 'rb', 'U8' )
ret = dict()
for line in fp:
elems = line.split( '#' )[0].split()
if len( elems ) > 1:
ret[elems[0]] = elems[1]
return ret
def dictToSortedList( src_table, pos ):
return sorted( src_table.items(), key = lambda m: m[pos] )
def translate( text, conv_table ):
i = 0
while i < len( text ):
for j in range( len( text ) - i, 0, -1 ):
f = text[i:][:j]
t = conv_table.get( f )
if t:
text = text[:i] + t + text[i:][j:]
i += len(t) - 1
break
i += 1
return text
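# Illustrative example of the greedy longest-match behaviour of translate()
# (the tiny table below is an invented stand-in, not real conversion data):
#   table = { 'ab': 'X', 'a': 'Y' }
#   translate( 'abc', table )  # -> 'Xc'  ('ab' matches before 'a')
#   translate( 'ac', table )   # -> 'Yc'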
def manualWordsTable( path, conv_table, reconv_table ):
fp = open( path, 'rb', 'U8' )
reconv_table = {}
wordlist = [line.split( '#' )[0].strip() for line in fp]
wordlist = list( set( wordlist ) )
wordlist.sort( key = len, reverse = True )
while wordlist:
word = wordlist.pop()
new_word = translate( word, conv_table )
rcv_word = translate( word, reconv_table )
if word != rcv_word:
reconv_table[word] = word
reconv_table[new_word] = word
return reconv_table
def defaultWordsTable( src_wordlist, src_tomany, char_conv_table, char_reconv_table ):
wordlist = list( src_wordlist )
wordlist.sort( key = len, reverse = True )
word_conv_table = {}
word_reconv_table = {}
conv_table = char_conv_table.copy()
reconv_table = char_reconv_table.copy()
tomanyptn = re.compile( '(?:%s)' % '|'.join( src_tomany ) )
while wordlist:
conv_table.update( word_conv_table )
reconv_table.update( word_reconv_table )
word = wordlist.pop()
new_word_len = word_len = len( word )
while new_word_len == word_len:
add = False
test_word = translate( word, reconv_table )
new_word = translate( word, conv_table )
if not reconv_table.get( new_word ) \
and ( test_word != word \
or ( tomanyptn.search( word ) \
and word != translate( new_word, reconv_table ) ) ):
word_conv_table[word] = new_word
word_reconv_table[new_word] = word
try:
word = wordlist.pop()
except IndexError:
break
new_word_len = len(word)
return word_reconv_table
def PHPArray( table ):
lines = ['\'%s\' => \'%s\',' % (f, t) for (f, t) in table if f and t]
return '\n'.join(lines)
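# For example (invented pairs), PHPArray( [('A', 'B'), ('C', 'D')] ) returns:
#   'A' => 'B',
#   'C' => 'D',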
def main():
#Get Unihan.zip:
url = 'http://www.unicode.org/Public/UNIDATA/Unihan.zip'
han_dest = 'Unihan.zip'
download( url, han_dest )
# Get scim-tables-$(SCIM_TABLES_VER).tar.gz:
url = 'http://%s.dl.sourceforge.net/sourceforge/scim/scim-tables-%s.tar.gz' % ( SF_MIRROR, SCIM_TABLES_VER )
tbe_dest = 'scim-tables-%s.tar.gz' % SCIM_TABLES_VER
download( url, tbe_dest )
# Get scim-pinyin-$(SCIM_PINYIN_VER).tar.gz:
url = 'http://%s.dl.sourceforge.net/sourceforge/scim/scim-pinyin-%s.tar.gz' % ( SF_MIRROR, SCIM_PINYIN_VER )
pyn_dest = 'scim-pinyin-%s.tar.gz' % SCIM_PINYIN_VER
download( url, pyn_dest )
# Get libtabe-$(LIBTABE_VER).tgz:
url = 'http://%s.dl.sourceforge.net/sourceforge/libtabe/libtabe-%s.tgz' % ( SF_MIRROR, LIBTABE_VER )
lbt_dest = 'libtabe-%s.tgz' % LIBTABE_VER
download( url, lbt_dest )
# Unihan.txt
( t2s_1tomany, s2t_1tomany ) = unihanParser( han_dest )
t2s_1tomany.update( charManualTable( 'trad2simp.manual' ) )
s2t_1tomany.update( charManualTable( 'simp2trad.manual' ) )
t2s_1to1 = dict( [( f, t[0] ) for ( f, t ) in t2s_1tomany.iteritems()] )
s2t_1to1 = dict( [( f, t[0] ) for ( f, t ) in s2t_1tomany.iteritems()] )
s_tomany = toManyRules( t2s_1tomany )
t_tomany = toManyRules( s2t_1tomany )
# noconvert rules
t2s_1to1 = removeRules( 'trad2simp_noconvert.manual', t2s_1to1 )
s2t_1to1 = removeRules( 'simp2trad_noconvert.manual', s2t_1to1 )
# the supper set for word to word conversion
t2s_1to1_supp = t2s_1to1.copy()
s2t_1to1_supp = s2t_1to1.copy()
t2s_1to1_supp.update( customRules( 'trad2simp_supp_set.manual' ) )
s2t_1to1_supp.update( customRules( 'simp2trad_supp_set.manual' ) )
# word to word manual rules
t2s_word2word_manual = manualWordsTable( 'simpphrases.manual', s2t_1to1_supp, t2s_1to1_supp )
t2s_word2word_manual.update( customRules( 'toSimp.manual' ) )
s2t_word2word_manual = manualWordsTable( 'tradphrases.manual', t2s_1to1_supp, s2t_1to1_supp )
s2t_word2word_manual.update( customRules( 'toTrad.manual' ) )
# word to word rules from input methods
t_wordlist = set()
s_wordlist = set()
t_wordlist.update( ezbigParser( tbe_dest ),
tsiParser( lbt_dest ) )
s_wordlist.update( wubiParser( tbe_dest ),
zrmParser( tbe_dest ),
phraseParser( pyn_dest ) )
# exclude
s_wordlist = applyExcludes( s_wordlist, 'simpphrases_exclude.manual' )
t_wordlist = applyExcludes( t_wordlist, 'tradphrases_exclude.manual' )
s2t_supp = s2t_1to1_supp.copy()
s2t_supp.update( s2t_word2word_manual )
t2s_supp = t2s_1to1_supp.copy()
t2s_supp.update( t2s_word2word_manual )
# parse list to dict
t2s_word2word = defaultWordsTable( s_wordlist, s_tomany, s2t_1to1_supp, t2s_supp )
t2s_word2word.update( t2s_word2word_manual )
s2t_word2word = defaultWordsTable( t_wordlist, t_tomany, t2s_1to1_supp, s2t_supp )
s2t_word2word.update( s2t_word2word_manual )
# Final tables
# sorted list toHans
t2s_1to1 = dict( [( f, t ) for ( f, t ) in t2s_1to1.iteritems() if f != t] )
toHans = dictToSortedList( t2s_1to1, 0 ) + dictToSortedList( t2s_word2word, 1 )
# sorted list toHant
s2t_1to1 = dict( [( f, t ) for ( f, t ) in s2t_1to1.iteritems() if f != t] )
toHant = dictToSortedList( s2t_1to1, 0 ) + dictToSortedList( s2t_word2word, 1 )
# sorted list toCN
toCN = dictToSortedList( customRules( 'toCN.manual' ), 1 )
# sorted list toHK
toHK = dictToSortedList( customRules( 'toHK.manual' ), 1 )
# sorted list toSG
toSG = dictToSortedList( customRules( 'toSG.manual' ), 1 )
# sorted list toTW
toTW = dictToSortedList( customRules( 'toTW.manual' ), 1 )
# Get PHP Array
php = '''<?php
/**
* Simplified / Traditional Chinese conversion tables
*
* Automatically generated using code and data in includes/zhtable/
* Do not modify directly!
*
* @file
*/
$zh2Hant = array(\n'''
php += PHPArray( toHant ) \
+ '\n);\n\n$zh2Hans = array(\n' \
+ PHPArray( toHans ) \
+ '\n);\n\n$zh2TW = array(\n' \
+ PHPArray( toTW ) \
+ '\n);\n\n$zh2HK = array(\n' \
+ PHPArray( toHK ) \
+ '\n);\n\n$zh2CN = array(\n' \
+ PHPArray( toCN ) \
+ '\n);\n\n$zh2SG = array(\n' \
+ PHPArray( toSG ) \
+ '\n);'
f = open( 'ZhConversion.php', 'wb', encoding = 'utf8' )
print ('Writing ZhConversion.php ... ')
f.write( php )
f.close()
#Remove temp files
print ('Deleting temp files ... ')
os.remove('EZ-Big.txt.in')
os.remove('phrase_lib.txt')
os.remove('tsi.src')
os.remove('Unihan_Variants.txt')
os.remove('Wubi.txt.in')
os.remove('Ziranma.txt.in')
if __name__ == '__main__':
main()
| OpenConextApps/OpenConextApps-MediaWiki | includes/zhtable/Makefile.py | Python | gpl-2.0 | 13,107 | 0.036927 |
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import sys
import shutil
import tempfile
import numpy
import h5py
import nose
import platform
from lazyflow.roi import sliceToRoi
import logging
logger = logging.getLogger(__name__)
logger.addHandler( logging.StreamHandler( sys.stdout ) )
logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
from lazyflow.utility.io_util.blockwiseFileset import BlockwiseFileset
from lazyflow.utility.io_util.RESTfulBlockwiseFileset import RESTfulBlockwiseFileset
class TestRESTFullBlockwiseFilset(object):
@classmethod
def setupClass(cls):
# The openconnectome site appears to be down at the moment.
# This test fails when that happens...
raise nose.SkipTest
if platform.system() == 'Windows':
# On windows, there are errors, and we make no attempt to solve them (at the moment).
raise nose.SkipTest
try:
BlockwiseFileset._prepare_system()
except ValueError:
# If the system isn't configured to allow lots of open files, we can't run this test.
raise nose.SkipTest
cls.tempDir = tempfile.mkdtemp()
logger.debug("Working in {}".format( cls.tempDir ))
# Create the two sub-descriptions
Bock11VolumeDescription = """
{
"_schema_name" : "RESTful-volume-description",
"_schema_version" : 1.0,
"name" : "Bock11-level0",
"format" : "hdf5",
"axes" : "zyx",
"##NOTE":"The first z-slice of the bock dataset is 2917, so the origin_offset must be at least 2917",
"origin_offset" : [2917, 50000, 50000],
"bounds" : [4156, 135424, 119808],
"dtype" : "numpy.uint8",
"url_format" : "http://openconnecto.me/ocp/ca/bock11/hdf5/0/{x_start},{x_stop}/{y_start},{y_stop}/{z_start},{z_stop}/",
"hdf5_dataset" : "CUTOUT"
}
"""
blockwiseFilesetDescription = \
"""
{
"_schema_name" : "blockwise-fileset-description",
"_schema_version" : 1.0,
"name" : "bock11-blocks",
"format" : "hdf5",
"axes" : "zyx",
"shape" : [40,40,40],
"dtype" : "numpy.uint8",
"block_shape" : [20, 20, 20],
"block_file_name_format" : "block-{roiString}.h5/CUTOUT",
"dataset_root_dir" : "blocks"
}
"""
# Combine them into the composite description (see RESTfulBlockwiseFileset.DescriptionFields)
compositeDescription = \
"""
{{
"_schema_name" : "RESTful-blockwise-fileset-description",
"_schema_version" : 1.0,
"remote_description" : {remote_description},
"local_description" : {local_description}
}}
""".format( remote_description=Bock11VolumeDescription, local_description=blockwiseFilesetDescription )
# Create the description file
cls.descriptionFilePath = os.path.join(cls.tempDir, "description.json")
with open(cls.descriptionFilePath, 'w') as f:
f.write(compositeDescription)
# Create a new fileset that views the same data and stores it the
# same way locally, but this time we'll use an offset 'view'
# Start with a copy of the non-offset description
offsetDescription = RESTfulBlockwiseFileset.readDescription(cls.descriptionFilePath)
offsetDescription.local_description.view_origin = numpy.array([0,20,0])
offsetDescription.local_description.dataset_root_dir = "offset_blocks"
cls.descriptionFilePath_offset = os.path.join(cls.tempDir, "description_offset.json")
RESTfulBlockwiseFileset.writeDescription(cls.descriptionFilePath_offset, offsetDescription)
@classmethod
def teardownClass(cls):
# If the user is debugging, don't clear the files we're testing with.
if logger.level > logging.DEBUG:
shutil.rmtree(cls.tempDir)
def test_1_SingleDownload(self):
volume = RESTfulBlockwiseFileset( self.descriptionFilePath )
slicing = numpy.s_[0:20, 0:20, 0:20]
roi = sliceToRoi(slicing, volume.description.shape)
data = volume.readData( roi )
assert data.shape == (20,20,20)
assert volume.getBlockStatus( ([0,0,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
def test_2_MultiDownload(self):
volume = RESTfulBlockwiseFileset( self.descriptionFilePath )
slicing = numpy.s_[0:25, 10:30, 0:20]
roi = sliceToRoi(slicing, volume.description.shape)
data = volume.readData( roi )
assert data.shape == (25,20,20)
assert volume.getBlockStatus( ([0,0,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
assert volume.getBlockStatus( ([20,0,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
assert volume.getBlockStatus( ([20,20,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
assert volume.getBlockStatus( ([0,20,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
def test_4_OffsetDownload(self):
volume = RESTfulBlockwiseFileset( self.descriptionFilePath )
slicing = numpy.s_[20:40, 20:40, 20:40]
roi = sliceToRoi(slicing, volume.description.shape)
data = volume.readData( roi )
assert data.shape == (20,20,20)
assert volume.getBlockStatus( ([20,20,20]) ) == BlockwiseFileset.BLOCK_AVAILABLE
offsetVolume = RESTfulBlockwiseFileset( self.descriptionFilePath_offset )
offsetSlicing = numpy.s_[20:40, 0:20, 20:40] # Note middle slice is offset (see view_origin in setupClass)
offsetRoi = sliceToRoi(offsetSlicing, offsetVolume.description.shape)
offsetData = offsetVolume.readData( offsetRoi )
assert offsetData.shape == (20,20,20)
assert offsetVolume.getBlockStatus( ([20,0,20]) ) == BlockwiseFileset.BLOCK_AVAILABLE
# Data should be the same
assert (offsetData == data).all()
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
ret = nose.run(defaultTest=__file__)
if not ret: sys.exit(1)
| stuarteberg/lazyflow | tests/testRESTfulBlockwiseFileset.py | Python | lgpl-3.0 | 7,468 | 0.014596 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This library implement a chemical element with several properties
* id: atomic number
* name
* altname
* symbol
* serie
* group
* period
* block
* density_Solid
* density_Liq
* density_Gas
* appearance
* date
* country
* discover
* etymology
* atomic_mass
* atomic_volume
* atomic_radius
* covalent_radius
* vanderWaals_radius
* ionic_radii
* lattice_type
* space_group
* lattice_edges
* lattice_angles
* electron_configuration
* oxidation
* electronegativity
* electron_affinity
* first_ionization
* Tf
* Tb
* Heat_f
* Heat_b
* Cp
* k
* T_debye
* color
* notes
'''
import os
import sqlite3
from numpy import linspace, logspace, log
from PyQt5.QtCore import QLocale
from lib.utilities import colors
# Connection to database with element data
connection = sqlite3.connect(os.path.join(
os.environ["pychemqt"], "dat", "elemental.db"))
databank = connection.cursor()
# Load system locale to implement a custon translation system (non qt)
locale = QLocale.system().name().upper()
if "_" in locale:
locale = locale.split("_")[0]
databank.execute("PRAGMA table_info(TRANSLATION)")
translation = []
for i, name, type_, other, other2, primary_key in databank:
if "name_" in name:
translation.append(name.split("_")[-1])
if locale in translation:
tr_available = True
else:
tr_available = False
def cleanFloat(flo):
if flo:
try:
value = float(flo)
except ValueError:
value = float(flo.split("(")[1].split(",")[0])
else:
value = 0
return value
color_serie = ["#DDDDDD", "#795681", "#B92D2D", "#B8873A", "#D7C848",
"#94738F", "#6186AC", "#88AE62", "#949692", "#BF924E",
"#C44343"]
color_phase = ["#DDDDDD", "#BB8F4A", "#7BB245", "#5D82A8"]
NUMERIC_VALUES = ["density_Solid", "density_Liq", "density_Gas", "date",
"atomic_mass", "atomic_volume", "atomic_radius",
"covalent_radius", "vanderWaals_radius", "electronegativity",
"electron_affinity", "first_ionization", "Tf", "Tb",
"Heat_f", "Heat_b", "Cp", "k", "T_debye"]
def _configValues(Preferences):
PROP = Preferences.get("Applications", "elementalColorby")
NUM = Preferences.getint("Applications", "elementalDefinition")
LOG = Preferences.getboolean("Applications", "elementalLog")
PMIN = None
PMAX = None
if PROP == "phase":
CATEGORIES = ["", "Solid", "Liquid", "Gas"]
COLORS = color_phase
elif PROP in NUMERIC_VALUES:
databank.execute("SELECT %s FROM ELEMENTS" % PROP)
PMAX = 0
for st, in databank:
value = cleanFloat(st)
if value > PMAX:
PMAX = value
if LOG:
PMIN = 1
CATEGORIES = logspace(log(PMIN), log(PMAX), NUM)
else:
PMIN = 0
CATEGORIES = linspace(PMIN, PMAX, NUM)
COLORS = colors(NUM, scale=True)
elif PROP == "Element":
CATEGORIES = []
COLORS = []
else:
q = "SELECT %s, COUNT(*) c FROM ELEMENTS GROUP BY %s HAVING c > 0" % (
PROP, PROP)
databank.execute(q)
CATEGORIES = []
for category, count in databank:
CATEGORIES.append(category)
if PROP == "serie":
COLORS = color_serie
else:
COLORS = colors(len(CATEGORIES))
return CATEGORIES, PROP, COLORS, PMAX
class Elemental(object):
"""Chemical element class"""
def __init__(self, id):
"""
Parameters
------------
id : int
atomic number of element, [-]
"""
if id > 118:
id = 118
databank.execute("SELECT * FROM ELEMENTS WHERE id=='%i'" % id)
data = databank.fetchone()
self.id = int(data[0])
self.altname = data[2]
self.symbol = data[3]
self.serie = data[4]
self.group = int(data[5])
self.period = int(data[6])
self.block = data[7]
self.density_Solid = self._unit(data[8])
self.density_Liq = self._unit(data[9])
self.density_Gas = self._unit(data[10])
self.appearance = data[11]
self.date = data[12]
self.country = data[13]
self.discover = data[14]
self.etymology = data[15]
self.atomic_mass = self._unit(data[16])
self.atomic_volume = self._unit(data[17])
self.atomic_radius = self._unit(data[18])
self.covalent_radius = self._unit(data[19])
self.vanderWaals_radius = self._unit(data[20])
self.ionic_radii = data[21]
self.lattice_type = data[22]
self.space_group = data[23]
self.lattice_edges = eval(data[24])
self.lattice_volume = self.lattice_edges[0]*self.lattice_edges[1] * \
self.lattice_edges[2] / 1e9
self.lattice_angles = eval(data[25])
self.electron_configuration = data[26]
self.oxidation = data[27]
self.electronegativity = self._unit(data[28])
self.electron_affinity = self._unit(data[29])
self.first_ionization = self._unit(data[30])
self.Tf = self._unit(data[31])
self.Tb = self._unit(data[32])
if not self.Tf or not self.Tb:
self.phase = ""
elif self.Tf > 273.15:
self.phase = "Solid"
elif self.Tb < 273.15:
self.phase = "Gas"
else:
self.phase = "Liquid"
self.Heat_f = self._unit(data[33])
self.Heat_b = self._unit(data[34])
self.Cp = self._unit(data[35])
self.k = self._unit(data[36])
self.T_debye = self._unit(data[37])
self.color = data[38]
self.notes = data[39]
# Translation
self.name = data[1]
if tr_available:
qu = "SELECT name_%s FROM TRANSLATION WHERE id==%i" % (locale, id)
databank.execute(qu)
tr_name = databank.fetchone()[0]
if tr_name:
self.name = tr_name
# Isotopes
query = "SELECT * FROM ISOTOPES WHERE atomic_number==?" + \
"ORDER BY mass_number"
databank.execute(query, (self.id, ))
self.isotopes = []
for data in databank:
self.isotopes.append((int(data[4]), data[2], data[3]))
def _unit(self, str):
aproximate = False
try:
value = float(str)
except:
if not str:
value = None
elif str[-1] == ")":
value = float(str.split("(")[1].split(",")[0])
aproximate = True
if aproximate:
            value.code = "estimated"
return value
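# Minimal usage sketch (assumes the bundled elemental.db is reachable and the
# "pychemqt" environment variable is set; the printed values are indicative):
#   fe = Elemental(26)
#   print(fe.name, fe.symbol, fe.atomic_mass, fe.phase)
#   # -> e.g. Iron Fe 55.845 Solid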
| jjgomera/pychemqt | lib/elemental.py | Python | gpl-3.0 | 7,567 | 0.000264 |
#!/usr/bin/env python
import os
import sys
import argparse
import json
# tempate2json.py -k SYSTEM=lonestar.tacc.utexas.edu
# PATH=/home/vaughn/apps -i template.jsonx -o file.json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-k", dest='keys', help='Space-delimited VAR=Value sets', nargs='*')
parser.add_argument("-i", dest='input', help='Input (template.jsonx)')
parser.add_argument("-o", dest="output", help="Output (output.json)")
args = parser.parse_args()
try:
with open(args.input) as f:
s = f.read()
except TypeError, e:
print >> sys.stderr, "[FATAL]", "No filename was provided for -i"
sys.exit(1)
except IOError, e:
print >> sys.stderr, "[FATAL]", args.input, "was not available for reading"
print >> sys.stderr, "Exception: %s" % str(e)
sys.exit(1)
# Iterate through document, replacing variables with values
for kvp in args.keys:
try:
(key, val) = kvp.split('=')
except ValueError:
print '[WARN]', kvp, 'not a valid VAR=Value pair'
keyname = '${' + key + '}'
s = s.replace(keyname, val)
# Print out to JSON
jsonDoc = json.loads(s)
outpath = os.path.dirname(args.output)
if outpath:
if not os.path.exists(os.path.dirname(args.output)):
try:
os.makedirs(os.path.dirname(args.output))
except OSError as exc: # Guard against race condition
print >> sys.stderr, "Exception: %s" % str(exc)
sys.exit(1)
with open(args.output, 'w') as outfile:
json.dump(jsonDoc, outfile, indent=4)
| iPlantCollaborativeOpenSource/cyverse-sdk | src/scripts/template2json.py | Python | bsd-3-clause | 1,718 | 0.001164 |
#!/usr/bin/env python
#Takes a table and an index of exon lengths, calculates RPKMs
import sys
import os
import re
import fileinput
from decimal import Decimal
from decimal import getcontext
from fractions import Fraction
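# Note (illustrative arithmetic): each table value is divided by the exon
# length from the index for that gene, e.g. a raw value of 250.0 with an
# index length of 2.5 is written out as 250.0 / 2.5 = 100.00.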
def main(table,index):
indf = open(index)
out_file = "%s_rpkm.table" % os.path.splitext(table)[0]
dic = {}
for line in fileinput.input(index):
(key, val) = line.split('\t')
if val == 0:
print "We Ffd up at " + str(key)
dic[str(key.rstrip())] = Decimal(val)
print dic["Vocar20014554m.g.2.0"]
with open(out_file,'w') as fout:
start = True
tb = "\t"
for line in fileinput.input(table):
if not start:
listl = line.split('\t')
head = listl[0]
vals = listl[1:]
for i in xrange(len(vals)):
comp = Decimal(vals[i])
div = Decimal(dic[head.rstrip()])
print head.rstrip()
ot = "%.2f" % float(Decimal(comp)/div)
vals[i] = ot
fout.write(head+"\t")
fout.write(tb.join(vals)+"\n")
else:
start = False
fout.write(line+"\n")
if __name__ == '__main__':
print 'input table is: ' + sys.argv[1]
print 'input file is ' + sys.argv[2]
main(sys.argv[1],sys.argv[2])
print "completed"
| Luminarys/Bioinformatics | Scripts/GetRPKM.py | Python | gpl-2.0 | 1,450 | 0.006207 |
#! /usr/bin/env python
"""
#############################################################################################
#
#
# Name: LogFileAccessManager.py
#
# @author: Nicholas Lemay
#
# @license: MetPX Copyright (C) 2004-2006 Environment Canada
# MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
# named COPYING in the root of the source directory tree.
#
# Description : Utility class used to manage the access to the log files by the
# the pickle updater.
#                  pickle updater.
# Note : If this file is to be modified, please run the main() method at the bottom of this
# file to make sure everything still works properly. Feel free to add tests if needed.
#
# While using this class, you can either use only one file with all your entries
#    and give a different identifier to all of your entries, or you can use different
# files.
#
# Using a single file however can be problematic if numerous process try to update
# the file at the same time.
#
#############################################################################################
"""
import os, sys, commands, time
sys.path.insert(1, os.path.dirname( os.path.abspath(__file__) ) + '/../../')
from pxStats.lib.StatsPaths import StatsPaths
from pxStats.lib.CpickleWrapper import CpickleWrapper
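# A minimal usage sketch (the file name and identifier below are hypothetical, for illustration only):
#
#     manager = LogFileAccessManager( accessFile = "/tmp/myAccessFile" )
#     firstLine, position = manager.getFirstLineAndLastReadPositionAssociatedwith( "tx_someclient_machine1" )
#     ... read the log file starting from 'position', then record the new progress ...
#     manager.setFirstLineAndLastReadPositionAssociatedwith( newFirstLine, newPosition, "tx_someclient_machine1" )
#     manager.saveAccessDictionary()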
class LogFileAccessManager(object):
def __init__( self, accessDictionary = None, accessFile = "" ):
"""
@summary: LogFileAccessManager constructor.
            @param accessDictionary:
@param accessFile:
"""
paths = StatsPaths()
paths.setPaths()
if accessFile =="":
accessFile = paths.STATSLOGACCESS + "default"
        self.accessDictionary = accessDictionary or {} # Empty dictionary to start with.
        self.accessFile = accessFile # File that contains the current file access.
if self.accessDictionary == {} and os.path.isfile( self.accessFile ):
self.loadAccessFile()
def saveAccessDictionary( self ):
"""
@summary: Saves the current accessDictionary into the
accessfile.
"""
if not os.path.isdir( os.path.dirname( self.accessFile ) ):
os.makedirs( os.path.dirname( self.accessFile ) )
CpickleWrapper.save( self.accessDictionary, self.accessFile )
def loadAccessFile(self):
"""
@summary: Loads the accessFile into the accessDictionary.
"""
self.accessDictionary = CpickleWrapper.load( self.accessFile )
def getLineAssociatedWith( self, identifier ):
"""
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return: returns the first line of the last file accessed by the identifier.
If identifier has no associated line, the returned line will be "".
"""
line = ""
try:#In case the key does not exist.
line = self.accessDictionary[ identifier ][0]
except:#Pass keyerror
pass
return line
def getLastReadPositionAssociatedWith(self, identifier):
"""
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return: returns the last read position of the last file
accessed by the identifier. If no position is
associated with identifier will return 0.
"""
lastReadPositon = 0
try:#In case the key does not exist.
lastReadPositon = self.accessDictionary[ identifier ][1]
except:#Pass keyerror
pass
return lastReadPositon
def getFirstLineFromFile(self, fileName):
"""
@summary: Reads the first line of a file and returns it.
            @param fileName: File whose first line you want to read.
@return: The first line of the specified file.
"""
firstLine = ""
if os.path.isfile( fileName ):
fileHandle = open( fileName, "r")
firstLine = fileHandle.readline()
fileHandle.close()
return firstLine
def getFirstLineAndLastReadPositionAssociatedwith(self, identifier):
"""
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return : A tuple containing the first line of the last file
read(in string format) and the last read position
(int format).
"""
line = ""
lastReadPositon = 0
try:#In case the key does not exist.
line ,lastReadPositon = self.accessDictionary[ identifier ]
except:#Pass keyerror
pass
return line, lastReadPositon
def setFirstLineAssociatedwith(self, firstLine, identifier ):
"""
@summary: Simple setter that hides data structure implementation
so that methods still work if implementation is ever
to change.
@param firstLine: First line to set.
@param identifier:Identifier string of the following format:
fileType_client/sourcename_machineName
"""
currentLastReadPosition = self.getLastReadPositionAssociatedWith(identifier)
self.accessDictionary[ identifier ] = firstLine, currentLastReadPosition
def setLastReadPositionAssociatedwith(self, lastReadPosition, identifier ):
"""
@summary: Simple setter that hides data structure implementation
so that methods still work if implementation is ever
to change.
@param lastReadPosition: Position to set.
@param identifier:Identifier string of the following format:
fileType_client/sourcename_machineName
"""
currentFirstLine = self.getLineAssociatedWith(identifier)
self.accessDictionary[ identifier ] = currentFirstLine, lastReadPosition
def setFirstLineAndLastReadPositionAssociatedwith(self, firstLine, lastReadPosition, identifier ):
"""
@summary: Simple setter that hides data structure implementation
so that methods still work if implementation is ever
to change.
@param firstLine: First line to set.
@param lastReadPosition: Position to set.
@param identifier:Identifier string of the following format:
fileType_client/sourcename_machineName
"""
self.accessDictionary[ identifier ] = (firstLine, lastReadPosition)
def isTheLastFileThatWasReadByThisIdentifier(self, fileName, identifier ):
"""
@summary : Returns whether or not(True or False ) the specified file
was the last one read by the identifier.
@param fileName: Name fo the file to be verified.
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return: Returns whether or not(True or False ) the specified file
was the last one read by the identifier.
"""
lastFileThatWasRead = False
if os.path.isfile(fileName):
lastLineRead = self.getLineAssociatedWith(identifier)
filehandle = open( fileName, "r")
firstLineOfTheFile = filehandle.readline()
if lastLineRead == firstLineOfTheFile:
lastFileThatWasRead = True
filehandle.close()
return lastFileThatWasRead
def main():
"""
@summary: Small test case to see if everything works out well.
    @note: IMPORTANT if you modify this file, run this method
           to make sure it still passes all the tests. If tests are
           no longer valid, please modify accordingly.
"""
from LogFileAccessManager import LogFileAccessManager
paths = StatsPaths()
paths.setPaths()
#
# Create text file for testing.
#
testDirectory = paths.STATSDATA + "logFileAccessTestFolder/"
if not os.path.isdir( testDirectory ) :
os.makedirs(testDirectory)
testTextfile = testDirectory + "testTextfile"
fileHandle = open( testTextfile , 'w' )
old_stdout = sys.stdout #redirect standard output to the file
sys.stdout = fileHandle
for i in range(100):
print "%s-A line written for testing." %i
fileHandle.close()
sys.stdout = old_stdout #resets standard output
#
#Read file like normal file and stop in the middle.
#
fileHandle = open( testTextfile , 'r' )
for i in range(50):
fileHandle.readline()
lastReadPosition = fileHandle.tell()
fileHandle.close()
#
# Set LogFileAccessManager with the previous infos.
#
testFile = testDirectory + "testLFAMfile"
lfam = LogFileAccessManager( accessFile = testFile )
firstLine = lfam.getFirstLineFromFile( testTextfile )
lfam.setFirstLineAndLastReadPositionAssociatedwith( firstLine, lastReadPosition, "testId" )
#
# Unit-like test every method to make sure the result is what is expected.
# Section for getters.
#
if firstLine != "0-A line written for testing.\n":
print "getFirstLineFromFile is corrupted. Please repair "
if lfam.getFirstLineAndLastReadPositionAssociatedwith("testId") != ("0-A line written for testing.\n",1540 ):
print "getFirstLineAndLastReadPositionAssociatedwith is corrupted. Please repair."
if lfam.getLastReadPositionAssociatedWith( "testId" ) != 1540:
print "getLastReadPositionAssociatedWith is corrupted. Please repair."
#
# Section for testing Setters
#
lfam.setFirstLineAssociatedwith("firstLine", 'testId')
if lfam.getLineAssociatedWith('testId') != 'firstLine':
print "setFirstLineAssociatedwith is corrupted. Please repair."
lfam.setLastReadPositionAssociatedwith( 18987, 'testId')
if lfam.getLastReadPositionAssociatedWith('testId') != 18987:
print "setLastReadPositionAssociatedwith is corrupted. Please repair."
lfam.setFirstLineAndLastReadPositionAssociatedwith("testline2", 1285647, 'testId')
if lfam.getFirstLineAndLastReadPositionAssociatedwith('testId') != ("testline2", 1285647):
print "setFirstLineAndLastReadPositionAssociatedwith is corrupted. Please repair."
lfam.saveAccessDictionary()
lfam.loadAccessFile()
if lfam.getFirstLineAndLastReadPositionAssociatedwith('testId') != ("testline2", 1285647):
print "saveAccessDictionary and/or loadAccessFile is corrupted. Please repair."
print "Testing done."
if __name__ == '__main__':
main()
| khosrow/metpx | pxStats/lib/LogFileAccessManager.py | Python | gpl-2.0 | 12,296 | 0.024723 |
# coding=utf-8
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from unittest import mock
except ImportError:
import mock
from ansible.module_utils.netapp import NetAppESeriesModule
from ansible.modules.storage.netapp.netapp_e_volume import NetAppESeriesVolume
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
class NetAppESeriesVolumeTest(ModuleTestCase):
REQUIRED_PARAMS = {"api_username": "username",
"api_password": "password",
"api_url": "http://localhost/devmgr/v2",
"ssid": "1",
"validate_certs": "no"}
THIN_VOLUME_RESPONSE = [{"capacity": "1288490188800",
"volumeRef": "3A000000600A098000A4B28D000010475C405428",
"status": "optimal",
"protectionType": "type1Protection",
"maxVirtualCapacity": "281474976710656",
"initialProvisionedCapacity": "4294967296",
"currentProvisionedCapacity": "4294967296",
"provisionedCapacityQuota": "1305670057984",
"growthAlertThreshold": 85,
"expansionPolicy": "automatic",
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000001000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "volume"}],
"dataAssurance": True,
"segmentSize": 131072,
"diskPool": True,
"listOfMappings": [],
"mapped": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 0},
"name": "thin_volume",
"id": "3A000000600A098000A4B28D000010475C405428"}]
VOLUME_GET_RESPONSE = [{"offline": False,
"raidLevel": "raid6",
"capacity": "214748364800",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F095C2F7F31",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Clare"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "214748364800",
"name": "Matthew",
"id": "02000000600A098000A4B9D100000F095C2F7F31"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B28D00000FBE5C2F7F26",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Samantha"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Samantha",
"id": "02000000600A098000A4B28D00000FBE5C2F7F26"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F0B5C2F7F40",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"volumeGroupRef": "04000000600A098000A4B9D100000F085C2F7F26",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Micah"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Micah",
"id": "02000000600A098000A4B9D100000F0B5C2F7F40"}]
STORAGE_POOL_GET_RESPONSE = [{"offline": False,
"raidLevel": "raidDiskPool",
"volumeGroupRef": "04000000600A",
"securityType": "capable",
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "diskPool",
"diskPoolData": {"reconstructionReservedDriveCount": 1,
"reconstructionReservedAmt": "296889614336",
"reconstructionReservedDriveCountCurrent": 1,
"poolUtilizationWarningThreshold": 0,
"poolUtilizationCriticalThreshold": 85,
"poolUtilizationState": "utilizationOptimal",
"unusableCapacity": "0",
"degradedReconstructPriority": "high",
"criticalReconstructPriority": "highest",
"backgroundOperationPriority": "low",
"allocGranularity": "4294967296"}},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "863288426496",
"totalRaidedSpace": "2276332666880",
"raidStatus": "optimal",
"freeSpace": "1413044240384",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": True,
"id": "04000000600A098000A4B9D100000F085C2F7F26",
"name": "employee_data_storage_pool"},
{"offline": False,
"raidLevel": "raid1",
"volumeGroupRef": "04000000600A098000A4B28D00000FBD5C2F7F19",
"state": "complete",
"securityType": "capable",
"drawerLossProtection": False,
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "unknown", "diskPoolData": None},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "322122547200",
"totalRaidedSpace": "598926258176",
"raidStatus": "optimal",
"freeSpace": "276803710976",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": False,
"id": "04000000600A098000A4B28D00000FBD5C2F7F19",
"name": "database_storage_pool"}]
GET_LONG_LIVED_OPERATION_RESPONSE = [
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]}]
WORKLOAD_GET_RESPONSE = [{"id": "4200000001000000000000000000000000000000", "name": "general_workload_1",
"workloadAttributes": [{"key": "profileId", "value": "Other_1"}]},
{"id": "4200000002000000000000000000000000000000", "name": "employee_data",
"workloadAttributes": [{"key": "use", "value": "EmployeeData"},
{"key": "location", "value": "ICT"},
{"key": "private", "value": "public"},
{"key": "profileId", "value": "ansible_workload_1"}]},
{"id": "4200000003000000000000000000000000000000", "name": "customer_database",
"workloadAttributes": [{"key": "use", "value": "customer_information"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_2"}]},
{"id": "4200000004000000000000000000000000000000", "name": "product_database",
"workloadAttributes": [{"key": "use", "value": "production_information"},
{"key": "security", "value": "private"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_4"}]}]
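    # Dotted import paths patched with mock.patch in the tests below; the canned
    # GET_LONG_LIVED_OPERATION_RESPONSE entries above mark one more action as complete
    # with each successive response, letting the wait_for_* tests step through polling.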
REQUEST_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.NetAppESeriesVolume.request"
GET_VOLUME_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.NetAppESeriesVolume.get_volume"
SLEEP_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.sleep"
def _set_args(self, args=None):
module_args = self.REQUIRED_PARAMS.copy()
if args is not None:
module_args.update(args)
set_module_args(module_args)
def test_module_arguments_pass(self):
"""Ensure valid arguments successful create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1024,
"thin_volume_growth_alert_threshold": 99},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "kb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 67108864}]
# validate size normalization
for arg_set in arg_sets:
self._set_args(arg_set)
volume_object = NetAppESeriesVolume()
size_unit_multiplier = NetAppESeriesModule.SIZE_UNIT_MAP[arg_set["size_unit"]]
self.assertEqual(volume_object.size_b, arg_set["size"] * size_unit_multiplier)
self.assertEqual(volume_object.thin_volume_repo_size_b,
arg_set["thin_volume_repo_size"] * size_unit_multiplier)
self.assertEqual(volume_object.thin_volume_expansion_policy, "automatic")
if "thin_volume_max_repo_size" not in arg_set.keys():
self.assertEqual(volume_object.thin_volume_max_repo_size_b, arg_set["size"] * size_unit_multiplier)
else:
self.assertEqual(volume_object.thin_volume_max_repo_size_b,
arg_set["thin_volume_max_repo_size"] * size_unit_multiplier)
# validate metadata form
self._set_args(
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10, "workload_name": "workload1",
"metadata": {"availability": "public", "security": "low"}})
volume_object = NetAppESeriesVolume()
for entry in volume_object.metadata:
self.assertTrue(entry in [{'value': 'low', 'key': 'security'}, {'value': 'public', 'key': 'availability'}])
def test_module_arguments_fail(self):
"""Ensure invalid arguments values do not create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 260},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 9},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 100}]
for arg_set in arg_sets:
with self.assertRaises(AnsibleFailJson):
self._set_args(arg_set)
print(arg_set)
volume_object = NetAppESeriesVolume()
def test_get_volume_pass(self):
"""Evaluate the get_volume method."""
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(),
[entry for entry in self.VOLUME_GET_RESPONSE if entry["name"] == "Matthew"][0])
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "NotAVolume", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(), {})
def test_get_volume_fail(self):
"""Evaluate the get_volume exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thick volumes."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thin volumes."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.VOLUME_GET_RESPONSE), Exception()]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
def tests_wait_for_volume_availability_pass(self):
"""Ensure wait_for_volume_availability completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.GET_VOLUME_FUNC, side_effect=[False, False, True]):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_availability_fail(self):
"""Ensure wait_for_volume_availability throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.get_volume = lambda: False
with self.assertRaisesRegexp(AnsibleFailJson, "Timed out waiting for the volume"):
with mock.patch(self.SLEEP_FUNC, return_value=None):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_action_pass(self):
"""Ensure wait_for_volume_action completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315D494C6F",
"storageVolumeRef": "02000000600A098000A4B9D1000037315DXXXXXX"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
def tests_wait_for_volume_action_fail(self):
"""Ensure wait_for_volume_action throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get volume expansion progress."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.wait_for_volume_action()
with self.assertRaisesRegexp(AnsibleFailJson, "Expansion action failed to complete."):
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0])):
volume_object.wait_for_volume_action(timeout=300)
def test_get_storage_pool_pass(self):
"""Evaluate the get_storage_pool method."""
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.STORAGE_POOL_GET_RESPONSE)):
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool",
"size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), [entry for entry in self.STORAGE_POOL_GET_RESPONSE if
entry["name"] == "employee_data_storage_pool"][0])
self._set_args(
{"state": "present", "name": "NewVolume", "storage_pool_name": "NotAStoragePool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), {})
def test_get_storage_pool_fail(self):
"""Evaluate the get_storage_pool exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of storage pools."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_storage_pool()
def test_check_storage_pool_sufficiency_pass(self):
"""Ensure passing logic."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "employee_data_storage_pool"][0]
volume_object.check_storage_pool_sufficiency()
def test_check_storage_pool_sufficiency_fail(self):
"""Validate exceptions are thrown for insufficient storage pool resources."""
self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 10})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Requested storage pool"):
volume_object.check_storage_pool_sufficiency()
with self.assertRaisesRegexp(AnsibleFailJson,
"Thin provisioned volumes can only be created on raid disk pools."):
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "database_storage_pool"][0]
volume_object.volume_detail = {}
volume_object.check_storage_pool_sufficiency()
with self.assertRaisesRegexp(AnsibleFailJson, "requires the storage pool to be DA-compatible."):
volume_object.pool_detail = {"diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type0Protection",
"protectionInformationCapable": False}}
volume_object.volume_detail = {}
volume_object.data_assurance_enabled = True
volume_object.check_storage_pool_sufficiency()
volume_object.pool_detail = {"diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type2Protection",
"protectionInformationCapable": True}}
volume_object.check_storage_pool_sufficiency()
self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": False})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson,
"Not enough storage pool free space available for the volume's needs."):
volume_object.pool_detail = {"freeSpace": 10, "diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type2Protection",
"protectionInformationCapable": True}}
volume_object.volume_detail = {"totalSizeInBytes": 100}
volume_object.data_assurance_enabled = True
volume_object.size_b = 1
volume_object.check_storage_pool_sufficiency()
def test_update_workload_tags_pass(self):
"""Validate updating workload tags."""
test_sets = [[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "global"}}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "local"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "global", "importance": "no"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "newWorkload",
"metadata": {"for_testing": "yes"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "newWorkload"}, True]]
for test in test_sets:
self._set_args(test[0])
volume_object = NetAppESeriesVolume()
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), (200, {"id": 1})]):
self.assertEqual(volume_object.update_workload_tags(), test[1])
def test_update_workload_tags_fail(self):
"""Validate updating workload tags fails appropriately."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage array workload tags."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data", "metadata": {"key": "not-use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data2", "metadata": {"key": "use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
def test_get_volume_property_changes_pass(self):
"""Verify correct dictionary is returned"""
# no property changes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), dict())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1},
"flashCached": True, "growthAlertThreshold": "90",
"expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), dict())
# property changes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": False, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": False,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": False, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": False,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), {"metaTags": [],
'cacheSettings': {'readCacheEnable': True,
'writeCacheEnable': True,
'readAheadEnable': False},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1},
"flashCached": True, "growthAlertThreshold": "95",
"expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'growthAlertThreshold': 90, 'flashCache': True})
def test_get_volume_property_changes_fail(self):
"""Verify correct exception is thrown"""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True, "readAheadMultiplier": 1},
"flashCached": True, "segmentSize": str(512 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "Existing volume segment size is"):
volume_object.get_volume_property_changes()
def test_get_expand_volume_changes_pass(self):
"""Verify expansion changes."""
# thick volumes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": False}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "expansionSize": 100 * 1024 * 1024 * 1024})
# thin volumes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "automatic",
"provisionedCapacityQuota": str(1000 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newVirtualSize": 100 * 1024 * 1024 * 1024})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "automatic",
"provisionedCapacityQuota": str(500 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newRepositorySize": 1000 * 1024 * 1024 * 1024})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 504, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newRepositorySize": 504 * 1024 * 1024 * 1024})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 756, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newRepositorySize": 756 * 1024 * 1024 * 1024})
def test_get_expand_volume_changes_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(1000 * 1024 * 1024 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "Reducing the size of volumes is not permitted."):
volume_object.get_expand_volume_changes()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 502, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
volume_object.get_expand_volume_changes()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
volume_object.get_expand_volume_changes()
def test_create_volume_pass(self):
"""Verify volume creation."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.create_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.create_volume()
def test_create_volume_fail(self):
"""Verify exceptions thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.create_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.create_volume()
def test_update_volume_properties_pass(self):
"""verify property update."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"metadata": [{"key": "workloadId", "value": "12345"}]}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {}
volume_object.workload_id = "4200000001000000000000000000000000000000"
self.assertFalse(volume_object.update_volume_properties())
def test_update_volume_properties_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update volume properties."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update thin volume properties."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self.assertTrue(volume_object.update_volume_properties())
def test_expand_volume_pass(self):
"""Verify volume expansion."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.expand_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.expand_volume()
def test_expand_volume_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": False}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.expand_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.expand_volume()
def test_delete_volume_pass(self):
"""Verify volume deletion."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.delete_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.delete_volume()
def test_delete_volume_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.delete_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.delete_volume()
| cchurch/ansible | test/units/modules/storage/netapp/test_netapp_e_volume.py | Python | gpl-3.0 | 58,947 | 0.005038 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""A writer for XYZ (Cartesian coordinate) files."""
from . import filewriter
class XYZ(filewriter.Writer):
"""A writer for XYZ (Cartesian coordinate) files."""
def __init__(self, ccdata, splitfiles=False,
firstgeom=False, lastgeom=True, allgeom=False,
*args, **kwargs):
"""Initialize the XYZ writer object.
Inputs:
ccdata - An instance of ccData, parse from a logfile.
splitfiles - Boolean to write multiple files if multiple files are requested. [TODO]
firstgeom - Boolean to write the first available geometry from the logfile.
lastgeom - Boolean to write the last available geometry from the logfile.
allgeom - Boolean to write all available geometries from the logfile.
"""
# Call the __init__ method of the superclass
super(XYZ, self).__init__(ccdata, *args, **kwargs)
self.do_firstgeom = firstgeom
self.do_lastgeom = lastgeom
self.do_allgeom = allgeom
self.generate_repr()
def generate_repr(self):
"""Generate the XYZ representation of the logfile data."""
# Options for output (to a single file):
# 1. Write all geometries from an optimization, which programs like VMD
# can read in like a trajectory.
# 2. Write the final converged geometry, which for any job other than
# a geometry optimization would be the single/only geometry.
# 3. Write the very first geometry, which for any job other than a
# geometry optimization would be the single/only geometry.
# 4. Write the first and last geometries from a geometry optimization.
        # Options for output (to multiple files):
# 1. Write all geometries from an optimization, to suitably named files. [TODO]
xyzblock = []
lencoords = len(self.ccdata.atomcoords)
if lencoords == 1:
xyzblock.append(self._xyz_from_ccdata(-1))
elif self.do_allgeom:
for index in range(lencoords):
xyzblock.append(self._xyz_from_ccdata(index))
elif self.do_firstgeom and self.do_lastgeom:
xyzblock.append(self._xyz_from_ccdata(0))
xyzblock.append(self._xyz_from_ccdata(-1))
elif self.do_firstgeom:
xyzblock.append(self._xyz_from_ccdata(0))
elif self.do_lastgeom:
xyzblock.append(self._xyz_from_ccdata(-1))
# If none of the options are set, return the empty string.
else:
xyzblock.append("")
return '\n'.join(xyzblock)
def _xyz_from_ccdata(self, index):
"""Create an XYZ file of the geometry at the given index."""
natom = str(self.ccdata.natom)
element_list = [self.pt.element[Z] for Z in self.ccdata.atomnos]
atomcoords = self.ccdata.atomcoords[index]
# Create a comment derived from the filename and the index.
if index == -1:
geometry_num = len(self.ccdata.atomcoords)
else:
geometry_num = index + 1
if self.jobfilename is not None:
comment = "{}: Geometry {}".format(self.jobfilename, geometry_num)
else:
comment = "Geometry {}".format(geometry_num)
atom_template = '{:3s} {:15.10f} {:15.10f} {:15.10f}'
block = []
block.append(natom)
block.append(comment)
for element, (x, y, z) in zip(element_list, atomcoords):
block.append(atom_template.format(element, x, y, z))
return '\n'.join(block)
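# A minimal usage sketch (assumes `data` is a ccData instance parsed elsewhere with cclib):
#
#     xyz = XYZ(data, allgeom=True)
#     print(xyz.generate_repr())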
if __name__ == "__main__":
pass
| andersx/cclib | src/cclib/writer/xyzwriter.py | Python | lgpl-2.1 | 4,078 | 0.000981 |
#!/usr/bin/env python
from setuptools import setup
if __name__ == "__main__":
setup()
| centaurialpha/pireal | setup.py | Python | gpl-3.0 | 91 | 0 |
from __future__ import print_function
from glyphNameFormatter.tools import camelCase
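# Unicode ranges (Arabic Presentation Forms) whose names keep their positional form
# (init/medi/fina/isol) instead of being treated as ligatures.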
doNotProcessAsLigatureRanges = [
(0xfc5e, 0xfc63),
(0xfe70, 0xfe74),
#(0xfc5e, 0xfc61),
(0xfcf2, 0xfcf4),
(0xfe76, 0xfe80),
]
def process(self):
# Specifically: do not add suffixes to these ligatures,
# they're really arabic marks
for a, b in doNotProcessAsLigatureRanges:
if a <= self.uniNumber <= b:
self.replace('TAIL FRAGMENT', "kashida Fina")
self.replace('INITIAL FORM', "init")
self.replace('MEDIAL FORM', "medi")
self.replace('FINAL FORM', "fina")
self.replace('ISOLATED FORM', "isol")
self.replace('WITH SUPERSCRIPT', "")
self.replace('WITH', "")
self.replace("LIGATURE", "")
self.replace("ARABIC", "")
self.replace("SYMBOL", "")
self.replace("LETTER", "")
self.lower()
self.camelCase()
return True
return False
if __name__ == "__main__":
from glyphNameFormatter import GlyphName
print("\ndoNotProcessAsLigatureRanges", doNotProcessAsLigatureRanges)
odd = 0xfe76
for a, b in doNotProcessAsLigatureRanges:
for u in range(a,b+1):
try:
g = GlyphName(uniNumber=u)
n = g.getName()
print(hex(u), n, g.uniName)
except:
import traceback
traceback.print_exc()
| LettError/glyphNameFormatter | Lib/glyphNameFormatter/rangeProcessors/helper_arabic_ligature_exceptions.py | Python | bsd-3-clause | 1,516 | 0.005937 |
#!/usr/bin/env python
# coding=utf-8
import traceback
try:
    raise SyntaxError("traceback test")
except:
traceback.print_exc()
| zhaochl/python-utils | utils/except_util.py | Python | apache-2.0 | 134 | 0.014925 |
#
# (C) Copyright 2001/2002 Kai Sterker <kaisterker@linuxgames.com>
# Part of the Adonthell Project http://adonthell.linuxgames.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY.
#
# See the COPYING file for more details
#
# -- Movement schedule for Sarin Trailfollower
#
# He walks from one end of the room to the other. From time to
# to time he'll stop and chose another direction
import adonthell
import schedule
import random
def _(message): return message
class sarin (schedule.speak):
def __init__ (self, mapcharacterinstance):
self.myself = mapcharacterinstance
# -- Borders of the area he should stay in
self.min_x = 1
self.max_x = 6
self.min_y = 2
self.max_y = 6
self.direction = self.myself.get_val ("direction")
# -- make random remarks
self.speech = [_("Ruffians, the lot of them!"), \
_("How dare they imprison one better than they?"), \
_("This is an insult to all of the High Born."), \
_("I cannot believe such disrespect. Barbarians!")]
self.speech_delay = (20, 40)
schedule.speak.__init__(self)
self.myself.set_callback (self.goal_reached)
def switch_direction (self):
# -- ... and set the new one accordingly
if self.direction == adonthell.WALK_EAST or self.direction == adonthell.WALK_WEST:
self.direction = random.randrange (adonthell.WALK_NORTH, adonthell.WALK_SOUTH + 1)
else:
self.direction = random.randrange (adonthell.WALK_WEST, adonthell.WALK_EAST + 1)
delay = "%it" % random.randrange (30, 60)
self.myself.time_callback (delay, self.switch_direction)
self.walk ()
def walk (self):
# -- switch direction
if self.direction == adonthell.WALK_NORTH:
goal = (self.myself.posx (), self.min_y, adonthell.STAND_SOUTH, 0, 1)
elif self.direction == adonthell.WALK_SOUTH:
goal = (self.myself.posx (), self.max_y, adonthell.STAND_NORTH, 0, -1)
elif self.direction == adonthell.WALK_EAST:
goal = (self.max_x, self.myself.posy (), adonthell.STAND_WEST, -1, 0)
else:
goal = (self.min_x, self.myself.posy (), adonthell.STAND_EAST, 1, 0)
x, y, d = goal[:3]
self.direction = d + 4
while not self.myself.set_goal (x, y, d):
offx, offy = goal [-2:]
x = x + offx
y = y + offy
def goal_reached (self):
delay = "%it" % random.randrange (3, 6)
self.myself.time_callback (delay, self.walk)
| ksterker/wastesedge | scripts/schedules/mapcharacters/sarin.py | Python | gpl-2.0 | 2,833 | 0.012354 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.util
from cliff import command
from cliff import lister
from cliff import show
from gnocchiclient import exceptions
from gnocchiclient import utils
class CliResourceList(lister.Lister):
"""List resources."""
COLS = ('id', 'type',
'project_id', 'user_id',
'original_resource_id',
'started_at', 'ended_at',
'revision_start', 'revision_end')
def get_parser(self, prog_name, history=True):
parser = super(CliResourceList, self).get_parser(prog_name)
parser.add_argument("--details", action='store_true',
help="Show all attributes of generic resources"),
if history:
parser.add_argument("--history", action='store_true',
help="Show history of the resources"),
parser.add_argument("--limit", type=int, metavar="<LIMIT>",
help="Number of resources to return "
"(Default is server default)")
parser.add_argument("--marker", metavar="<MARKER>",
help="Last item of the previous listing. "
"Return the next results after this value")
parser.add_argument("--sort", action="append", metavar="<SORT>",
                            help="Sort of resource attribute "
                            "(example: user_id:desc-nullslast)")
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
return parser
def _list2cols(self, resources):
"""Return a formatted list of resources."""
if not resources:
return self.COLS, []
cols = list(self.COLS)
for k in resources[0]:
if k not in cols:
cols.append(k)
if 'creator' in cols:
cols.remove('created_by_user_id')
cols.remove('created_by_project_id')
return utils.list2cols(cols, resources)
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.list(
resource_type=parsed_args.resource_type,
**utils.get_pagination_options(parsed_args))
# Do not dump metrics because it makes the list way too long
for r in resources:
del r['metrics']
return self._list2cols(resources)
class CliResourceHistory(CliResourceList):
"""Show the history of a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceHistory, self).get_parser(prog_name,
history=False)
parser.add_argument("resource_id",
help="ID of a resource")
return parser
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.history(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id,
**utils.get_pagination_options(parsed_args))
if parsed_args.formatter == 'table':
return self._list2cols(list(map(normalize_metrics, resources)))
return self._list2cols(resources)
class CliResourceSearch(CliResourceList):
"""Search resources with specified query rules."""
def get_parser(self, prog_name):
parser = super(CliResourceSearch, self).get_parser(prog_name)
utils.add_query_argument("query", parser)
return parser
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.search(
resource_type=parsed_args.resource_type,
query=parsed_args.query,
**utils.get_pagination_options(parsed_args))
# Do not dump metrics because it makes the list way too long
for r in resources:
del r['metrics']
return self._list2cols(resources)
def normalize_metrics(res):
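    # Flatten the metrics dict into one "name: metric id" line per metric so
    # it renders readably in table output.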
res['metrics'] = "\n".join(sorted(
["%s: %s" % (name, _id)
for name, _id in res['metrics'].items()]))
return res
class CliResourceShow(show.ShowOne):
"""Show a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceShow, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
parser.add_argument("resource_id",
help="ID of a resource")
return parser
def take_action(self, parsed_args):
res = utils.get_client(self).resource.get(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceCreate(show.ShowOne):
"""Create a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceCreate, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
parser.add_argument("resource_id",
help="ID of the resource")
parser.add_argument("-a", "--attribute", action='append',
default=[],
help=("name and value of an attribute "
"separated with a ':'"))
parser.add_argument("-m", "--add-metric", action='append',
default=[],
help="name:id of a metric to add"),
parser.add_argument(
"-n", "--create-metric", action='append', default=[],
help="name:archive_policy_name of a metric to create"),
return parser
def _resource_from_args(self, parsed_args, update=False):
# Get the resource type to set the correct type
rt_attrs = utils.get_client(self).resource_type.get(
name=parsed_args.resource_type)['attributes']
resource = {}
if not update:
resource['id'] = parsed_args.resource_id
if parsed_args.attribute:
for attr in parsed_args.attribute:
attr, __, value = attr.partition(":")
attr_type = rt_attrs.get(attr, {}).get('type')
if attr_type == "number":
value = float(value)
elif attr_type == "bool":
value = bool(distutils.util.strtobool(value))
resource[attr] = value
if (parsed_args.add_metric or
parsed_args.create_metric or
(update and parsed_args.delete_metric)):
if update:
r = utils.get_client(self).resource.get(
parsed_args.resource_type,
parsed_args.resource_id)
default = r['metrics']
for metric_name in parsed_args.delete_metric:
try:
del default[metric_name]
except KeyError:
raise exceptions.MetricNotFound(
message="Metric name %s not found" % metric_name)
else:
default = {}
resource['metrics'] = default
for metric in parsed_args.add_metric:
name, _, value = metric.partition(":")
resource['metrics'][name] = value
for metric in parsed_args.create_metric:
name, _, value = metric.partition(":")
if value:
resource['metrics'][name] = {'archive_policy_name': value}
else:
resource['metrics'][name] = {}
return resource
def take_action(self, parsed_args):
resource = self._resource_from_args(parsed_args)
res = utils.get_client(self).resource.create(
resource_type=parsed_args.resource_type, resource=resource)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceUpdate(CliResourceCreate):
"""Update a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceUpdate, self).get_parser(prog_name)
parser.add_argument("-d", "--delete-metric", action='append',
default=[],
help="Name of a metric to delete"),
return parser
def take_action(self, parsed_args):
resource = self._resource_from_args(parsed_args, update=True)
res = utils.get_client(self).resource.update(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id,
resource=resource)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceDelete(command.Command):
"""Delete a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceDelete, self).get_parser(prog_name)
parser.add_argument("resource_id",
help="ID of the resource")
return parser
def take_action(self, parsed_args):
utils.get_client(self).resource.delete(parsed_args.resource_id)
class CliResourceBatchDelete(show.ShowOne):
"""Delete a batch of resources based on attribute values."""
def get_parser(self, prog_name):
parser = super(CliResourceBatchDelete, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
utils.add_query_argument("query", parser)
return parser
def take_action(self, parsed_args):
res = utils.get_client(self).resource.batch_delete(
resource_type=parsed_args.resource_type,
query=parsed_args.query)
return self.dict2columns(res)
| gnocchixyz/python-gnocchiclient | gnocchiclient/v1/resource_cli.py | Python | apache-2.0 | 10,591 | 0 |
import shutil
import os
import jinja2
import string
import subprocess
import re
from xen.provisioning.HdManager import HdManager
from settings.settingsLoader import OXA_XEN_SERVER_KERNEL,OXA_XEN_SERVER_INITRD,OXA_DEBIAN_INTERFACES_FILE_LOCATION,OXA_DEBIAN_UDEV_FILE_LOCATION, OXA_DEBIAN_HOSTNAME_FILE_LOCATION, OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION
from utils.Logger import Logger
class OfeliaDebianVMConfigurator:
logger = Logger.getLogger()
''' Private methods '''
@staticmethod
def __configureInterfacesFile(vm,iFile):
#Loopback
iFile.write("auto lo\niface lo inet loopback\n\n")
#Interfaces
for inter in vm.xen_configuration.interfaces.interface :
if inter.ismgmt:
#is a mgmt interface
interfaceString = "auto "+inter.name+"\n"+\
"iface "+inter.name+" inet static\n"+\
"\taddress "+inter.ip +"\n"+\
"\tnetmask "+inter.mask+"\n"
if inter.gw != None and inter.gw != "":
interfaceString +="\tgateway "+inter.gw+"\n"
if inter.dns1 != None and inter.dns1 != "":
interfaceString+="\tdns-nameservers "+inter.dns1
if inter.dns2 != None and inter.dns2 != "":
interfaceString+=" "+inter.dns2
interfaceString +="\n\n"
iFile.write(interfaceString)
else:
#is a data interface
iFile.write("auto "+inter.name+"\n\n")
@staticmethod
def __configureUdevFile(vm,uFile):
for inter in vm.xen_configuration.interfaces.interface:
uFile.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="'+inter.mac+'", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="'+inter.name+'"\n')
@staticmethod
def __configureHostname(vm,hFile):
hFile.write(vm.name)
@staticmethod
def __createParavirtualizationFileHdConfigFile(vm,env):
template_name = "paraVirtualizedFileHd.pt"
template = env.get_template(template_name)
#Set vars&render
output = template.render(
kernelImg=OXA_XEN_SERVER_KERNEL,
initrdImg=OXA_XEN_SERVER_INITRD,
hdFilePath=HdManager.getHdPath(vm),
swapFilePath=HdManager.getSwapPath(vm),
vm=vm)
#write file
cfile = open(HdManager.getConfigFilePath(vm),'w')
cfile.write(output)
cfile.close()
''' Public methods '''
@staticmethod
def getIdentifier():
return OfeliaDebianVMConfigurator.__name__
@staticmethod
def _configureNetworking(vm,path):
#Configure interfaces and udev settings
try:
try:
#Backup current files
shutil.copy(path+OXA_DEBIAN_INTERFACES_FILE_LOCATION,path+OXA_DEBIAN_INTERFACES_FILE_LOCATION+".bak")
shutil.copy(path+OXA_DEBIAN_UDEV_FILE_LOCATION,path+OXA_DEBIAN_UDEV_FILE_LOCATION+".bak")
except Exception as e:
pass
with open(path+OXA_DEBIAN_INTERFACES_FILE_LOCATION,'w') as openif:
OfeliaDebianVMConfigurator.__configureInterfacesFile(vm,openif)
with open(path+OXA_DEBIAN_UDEV_FILE_LOCATION,'w') as openudev:
OfeliaDebianVMConfigurator.__configureUdevFile(vm,openudev)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error(str(e))
raise Exception("Could not configure interfaces or Udev file")
@staticmethod
def _configureLDAPSettings(vm,path):
try:
file = open(path+OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "r")
text = file.read()
file.close()
file = open(path+OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "w")
            #Escape spaces and tabs
projectName = string.replace(vm.project_name,' ','_')
projectName = string.replace(projectName,'\t','__')
file.write(text.replace("__projectId","@proj_"+vm.project_id+"_"+projectName))
file.close()
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Could not configure LDAP file!! - "+str(e))
@staticmethod
def _configureHostName(vm,path):
try:
with open(path+OXA_DEBIAN_HOSTNAME_FILE_LOCATION,'w') as openhost:
OfeliaDebianVMConfigurator.__configureHostname(vm, openhost)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Could not configure hostname;skipping.. - "+str(e))
@staticmethod
def _configureSSHServer(vm,path):
try:
OfeliaDebianVMConfigurator.logger.debug("Regenerating SSH keys...\n Deleting old keys...")
subprocess.check_call("rm -f "+path+"/etc/ssh/ssh_host_*", shell=True, stdout=None)
#subprocess.check_call("chroot "+path+" dpkg-reconfigure openssh-server ", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH1 key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_key -N '' -t rsa1", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH2 RSA key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_rsa_key -N '' -t rsa", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH2 DSA key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_dsa_key -N '' -t dsa", shell=True, stdout=None)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Fatal error; could not regenerate SSH keys. Aborting to prevent VM to be unreachable..."+str(e))
raise e
#Public methods
@staticmethod
def createVmConfigurationFile(vm):
#get env
template_dirs = []
template_dirs.append(os.path.join(os.path.dirname(__file__), 'templates/'))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs))
if vm.xen_configuration.hd_setup_type == "file-image" and vm.xen_configuration.virtualization_setup_type == "paravirtualization" :
OfeliaDebianVMConfigurator.__createParavirtualizationFileHdConfigFile(vm,env)
else:
raise Exception("type of file or type of virtualization not supported for the creation of xen vm configuration file")
@staticmethod
def configureVmDisk(vm, path):
if not path or not re.match(r'[\s]*\/\w+\/\w+\/.*', path,re.IGNORECASE): #For security, should never happen anyway
raise Exception("Incorrect vm path")
#Configure networking
OfeliaDebianVMConfigurator._configureNetworking(vm,path)
OfeliaDebianVMConfigurator.logger.info("Network configured successfully...")
#Configure LDAP settings
OfeliaDebianVMConfigurator._configureLDAPSettings(vm,path)
OfeliaDebianVMConfigurator.logger.info("Authentication configured successfully...")
#Configure Hostname
OfeliaDebianVMConfigurator._configureHostName(vm,path)
OfeliaDebianVMConfigurator.logger.info("Hostname configured successfully...")
#Regenerate SSH keys
OfeliaDebianVMConfigurator._configureSSHServer(vm,path)
        OfeliaDebianVMConfigurator.logger.info("SSH keys have been regenerated...")
| avlach/univbris-ocf | vt_manager/src/python/agent/xen/provisioning/configurators/ofelia/OfeliaDebianVMConfigurator.py | Python | bsd-3-clause | 6,611 | 0.039782 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
class UtilsTests(TestCase):
"""docstring for UtilsTests"""
def setUp(self):
self.username = 'theskumar'
self.email = 'theskumar@example.com'
def test_foo(self):
self.assertEqual('foo', "foo")
| theskumar/django-unsubscribe | unsubscribe/tests/test_utils.py | Python | bsd-3-clause | 329 | 0 |
import numpy as np
import matplotlib.pyplot as pl
def f(x):
return np.exp(-x**2)
def main():
N = 100000
    x = np.arange(N, dtype=float)
x[0] = 0.2
counter = 0
for i in range(0, N-1):
x_next = np.random.normal(x[i], 1.)
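        # Metropolis acceptance test: always accept moves to higher density,
        # otherwise accept with probability f(x_next) / f(x[i]).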
if np.random.random_sample() < min(1, f(x_next)/f(x[i])):
x[i+1] = x_next
counter = counter + 1
else:
x[i+1] = x[i]
print("acceptance fraction is ", counter/float(N))
pl.hist(x, bins=50, color='blue')
pl.show()
if __name__ == '__main__':
main()
| fbeutler/Metropolis-Hastings | Metropolis_Hastings2.py | Python | gpl-2.0 | 579 | 0.008636 |
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom hook for Google Ads UAC.
For UAC details refer to
https://developers.google.com/adwords/api/docs/guides/mobile-app-campaigns
"""
import enum
import json
import re
from typing import Any, Dict, Optional
import urllib.parse
from airflow.hooks import http_hook
from plugins.pipeline_plugins.hooks import output_hook_interface
from plugins.pipeline_plugins.utils import async_utils
from plugins.pipeline_plugins.utils import blob
from plugins.pipeline_plugins.utils import errors
# RDID (raw device id) should be in UUID format.
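# The pattern matches a version-4 UUID (RFC 4122), with or without dashes,
# case-insensitively.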
_RDID_PATTERN = '^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}$'
_RDID_REGEX = re.compile(_RDID_PATTERN, re.IGNORECASE)
_APP_CONVERSION_TRACKING_PATH = 'pagead/conversion/app/1.0'
_REQUIRED_FIELDS = ('dev_token',
'link_id',
'app_event_type',
'rdid',
'id_type',
'lat',
'app_version',
'os_version',
'sdk_version',
'timestamp')
class AppEventType(enum.Enum):
FIRST_OPEN = 'first_open'
SESSION_START = 'session_start'
IN_APP_PURCHASE = 'in_app_purchase'
VIEW_ITEM_LIST = 'view_item_list'
VIEW_ITEM = 'view_item'
VIEW_SEARCH_RESULTS = 'view_search_results'
ADD_TO_CART = 'add_to_cart'
ECOMMERCE_PURCHASE = 'ecommerce_purchase'
CUSTOM = 'custom'
class IdType(enum.Enum):
ANDROID = 'advertisingid'
IOS = 'idfa'
class EventStatus(enum.Enum):
SUCCESS = enum.auto()
FAILURE = enum.auto()
class AdsUniversalAppCampaignHook(
http_hook.HttpHook, output_hook_interface.OutputHookInterface):
"""Custom hook for Google Ads UAC API.
API SPEC for Apps Conversion Tracking and Remarketing
https://developers.google.com/app-conversion-tracking/api/request-response-specs
"""
def __init__(self, ads_uac_conn_id: str = 'google_ads_uac_default',
ads_uac_dry_run: bool = False, **kwargs) -> None:
"""Initializes the generator of a specified BigQuery table.
Args:
ads_uac_conn_id: Connection id passed to airflow.
ads_uac_dry_run: If true the hook will not send real hits to the endpoint.
**kwargs: Other optional arguments.
"""
super().__init__(http_conn_id=ads_uac_conn_id)
self.dry_run = ads_uac_dry_run
def _get_developer_token(self) -> str:
"""Gets developer token from connection configuration.
Returns:
dev_token: Developer token of Google Ads API.
Raises:
DataOutConnectorValueError: If connection is not available or if password
is missing in the connection.
"""
conn = self.get_connection(self.http_conn_id)
if not conn:
raise errors.DataOutConnectorValueError(
'Cannot get connection {id}.'.format(id=self.http_conn_id),
errors.ErrorNameIDMap
.RETRIABLE_ADS_UAC_HOOK_ERROR_FAIL_TO_GET_AIRFLOW_CONNECTION)
if not conn.password:
raise errors.DataOutConnectorValueError(
'Missing dev token. Please check connection {id} and its password.'
.format(id=self.http_conn_id),
errors.ErrorNameIDMap.RETRIABLE_ADS_UAC_HOOK_ERROR_MISSING_DEV_TOKEN)
return conn.password
def _validate_app_conversion_payload(self, payload: Dict[str, Any]) -> None:
"""Validates payload sent to UAC.
Args:
payload: The payload to be validated before sending to Google Ads UAC.
Raises:
DataOutConnectorValueError: If some value is missing or in wrong format.
"""
for key in _REQUIRED_FIELDS:
if payload.get(key) is None:
raise errors.DataOutConnectorValueError(
"""Missing {key} in payload.""".format(key=key),
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_MISSING_MANDATORY_FIELDS)
if payload.get('app_event_type') not in [item.value
for item in AppEventType]:
raise errors.DataOutConnectorValueError(
"""Unsupported app event type in
payload. Example: 'first_open', 'session_start', 'in_app_purchase',
'view_item_list', 'view_item', 'view_search_results',
'add_to_cart', 'ecommerce_purchase', 'custom'.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_UNSUPPORTED_APP_EVENT_TYPE)
if (payload.get('app_event_name') and
payload.get('app_event_type') != 'custom'):
raise errors.DataOutConnectorValueError(
"""App event type must be 'custom' when app event name exists.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_APP_EVENT_TYPE)
match = _RDID_REGEX.match(payload.get('rdid'))
if not match:
raise errors.DataOutConnectorValueError(
"""Wrong raw device id format in
payload. Should be compatible with RFC4122.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_RAW_DEVICE_ID_FORMAT)
if payload.get('id_type') not in [item.value for item in IdType]:
raise errors.DataOutConnectorValueError(
"""Wrong raw device id type in
payload. Example: 'advertisingid', 'idfa'.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_RAW_DEVICE_ID_TYPE)
if payload.get('lat') != 0 and payload.get('lat') != 1:
raise errors.DataOutConnectorValueError(
"""Wrong limit-ad-tracking status in payload. Example: 0, 1.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_LAT_STATUS)
def send_conversions_to_uac(
self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""Sends conversion to UAC via S2S REST API.
Args:
params: Parameters containing required data for app conversion tracking.
Returns:
results: Includes request body, status_code, error_msg, response body and
dry_run flag.
The response refers to the definition of conversion tracking response in
https://developers.google.com/app-conversion-tracking/api/request-response-specs#conversion_tracking_response
"""
try:
request_params = dict(params)
request_params['dev_token'] = self._get_developer_token()
app_event_data = request_params.get('app_event_data')
if 'app_event_data' in request_params:
del request_params['app_event_data']
self._validate_app_conversion_payload(request_params)
except errors.DataOutConnectorValueError as error:
self.log.exception(error)
return {'request': params,
'status_code': 400,
'error_msg': str(error),
'dry_run': self.dry_run}
self.method = 'POST'
query_url = urllib.parse.urlencode(request_params)
complete_url = ('{path}?{default_query}'
.format(
path=_APP_CONVERSION_TRACKING_PATH,
default_query=query_url))
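    # The tracking fields travel as URL query parameters; app_event_data, if
    # present, is sent separately as the POST body.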
if self.dry_run:
self.log.debug(
"""Dry run mode: Sending conversion tracking data to UAC.
URL:{}. App event data:{}."""
.format(complete_url, json.dumps(app_event_data)))
return {'request': params,
'status_code': 500,
'error_msg': 'Dry run mode',
'dry_run': self.dry_run}
response = None
extra_options = {'check_response': False}
self.log.info(
"""Not Dry run mode: Sending conversion tracking data to UAC.
URL:{}. App event data:{}."""
.format(complete_url, json.dumps(app_event_data)))
response = self.run(endpoint=complete_url,
data=app_event_data,
extra_options=extra_options)
try:
body = response.json()
return {'request': params,
'status_code': response.status_code,
'response': body,
'dry_run': self.dry_run}
except (ValueError, KeyError, TypeError):
return {'request': params,
'status_code': response.status_code,
'error_msg': response.reason,
'dry_run': self.dry_run}
def send_events(self, blb: blob.Blob) -> blob.Blob:
"""Sends all events to the Google Ads UAC API.
Args:
blb: A blob containing Customer Match data to send.
Returns:
A blob containing updated data about any failing events or reports.
Reports will be formatted as a (index, EventStatus, report) tuples.
"""
params_list = [{'params': event} for event in blb.events]
results = async_utils.run_synchronized_function(
self.send_conversions_to_uac, params_list)
for i, result in enumerate(results):
if not (isinstance(result, Dict) and result.get('response')):
blb.append_failed_event(
i + blb.position,
blb.events[i],
errors.ErrorNameIDMap.NON_RETRIABLE_ERROR_EVENT_NOT_SENT)
blb.reports.append((i + blb.position, EventStatus.FAILURE, result))
else:
blb.reports.append((i + blb.position, EventStatus.SUCCESS, result))
return blb
| google/TaglessCRM | src/plugins/pipeline_plugins/hooks/ads_uac_hook.py | Python | apache-2.0 | 9,520 | 0.004832 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/pdfgeom.py
__version__=''' $Id: pdfgeom.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
This module includes any mathematical methods needed for PIDDLE.
It should have no dependencies beyond the Python library.
So far, just Robert Kern's bezierArc.
"""
from math import sin, cos, pi, ceil
def bezierArc(x1,y1, x2,y2, startAng=0, extent=90):
"""bezierArc(x1,y1, x2,y2, startAng=0, extent=90) --> List of Bezier
curve control points.
(x1, y1) and (x2, y2) are the corners of the enclosing rectangle. The
coordinate system has coordinates that increase to the right and down.
Angles, measured in degress, start with 0 to the right (the positive X
axis) and increase counter-clockwise. The arc extends from startAng
to startAng+extent. I.e. startAng=0 and extent=180 yields an openside-down
semi-circle.
The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)
such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and
(x3, y3) as their respective Bezier control points."""
x1,y1, x2,y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)
if abs(extent) <= 90:
arcList = [startAng]
fragAngle = float(extent)
Nfrag = 1
else:
arcList = []
Nfrag = int(ceil(abs(extent)/90.))
fragAngle = float(extent) / Nfrag
x_cen = (x1+x2)/2.
y_cen = (y1+y2)/2.
rx = (x2-x1)/2.
ry = (y2-y1)/2.
halfAng = fragAngle * pi / 360.
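    # kappa is the standard control-point distance for approximating a circular
    # arc with a cubic Bezier; the expression below equals 4/3 * tan(fragAngle/4).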
kappa = abs(4. / 3. * (1. - cos(halfAng)) / sin(halfAng))
if fragAngle < 0:
sign = -1
else:
sign = 1
pointList = []
for i in range(Nfrag):
theta0 = (startAng + i*fragAngle) * pi / 180.
theta1 = (startAng + (i+1)*fragAngle) *pi / 180.
if fragAngle > 0:
pointList.append((x_cen + rx * cos(theta0),
y_cen - ry * sin(theta0),
x_cen + rx * (cos(theta0) - kappa * sin(theta0)),
y_cen - ry * (sin(theta0) + kappa * cos(theta0)),
x_cen + rx * (cos(theta1) + kappa * sin(theta1)),
y_cen - ry * (sin(theta1) - kappa * cos(theta1)),
x_cen + rx * cos(theta1),
y_cen - ry * sin(theta1)))
else:
pointList.append((x_cen + rx * cos(theta0),
y_cen - ry * sin(theta0),
x_cen + rx * (cos(theta0) + kappa * sin(theta0)),
y_cen - ry * (sin(theta0) - kappa * cos(theta0)),
x_cen + rx * (cos(theta1) - kappa * sin(theta1)),
y_cen - ry * (sin(theta1) + kappa * cos(theta1)),
x_cen + rx * cos(theta1),
y_cen - ry * sin(theta1)))
    return pointList
 | nickpack/reportlab | src/reportlab/pdfgen/pdfgeom.py | Python | bsd-3-clause | 3,119 | 0.00545 |
import tensorflow as tf
import matplotlib.pyplot as plt
import math
x_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
name='x_node')
y_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
name='y_node')
times = 5000
hits = 0
pis = []
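    # Monte Carlo estimate: the fraction of uniform points in [-1, 1]^2 that
    # land inside the unit circle approaches pi/4, so pi ~= 4 * hits / samples.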
with tf.Session() as session:
for i in range(1, times):
x = session.run(x_node)
y = session.run(y_node)
if x*x + y*y < 1:
hits += 1
pass
pi = 4 * float(hits) / i
print(pi)
pis.append(pi)
pass
pass
plt.plot(pis)
plt.plot([0, times], [math.pi, math.pi])
plt.show()
| thiswind/nn_practice | tensorflow/calculate_pi_old.py | Python | gpl-3.0 | 581 | 0.032702 |
from django.db import models
from django.db.models.fields.related import ManyToOneRel, ForeignObjectRel
from elasticsearch_dsl.mapping import Mapping
from elasticsearch_dsl.field import Field
from djes.conf import settings
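# Default Elasticsearch field definition for each Django model field type.
# DjangoMapping.configure_field falls back to this table when a field has no
# explicit manual mapping.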
FIELD_MAPPINGS = {
"AutoField": {"type": "long"},
"BigIntegerField": {"type": "long"},
"BinaryField": {"type": "binary"},
"BooleanField": {"type": "boolean"},
"CharField": {"type": "string"},
"CommaSeparatedIntegerField": {"type": "string"},
"DateField": {"type": "date"},
"DateTimeField": {"type": "date"},
"DecimalField": {"type": "string"},
"DurationField": {"type": "long"},
"EmailField": {"type": "string"},
# "FileField": {"type": ""}, # TODO: make a decision on this
"FilePathField": {"type": "string"},
"FloatField": {"type": "double"},
# "ImageField": {"type": ""}, # TODO: make a decision on this
"IntegerField": {"type": "long"},
"IPAddressField": {"type": "string", "index": "not_analyzed"},
"GenericIPAddressField": {"type": "string", "index": "not_analyzed"},
"NullBooleanField": {"type": "boolean"},
"PositiveIntegerField": {"type": "long"},
"PositiveSmallIntegerField": {"type": "long"},
"SlugField": {"type": "string", "index": "not_analyzed"},
"SmallIntegerField": {"type": "long"},
"TextField": {"type": "string"},
"TimeField": {"type": "string"},
"URLField": {"type": "string"},
"UUIDField": {"type": "string", "index": "not_analyzed"},
"ForeignKey": {"type": "long"},
"ManyToManyField": {"type": "long"},
"OneToOneField": {"type": "long"},
}
def get_first_mapping(cls):
"""This allows for Django-like inheritance of mapping configurations"""
from .models import Indexable
if issubclass(cls, Indexable) and hasattr(cls, "Mapping"):
return cls.Mapping
for base in cls.__bases__:
mapping = get_first_mapping(base)
if mapping:
return mapping
return None
class EmptyMeta(object):
pass
class DjangoMapping(Mapping):
"""A subclass of the elasticsearch_dsl Mapping, allowing the automatic mapping
of many fields on the model, while letting the developer override these settings"""
def __init__(self, model):
from .models import Indexable
self.model = model
if not hasattr(self, "Meta"):
self.Meta = EmptyMeta
default_name = "{}_{}".format(self.model._meta.app_label, self.model._meta.model_name)
name = getattr(self.Meta, "doc_type", default_name)
super(DjangoMapping, self).__init__(name)
self._meta = {}
excludes = getattr(self.Meta, "excludes", [])
includes = getattr(self.Meta, "includes", [])
for field in self.model._meta.get_fields():
if field.auto_created and field.is_relation:
if not hasattr(field, "rel") or not field.rel.parent_link:
continue
db_column, attname = field.db_column, field.attname
manual_field_mapping = getattr(self, field.name, None)
            # TODO: I am 90% sure this is not being utilized. Test later.
if manual_field_mapping:
self.field(field.name, manual_field_mapping)
continue
if field.name in excludes:
continue
self.configure_field(field)
# Now any included relations
for name in includes:
field = self.model._meta.get_field(name)
self.configure_field(field)
# Now any custom fields
for field in dir(self.__class__):
manual_field_mapping = getattr(self, field)
if field not in self.properties.properties.to_dict() and isinstance(manual_field_mapping, Field):
self.field(field, manual_field_mapping)
if getattr(self.Meta, "dynamic", "strict") == "strict":
self.properties._params["dynamic"] = "strict"
def configure_field(self, field):
"""This configures an Elasticsearch Mapping field, based on a Django model field"""
from .models import Indexable
# This is for reverse relations, which do not have a db column
if field.auto_created and field.is_relation:
if isinstance(field, (ForeignObjectRel, ManyToOneRel)) and issubclass(field.related_model, Indexable):
related_properties = field.related_model.search_objects.mapping.properties.properties.to_dict()
self.field(field.name, {"type": "nested", "properties": related_properties})
return
if field.get_internal_type() == "ManyToManyField" and issubclass(field.rel.to, Indexable):
related_properties = field.rel.to.search_objects.mapping.properties.properties.to_dict()
self.field(field.name, {"type": "nested", "properties": related_properties})
return
if isinstance(field, models.ForeignKey):
# This is a related field, so it should maybe be nested?
# We only want to nest fields when they are indexable, and not parent pointers.
if issubclass(field.rel.to, Indexable) and not field.rel.parent_link:
related_properties = field.rel.to.search_objects.mapping.properties.properties.to_dict()
self.field(field.name, {"type": "nested", "properties": related_properties})
return
db_column, attname = field.db_column, field.attname
field_args = FIELD_MAPPINGS.get(field.get_internal_type())
if field_args:
self.field(db_column or attname, field_args)
else:
raise Warning("Can't find {}".format(field.get_internal_type()))
@property
def index(self):
return getattr(self.Meta, "index", settings.ES_INDEX)
| theonion/djes | djes/mapping.py | Python | mit | 5,823 | 0.002919 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a tf.hparams object to set up the
parameters for a model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
# An object of the pruning also accepts externally defined sparsity:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def apply_mask(x, scope=''):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to "".
Returns:
Tensor representing masked_weights
"""
mask = pruning_utils.weight_mask_variable(x, scope)
threshold = pruning_utils.weight_threshold_variable(x, scope)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
masked_weights = math_ops.multiply(mask, x, _MASKED_WEIGHT_NAME)
# Make sure the mask for a given variable are not added multiple times to the
# collection. This is particularly important when applying mask to RNN's
# weight variables
if mask not in ops.get_collection_ref(_MASK_COLLECTION):
ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
ops.add_to_collection(_MASK_COLLECTION, mask)
ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)
ops.add_to_collection(_WEIGHT_COLLECTION, x)
return masked_weights
def get_masked_weights():
return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args:
None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [nn_impl.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
weight_sparsity_map: list of strings
    comma separated list of weight variable name:target sparsity pairs.
For layers/weights not in this list, sparsity as specified by the
target_sparsity hyperparameter is used.
Eg. [conv1:0.9,conv2/kernel:0.8]
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
block_height: integer
number of rows in a block (defaults to 1)
block_width: integer
number of cols in a block (defaults to 1)
block_pooling_function: string
Whether to perform average (AVG) or max (MAX) pooling in the block
(default: AVG)
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
the global step at this which the gradual sparsity function begins to
take effect
sparsity_function_end_step: integer
the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
exponent = 1 is linearly varying sparsity between initial and final.
exponent > 1 varies more slowly towards the end than the beginning
use_tpu: False
Indicates whether to use TPU
We use the following sparsity function:
num_steps = (sparsity_function_end_step -
sparsity_function_begin_step)/pruning_frequency
sparsity(step) = (initial_sparsity - target_sparsity)*
[1-step/(num_steps -1)]**exponent + target_sparsity
Args:
None
Returns:
tf.HParams object initialized to default values
"""
return hparam.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
weight_sparsity_map=[''],
threshold_decay=0.0,
pruning_frequency=10,
nbins=256,
block_height=1,
block_width=1,
block_pooling_function='AVG',
initial_sparsity=0.0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3,
use_tpu=False)
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
If a spec is provided, the sparsity is set up based on the sparsity_function
in the spec. The effect of sparsity_function is overridden if the sparsity
variable is passed to the constructor. This enables setting up arbitrary
sparsity profiles externally and passing it to this pruning functions.
Args:
spec: Pruning spec as defined in pruning.proto
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorflow scalar variable storing the sparsity
"""
# Pruning specification
self._spec = spec if spec else get_pruning_hparams()
# Sanity check for pruning hparams
self._validate_spec()
# A tensorflow variable that tracks the sparsity function.
# If not provided as input, the graph must already contain the global_step
# variable before calling this constructor.
self._global_step = self._setup_global_step(global_step)
# Stores the tensorflow sparsity variable.
# Built using self._setup_sparsity() or provided externally
self._sparsity = (sparsity
if sparsity is not None else self._setup_sparsity())
# List of tensorflow assignments ops for new masks and thresholds
self._assign_ops = []
# Tensorflow variable keeping track of the last global step when the masks
# were updated
self._last_update_step = self._setup_last_update_step()
# Block dimensions
self._block_dim = [self._spec.block_height, self._spec.block_width]
# Block pooling function
self._block_pooling_function = self._spec.block_pooling_function
# Mapping of weight names and target sparsity
self._weight_sparsity_map = self._get_weight_sparsity_map()
def _validate_spec(self):
spec = self._spec
if spec.begin_pruning_step < 0:
raise ValueError('Illegal value for begin_pruning_step')
if spec.begin_pruning_step >= spec.end_pruning_step:
if spec.end_pruning_step != -1:
raise ValueError(
'Pruning must begin before it can end. begin_step=%d, end_step=%d.'
'Set end_pruning_step to -1 if pruning is required till training'
'stops' % (spec.begin_pruning_step, spec.end_pruning_step))
if spec.sparsity_function_begin_step < 0:
raise ValueError('Illegal value for sparsity_function_begin_step')
if spec.sparsity_function_begin_step >= spec.sparsity_function_end_step:
raise ValueError(
'Sparsity function requires begin_step < end_step')
if not 0.0 <= spec.threshold_decay < 1.0:
raise ValueError('threshold_decay must be in range [0,1)')
if not 0.0 <= spec.initial_sparsity < 1.0:
raise ValueError('initial_sparsity must be in range [0,1)')
if not 0.0 <= spec.target_sparsity < 1.0:
raise ValueError('target_sparsity must be in range [0,1)')
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = training_util.get_global_step()
return math_ops.cast(graph_global_step, dtypes.int32)
def _setup_sparsity(self):
begin_step = self._spec.sparsity_function_begin_step
end_step = self._spec.sparsity_function_end_step
initial_sparsity = self._spec.initial_sparsity
target_sparsity = self._spec.target_sparsity
exponent = self._spec.sparsity_function_exponent
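    # Gradual pruning schedule: p ramps linearly from 0 to 1 between begin_step
    # and end_step, and the sparsity moves from initial_sparsity to
    # target_sparsity as (1 - p)**exponent decays.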
with ops.name_scope(self._spec.name):
p = math_ops.minimum(
1.0,
math_ops.maximum(
0.0,
math_ops.div(
math_ops.cast(self._global_step - begin_step, dtypes.float32),
end_step - begin_step)))
sparsity = math_ops.add(
math_ops.multiply(initial_sparsity - target_sparsity,
math_ops.pow(1 - p, exponent)),
target_sparsity,
name='sparsity')
return sparsity
def _setup_last_update_step(self):
with variable_scope.variable_scope(
self._spec.name, use_resource=self._spec.use_tpu) as scope:
try:
last_update_step = variable_scope.get_variable(
'last_mask_update_step', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=dtypes.int32)
except ValueError:
scope.reuse_variables()
last_update_step = variable_scope.get_variable(
'last_mask_update_step', dtype=dtypes.int32)
return last_update_step
def _get_weight_sparsity_map(self):
"""Return the map of weight_name:sparsity parsed from the hparams."""
weight_sparsity_map = {}
val_list = self._spec.weight_sparsity_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, sparsity = val.split(':')
if float(sparsity) >= 1.0:
raise ValueError('Weight sparsity can not exceed 1.0')
weight_sparsity_map[weight_name] = float(sparsity)
return weight_sparsity_map
def _get_sparsity(self, weight_name):
"""Return target sparsity for the given layer/weight name."""
target_sparsity = [
sparsity for name, sparsity in self._weight_sparsity_map.items()
if weight_name.find(name) != -1
]
if not target_sparsity:
return self._sparsity
if len(target_sparsity) > 1:
raise ValueError(
'Multiple matches in weight_sparsity_map for weight %s' % weight_name)
# TODO(suyoggupta): This will work when initial_sparsity = 0. Generalize
# to handle other cases as well.
    return math_ops.multiply(
self._sparsity,
math_ops.div(target_sparsity[0], self._spec.target_sparsity))
def _update_mask(self, weights, threshold):
"""Updates the mask for a given weight tensor.
This functions first computes the cdf of the weight tensor, and estimates
the threshold value such that 'desired_sparsity' fraction of weights
have magnitude less than the threshold.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if sparsity is not defined
"""
if self._sparsity is None:
raise ValueError('Sparsity variable undefined')
sparsity = self._get_sparsity(weights.op.name)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(weights)
max_value = math_ops.reduce_max(abs_weights)
cdf_fn = pruning_utils.compute_cdf_from_histogram
if self._spec.use_tpu:
cdf_fn = pruning_utils.compute_cdf
norm_cdf = cdf_fn(abs_weights, [0.0, max_value], nbins=self._spec.nbins)
current_threshold = math_ops.multiply(
math_ops.div(
math_ops.reduce_sum(
math_ops.cast(
math_ops.less(norm_cdf, sparsity), dtypes.float32)),
float(self._spec.nbins)), max_value)
smoothed_threshold = math_ops.add_n([
math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
math_ops.multiply(threshold, self._spec.threshold_decay)
])
new_mask = math_ops.cast(
math_ops.greater(abs_weights, smoothed_threshold), dtypes.float32)
return smoothed_threshold, new_mask
def _maybe_update_block_mask(self, weights, threshold):
"""Performs block-granular masking of the weights.
Block pruning occurs only if the block_height or block_width is > 1 and
if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
pruning occurs.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if block pooling function is not AVG or MAX
"""
squeezed_weights = array_ops.squeeze(weights)
if squeezed_weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
return self._update_mask(weights, threshold)
if self._block_pooling_function not in ['AVG', 'MAX']:
raise ValueError('Unknown pooling function for block sparsity: %s' %
self._block_pooling_function)
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(squeezed_weights)
pool_window = [self._block_dim[0], self._block_dim[1]]
pool_fn = pruning_utils.factorized_pool
squeeze_axis = None
if not self._spec.use_tpu:
pool_fn = nn_ops.pool
abs_weights = array_ops.reshape(
abs_weights,
[1, abs_weights.get_shape()[0],
abs_weights.get_shape()[1], 1])
squeeze_axis = [0, 3]
pooled_weights = pool_fn(
abs_weights,
window_shape=pool_window,
pooling_type=self._block_pooling_function,
strides=pool_window,
padding='SAME',
name=weights.op.name + '_pooled')
if pooled_weights.get_shape().ndims != 2:
pooled_weights = array_ops.squeeze(pooled_weights, axis=squeeze_axis)
smoothed_threshold, new_mask = self._update_mask(pooled_weights,
threshold)
updated_mask = pruning_utils.expand_tensor(new_mask, self._block_dim)
sliced_mask = array_ops.slice(
updated_mask, [0, 0],
[squeezed_weights.get_shape()[0],
squeezed_weights.get_shape()[1]])
return smoothed_threshold, array_ops.reshape(sliced_mask,
array_ops.shape(weights))
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_ops:
raise ValueError(
'Assign op list not empty. _get_mask_assign_ops() called twice?')
masks = get_masks()
weights = get_weights()
thresholds = get_thresholds()
if len(masks) != len(thresholds):
raise ValueError(
'Number of masks %s and number of thresholds %s mismatch' %
(len(masks), len(thresholds)))
for index, mask in enumerate(masks):
threshold = thresholds[index]
weight = weights[index]
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
new_threshold, new_mask = self._maybe_update_block_mask(weight, threshold)
self._assign_ops.append(
pruning_utils.variable_assign(threshold, new_threshold))
self._assign_ops.append(
pruning_utils.partitioned_variable_assign(mask, new_mask)
if is_partitioned else pruning_utils.variable_assign(mask, new_mask))
def mask_update_op(self):
with ops.name_scope(self._spec.name):
if not self._assign_ops:
self._get_mask_assign_ops()
with ops.control_dependencies([
state_ops.assign(
self._last_update_step,
self._global_step,
name='last_mask_update_step_assign')
]):
with ops.control_dependencies(self._assign_ops):
logging.info('Updating masks.')
return control_flow_ops.no_op('mask_update')
def conditional_mask_update_op(self):
def maybe_update_masks():
with ops.name_scope(self._spec.name):
is_step_within_pruning_range = math_ops.logical_and(
math_ops.greater_equal(self._global_step,
self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
math_ops.logical_or(
math_ops.less_equal(self._global_step,
self._spec.end_pruning_step),
math_ops.less(self._spec.end_pruning_step, 0)))
is_pruning_step = math_ops.less_equal(
math_ops.add(self._last_update_step, self._spec.pruning_frequency),
self._global_step)
return math_ops.logical_and(is_step_within_pruning_range,
is_pruning_step)
def mask_update_op():
return self.mask_update_op()
def no_update_op():
return control_flow_ops.no_op()
return control_flow_ops.cond(maybe_update_masks(), mask_update_op,
no_update_op)
def add_pruning_summaries(self):
"""Adds summaries of weight sparsities and thresholds."""
with ops.name_scope(self._spec.name + '_summaries'):
summary.scalar('sparsity', self._sparsity)
summary.scalar('last_mask_update_step', self._last_update_step)
masks = get_masks()
thresholds = get_thresholds()
for mask, threshold in zip(masks, thresholds):
summary.scalar(mask.op.name + '/sparsity', nn_impl.zero_fraction(mask))
summary.scalar(threshold.op.name + '/threshold', threshold)
def print_hparams(self):
logging.info(self._spec.to_json())
| asimshankar/tensorflow | tensorflow/contrib/model_pruning/python/pruning.py | Python | apache-2.0 | 21,323 | 0.00469 |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for all models.
The model solely consists of the network, while the task combines one or several
models with one or several learners/optimizers.
"""
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import jax
from jax import numpy as jnp
from lingvo.jax import base_input
from lingvo.jax import base_layer
from lingvo.jax import layers
from lingvo.jax import metric_utils
from lingvo.jax import py_utils
from lingvo.jax import train_states
NestedMap = py_utils.NestedMap
JTensor = base_layer.JTensor
InstantiableParams = py_utils.InstantiableParams
Predictions = Union[JTensor, NestedMap, Dict[str, Any]]
Metrics = Dict[str, Tuple[JTensor, JTensor]]
TrainState = train_states.TrainState
def _compute_xent_loss_helper(
predictions: NestedMap, input_batch: NestedMap,
return_predictions: bool) -> Tuple[Metrics, Dict[str, Any]]:
"""Helper for computing the xent loss for Language model and Sequence model.
Args:
predictions: A `.NestedMap` containing the keys `per_example_argmax`,
`total_loss`, `avg_xent`, `aux_loss`, `total_weight` which corresponds to
the output of the Softmax layer.
    input_batch: A `.NestedMap` object containing input tensors with the keys
      `labels` and `weights`, which correspond to the labels and the weights
      for each token in the sequence.
return_predictions: Whether to return predictions, which can be more
expensive.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index. The base class just returns an empty dict.
"""
if 'tgt' in input_batch:
labels = input_batch.tgt.labels
if 'paddings' in input_batch.tgt:
weights = 1.0 - input_batch.tgt.paddings
else:
weights = jnp.not_equal(input_batch.tgt.segment_ids, 0)
weights = weights.astype(labels.dtype)
else:
labels = input_batch.labels
weights = input_batch.weights
predicted_labels = predictions.per_example_argmax.astype(labels.dtype)
num_preds = predictions.total_weight
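  # Mean next-token accuracy over the non-padded target positions.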
mean_acc = jnp.sum(
(labels == predicted_labels) * weights) / jnp.maximum(num_preds, 1)
metric_weight = jnp.array(num_preds, predictions.avg_xent.dtype)
if hasattr(predictions, 'avg_xent_weight'):
avg_xent_weight = predictions.avg_xent_weight
else:
avg_xent_weight = metric_weight
metrics = NestedMap(
total_loss=(predictions.total_loss, metric_weight),
avg_xent=(predictions.avg_xent, avg_xent_weight),
aux_loss=(predictions.aux_loss, jnp.array(1.0,
predictions.aux_loss.dtype)),
log_pplx=(predictions.avg_xent, avg_xent_weight),
fraction_of_correct_next_step_preds=(mean_acc, metric_weight),
num_predictions=(num_preds, jnp.array(1.0, num_preds.dtype)),
)
per_example_output = NestedMap()
if return_predictions:
per_example_output = predictions
return metrics, per_example_output
def greedy_decode(extend_step_fn: Callable[[NestedMap, JTensor],
Tuple[NestedMap, JTensor]],
decoder_state: NestedMap,
target_ids: JTensor,
target_paddings: JTensor,
seq_len: int,
max_decode_steps: Optional[int] = None,
prefix_lengths: Optional[JTensor] = None,
eos_id: Optional[int] = None) -> NestedMap:
"""Greedy decode the input batch.
Args:
extend_step_fn: A function that takes in `states` and the decoded sequence
at the current time step (with shape [B] or [B, P] where B corresponds to
the batch size and P corresponds to a possible prefix) and returns a tuple
of (`NestedMap`, `JTensor`), where the first `NestedMap` corresponds to
the `new_states` and the second `JTensor` corresponds to the logits of the
next step.
decoder_state: The initialized cache for autoregressive cached decoding.
target_ids: The token ids that correspond to the target sequence.
    target_paddings: The paddings corresponding to the target sequence, with 1
      denoting a padding token and 0 denoting a non-padding token.
    seq_len: The output sequence length to decode to.
    max_decode_steps: Python int or None, the max number of decode steps to run
      after the prefix (if any). Since the prefixes might be of unequal
      lengths, this value is not equivalent to `seq_len` above. When None, the
      number of decode steps is only limited by `seq_len` above.
    prefix_lengths: Optional argument supplying prefix sizes used to initialize
      the model to decode from a certain target prefix for each position in the
      batch. This can either be None or a JTensor of shape [batch] signifying
      the prefix length for each sequence in the batch.
    eos_id: Optional EOS id used to terminate the decoding early.
Returns:
A NestedMap with `.prefix_lengths` (indicating the lengths of prefixes for
each target sequence), `.output_ids` (matrix of int ids with the
decoded output), `.decode_lengths` (vector of ints indicating the lengths
of non-padding tokens in `.output_ids`, which includes the prefix), and
`.logprobs` (the log probability of selected tokens, including the prefix,
where a positive value of 1.0 is used to indicate padded positions).
"""
if seq_len <= 0:
raise ValueError('The sequence length for decoding must be > 0, '
f'current value = {seq_len}.')
max_decode_steps = max_decode_steps or seq_len
batch_size = target_ids.shape[0]
# If prefix length is not specified set it to 0.
if prefix_lengths is None:
prefix_lengths = jnp.zeros([batch_size], dtype=jnp.int32)
output_ids = jnp.zeros(shape=(batch_size, seq_len), dtype=jnp.int32)
output_ids = output_ids.at[:, 0].set(target_ids[:, 0])
val = NestedMap()
val.state = decoder_state
val.step = 0
val.output_ids = output_ids
# Shape [batch_size], whether each row has terminated and should stop.
val.done = jnp.zeros(shape=batch_size, dtype=jnp.bool_)
val.decode_lengths = jnp.ones_like(prefix_lengths) * seq_len
# We use a positive value of 1.0 to indicate blank or padded positions.
val.logprobs = jnp.ones_like(output_ids, dtype=jnp.float32)
def cond_func(val):
"""Whether the while loop should continue."""
# We continue the greedy search iff both:
# (1) We have yet to exceed the max steps set by p.decoder.seqlen, AND;
# (2) At least one row in the batch has not terminated.
length_ok = val.step < seq_len - 1
all_rows_done = jnp.all(val.done)
return jnp.logical_and(length_ok, jnp.logical_not(all_rows_done))
def loop_body(val):
"""From ids at `step`, update output ids at `step + 1`."""
step = val.step
decoder_state, logits = extend_step_fn(val.state, val.output_ids[:, step])
logprobs = jax.nn.log_softmax(logits.astype(jnp.float32))
val.state = decoder_state
# When step becomes prefix_length - 1, the new output has index beyond
# the known prefix.
# If prefix_length is 0, the condition is always False, so we take the
# decoded output rather than the prefix.
new_ids = jnp.where(step < prefix_lengths - 1, target_ids[:, step + 1],
jnp.argmax(logits, axis=1))
prev_done = val.done
new_ids = jnp.where(prev_done, jnp.zeros_like(new_ids), new_ids)
if eos_id is not None:
val.done = jnp.logical_or(prev_done, jnp.equal(new_ids, eos_id))
max_decoding_steps_reached = (jnp.ones_like(prefix_lengths) * (step + 2) -
prefix_lengths) >= max_decode_steps
val.done = jnp.logical_or(val.done, max_decoding_steps_reached)
done_at_this_step = jnp.logical_and(jnp.logical_not(prev_done), val.done)
val.decode_lengths = jnp.where(
done_at_this_step,
jnp.ones_like(val.decode_lengths) * (step + 2), val.decode_lengths)
val.output_ids = val.output_ids.at[:, step + 1].set(new_ids)
logprobs_at_new_ids = logprobs.at[jnp.arange(batch_size), new_ids].get()
logprobs_at_new_ids = jnp.where(prev_done,
jnp.ones_like(logprobs_at_new_ids),
logprobs_at_new_ids)
val.logprobs = val.logprobs.at[:, step + 1].set(logprobs_at_new_ids)
val.step += 1
return val
result = jax.lax.while_loop(cond_func, loop_body, val)
result.prefix_lengths = prefix_lengths
result.original_lengths = jnp.sum(
1.0 - target_paddings, axis=1).astype(jnp.int32)
prefix_ids = target_ids
# We manually pad out the ids not belonging to the prefix because some
# tokenizers tested do not always obey the lengths arg.
indices = jnp.tile(jnp.arange(prefix_ids.shape[1]), (prefix_ids.shape[0], 1))
prefix_lengths_2d = jnp.tile(prefix_lengths[:, None],
(1, prefix_ids.shape[1]))
prefix_ids = jnp.where(indices < prefix_lengths_2d, prefix_ids,
jnp.zeros_like(prefix_ids))
result.prefix_ids = prefix_ids
del result.state, result.step, result.done
return result
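# A minimal usage sketch of greedy_decode (hypothetical, mirroring
# LanguageModel.decode below): given a layer `lm` exposing `init_states` and
# `extend_step`, and int32 tensors `ids`/`paddings` of shape [batch, time]:
#
#   state = lm.init_states(target_batch_size=ids.shape[0], target_max_length=16)
#   def extend_step_fn(states, step_ids):
#     new_states, xent = lm.extend_step(states, step_ids)
#     return new_states, xent.logits
#   out = greedy_decode(extend_step_fn, state, ids, paddings, seq_len=16,
#                       eos_id=2)
#   # out.output_ids: [batch, 16] int32; out.decode_lengths: [batch] int32.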
class BaseModel(base_layer.BaseLayer):
"""An API that every model should be derived from."""
def compute_predictions(self, input_batch: NestedMap) -> Predictions:
"""Computes predictions for `input_batch`.
This method must be defined in a concrete derived class.
    The output can be in the form of probabilistic distributions, e.g., softmax
logits for discrete outputs, mixture of logistics for continuous values, or
regression values.
For training/evaluation, the output will be used for computing loss and
gradient updates, including comparing predicted distributions between
teacher and student for distillation. During inference the output can be
used to compute final outputs, perhaps with sampling.
Args:
input_batch: A `.NestedMap` object containing input tensors.
Returns:
Predictions, either a single Tensor, a `.NestedMap`, or a namedtuple.
"""
raise NotImplementedError('Abstract method')
def compute_loss(self, predictions: Union[JTensor, NestedMap],
input_batch: NestedMap) -> Tuple[Metrics, Dict[str, Any]]:
"""Computes the loss and other metrics for the given predictions.
This method must be defined in a concrete derived class.
Args:
predictions: The output of `compute_predictions`.
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
      values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index.
"""
raise NotImplementedError('Abstract method')
def fprop(self, input_batch: NestedMap) -> Tuple[Metrics, Dict[str, Any]]:
"""Forward propagation through one tower of the model.
Args:
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
(dict, dict):
- A dict containing str keys and (metric, weight) pairs as values, where
one of the keys is expected to be 'loss'.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index.
"""
with py_utils.AuxLossContext():
predictions = self.compute_predictions(input_batch)
return self.compute_loss(predictions, input_batch)
def decode(self, input_batch: NestedMap) -> Tuple[NestedMap, NestedMap]:
"""Decodes input_batch.
Args:
input_batch: The input batch. A `NestedMap` of tensors. Or, if input batch
        splitting is used, a list of `NestedMap`, one for each split.
Returns:
- metrics, a NestedMap containing str keys and (metric, weight) pairs for
the current batch (a tuple of two scalars).
- results, a `.NestedMap` as decoder output.
"""
raise NotImplementedError('Abstract method')
def process_decode_out(
self, input_obj: base_input.BaseInput,
decode_out: NestedMap) -> Tuple[NestedMap, Sequence[Tuple[str, Any]]]:
"""Processes one batch of decoded outputs.
Args:
input_obj: The input object where a tokenizer is accessible.
decode_out: The output from decode(). May have an extra leading axis.
Returns:
- metrics, a NestedMap containing str keys and (metric, weight) pairs for
the current batch (a tuple of two scalars).
- A list of tuples where each element corresponds to a row in the batch.
Each tuple is a key value pair.
"""
raise NotImplementedError('Abstract method')
class ClassificationMLPModel(BaseModel):
"""Language Model task with a simple MLP model."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('mlp_tpl', layers.linears.MLPBlock.Params(),
'MLP model parameters.')
p.Define('softmax_tpl', layers.SingleShardSharedEmbeddingSoftmax.Params(),
'Input softmax embedding lookup layer.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
self.create_children('mlp_layers', p.mlp_tpl.Copy())
self.create_child('softmax', p.softmax_tpl.Copy())
def compute_predictions(self, input_batch: NestedMap) -> Predictions:
input_emb = self.softmax.emb_lookup(input_batch.ids)
output = self.mlp_layers.fprop(input_emb)
predictions = self.softmax.fprop(
inputs=output,
class_weights=input_batch.weights[:, :, jnp.newaxis],
class_ids=input_batch.ids[:, :, jnp.newaxis])
return predictions
def compute_loss(self, predictions: NestedMap,
input_batch: NestedMap) -> Tuple[Metrics, Dict[str, Any]]:
labels = input_batch.labels
weights = input_batch.weights
class_weights = weights[:, :, jnp.newaxis]
num_preds = jnp.sum(class_weights)
predicted_labels = predictions.per_example_argmax.astype(labels.dtype)
mean_acc = jnp.sum(
(labels == predicted_labels) * weights) / jnp.maximum(num_preds, 1)
metrics = NestedMap(total_loss=(mean_acc, mean_acc),)
return metrics, NestedMap()
class LanguageModel(BaseModel):
"""Language Model base task."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('lm', layers.TransformerLm.Params(), 'LM layer.')
p.Define(
        'return_predictions', False, 'Whether to return predictions during '
        'eval. Returning predictions is more expensive, but may be useful '
        'for debugging.')
greedy_search_p = py_utils.Params()
greedy_search_p.Define('seqlen', 0, 'Maximum output sequence length.')
greedy_search_p.Define(
'min_prefix_len', 5,
'Minimum number of tokens picked to be used as decoding prefix.')
greedy_search_p.Define(
'eos_id', 2,
'The id of EOS token indicating the termination of greedy search.')
greedy_search_p.Define(
'max_decode_steps', None,
'If not None, the max decode steps for each example. If None, this '
        'is set to `seqlen`, which includes the prefix.')
p.Define('decoder', greedy_search_p, 'Decoder param.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
# Construct the model.
lm_p = p.lm.Copy()
self.create_child('lm', lm_p)
def compute_predictions(self, input_batch: NestedMap) -> Predictions:
"""Computes predictions for `input_batch`."""
p = self.params
if 'tgt' in input_batch:
input_batch = input_batch.tgt
if 'paddings' in input_batch:
paddings = input_batch.paddings
else:
paddings = jnp.equal(input_batch.segment_ids, 0).astype(self.fprop_dtype)
if 'weights' in input_batch:
weights = input_batch.weights
else:
weights = 1.0 - paddings
weights = weights.astype(self.fprop_dtype)
input_batch.weights = weights
inputs = input_batch.ids
labels = NestedMap(class_ids=input_batch.labels, class_weights=weights)
if p.lm.packed_input:
packed_input_kwargs = {
'segment_ids': input_batch.segment_ids,
'segment_pos': input_batch.segment_pos,
}
else:
packed_input_kwargs = {}
return self.lm.fprop(
inputs=inputs,
paddings=paddings,
labels=labels,
**packed_input_kwargs)
def compute_loss(self, predictions: NestedMap,
input_batch: NestedMap) -> Tuple[Metrics, Dict[str, Any]]:
"""Computes the loss and other metrics for the given predictions.
Args:
predictions: The output of `compute_predictions`.
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
      values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index.
"""
return _compute_xent_loss_helper(predictions, input_batch,
self.params.return_predictions)
def decode(self, input_batch: NestedMap) -> Tuple[NestedMap, NestedMap]:
"""Greedy decodes the input_batch.
Args:
input_batch: The input batch, with fields like `.ids`.
Returns:
      - metrics, a NestedMap containing str keys and (metric, weight) pairs.
- A NestedMap like `input_batch`, with `.prefix_lengths` (vector of
randomly generated ints indicating the lengths of prefixes for each
row), and `.output_ids` (matrix of int ids with the decoded output).
"""
p = self.params
if p.decoder.seqlen <= 0:
raise ValueError('Must set p.decoder.seqlen > 0, current value = '
f'{p.decoder.seqlen}')
batch_size = input_batch.ids.shape[0]
maxval = jnp.sum(1 - input_batch.paddings, axis=1).astype(jnp.int32)
minval = jnp.minimum(maxval, p.decoder.min_prefix_len)
prefix_lengths = jax.random.randint(base_layer.next_prng_key(),
[batch_size], minval, maxval + 1,
input_batch.ids.dtype)
decoder_state = self.lm.init_states(
target_batch_size=batch_size,
target_max_length=p.decoder.seqlen)
global_step = base_layer.cur_global_step()
lm_theta = self.lm.local_theta()
def extend_step_fn(states, ids):
with base_layer.JaxContext.new_context(
prng_key=base_layer.next_prng_key(),
global_step=global_step) as jax_context:
jax_context.bind(self.lm, self.lm.vars_to_flax_vars(lm_theta),
[base_layer.SCOPE_AUX_LOSS])
new_states, xent = self.lm.extend_step(states, ids)
return new_states, xent.logits
result = greedy_decode(
extend_step_fn,
decoder_state,
input_batch.ids,
input_batch.paddings,
p.decoder.seqlen,
max_decode_steps=p.decoder.max_decode_steps,
prefix_lengths=prefix_lengths,
eos_id=p.decoder.eos_id)
result.update(input_batch)
metrics = NestedMap(
num_decoded=(jnp.array(0.0, jnp.float32),
jnp.array(batch_size, jnp.float32)))
return metrics, result
def process_decode_out(
self, input_obj: base_input.BaseInput,
decode_out: NestedMap) -> Tuple[NestedMap, Sequence[Tuple[str, Any]]]:
"""Processes one batch of decoded outputs.
Args:
input_obj: The input object where a tokenizer is accessible.
decode_out: The output from decode(). May have an extra leading axis.
Returns:
- metrics, a NestedMap containing str keys and (metric, weight) pairs for
the current batch (a tuple of two scalars).
- A list of dict where each entry corresponds to a row in the batch. The
keys should be unique across the entire decode dataset.
"""
decoded_strs = input_obj.ids_to_strings(decode_out.output_ids,
decode_out.decode_lengths)
original_strs = input_obj.ids_to_strings(decode_out.ids,
decode_out.original_lengths)
prefix_strs = input_obj.ids_to_strings(decode_out.prefix_ids,
decode_out.prefix_lengths)
ret = list()
for idx, decoded_str in enumerate(decoded_strs):
ret.append((prefix_strs[idx], {
'prefix': prefix_strs[idx],
'decoded': decoded_str,
'original': original_strs[idx],
'ids': decode_out.output_ids[idx],
'logprobs': decode_out.logprobs[idx],
'prefix_length': decode_out.prefix_lengths[idx],
'decode_length': decode_out.decode_lengths[idx],
}))
decoded_lengths = jnp.average(decode_out.decode_lengths).astype(jnp.float32)
metrics = NestedMap(
decoded_length=(decoded_lengths, jnp.array(1.0, jnp.float32)))
return metrics, ret
class SequenceModel(BaseModel):
"""Sequence Model base task."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('model', layers.TransformerEncoderDecoder.Params(),
'Sequence model layer for this task.')
p.Define(
        'return_predictions', False, 'Whether to return predictions during '
        'eval. Returning predictions is more expensive, but may be useful '
        'for debugging.')
decoder_p = py_utils.Params()
decoder_p.Define('seqlen', 0, 'Maximum output sequence length.')
decoder_p.Define(
'eos_id', 2,
'The id of EOS token indicating the termination of decoding.')
p.Define('decoder', decoder_p, 'Decoder params.')
p.Define(
'label_smoothing_prob', 0.0,
'If > 0.0, smooth out one-hot prob by spreading this amount of'
' prob mass to all other tokens.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
# Construct the model.
model_p = p.model.Copy()
self.create_child('model', model_p)
def compute_predictions(self, input_batch):
"""Computes predictions for `input_batch`."""
p = self.params
if p.model.packed_input:
packed_input_kwargs = {
'input_segment_ids': input_batch.src.segment_ids,
'input_segment_pos': input_batch.src.segment_pos,
'target_segment_ids': input_batch.tgt.segment_ids,
'target_segment_pos': input_batch.tgt.segment_pos,
}
else:
packed_input_kwargs = {}
labels = NestedMap(
class_ids=input_batch.tgt.labels, class_weights=input_batch.tgt.weights)
if p.label_smoothing_prob > 0.0:
vocab_size = p.model.softmax_tpl.num_classes
class_probabilities = jax.nn.one_hot(labels.class_ids, vocab_size)
fill_prob = p.label_smoothing_prob / (vocab_size - 1)
class_probabilities = (
(1.0 - p.label_smoothing_prob) * class_probabilities + fill_prob *
(1.0 - class_probabilities)).astype(self.fprop_dtype)
labels.class_probabilities = class_probabilities
return self.model.fprop(
inputs=input_batch.src.ids,
input_paddings=input_batch.src.paddings,
targets=input_batch.tgt.ids,
target_paddings=input_batch.tgt.paddings,
labels=labels,
**packed_input_kwargs)
def compute_loss(self, predictions, input_batch):
"""Computes the loss and other metrics for the given predictions.
Args:
      predictions: The output of `compute_predictions`.
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
      values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index.
"""
return _compute_xent_loss_helper(predictions, input_batch.tgt,
self.params.return_predictions)
def decode(self, input_batch: NestedMap) -> Tuple[NestedMap, NestedMap]:
"""Decodes input_batch.
Args:
      input_batch: The input batch, with fields `.src` and `.tgt` corresponding
        to source and target, each of which contains `.ids` and `.paddings`.
Returns:
      - metrics, a NestedMap of metrics.
- results, a NestedMap like `input_batch`, with `.output_ids` (matrix of
int ids with the decoded output) as well as the decoded length.
"""
p = self.params
model_theta = self.model.local_theta()
if p.decoder.seqlen <= 0:
raise ValueError('Must set p.decoder.seqlen > 0, current value = '
f'{p.decoder.seqlen}')
batch_size = input_batch.tgt.ids.shape[0]
decoder_state = self.model.init_states(
inputs=input_batch.src.ids,
input_paddings=input_batch.src.paddings,
target_batch_size=batch_size,
target_max_length=p.decoder.seqlen)
global_step = base_layer.cur_global_step()
def extend_step_fn(states, ids):
with base_layer.JaxContext.new_context(
prng_key=base_layer.next_prng_key(),
global_step=global_step) as jax_context:
jax_context.bind(self.model, self.model.vars_to_flax_vars(model_theta),
[base_layer.SCOPE_AUX_LOSS])
new_states, xent = self.model.extend_step(states, ids)
return new_states, xent.logits
result = greedy_decode(
extend_step_fn,
decoder_state,
input_batch.tgt.ids,
input_batch.tgt.paddings,
p.decoder.seqlen,
eos_id=p.decoder.eos_id)
# Prefix lengths are not needed for sequence model decoding.
del result.prefix_lengths
result.update(input_batch)
metrics = NestedMap(
num_decoded=(jnp.array(0.0, jnp.float32),
jnp.array(batch_size, jnp.float32)))
return metrics, result
def process_decode_out(
self, input_obj: base_input.BaseInput,
decode_out: NestedMap) -> Tuple[NestedMap, Sequence[Tuple[str, Any]]]:
"""Processes one batch of decoded outputs.
Args:
input_obj: The input object where a tokenizer is accessible.
decode_out: The output from decode(). May have an extra leading axis.
Returns:
- metrics, a NestedMap containing str keys and (metric, weight) pairs for
the current batch (a tuple of two scalars).
- A list of dict where each entry corresponds to a row in the batch. The
keys should be unique across the entire decode dataset.
"""
decoded_strs = input_obj.ids_to_strings(
decode_out.output_ids, decode_out.decode_lengths, key='tgt')
source_lengths = jnp.sum(
1.0 - decode_out.src.paddings, axis=1).astype(jnp.int32)
source_strs = input_obj.ids_to_strings(
decode_out.src.ids, source_lengths, key='src')
target_strs = input_obj.ids_to_strings(
decode_out.tgt.ids, decode_out.original_lengths, key='tgt')
ret = list()
for idx, decoded_str in enumerate(decoded_strs):
ret.append((source_strs[idx], {
'source': source_strs[idx],
'decoded': decoded_str,
'target': target_strs[idx],
'ids': decode_out.output_ids[idx],
'logprobs': decode_out.logprobs[idx],
'decode_length': decode_out.decode_lengths[idx],
}))
decode_lengths = jnp.average(decode_out.decode_lengths).astype(jnp.float32)
metrics = NestedMap(
decode_length=(decode_lengths, jnp.array(1.0, jnp.float32)))
return metrics, ret
class ClassificationModel(BaseModel):
"""Classification task for images and video."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('network', layers.ResNet.Params(),
'The classifier network, which is ResNet-50 by default.')
p.Define('softmax', layers.SingleShardFullSoftmax.Params(),
'The softmax layer used for the classification.')
p.Define(
'input_field', 'image',
        'The input field which contains the image or video features to '
        'pass to the classification network.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
self.create_child('network', p.network)
self.create_child('softmax', p.softmax)
def compute_predictions(self, input_batch: NestedMap) -> Predictions:
"""Computes predictions for `input_batch`.
Args:
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
- A NestedMap containing str keys and features, softmax output and the
class weights as values.
"""
p = self.params
inputs = input_batch.Get(p.input_field)
features = self.network.fprop(inputs)
batch_size = inputs.shape[0]
example_weights = jnp.ones([batch_size])
if 'weight' in input_batch:
example_weights = input_batch.weight
if example_weights.shape != (batch_size,):
raise ValueError(
          f'Shape of example weights should be ({batch_size},), but instead '
          f'is {example_weights.shape}')
# Softmax expects weights to be of shape [..., 1].
softmax_output = self.softmax.fprop(
inputs=features,
class_weights=example_weights[:, jnp.newaxis],
class_probabilities=input_batch.label_probs)
return NestedMap(
features=features,
softmax_output=softmax_output,
example_weights=example_weights)
def compute_loss(self, predictions: NestedMap,
input_batch: NestedMap) -> Tuple[Metrics, Dict[str, Any]]:
"""Computes the loss and other metrics for the given predictions.
Args:
predictions: The output of `compute_predictions`.
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index. The base class just returns an empty dict.
"""
avg_xent = predictions.softmax_output.avg_xent
total_weight = predictions.softmax_output.total_weight
metrics = NestedMap(
avg_xent=(avg_xent, total_weight),
num_predictions=(total_weight, jnp.array(1.0, total_weight.dtype)))
# Compute top-1 and top-5 accuracy and add summary.
acc1 = metric_utils.top_k_accuracy(
1,
predictions.softmax_output.logits,
label_probs=input_batch.label_probs,
weights=predictions.example_weights)
acc5 = metric_utils.top_k_accuracy(
5,
predictions.softmax_output.logits,
label_probs=input_batch.label_probs,
weights=predictions.example_weights)
metrics.update(
accuracy=(acc1, predictions.softmax_output.total_weight),
acc5=(acc5, predictions.softmax_output.total_weight),
error=(1.0 - acc1, predictions.softmax_output.total_weight),
error5=(1.0 - acc5, predictions.softmax_output.total_weight))
# Add top-1 and top-5 accuracies to summaries.
base_layer.add_summary('acc1', acc1)
base_layer.add_summary('acc5', acc5)
return metrics, {}
class BertModel(BaseModel):
"""Bert Model base task."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('lm', layers.TransformerLm.Params(), 'Bert lm layer.')
p.Define(
'label_smoothing_prob', 0.0,
'If > 0.0, smooth out one-hot prob by spreading this amount of'
' prob mass to all other tokens.')
p.Define('mask_token_id', 0, 'Mask token id')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
assert p.lm.masked_lm
assert p.lm.packed_input
self.create_child('lm', p.lm)
mlm_augment_p = layers.MaskedLmDataAugmenter.Params()
mlm_augment_p.vocab_size = p.lm.vocab_size
mlm_augment_p.mask_token_id = p.mask_token_id
self.create_child('mlm_augmenter', mlm_augment_p)
def compute_predictions(self, input_batch: NestedMap) -> Predictions:
"""Computes predictions for `input_batch`."""
p = self.params
assert p.lm.packed_input
segment_ids = input_batch.segment_ids
segment_pos = input_batch.segment_pos
paddings = input_batch.paddings
# Note that internal BertTransformer uses input_batch.ids instead.
labels = input_batch.labels
if 'masked_ids' in input_batch:
# Input data already has masking done.
augmented_labels = input_batch.masked_ids
augmented_pos = input_batch.masked_pos
else:
augmented_labels, augmented_pos = self.mlm_augmenter.fprop(
labels, paddings)
if p.label_smoothing_prob > 0.0:
class_probabilities = jax.nn.one_hot(labels, p.lm.vocab_size)
fill_prob = p.label_smoothing_prob / (p.lm.vocab_size - 1)
class_probabilities = (
(1.0 - p.label_smoothing_prob) * class_probabilities + fill_prob *
(1.0 - class_probabilities)).astype(self.fprop_dtype)
# Only compute loss on masked pos.
labels = NestedMap(
class_probabilities=class_probabilities, class_weights=augmented_pos)
else:
# Only compute loss on masked pos.
labels = NestedMap(class_ids=labels, class_weights=augmented_pos)
lm_out = self.lm.fprop(
inputs=augmented_labels,
paddings=paddings,
labels=labels,
segment_ids=segment_ids,
segment_pos=segment_pos)
lm_out.augmented_labels = augmented_labels
lm_out.augmented_pos = augmented_pos
return lm_out
def compute_loss(self, predictions: NestedMap,
input_batch: NestedMap) -> Tuple[Metrics, Dict[str, Any]]:
"""Computes the loss and other metrics for the given predictions.
Args:
predictions: The output of `compute_predictions`.
input_batch: A `.NestedMap` object containing input tensors to this tower.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
      values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index.
"""
labels = input_batch.labels
num_tokens = jnp.sum(1.0 - input_batch.paddings.astype(jnp.float32))
num_seqs = jnp.sum(
jnp.amax(input_batch.segment_ids.astype(jnp.float32), axis=1))
weights = predictions.augmented_pos.astype(jnp.float32)
predicted_labels = predictions.per_example_argmax.astype(labels.dtype)
num_preds = predictions.total_weight.astype(jnp.float32)
mean_acc = jnp.sum(
(labels == predicted_labels) * weights) / jnp.maximum(num_preds, 1)
metric_weight = jnp.array(num_preds, predictions.avg_xent.dtype)
metrics = py_utils.NestedMap(
total_loss=(predictions.total_loss, metric_weight),
avg_xent=(predictions.avg_xent, metric_weight),
aux_loss=(predictions.aux_loss, metric_weight),
log_pplx=(predictions.avg_xent, metric_weight),
fraction_of_correct_preds=(mean_acc, jnp.array(num_preds,
mean_acc.dtype)),
num_predictions=(num_preds, jnp.array(1.0, num_preds.dtype)),
num_tokens=(num_tokens, jnp.array(1.0, num_tokens.dtype)),
num_seqs=(num_seqs, jnp.array(1.0, num_seqs.dtype)),
)
per_example_output = py_utils.NestedMap()
return metrics, per_example_output
| tensorflow/lingvo | lingvo/jax/base_model.py | Python | apache-2.0 | 37,124 | 0.004067 |
class ComplexClause(object):
type_string = ''
def __init__(self, *args):
self.clauses = args
self.add_prefix(self.type_string)
def is_matrix(self):
for c in self.clauses:
if not c.is_matrix():
return False
return True
def involves(self, annotation):
for c in self.clauses:
if c.involves(annotation):
return True
return False
@property
def nodes(self):
"""
Get all annotations involved in the clause.
"""
nodes = []
for a in self.clauses:
nodes.extend(a.nodes)
return nodes
@property
def in_subquery(self):
for a in self.clauses:
if a.in_subquery:
return True
return False
@property
def attributes(self):
"""
Get all attributes involved in the clause.
"""
attributes = []
for a in self.clauses:
attributes.extend(a.attributes)
return attributes
def add_prefix(self, prefix):
"""
Adds a prefix to a clause
Parameters
----------
prefix : str
the prefix to add
"""
for i, c in enumerate(self.clauses):
if isinstance(c, ComplexClause):
c.add_prefix(prefix + str(i))
else:
try:
c.value_alias_prefix += prefix + str(i)
except AttributeError:
pass
def generate_params(self):
"""
Generates dictionary of parameters of ComplexClause
Returns
-------
params : dict
a dictionary of parameters
"""
from .attributes import NodeAttribute
params = {}
for c in self.clauses:
if isinstance(c, ComplexClause):
params.update(c.generate_params())
else:
try:
if not isinstance(c.value, NodeAttribute):
params[c.cypher_value_string()[1:-1].replace('`', '')] = c.value
except AttributeError:
pass
return params
class or_(ComplexClause):
type_string = 'or_'
def for_cypher(self):
"""
Return a Cypher representation of the clause.
"""
temp = ' OR '.join(x.for_cypher() for x in self.clauses)
temp = "(" + temp + ")"
return temp
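# Minimal usage sketch (assumes `c1` and `c2` are filter clauses exposing
# `for_cypher()` and a `value_alias_prefix`, e.g. attribute comparisons):
#
#   clause = and_(c1, or_(c2, c1))   # `and_` is defined just below
#   clause.for_cypher()       # -> "(<c1> AND (<c2> OR <c1>))"
#   clause.generate_params()  # dict of literal values keyed by value aliases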
class and_(ComplexClause):
type_string = 'and_'
def for_cypher(self):
"""
Return a Cypher representation of the clause.
"""
temp = ' AND '.join(x.for_cypher() for x in self.clauses)
temp = "(" + temp + ")"
return temp | PhonologicalCorpusTools/PolyglotDB | polyglotdb/query/base/complex.py | Python | mit | 2,772 | 0.000722 |
import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import views as auth_views, get_user_model, update_session_auth_hash, logout
from django.contrib.auth.tokens import default_token_generator
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, UpdateView, FormView
from taas.reservation.views import get_payment_order, get_payment_mac
from taas.user import forms
from taas.user import mixins
from taas.user import models
from taas.user import tasks
logger = logging.getLogger(__name__)
class UserCreateView(CreateView):
success_message = _('User has been successfully registered.')
success_url = reverse_lazy('homepage')
template_name = 'user_registration.html'
model = models.User
form_class = forms.UserCreationForm
def form_valid(self, form):
self.object = form.save()
tasks.email_admin_on_user_registration.delay(self.object.id)
messages.success(self.request, self.success_message)
logger.info('Unverified user with email %s has been successfully registered.'
% form.cleaned_data.get('email'))
return HttpResponseRedirect(self.get_success_url())
class UserUpdateView(mixins.LoggedInMixin, UpdateView):
success_message = _('Information has been updated.')
success_url = reverse_lazy('user_update_form')
template_name = 'user_update.html'
model = models.User
form_class = forms.UserUpdateForm
def get_object(self, queryset=None):
return self.request.user
def get_context_data(self, **kwargs):
kwargs['pin'] = self.request.user.pin
return super(UserUpdateView, self).get_context_data(**kwargs)
def form_valid(self, form):
self.object = form.save()
update_session_auth_hash(self.request, self.object)
messages.success(self.request, self.success_message)
        logger.info('User with email %s has been updated.' % form.cleaned_data.get('email'))
return HttpResponseRedirect(self.get_success_url())
class UserDeactivateView(mixins.LoggedInMixin, SuccessMessageMixin, FormView):
success_message = _('User has been deactivated.')
form_class = forms.UserDeactivateForm
template_name = 'user_deactivate.html'
success_url = reverse_lazy('homepage')
def get_form_kwargs(self):
kwargs = super(UserDeactivateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
self.request.user.is_active = False
self.request.user.save()
tasks.email_admin_on_user_deactivation.delay(self.request.user.id)
        logger.info('User with email %s has been deactivated.' % form.cleaned_data.get('email'))
logout(self.request)
return super(UserDeactivateView, self).form_valid(form)
def password_reset(request):
kwargs = {
'template_name': 'password_reset/form.html',
'email_template_name': 'password_reset/email.html',
'subject_template_name': 'password_reset/subject.html',
'post_reset_redirect': reverse_lazy('homepage')
}
if request.method == 'POST' and request.POST.get('email'):
messages.add_message(request, messages.SUCCESS, _('Email instructions have been sent.'),
fail_silently=True)
response = auth_views.password_reset(request, **kwargs)
return response
def password_reset_confirm(request, uidb64=None, token=None):
template_name = 'password_reset/confirm.html'
post_reset_redirect = reverse('homepage')
token_generator = default_token_generator
set_password_form = forms.CustomPasswordSetForm
UserModel = get_user_model()
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS,
_('Your password has been set. You may go ahead and log in now.'),
fail_silently=True)
logger.info('Password for user %s has been reset.'
% user.email)
return HttpResponseRedirect(post_reset_redirect)
else:
title = _('Password reset unsuccessful')
else:
form = set_password_form(user)
title = _('Enter new password')
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
return TemplateResponse(request, template_name, context)
class AddBalanceView(mixins.LoggedInMixin, SuccessMessageMixin, FormView):
form_class = forms.AddBalanceForm
template_name = 'update_budget.html'
def form_valid(self, form):
amount = form.cleaned_data['amount']
payment = get_payment_order(amount, 'B%s' % self.request.user.id)
mac = get_payment_mac(payment)
host = settings.MAKSEKESKUS['host']
return render_to_response('proceed_budget.html', {'json': payment, 'mac': mac, 'host': host})
| crypotex/taas | taas/user/views.py | Python | gpl-2.0 | 6,005 | 0.001166 |
# -*- encoding: utf-8 -*-
import datetime
from django import forms
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core import exceptions as django_exceptions
from lyra import models
class Reservation(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.person = kwargs.pop("person")
self.namespace = kwargs.pop("namespace")
self.queryset = kwargs.pop("queryset")
super(Reservation, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(Reservation, self).clean()
if all(k in cleaned_data for k in ("start", "stop")):
start = cleaned_data["start"]
stop = cleaned_data["stop"]
if start > stop:
(self._errors
.setdefault("start", self.error_class())
.append(_("The reservation should begin before it ends")))
return cleaned_data
def save(self, commit=True, **kwargs):
obj = super(Reservation, self).save(commit=False, **kwargs)
if not obj.pk:
obj.person = self.person
obj.namespace = self.namespace
if commit:
obj.save()
return obj
class Meta:
model = models.Reservation
exclude = ("namespace", "person", "long_description_markup")
widgets = {
"style": forms.Select(attrs={"class": "schedule_style"}),
}
class Media:
js = ("shared/js/sivari.stylepreview.js",)
class ReservationExclusive(Reservation):
def toggle_enabled(self, cleaned_data):
return (not hasattr(self, "exclusive")
and cleaned_data.get("exclusive"))
def clean(self):
cleaned_data = super(ReservationExclusive, self).clean()
start_date = cleaned_data.get("start")
stop_date = cleaned_data.get("stop")
if start_date and stop_date and self.toggle_enabled(cleaned_data):
would_conflict = self.queryset.date_range(
start_date, stop_date)
if self.instance.pk:
would_conflict = would_conflict.exclude(pk=self.instance.pk)
if would_conflict.count():
(self._errors
.setdefault("start", self.error_class())
.append(_(u"The reservation would conflict with %(conflict_count)s "
u"other reservations.") % {
"conflict_count": would_conflict}))
return cleaned_data
class ReservationExclusiveEnable(ReservationExclusive):
exclusive = forms.BooleanField(
label=_(u"No overlap"),
required=False)
class ReservationExclusiveDisable(ReservationExclusive):
exclusive = forms.BooleanField(
label=_(u"No overlap"),
required=False,
initial=True)
class ConfirmForm(forms.Form):
confirm = forms.BooleanField()
| hylje/Lyra | lyra/forms.py | Python | bsd-3-clause | 3,064 | 0.00359 |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.routers.ports \
import forms as project_forms
from openstack_dashboard.dashboards.admin.routers.ports \
import tabs as project_tabs
class AddInterfaceView(forms.ModalFormView):
form_class = project_forms.AddInterface
template_name = 'admin/routers/ports/create.html'
success_url = 'horizon:admin:routers:detail'
failure_url = 'horizon:admin:routers:detail'
page_title = _("Add Interface")
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['router_id'],))
@memoized.memoized_method
def get_object(self):
try:
router_id = self.kwargs["router_id"]
return api.neutron.router_get(self.request, router_id)
except Exception:
redirect = reverse(self.failure_url, args=[router_id])
msg = _("Unable to retrieve router.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(AddInterfaceView, self).get_context_data(**kwargs)
context['router'] = self.get_object()
context['form_url'] = 'horizon:admin:routers:addinterface'
return context
def get_initial(self):
router = self.get_object()
return {"router_id": self.kwargs['router_id'],
"router_name": router.name_or_id}
class SetGatewayView(forms.ModalFormView):
form_class = project_forms.SetGatewayForm
template_name = 'admin/routers/ports/setgateway.html'
success_url = 'horizon:admin:routers:index'
failure_url = 'horizon:admin:routers:index'
page_title = _("Set Gateway")
def get_success_url(self):
return reverse(self.success_url)
@memoized.memoized_method
def get_object(self):
try:
router_id = self.kwargs["router_id"]
return api.neutron.router_get(self.request, router_id)
except Exception:
redirect = reverse(self.failure_url)
msg = _("Unable to set gateway.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(SetGatewayView, self).get_context_data(**kwargs)
context['router'] = self.get_object()
return context
def get_initial(self):
router = self.get_object()
return {"router_id": self.kwargs['router_id'],
"router_name": router.name_or_id}
class DetailView(tabs.TabView):
tab_group_class = project_tabs.PortDetailTabs
template_name = 'admin/networks/ports/detail.html'
| xuweiliang/Codelibrary | openstack_dashboard/dashboards/project/routers/ports/views.py | Python | apache-2.0 | 3,508 | 0 |
import unittest
import nest
from nix4nest.nest_api.models.multimeter import NestMultimeter
class TestNode(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
self.neuron_id = nest.Create('iaf_neuron')[0]
rec_params = {'record_from': ['V_m'], 'withtime': True}
self.mm_id = nest.Create('multimeter', params=rec_params)[0]
nest.Connect([self.mm_id], [self.neuron_id])
self.mm = NestMultimeter(self.mm_id, 'V_m')
def tearDown(self):
nest.ResetKernel()
def test_properties(self):
for k in nest.GetStatus([self.mm_id])[0].keys():
assert(k in self.mm.properties)
def test_data(self):
assert(len(self.mm.data) == 0)
nest.Simulate(50)
assert(len(self.mm.data) == 0)
self.mm.refresh()
assert(len(self.mm.data) == 49)
assert(self.neuron_id in self.mm.senders)
assert((self.mm.senders == self.neuron_id).all()) | asobolev/nix4nest | nix4nest/test/test_nest_api/test_multimeter.py | Python | lgpl-3.0 | 967 | 0.001034 |
#!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
"""
Home-monitor
~~~~~~~~~~~~
:copyright: (c) 2013 by Aurélien Chabot <aurelien@chabot.fr>
:license: GPLv3, see COPYING for more details.
"""
try:
import threading
import sys, os, time, datetime
import json
import urllib2
from ConfigParser import SafeConfigParser
except ImportError as error:
print 'ImportError: ', str(error)
exit(1)
try:
sys.path.insert(0, '../rest/')
sys.path.insert(0, '/usr/local/bin/')
from restClientLib import get_nodes, set_switch, update_sensor, update_switch
except ImportError as error:
print 'Custom py ImportError: ', str(error)
exit(1)
from traceback import print_exc
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[default]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else: return self.fp.readline()
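# FakeSecHead lets SafeConfigParser read a section-less config file by injecting
# a fake "[default]" header ahead of the real content. For example (hypothetical
# value), a file containing only:
#   PORT=8000
# is parsed as if it read:
#   [default]
#   PORT=8000
# so options can be fetched with config.getint('default', 'PORT') as done below.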
if len(sys.argv) > 1:
config = SafeConfigParser()
config.readfp(FakeSecHead(open(sys.argv[1])))
else:
print("You need to provide a configuration")
exit(1)
class Monitor(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def get_rules(self):
data = {}
try:
url = urllib2.urlopen("http://localhost:%(port)d/rules/" % { "port" : config.getint('default', 'PORT') })
data = json.loads(url.read())
except:
print("Failed to get rules")
print_exc()
finally:
return data
def run(self):
while True:
rules = self.get_rules()
for rule in rules:
now = datetime.datetime.now().timetuple()
test_start = int(now[3]) > int(rule["start_time"])
test_end = int(now[3]) < int(rule["end_time"])
if (test_start and test_end) or (int(rule["start_time"])>int(rule["end_time"]) and (test_start or test_end)):
switch = { 'host' : "http://" + rule['switch_host'], 'id' : rule['switch_id']}
sensor = { 'host' : "http://" + rule['sensor_host'], 'id' : rule['sensor_id']}
update_sensor(sensor)
if sensor['value'] < (rule['temp'] - 0.5):
print("Set heater on, current temp is %s, target is %s" % (str(sensor['value']), str(rule['temp'])))
set_switch(switch, 1)
if sensor['value'] > (rule['temp'] + 0.5):
print("Set heater off, current temp is %s, target is %s" % (str(sensor['value']), str(rule['temp'])))
set_switch(switch, 0)
time.sleep(60)
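# Example of a rule dict as consumed by run() (hypothetical values; only these
# keys are read from the REST response):
#   {"start_time": 22, "end_time": 6, "temp": 19.5,
#    "switch_host": "192.168.0.10:8080", "switch_id": 1,
#    "sensor_host": "192.168.0.11:8080", "sensor_id": 2}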
# Launch monitor
monitor = Monitor()
monitor.start()
| trishika/home-monitor | monitor.py | Python | gpl-3.0 | 2,393 | 0.033445 |
from phovea_server.ns import Namespace, abort
from phovea_server.util import jsonify
from phovea_server.config import get as get_config
from phovea_server.plugin import list as list_plugins
import logging
app = Namespace(__name__)
_log = logging.getLogger(__name__)
@app.route('/<path:path>')
def _config(path):
path = path.split('/')
key = path[0]
plugin = next((p for p in list_plugins('tdp-config-safe-keys') if p.id == key), None)
if plugin is None:
_log.error('404: config key "{}" not found'.format(key))
abort(404, 'config key "{}" not found'.format(key))
path[0] = plugin.configKey
return jsonify(get_config('.'.join(path)))
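# Usage sketch (hypothetical key and path): a GET to <namespace>/mykey/some/option
# looks up the 'tdp-config-safe-keys' plugin registered with id 'mykey' and
# returns get_config('<plugin.configKey>.some.option') as JSON; unknown keys
# yield a 404.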
def create():
return app
| datavisyn/tdp_core | tdp_core/config.py | Python | bsd-3-clause | 688 | 0.011628 |
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2018 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Upload weather data to PWS Weather.
`PWS Weather`_ is a site run by AerisWeather_ that "brings together
personal weather station data worldwide from locales not served by
primary weather services."
* Create account: http://www.pwsweather.com/register.php
* API based on WU protocol: `<http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol>`_
* Additional dependency: http://docs.python-requests.org/
* Example ``weather.ini`` configuration::
[pwsweather]
station = ABCDEFGH1
password = xxxxxxx
[logged]
services = ['pwsweather', 'underground']
.. _PWS Weather: http://www.pwsweather.com/
.. _AerisWeather: https://www.aerisweather.com/
"""
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from datetime import timedelta
import logging
import os
import sys
import requests
import pywws.service
__docformat__ = "restructuredtext en"
service_name = os.path.splitext(os.path.basename(__file__))[0]
logger = logging.getLogger(__name__)
class ToService(pywws.service.CatchupDataService):
config = {
'station' : ('', True, 'ID'),
'password': ('', True, 'PASSWORD'),
}
fixed_data = {'action': 'updateraw', 'softwaretype': 'pywws'}
logger = logger
service_name = service_name
template = """
#live#
#idx "'dateutc' : '%Y-%m-%d %H:%M:%S',"#
#wind_dir "'winddir' : '%.0f'," "" "winddir_degrees(x)"#
#wind_ave "'windspeedmph': '%.2f'," "" "wind_mph(x)"#
#wind_gust "'windgustmph' : '%.2f'," "" "wind_mph(x)"#
#hum_out "'humidity' : '%.d',"#
#temp_out "'tempf' : '%.1f'," "" "temp_f(x)"#
#rel_pressure "'baromin' : '%.4f'," "" "pressure_inhg(x)"#
#calc "temp_f(dew_point(data['temp_out'], data['hum_out']))" "'dewptf': '%.1f',"#
#calc "rain_inch(rain_hour(data))" "'rainin': '%g',"#
#calc "rain_inch(rain_day(data))" "'dailyrainin': '%g',"#
"""
def __init__(self, context, check_params=True):
super(ToService, self).__init__(context, check_params)
# extend template
if context.params.get('config', 'ws type') == '3080':
self.template += """
#illuminance "'solarradiation': '%.2f'," "" "illuminance_wm2(x)"#
#uv "'UV' : '%d',"#
"""
@contextmanager
def session(self):
with requests.Session() as session:
yield session
def upload_data(self, session, prepared_data={}):
try:
rsp = session.get(
'http://www.pwsweather.com/pwsupdate/pwsupdate.php',
params=prepared_data, timeout=60)
except Exception as ex:
return False, repr(ex)
if rsp.status_code != 200:
return False, 'http status: {:d}'.format(rsp.status_code)
text = rsp.text.strip()
if text:
return True, 'server response "{:s}"'.format(text)
return True, 'OK'
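# Sketch of a prepared_data dict as passed to upload_data (hypothetical values;
# 'ID' and 'PASSWORD' are presumably filled from the `config` mapping above,
# the rest from `fixed_data` and the `template`):
#   {'action': 'updateraw', 'softwaretype': 'pywws',
#    'ID': 'ABCDEFGH1', 'PASSWORD': 'xxxxxxx',
#    'dateutc': '2018-01-01 12:00:00', 'tempf': '41.0', ...}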
if __name__ == "__main__":
sys.exit(pywws.service.main(ToService))
| jim-easterbrook/pywws | src/pywws/service/pwsweather.py | Python | gpl-2.0 | 3,827 | 0.001045 |
# coding: utf-8
import json
import logging
import webapp2
from webapp2_extras import sessions
from google.appengine.api.taskqueue import TombstonedTaskError, TaskAlreadyExistsError, DuplicateTaskNameError
from domain.entity import User
import error
class BaseHandler(webapp2.RequestHandler):
def dispatch(self):
self.session_store = sessions.get_store(request=self.request)
user = self.session.get('user')
if user:
self.user = User.from_json(user)
else:
self.user = None
try:
return webapp2.RequestHandler.dispatch(self)
except webapp2.HTTPException as e:
self.response.set_status(e.code)
if e.message:
self.response.write(e.message)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
return self.session_store.get_session()
@property
def session_id(self):
cookie_name = self.session_store.config['cookie_name']
return self.request.cookies[cookie_name]
class JsonHandler(BaseHandler):
def dispatch(self):
j = super(JsonHandler, self).dispatch()
self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
if j is not None:
self.response.out.write(json.dumps(j))
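# Usage sketch (hypothetical handler): a dict returned from a request method is
# serialized to JSON by JsonHandler.dispatch:
#
#   class PingHandler(JsonHandler):
#       def get(self):
#           return {'ok': True}   # response body: {"ok": true}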
class TaskHandler(BaseHandler):
"""Handle unrecoverable errors."""
def dispatch(self):
try:
super(TaskHandler, self).dispatch()
# Unrecoverable Exceptions such as Invalid Parameter
except error.TaskUnrecoverableException as e:
logging.error(e)
except (TombstonedTaskError,
TaskAlreadyExistsError,
DuplicateTaskNameError) as e:
logging.error(e)
def signin_user_only(f):
"""Raise UnauthorizedException if session user is None
Examples:
class MyHandler(BaseHandler):
            @signin_user_only
def get(self):
# following code is executed only if user is signed in.
...
"""
def wrapper(self, *args, **keywords):
if not self.user:
raise error.UnauthorizedException('Need sign in')
else:
return f(self, *args, **keywords)
return wrapper
| hagifoo/gae-pomodoro | app/src/application/handler/__init__.py | Python | mit | 2,303 | 0.000868 |
from glob import glob
import fitsio
import sys
from astrometry.util.fits import *
from astrometry.util.file import *
from astrometry.util.starutil_numpy import *
from astrometry.libkd.spherematch import *
from collections import Counter
from legacypipe.oneblob import _select_model
from legacypipe.survey import wcs_for_brick
from astrometry.util.multiproc import multiproc
B = fits_table('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/survey-bricks.fits.gz')
def patch_one(X):
(ifn, Nfns, fn) = X
T8 = fits_table(fn)
phdr = fitsio.read_header(fn)
hdr = T8.get_header()
amfn = fn.replace('/tractor-', '/all-models-').replace('/tractor/', '/metrics/')
A = fits_table(amfn)
Ahdr = fitsio.read_header(amfn)
abands = Ahdr['BANDS'].strip()
nparams = dict(ptsrc=2, simple=2, rex=3, exp=5, dev=5, comp=9)
galaxy_margin = 3.**2 + (nparams['exp'] - nparams['ptsrc'])
rex = True
brick = B[B.brickname == T8.brickname[0]]
brick = brick[0]
brickwcs = wcs_for_brick(brick)
assert(len(A) == len(np.flatnonzero(T8.type != 'DUP ')))
typemap = dict(ptsrc='PSF', rex='REX', dev='DEV', exp='EXP', comp='COMP')
Tnew = T8.copy()
npatched = 0
for i,(d,ttype) in enumerate(zip(A.dchisq, T8.type)):
dchisqs = dict(zip(['ptsrc','rex','dev','exp','comp'], d))
mod = _select_model(dchisqs, nparams, galaxy_margin, rex)
ttype = ttype.strip()
# The DUP elements appear at the end, and we *zip* A and T8; A does not contain the DUPs
# so is shorter by the number of DUP elements.
assert(ttype != 'DUP')
newtype = typemap[mod]
# type unchanged
if ttype == newtype:
continue
# Copy fit values from the "newtype" entries in all-models
Tnew.type[i] = '%-4s' % newtype
cols = ['ra', 'dec', 'ra_ivar', 'dec_ivar']
nt = newtype.lower()
for c in cols:
Tnew.get(c)[i] = A.get('%s_%s' % (nt,c))[i]
# expand flux, flux_ivar
for c in ['flux', 'flux_ivar']:
flux = A.get('%s_%s' % (nt,c))[i]
if len(abands) == 1:
Tnew.get('%s_%s' % (c,abands[0]))[i] = flux
else:
for ib,band in enumerate(abands):
Tnew.get('%s_%s' % (c,band))[i] = flux[ib]
cc = []
if newtype in ['EXP', 'COMP']:
cc.append('exp')
if newtype in ['DEV', 'COMP']:
cc.append('dev')
for c1 in cc:
for c2 in ['e1','e2','r']:
for c3 in ['', '_ivar']:
c = 'shape%s_%s%s' % (c1, c2, c3)
ac = '%s_shape%s_%s%s' % (nt, c1, c2, c3)
Tnew.get(c)[i] = A.get(ac)[i]
if newtype == 'COMP':
Tnew.fracdev[i] = A.comp_fracdev[i]
Tnew.fracdev_ivar[i] = A.comp_fracdev_ivar[i]
if newtype == 'PSF':
# Zero out
for c1 in ['dev','exp']:
for c2 in ['e1','e2','r']:
for c3 in ['', '_ivar']:
c = 'shape%s_%s%s' % (c1, c2, c3)
Tnew.get(c)[i] = 0.
Tnew.fracdev[i] = 0.
Tnew.fracdev_ivar[i] = 0.
# recompute bx,by, brick_primary
ok,x,y = brickwcs.radec2pixelxy(Tnew.ra[i], Tnew.dec[i])
Tnew.bx[i] = x-1.
Tnew.by[i] = y-1.
Tnew.brick_primary[i] = ((Tnew.ra[i] >= brick.ra1 ) * (Tnew.ra[i] < brick.ra2) *
(Tnew.dec[i] >= brick.dec1) * (Tnew.dec[i] < brick.dec2))
npatched += 1
print('%i of %i: %s patching %i of %i sources' % (ifn+1, Nfns, os.path.basename(fn), npatched, len(Tnew)))
if npatched == 0:
return
phdr.add_record(dict(name='PATCHED', value=npatched,
comment='Patched DR8.2.1 model-sel bug'))
outfn = fn.replace('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/',
'patched/')
outdir = os.path.dirname(outfn)
try:
os.makedirs(outdir)
except:
pass
Tnew.writeto(outfn, header=hdr, primheader=phdr)
def main():
#fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/000/tractor-000??00?.fits')
fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/*/tractor-*.fits')
fns.sort()
print(len(fns), 'Tractor catalogs')
vers = Counter()
keepfns = []
for fn in fns:
hdr = fitsio.read_header(fn)
ver = hdr['LEGPIPEV']
ver = ver.strip()
vers[ver] += 1
if ver == 'DR8.2.1':
keepfns.append(fn)
print('Header versions:', vers.most_common())
fns = keepfns
print('Keeping', len(fns), 'with bad version')
N = len(fns)
args = [(i,N,fn) for i,fn in enumerate(fns)]
mp = multiproc(8)
mp.map(patch_one, args)
if __name__ == '__main__':
main()
| legacysurvey/pipeline | py/legacyanalysis/fix-model-selection.py | Python | gpl-2.0 | 4,976 | 0.007838 |
# -*- coding: utf-8 -*-
"""
Copyright 2010 cloudControl UG (haftungsbeschraenkt)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
messages = {}
messages['WrongApplication'] = r'This application is unknown.'
messages['WrongDeployment'] = r'This deployment is unknown.'
messages['PasswordsDontMatch'] = r"The passwords don't match."
messages['InvalidApplicationName'] = r'Name may only contain a-z and 0-9 and must not start with a number.'
messages['WrongUsername'] = r'This username is unknown.'
messages['UserBelongsToApp'] = r'This user already belongs to this application.'
messages['RemoveUserGoneError'] = r'No such app or user. Please check app name and user name or email address.'
messages['UserCreatedNowCheckEmail'] = r'User has been created. Please check your e-mail for your confirmation code.'
messages['DeleteOnlyApplication'] = r'You can only delete applications not deployments. Try the undeploy command.'
messages['NoAliasGiven'] = r'You have to specify an alias.'
messages['WrongAlias'] = r'This alias is unknown.'
messages['NotAllowed'] = r'Sorry. You are not allowed to perform this action.'
messages['CannotDeleteDeploymentExist'] = r'You have to undeploy all related deployments, before you can delete the application.'
messages['NotAuthorized'] = r'The authorization failed, check your e-mail address and password.'
messages['PermissionDenied'] = r'You are not allowed to push to this repository. Maybe check your keys using "cctrluser key".'
messages['SecurityQuestionDenied'] = r'Action canceled on user request.'
messages['WrongAddon'] = r'This addon is unknown for this app_name/deployment_name.'
messages['DuplicateAddon'] = r'You can not add the same addon option twice.'
messages['InvalidAddon'] = r'This is not a valid addon name. Check the list of available addons with {0} app_name/deployment_name addon.list .'.format(os.path.basename(sys.argv[0]))
messages['ForbiddenAddon'] = 'You are not allowed to perform this action.\nIf you are trying to use a Beta addon, you can request access from the addon page.'
messages['WrongPubKeyName'] = r'The public key file must be named "id_rsa.pub".'
messages['NoSuchKeyFile'] = r'No such key file. Please check your input.'
messages['WrongKeyFormat'] = r'Your id_rsa.pub public key file seems to be in the wrong format.'
messages['InvalidAppOrDeploymentName'] = r'The application or deployment name is invalid.'
messages['KeyDuplicate'] = r'This key was added previously.'
messages['NoWorkerCommandGiven'] = r'The worker command is missing. Try the path to your PHP file relative to your repository root.'
messages['NoRunCommandGiven'] = r'Run command is missing.'
messages['WrongWorker'] = r'There is no such worker for this app_name/deployment_name.'
messages['NeitherBazaarNorGitFound'] = r'Cannot find "git" nor "bzr"! Please make sure either Bazaar or Git executables are in your path.'
messages['BazaarRequiredToPush'] = r'Please make sure the Bazaar executable is in your path.'
messages['GitRequiredToPush'] = r'Please make sure the Git executable is in your path.'
messages['NoCronURLGiven'] = r'You must provide a URL for cron to call.'
messages['NoSuchCronJob'] = r'Sorry, we can not find cronjob with this ID.'
messages['FileReadOrWriteFailed'] = r'Sorry, could not read or write to file.'
messages['FileNotFound'] = r'Sorry, file not found!'
messages['UserShouldCreateKey'] = r'Sorry, something went wrong when creating a key. Please create a key on your system, then run the command again.'
messages['BazaarConfigFound'] = r'Bazaar configuration found! Using "Bazaar" as repository type.'
messages['GitConfigFound'] = r'Git configuration found! Using "Git" as repository type.'
messages['BazaarExecutableFound'] = r'Bazaar seems to be installed! Using "Bazaar" as repository type.'
messages['GitExecutableFound'] = r'Git seems to be installed! Using "Git" as repository type.'
messages['CreatingAppAsDefaultRepoType'] = r'Using default "Git" as repository type.'
messages['DeleteAppsBeforeUser'] = r'There are still applications associated with this user account. Undeploy and/or delete applications before deleting user.'
messages['NoSuchFile'] = r'File not found.'
messages['APIUnreachable'] = r'Could not connect to API...'
messages['NoBuildpackURL'] = r'You need to provide a buildpack URL for "custom" application type.'
messages['NoCustomApp'] = r'You can only provide a buildpack URL if the app type is "custom".'
messages['NoValidBuildpackURL'] = r'The buildpack URL provided is not valid. Please try again.'
messages['AmbiguousSize'] = r'You can only specify one of --size or --memory.'
messages['InvalidMemory'] = r'Memory size should be an integer between 128 and 1024 MB.'
messages['InvalidSize'] = r'Size should be an integer between 1 and 8.'
messages['NoPreviousVersionFound'] = r'Previous version not found.'
messages['ClearCacheFailed'] = r'Clear buildpack cache failed.'
messages['DeploymentFailed'] = r'Deployment failed.'
messages['CommandNotImplemented'] = r'Sorry, this command is not available.'
messages['ShipAndDeploy'] = r'--ship and --push options cannot be used simultaneously.'
messages['RegisterDisabled'] = r'You can register on {0}.'
messages['NoVariablesGiven'] = r'You must provide some variables.'
messages['DuplicatedFlag'] = r'Please, specify a flag only once.'
messages['NotAuthorizedPublicKey'] = r'Public Key authentication failed. Trying with password.'
messages['WrongPublicKey'] = r'Public Key not found or invalid.'
messages['WrongKeyPath'] = r'Wrong Private Key path.'
messages['EncryptedKey'] = r'Private Key file is encrypted, please check if the ssh-agent is running.'
messages['KeyNotFound'] = r'No Private Key found.'
messages['SignatureCreateFailure'] = r'Signature could not be created.'
messages['RSAKeyRequired'] = r'Currently we support RSA keys only.'
if sys.platform == 'win32':
messages['UpdateAvailable'] = r'A newer version is available. Please update.'
messages['UpdateRequired'] = r'A newer version is required. You need to upgrade before using this program.'
else:
messages['UpdateAvailable'] = r'A newer version is available. To upgrade run: (sudo) pip install {0} --upgrade'
messages['UpdateRequired'] = r'A newer version is required. You need to upgrade before using this program. To upgrade run: (sudo) pip install {0} --upgrade'
class CctrlException(Exception):
def __init__(self, error_key):
self.error_message = messages[error_key]
def __str__(self):
return '[ERROR]' + ' ' + self.error_message
class InputErrorException(CctrlException):
"""
    This exception is raised if, for some reason, the given input could not be
    understood at all.
"""
pass
class PasswordsDontMatchException(Exception):
"""
    This exception is raised if the password and the password check weren't
    equal three times in a row.
"""
pass
class PublicKeyException(CctrlException):
"""
This exception is raised if the Public Key is not found
"""
pass
class SignatureException(CctrlException):
"""
This exception is raised if the signature cannot be created
"""
pass
| cloudControl/cctrl | cctrl/error.py | Python | apache-2.0 | 7,677 | 0.00482 |
#!/usr/bin/env python
import ibus
import dbus
bus = dbus.SessionBus()
e = ibus.interface.IPanel()
print e.Introspect("/", bus)
| ibus/ibus-qt | src/interfaces/introspect_panel.py | Python | gpl-2.0 | 130 | 0.007692 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, time, traceback, Ice
Ice.loadSlice('Latency.ice')
import Demo
class Client(Ice.Application):
def run(self, args):
if len(args) > 1:
print self.appName() + ": too many arguments"
return 1
ping = Demo.PingPrx.checkedCast(self.communicator().propertyToProxy('Ping.Proxy'))
if not ping:
print "invalid proxy"
return 1
        # Initial ping to set up the connection.
ping.ice_ping();
repetitions = 100000
print "pinging server " + str(repetitions) + " times (this may take a while)"
tsec = time.time()
i = repetitions
while(i >= 0):
ping.ice_ping()
i = i - 1
tsec = time.time() - tsec
tmsec = tsec * 1000.0
print "time for %d pings: %.3fms" % (repetitions, tmsec)
print "time per ping: %.3fms" % (tmsec / repetitions)
return 0
app = Client()
sys.exit(app.main(sys.argv, "config.client"))
| joshmoore/zeroc-ice | py/demo/Ice/latency/Client.py | Python | gpl-2.0 | 1,349 | 0.005189 |
from .base import BaseInterface
import eventlet
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from flask import Flask, render_template, session, request, send_from_directory
from flask_socketio import SocketIO, emit, join_room, leave_room, close_room, rooms, disconnect
from werkzeug.utils import secure_filename
import threading, os, time, queue
import logging, sys, json
from ..engine.network import get_allip, get_hostname
import socket
from zeroconf import ServiceInfo, Zeroconf
thread = None
thread_lock = threading.Lock()
REGIE_PATH1 = '/opt/RPi-Regie'
REGIE_PATH2 = '/data/RPi-Regie'
class RegieInterface (BaseInterface):
def __init__(self, hplayer, port, datapath):
super(RegieInterface, self).__init__(hplayer, "Regie")
self._port = port
self._datapath = datapath
self._server = None
# HTTP receiver THREAD
def listen(self):
# Advertize on ZeroConf
zeroconf = Zeroconf()
info = ServiceInfo(
"_http._tcp.local.",
"Regie._"+get_hostname()+"._http._tcp.local.",
addresses=[socket.inet_aton(ip) for ip in get_allip()],
port=self._port,
properties={},
server=get_hostname()+".local.",
)
zeroconf.register_service(info)
# Start server
self.log( "regie interface on port", self._port)
with ThreadedHTTPServer(self, self._port) as server:
self._server = server
self.stopped.wait()
self._server = None
# Unregister ZeroConf
zeroconf.unregister_service(info)
zeroconf.close()
def projectPath(self):
return os.path.join(self._datapath, 'project.json')
def projectRaw(self):
project = '{"pool":[], "project":[[]]}'
if os.path.isfile(self.projectPath()):
with open( self.projectPath(), 'r') as file:
project = file.read()
return project
    # parse locally for programmatic execution
def reload(self):
try:
self._project = json.loads(self.projectRaw())
except:
self._project = None
self.log("Error while parsing project..")
# print(self._project)
return self._project
# play sequence
def playseq(self, sceneIndex, seqIndex):
self.log("PLAYSEQ")
try:
# self.log('PLAYSEQ', seqIndex, sceneIndex, boxes)
orderz = []
boxes = [b for b in self._project["project"][0][sceneIndex]["allMedias"] if b["y"] == seqIndex]
for b in boxes:
peerName = self._project["pool"][ b["x"] ]["name"]
# MEDIA
order = { 'peer': peerName, 'synchro': True}
if b["media"] in ['stop', 'pause', 'unfade'] :
order["event"] = b["media"]
elif b["media"] == '...':
order["event"] = 'continue'
elif b["media"].startswith('fade'):
order["event"] = 'fade'
order["data"] = b["media"].split('fade ')[1]
else:
order["event"] = 'playthen'
order["data"] = [ self._project["project"][0][sceneIndex]["name"] + '/' + b["media"] ]
# ON MEDIA END
if 'onend' in b:
if b['onend'] == 'next':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex+1] } )
elif b['onend'] == 'prev':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex-1] } )
elif b['onend'] == 'replay':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex] } )
orderz.append(order)
# LOOP
if b["loop"] == 'loop':
orderz.append( { 'peer': peerName, 'event': 'loop', 'data': 1} )
elif b["loop"] == 'unloop':
orderz.append( { 'peer': peerName, 'event': 'unloop'} )
# LIGHT
if b["light"] and b["light"] != '...':
order = { 'peer': peerName, 'synchro': True, 'event': 'esp'}
if b["light"].startswith('light'):
order["data"] = {
'topic': 'leds/all',
'data': b["light"].split('light ')[1]
}
elif b["light"].startswith('preset'):
order["data"] = {
'topic': 'leds/mem',
'data': b["light"].split('preset ')[1]
}
elif b["light"].startswith('off'):
order["data"] = {
'topic': 'leds/stop',
'data': ''
}
orderz.append(order)
self.emit('playingseq', sceneIndex, seqIndex)
self.emit('peers.triggers', orderz, 437)
except:
self.log('Error playing Scene', sceneIndex, 'Seq', seqIndex)
#
# Threaded HTTP Server
#
class ThreadedHTTPServer(object):
def __init__(self, regieinterface, port):
self.regieinterface = regieinterface
interface_path = os.path.dirname(os.path.realpath(__file__))
if os.path.isdir(REGIE_PATH1):
www_path = os.path.join(REGIE_PATH1, 'web')
elif os.path.isdir(REGIE_PATH2):
www_path = os.path.join(REGIE_PATH2, 'web')
else:
www_path = os.path.join(interface_path, 'regie')
app = Flask(__name__, template_folder=www_path)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins="*")
#
# FLASK Routing Static
#
@app.route('/')
def index():
# self.regieinterface.log('requesting index')
return send_from_directory(www_path, 'index.html')
@app.route('/<path:path>')
def send_static(path):
# self.regieinterface.log('requesting '+path)
return send_from_directory(www_path, path)
#
# FLASK Routing API
#
# @app.route('/<path:path>')
# def send_static(path):
# # self.regieinterface.log('requesting '+path)
# return send_from_directory(www_path, path)
#
# SOCKETIO Routing
#
self.sendBuffer = queue.Queue()
def background_thread():
while True:
try:
task = self.sendBuffer.get_nowait()
if len(task) > 1: socketio.emit(task[0], task[1])
else: socketio.emit(task[0], None)
self.sendBuffer.task_done()
except queue.Empty:
socketio.sleep(0.1)
@self.regieinterface.hplayer.on('files.dirlist-updated')
def filetree_send(ev, *args):
self.sendBuffer.put( ('data', {'fileTree': self.regieinterface.hplayer.files()}) )
@self.regieinterface.hplayer.on('files.activedir-updated')
def activedir_send(ev, *args):
self.sendBuffer.put( ('data', {'scene': args[1]}) )
@self.regieinterface.hplayer.on('*.peer.*')
def peer_send(ev, *args):
event = ev.split('.')[-1]
if event == 'playingseq':
print(ev, args[0]['data'][1])
self.sendBuffer.put( ('data', {'sequence': args[0]['data'][1]}) )
else:
args[0].update({'type': event})
self.sendBuffer.put( ('peer', args[0]) )
        # !!! TODO: stop zyre monitoring when all clients are disconnected
@socketio.on('connect')
def client_connect():
self.regieinterface.log('New Remote Regie connected')
@socketio.on('save')
def save(data):
try:
json.loads(data)
with open( os.path.join(self.regieinterface._datapath, 'project.json'), 'w') as file:
file.write(data)
except:
e = str(sys.exc_info()[0])
self.regieinterface.log('fail to save project: '+e+' '+data)
@socketio.on('init')
def init(data):
# send project
emit('data', self.projectData())
# Start update broadcaster
global thread
with thread_lock:
if thread is None:
thread = socketio.start_background_task(target=background_thread)
@socketio.on('register')
def register(data):
# enable peer monitoring
self.regieinterface.emit('peers.getlink')
self.regieinterface.emit('peers.subscribe', ['status', 'settings', 'playingseq'])
@socketio.on('event')
def event(data):
self.regieinterface.emit('peers.triggers', data, 437)
# prepare sub-thread
self.server_thread = threading.Thread(target=lambda:socketio.run(app, host='0.0.0.0', port=port))
self.server_thread.daemon = True
# watchdog project.json
self.watcher()
# internal load project
self.regieinterface.reload()
def start(self):
self.server_thread.start()
def stop(self):
#self.server.stop()
pass
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def projectData(self):
data={
'fullproject': self.regieinterface.projectRaw(),
'fileTree': self.regieinterface.hplayer.files()
}
return data
def watcher(self):
def onchange(e):
self.regieinterface.log('project updated ! pushing it...')
self.regieinterface.reload()
self.sendBuffer.put( ('data', self.projectData()) )
handler = PatternMatchingEventHandler("*/project.json", None, False, True)
handler.on_any_event = onchange
self.projectObserver = Observer()
self.projectObserver.schedule(handler, os.path.dirname(self.regieinterface.projectPath()))
try:
self.projectObserver.start()
except:
self.regieinterface.log('project.json not found') | Hemisphere-Project/HPlayer2 | core/interfaces/regie.py | Python | gpl-3.0 | 10,996 | 0.012095 |
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from operator import attrgetter
from hyperspy.misc.utils import attrsetter
from copy import deepcopy
import dill
from dask.array import Array
def check_that_flags_make_sense(flags):
# one of: fn, id, sig
def do_error(f1, f2):
raise ValueError(
'The flags "%s" and "%s" are not compatible' %
(f1, f2))
if 'fn' in flags:
if 'id' in flags:
do_error('fn', 'id')
if 'sig' in flags:
do_error('fn', 'sig')
if 'id' in flags:
# fn done previously
if 'sig' in flags:
do_error('id', 'sig')
if 'init' in flags:
do_error('id', 'init')
# all sig cases already covered
def parse_flag_string(flags):
return flags.replace(' ', '').split(',')
def export_to_dictionary(target, whitelist, dic, fullcopy=True):
""" Exports attributes of target from whitelist.keys() to dictionary dic
All values are references only by default.
Parameters
----------
target : object
must contain the (nested) attributes of the whitelist.keys()
whitelist : dictionary
A dictionary, keys of which are used as attributes for exporting.
Key 'self' is only available with tag 'id', when the id of the
target is saved. The values are either None, or a tuple, where:
        * the first item is a string, which contains flags, separated by
          commas.
* the second item is None if no 'init' flag is given, otherwise
the object required for the initialization.
The flag conventions are as follows:
* 'init': object used for initialization of the target. The object is
saved in the tuple in whitelist
* 'fn': the targeted attribute is a function, and may be pickled. A
tuple of (thing, value) will be exported to the dictionary,
where thing is None if function is passed as-is, and True if
dill package is used to pickle the function, with the value as
the result of the pickle.
* 'id': the id of the targeted attribute is exported (e.g. id(target.name))
* 'sig': The targeted attribute is a signal, and will be converted to a
dictionary if fullcopy=True
dic : dictionary
A dictionary where the object will be exported
fullcopy : bool
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
"""
whitelist_flags = {}
for key, value in whitelist.items():
if value is None:
# No flags and/or values are given, just save the target
thing = attrgetter(key)(target)
if fullcopy:
thing = deepcopy(thing)
dic[key] = thing
whitelist_flags[key] = ''
continue
flags_str, value = value
flags = parse_flag_string(flags_str)
check_that_flags_make_sense(flags)
if key == 'self':
if 'id' not in flags:
raise ValueError(
'Key "self" is only available with flag "id" given')
value = id(target)
else:
if 'id' in flags:
value = id(attrgetter(key)(target))
# here value is either id(thing), or None (all others except 'init'),
# or something for init
if 'init' not in flags and value is None:
value = attrgetter(key)(target)
# here value either id(thing), or an actual target to export
if 'sig' in flags:
if fullcopy:
from hyperspy.signal import BaseSignal
if isinstance(value, BaseSignal):
value = value._to_dictionary()
value['data'] = deepcopy(value['data'])
elif 'fn' in flags:
if fullcopy:
value = (True, dill.dumps(value))
else:
value = (None, value)
elif fullcopy:
value = deepcopy(value)
dic[key] = value
whitelist_flags[key] = flags_str
if '_whitelist' not in dic:
dic['_whitelist'] = {}
# the saved whitelist does not have any values, as they are saved in the
# original dictionary. Have to restore then when loading from dictionary,
# most notably all with 'init' flags!!
dic['_whitelist'].update(whitelist_flags)
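# Illustrative sketch (an assumption, not part of the original module): a
# hypothetical whitelist using the flag conventions documented above. The names
# ``component``, ``some_function``, ``some_signal`` and ``axis_object`` are
# placeholders, not HyperSpy API.
#
#   whitelist = {
#       'self': ('id', None),            # only id(component) is stored
#       'name': None,                    # plain (deep)copy of component.name
#       'some_function': ('fn', None),   # pickled with dill when fullcopy=True
#       'some_signal': ('sig', None),    # converted to a dictionary when fullcopy=True
#       'axis': ('init', axis_object),   # object needed to re-initialise the target
#   }
#   dic = {}
#   export_to_dictionary(component, whitelist, dic, fullcopy=True)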
def load_from_dictionary(target, dic):
""" Loads attributes of target to dictionary dic
The attribute list is read from dic['_whitelist'].keys()
Parameters
----------
target : object
must contain the (nested) attributes of the whitelist.keys()
dic : dictionary
A dictionary, containing field '_whitelist', which is a dictionary
with all keys that were exported, with values being flag strings.
The convention of the flags is as follows:
* 'init': object used for initialization of the target. Will be
copied to the _whitelist after loading
* 'fn': the targeted attribute is a function, and may have been
pickled (preferably with dill package).
* 'id': the id of the original object was exported and the
attribute will not be set. The key has to be '_id_'
* 'sig': The targeted attribute was a signal, and may have been
converted to a dictionary if fullcopy=True
"""
new_whitelist = {}
for key, flags_str in dic['_whitelist'].items():
value = dic[key]
flags = parse_flag_string(flags_str)
if 'id' not in flags:
value = reconstruct_object(flags, value)
if 'init' in flags:
new_whitelist[key] = (flags_str, value)
else:
attrsetter(target, key, value)
if len(flags_str):
new_whitelist[key] = (flags_str, None)
else:
new_whitelist[key] = None
if hasattr(target, '_whitelist'):
if isinstance(target._whitelist, dict):
target._whitelist.update(new_whitelist)
else:
attrsetter(target, '_whitelist', new_whitelist)
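# Round-trip sketch (assumed usage; ``new_component`` and ``dic`` are placeholders):
# a dictionary produced by export_to_dictionary can restore the same attributes,
# including the 'init' whitelist entries, on a fresh object of the same class.
#
#   load_from_dictionary(new_component, dic)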
def reconstruct_object(flags, value):
""" Reconstructs the value (if necessary) after having saved it in a
dictionary
"""
if not isinstance(flags, list):
flags = parse_flag_string(flags)
if 'sig' in flags:
if isinstance(value, dict):
from hyperspy.signal import BaseSignal
value = BaseSignal(**value)
value._assign_subclass()
return value
if 'fn' in flags:
ifdill, thing = value
if ifdill is None:
return thing
if ifdill in [True, 'True', b'True']:
return dill.loads(thing)
# should not be reached
raise ValueError("The object format is not recognized")
if isinstance(value, Array):
value = value.compute()
return value
| dnjohnstone/hyperspy | hyperspy/misc/export_dictionary.py | Python | gpl-3.0 | 7,725 | 0.000647 |
#!/usr/bin/env python
# encoding: utf-8
"""
For local testing purposes
"""
from itertools import compress, chain, product, ifilter
from functools import partial
from reader import read_input, list_files
def is_valid(task, solution):
"""
:param reader.Task task:
:param list[1|0] solution:
:return bool: whether constraints in task are met
"""
sets = compress(task.sets, solution)
items_covered = set(chain.from_iterable(s.items for s in sets))
return len(items_covered) == task.item_count
def calc_cost(task, solution):
"""
:param reader.Task task:
:param list[1|0] solution:
:return int:
"""
sets = compress(task.sets, solution)
return sum(s.cost for s in sets)
def bruteforce_solver(task):
"""
As simple solution as we can make.
It finds the optimal solution, but it can't work on big inputs
(say, 20 sets take a few seconds, 25 sets - take a few minutes)
:param reader.Task task:
:return list[1|0]:
"""
all_configurations = product([0, 1], repeat=task.set_count)
valid_configurations = ifilter(partial(is_valid, task), all_configurations)
return min(valid_configurations, key=partial(calc_cost, task))
def check_solver(solver, inputs=list_files(max_size=20)):
"""
    Prove optimality by comparing the solution with the bruteforce control version.
    Only for small examples, sorry. For big ones you can call is_valid()
    :param function(task) solver:
    :param inputs: iterable of input file names
:return:
"""
for fn in inputs:
task = read_input(fn)
solution = solver(task)
if not is_valid(task, solution):
print 'ERROR: solution for {fn} is invalid: {solution}'.format(fn=fn, solution=solution)
continue
control_solution = bruteforce_solver(task)
control_cost = calc_cost(task, control_solution)
cost = calc_cost(task, solution)
if cost != control_cost:
msg = ('ERROR: solution for {fn} has cost={cost}, but optimal is {control_cost}:\n' +
' control:{control_solution}\n' +
' tested: {solution}')
print msg.format(fn=fn, cost=cost, control_cost=control_cost,
control_solution=control_solution, solution=solution)
continue
print 'OK: solution for {fn} is optimal, cost={cost}'.format(fn=fn, cost=cost)
if __name__ == '__main__':
from cp_solver import deep_search
check_solver(lambda (task): deep_search(task).best_solution) # put your solver here
# check_solver(lambda (task): deep_search(task).best_solution, list_files(min_size=30, max_size=50)) # put your solver here
| discreteoptimization/setcover | cp_homebrew_003/validator.py | Python | mit | 2,676 | 0.002242 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import datetime
from collections import OrderedDict
from typing import Dict, List, Tuple, Union
import frappe
from frappe import _
from frappe.utils import date_diff
from erpnext.accounts.report.general_ledger.general_ledger import get_gl_entries
Filters = frappe._dict
Row = frappe._dict
Data = List[Row]
Columns = List[Dict[str, str]]
DateTime = Union[datetime.date, datetime.datetime]
FilteredEntries = List[Dict[str, Union[str, float, DateTime, None]]]
ItemGroupsDict = Dict[Tuple[int, int], Dict[str, Union[str, int]]]
SVDList = List[frappe._dict]
def execute(filters: Filters) -> Tuple[Columns, Data]:
update_filters_with_account(filters)
validate_filters(filters)
columns = get_columns()
data = get_data(filters)
return columns, data
def update_filters_with_account(filters: Filters) -> None:
account = frappe.get_value("Company", filters.get("company"), "default_expense_account")
filters.update(dict(account=account))
def validate_filters(filters: Filters) -> None:
if filters.from_date > filters.to_date:
frappe.throw(_("From Date must be before To Date"))
def get_columns() -> Columns:
return [
{
'label': _('Item Group'),
'fieldname': 'item_group',
'fieldtype': 'Data',
'width': '200'
},
{
'label': _('COGS Debit'),
'fieldname': 'cogs_debit',
'fieldtype': 'Currency',
'width': '200'
}
]
def get_data(filters: Filters) -> Data:
filtered_entries = get_filtered_entries(filters)
svd_list = get_stock_value_difference_list(filtered_entries)
leveled_dict = get_leveled_dict()
assign_self_values(leveled_dict, svd_list)
assign_agg_values(leveled_dict)
data = []
for item in leveled_dict.items():
i = item[1]
if i['agg_value'] == 0:
continue
data.append(get_row(i['name'], i['agg_value'], i['is_group'], i['level']))
if i['self_value'] < i['agg_value'] and i['self_value'] > 0:
data.append(get_row(i['name'], i['self_value'], 0, i['level'] + 1))
return data
def get_filtered_entries(filters: Filters) -> FilteredEntries:
gl_entries = get_gl_entries(filters, [])
filtered_entries = []
for entry in gl_entries:
posting_date = entry.get('posting_date')
from_date = filters.get('from_date')
if date_diff(from_date, posting_date) > 0:
continue
filtered_entries.append(entry)
return filtered_entries
def get_stock_value_difference_list(filtered_entries: FilteredEntries) -> SVDList:
voucher_nos = [fe.get('voucher_no') for fe in filtered_entries]
svd_list = frappe.get_list(
'Stock Ledger Entry', fields=['item_code','stock_value_difference'],
filters=[('voucher_no', 'in', voucher_nos), ("is_cancelled", "=", 0)]
)
assign_item_groups_to_svd_list(svd_list)
return svd_list
def get_leveled_dict() -> OrderedDict:
item_groups_dict = get_item_groups_dict()
lr_list = sorted(item_groups_dict, key=lambda x : int(x[0]))
leveled_dict = OrderedDict()
current_level = 0
nesting_r = []
for l, r in lr_list:
while current_level > 0 and nesting_r[-1] < l:
nesting_r.pop()
current_level -= 1
leveled_dict[(l,r)] = {
'level' : current_level,
'name' : item_groups_dict[(l,r)]['name'],
'is_group' : item_groups_dict[(l,r)]['is_group']
}
if int(r) - int(l) > 1:
current_level += 1
nesting_r.append(r)
update_leveled_dict(leveled_dict)
return leveled_dict
def assign_self_values(leveled_dict: OrderedDict, svd_list: SVDList) -> None:
key_dict = {v['name']:k for k, v in leveled_dict.items()}
for item in svd_list:
key = key_dict[item.get("item_group")]
leveled_dict[key]['self_value'] += -item.get("stock_value_difference")
def assign_agg_values(leveled_dict: OrderedDict) -> None:
keys = list(leveled_dict.keys())[::-1]
prev_level = leveled_dict[keys[-1]]['level']
accu = [0]
for k in keys[:-1]:
curr_level = leveled_dict[k]['level']
if curr_level == prev_level:
accu[-1] += leveled_dict[k]['self_value']
leveled_dict[k]['agg_value'] = leveled_dict[k]['self_value']
elif curr_level > prev_level:
accu.append(leveled_dict[k]['self_value'])
leveled_dict[k]['agg_value'] = accu[-1]
elif curr_level < prev_level:
accu[-1] += leveled_dict[k]['self_value']
leveled_dict[k]['agg_value'] = accu[-1]
prev_level = curr_level
# root node
rk = keys[-1]
leveled_dict[rk]['agg_value'] = sum(accu) + leveled_dict[rk]['self_value']
def get_row(name:str, value:float, is_bold:int, indent:int) -> Row:
item_group = name
if is_bold:
item_group = frappe.bold(item_group)
return frappe._dict(item_group=item_group, cogs_debit=value, indent=indent)
def assign_item_groups_to_svd_list(svd_list: SVDList) -> None:
ig_map = get_item_groups_map(svd_list)
for item in svd_list:
item.item_group = ig_map[item.get("item_code")]
def get_item_groups_map(svd_list: SVDList) -> Dict[str, str]:
item_codes = set(i['item_code'] for i in svd_list)
ig_list = frappe.get_list(
'Item', fields=['item_code','item_group'],
filters=[('item_code', 'in', item_codes)]
)
return {i['item_code']:i['item_group'] for i in ig_list}
def get_item_groups_dict() -> ItemGroupsDict:
item_groups_list = frappe.get_all("Item Group", fields=("name", "is_group", "lft", "rgt"))
return {(i['lft'],i['rgt']):{'name':i['name'], 'is_group':i['is_group']}
for i in item_groups_list}
def update_leveled_dict(leveled_dict: OrderedDict) -> None:
for k in leveled_dict:
leveled_dict[k].update({'self_value':0, 'agg_value':0})
| frappe/erpnext | erpnext/stock/report/cogs_by_item_group/cogs_by_item_group.py | Python | gpl-3.0 | 5,496 | 0.025291 |
"""DeeCluster: provides a namespace for a set of DeeDatabases"""
__version__ = "0.1"
__author__ = "Greg Gaughan"
__copyright__ = "Copyright (C) 2007 Greg Gaughan"
__license__ = "MIT" #see Licence.txt for licence information
from Dee import Relation, Tuple
from DeeDatabase import Database
class Cluster(dict):
"""A namespace for databases"""
def __init__(self, name="nemo"):
"""Create a Cluster
Define initial databases here
(Called once on cluster creation)
"""
dict.__init__(self)
self.name=name
self.databases = Relation(['database_name'], self.vdatabases)
#todo should really have relations, attributes etc. to define this...
def __getattr__(self, key):
if self.has_key(key):
return self[key]
raise AttributeError, repr(key)
def __setattr__(self, key, value):
#todo reject non-Database?
self[key] = value
#todo delattr
def __contains__(self, item):
        #databases are stored as dict items (see __setattr__), not instance attributes
        if self.has_key(item):
            if isinstance(self[item], Database):
                return True
        return False
def __iter__(self):
for (k, v) in self.items():
#for (k, v) in self.__dict__.items():
if isinstance(v, Database):
yield (k, v)
def vdatabases(self):
return [Tuple(database_name=k)
for (k, v) in self]
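# Minimal usage sketch (an assumption, not part of the original module): a Cluster
# is a dict-backed namespace, so databases assigned as attributes show up in
# iteration and in the ``databases`` relation. The Database(name) constructor
# signature is assumed here.
#
#   cluster = Cluster("demo")
#   cluster.sales = Database("sales")
#   for name, db in cluster:
#       print name, db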
| ggaughan/dee | DeeCluster.py | Python | mit | 1,475 | 0.008814 |
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'id'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale
USE_L10N = True
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
# Legacy format
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# New format
DATABASES = {
}
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
)
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when spliting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = False # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ()
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Name and domain for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.user_messages.LegacyFallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# URL prefix for admin media -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
| freezmeinster/teh-manis | django/conf/global_settings.py | Python | bsd-3-clause | 21,140 | 0.00175 |
import io
import math
import random
from contextlib import redirect_stdout
from unittest import TestCase
from hamcrest import *
from array_util import get_random_unique_array
from chapter16.problem16_1 import greedy_make_change, make_change, print_change
from datastructures.array import Array
from util import between
def get_min_change_size_bruteforce(n, d):
if n == 0:
return 0
min_change = math.inf
for denom in d:
if denom <= n:
min_change = min(min_change, 1 + get_min_change_size_bruteforce(n - denom, d))
return min_change
class TestProblem16_1(TestCase):
def test_greedy_make_change(self):
n = random.randint(1, 20)
d = Array([1, 2, 5, 10, 20, 50])
actual_change = greedy_make_change(n)
expected_change_size = get_min_change_size_bruteforce(n, d)
actual_change_sum = sum(actual_change[i] * d[i] for i in between(1, d.length))
assert_that(sum(actual_change), is_(equal_to(expected_change_size)))
assert_that(actual_change_sum, is_(equal_to(n)))
def test_make_change(self):
n = random.randint(1, 20)
k = random.randint(1, 5)
d, _ = get_random_unique_array(max_size=k, min_value=2, max_value=20)
d[1] = 1
captured_output = io.StringIO()
actual_change, actual_denominators = make_change(n, d)
with redirect_stdout(captured_output):
print_change(n, actual_denominators)
expected_change_size = get_min_change_size_bruteforce(n, d)
assert_that(actual_change[n], is_(equal_to(expected_change_size)))
actual_change_denoms = [int(d) for d in captured_output.getvalue().splitlines()]
assert_that(sum(actual_change_denoms), is_(equal_to(n)))
assert_that(len(actual_change_denoms), is_(equal_to(expected_change_size)))
| wojtask/CormenPy | test/test_chapter16/test_problem16_1.py | Python | gpl-3.0 | 1,843 | 0.00217 |
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base.api.utils import get_object_or_404
from taiga.base import filters, response
from taiga.base import exceptions as exc
from taiga.base.decorators import list_route
from taiga.base.api import ModelCrudViewSet
from taiga.projects.models import Project, TaskStatus
from django.http import HttpResponse
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.occ import OCCResourceMixin
from . import models
from . import permissions
from . import serializers
from . import services
class TaskViewSet(OCCResourceMixin, HistoryResourceMixin, WatchedResourceMixin, ModelCrudViewSet):
model = models.Task
permission_classes = (permissions.TaskPermission,)
filter_backends = (filters.CanViewTasksFilterBackend,)
filter_fields = ["user_story", "milestone", "project", "assigned_to",
"status__is_closed", "watchers"]
def get_serializer_class(self, *args, **kwargs):
if self.action in ["retrieve", "by_ref"]:
return serializers.TaskNeighborsSerializer
if self.action == "list":
return serializers.TaskListSerializer
return serializers.TaskSerializer
def update(self, request, *args, **kwargs):
self.object = self.get_object_or_none()
project_id = request.DATA.get('project', None)
if project_id and self.object and self.object.project.id != project_id:
try:
new_project = Project.objects.get(pk=project_id)
self.check_permissions(request, "destroy", self.object)
self.check_permissions(request, "create", new_project)
sprint_id = request.DATA.get('milestone', None)
if sprint_id is not None and new_project.milestones.filter(pk=sprint_id).count() == 0:
request.DATA['milestone'] = None
us_id = request.DATA.get('user_story', None)
if us_id is not None and new_project.user_stories.filter(pk=us_id).count() == 0:
request.DATA['user_story'] = None
status_id = request.DATA.get('status', None)
if status_id is not None:
try:
old_status = self.object.project.task_statuses.get(pk=status_id)
new_status = new_project.task_statuses.get(slug=old_status.slug)
request.DATA['status'] = new_status.id
except TaskStatus.DoesNotExist:
request.DATA['status'] = new_project.default_task_status.id
except Project.DoesNotExist:
return response.BadRequest(_("The project doesn't exist"))
return super().update(request, *args, **kwargs)
def pre_save(self, obj):
if obj.user_story:
obj.milestone = obj.user_story.milestone
if not obj.id:
obj.owner = self.request.user
super().pre_save(obj)
def pre_conditions_on_save(self, obj):
super().pre_conditions_on_save(obj)
if obj.milestone and obj.milestone.project != obj.project:
raise exc.WrongArguments(_("You don't have permissions to set this sprint to this task."))
if obj.user_story and obj.user_story.project != obj.project:
raise exc.WrongArguments(_("You don't have permissions to set this user story to this task."))
if obj.status and obj.status.project != obj.project:
raise exc.WrongArguments(_("You don't have permissions to set this status to this task."))
if obj.milestone and obj.user_story and obj.milestone != obj.user_story.milestone:
raise exc.WrongArguments(_("You don't have permissions to set this sprint to this task."))
@list_route(methods=["GET"])
def by_ref(self, request):
ref = request.QUERY_PARAMS.get("ref", None)
project_id = request.QUERY_PARAMS.get("project", None)
task = get_object_or_404(models.Task, ref=ref, project_id=project_id)
return self.retrieve(request, pk=task.pk)
@list_route(methods=["GET"])
def csv(self, request):
uuid = request.QUERY_PARAMS.get("uuid", None)
if uuid is None:
return response.NotFound()
project = get_object_or_404(Project, tasks_csv_uuid=uuid)
queryset = project.tasks.all().order_by('ref')
data = services.tasks_to_csv(project, queryset)
csv_response = HttpResponse(data.getvalue(), content_type='application/csv; charset=utf-8')
csv_response['Content-Disposition'] = 'attachment; filename="tasks.csv"'
return csv_response
@list_route(methods=["POST"])
def bulk_create(self, request, **kwargs):
serializer = serializers.TasksBulkSerializer(data=request.DATA)
if serializer.is_valid():
data = serializer.data
project = Project.objects.get(id=data["project_id"])
self.check_permissions(request, 'bulk_create', project)
tasks = services.create_tasks_in_bulk(
data["bulk_tasks"], milestone_id=data["sprint_id"], user_story_id=data["us_id"],
status_id=data.get("status_id") or project.default_task_status_id,
project=project, owner=request.user, callback=self.post_save, precall=self.pre_save)
tasks_serialized = self.get_serializer_class()(tasks, many=True)
return response.Ok(tasks_serialized.data)
return response.BadRequest(serializer.errors)
def _bulk_update_order(self, order_field, request, **kwargs):
serializer = serializers.UpdateTasksOrderBulkSerializer(data=request.DATA)
if not serializer.is_valid():
return response.BadRequest(serializer.errors)
data = serializer.data
project = get_object_or_404(Project, pk=data["project_id"])
self.check_permissions(request, "bulk_update_order", project)
services.update_tasks_order_in_bulk(data["bulk_tasks"],
project=project,
field=order_field)
services.snapshot_tasks_in_bulk(data["bulk_tasks"], request.user)
return response.NoContent()
@list_route(methods=["POST"])
def bulk_update_taskboard_order(self, request, **kwargs):
return self._bulk_update_order("taskboard_order", request, **kwargs)
@list_route(methods=["POST"])
def bulk_update_us_order(self, request, **kwargs):
return self._bulk_update_order("us_order", request, **kwargs)
| Tigerwhit4/taiga-back | taiga/projects/tasks/api.py | Python | agpl-3.0 | 7,479 | 0.002541 |
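# A minimal client-side sketch for the bulk_create endpoint of the TaskViewSet
# above. The URL path and auth header are assumptions for illustration; the
# payload keys mirror the fields the view reads from TasksBulkSerializer
# (project_id, sprint_id, us_id, status_id, bulk_tasks).
import requests
def bulk_create_tasks(base_url, token, project_id, sprint_id, us_id, bulk_tasks):
    payload = {
        "project_id": project_id,
        "sprint_id": sprint_id,
        "us_id": us_id,
        "bulk_tasks": bulk_tasks,  # typically the subjects of the new tasks
    }
    headers = {"Authorization": "Bearer {}".format(token)}
    return requests.post("{}/api/v1/tasks/bulk_create".format(base_url),
                         json=payload, headers=headers)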
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @name: Wascan - Web Application Scanner
# @repo: https://github.com/m4ll0k/Wascan
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'LICENSE.txt'
from re import search,I
def jiasule(headers,content):
_ = False
for header in headers.items():
_ |= search(r'__jsluid=|jsl_tracking',header[1],I) is not None
_ |= search(r'jiasule-waf',header[1],I) is not None
if _:break
_ |= search(r'static\.jiasule\.com/static/js/http_error\.js',content) is not None
if _ :
return "Jiasule Web Application Firewall (Jiasule)" | m4ll0k/Spaghetti | plugins/fingerprint/waf/jiasule.py | Python | gpl-3.0 | 587 | 0.040886 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
tags = ['Satellite_5', 'Spacewalk']
name = 'Basic Cobbler settings are correct'
def etc_cobbler_settings(data):
"""
Verify settings in /etc/cobbler/settings:
redhat_management_type: "site"
redhat_management_server: "satellite.example.com"
server: satellite.example.com
    Theoretically one option can be specified multiple times, so we want to
    evaluate only the last occurrence.
"""
out = []
opts_found = 0
hostname = data['hostname'][0]
etc_cobbler_settings_redhat_management_type = ''
etc_cobbler_settings_redhat_management_server = ''
etc_cobbler_settings_server = ''
for line in data['etc_cobbler_settings']:
if re.match('^\s*redhat_management_type\s*:', line):
opts_found += 1
val = line.split(':')[1].strip()
if re.search(r'[\'"]?\bsite\b[\'"]?', val):
etc_cobbler_settings_redhat_management_type = ''
else:
etc_cobbler_settings_redhat_management_type = 'In /etc/cobbler/settings there should be \'redhat_management_type: "site"\''
if re.match('^\s*redhat_management_server\s*:', line):
opts_found += 1
val = line.split(':')[1].strip()
if re.search(r'[\'"]?\b%s\b[\'"]?' % hostname, val):
etc_cobbler_settings_redhat_management_server = ''
else:
etc_cobbler_settings_redhat_management_server = 'In /etc/cobbler/settings there should be \'redhat_management_server: %s\'' % hostname
if re.match('^\s*server\s*:', line):
opts_found += 1
val = line.split(':')[1].strip()
if re.search(r'[\'"]?\b%s\b[\'"]?' % hostname, val):
etc_cobbler_settings_server = ''
else:
etc_cobbler_settings_server = 'In /etc/cobbler/settings there should be \'server: %s\'' % hostname
if opts_found != 3:
out.append("Not all of redhat_management_type, redhat_management_server and server options found in /etc/cobbler/settings")
for o in (etc_cobbler_settings_redhat_management_type, etc_cobbler_settings_redhat_management_server, etc_cobbler_settings_server):
if o != '':
out.append(o)
return out
def etc_cobbler_modules_conf(data):
"""
Verify settings in /etc/cobbler/modules.conf:
[authentication]
module = authn_spacewalk
"""
out = []
opts_found = 0
etc_cobbler_modules_conf_authentication_module = ''
section_auth = False
for line in data['etc_cobbler_modules_conf']:
if re.match('^\s*\[.*\]\s*$', line):
section_auth = False
if re.match('^\s*\[authentication\]\s*$', line):
section_auth = True
continue
if section_auth and re.match('^\s*module\s*=', line):
opts_found += 1
val = line.split('=')[1].strip()
if re.search(r'[\'"]?\bauthn_spacewalk\b[\'"]?', val):
etc_cobbler_modules_conf_authentication_module = ''
else:
etc_cobbler_modules_conf_authentication_module = 'In /etc/cobbler/modules.conf there should be \'module = authn_spacewalk\''
if opts_found != 1:
out.append("Option module in section authentication not found in /etc/cobbler/modules.conf")
for o in (etc_cobbler_modules_conf_authentication_module,):
if o != '':
out.append(o)
return out
def main(data):
"""
    The hostname check mentioned in the KB article is covered by a different rule;
    we are still missing a check for the hostname/IP in /etc/hosts though.
"""
out = []
out += etc_cobbler_settings(data)
out += etc_cobbler_modules_conf(data)
if out:
return {'errors': out}
def text(result):
out = ""
out += "Certain config options in Cobbler configuratin should be set as expected:\n"
for e in result['errors']:
out += " %s\n" % e
out += "See https://access.redhat.com/solutions/27936"
return out
| RedHatSatellite/satellite-sanity | satellite_sanity_lib/rules/sat5_cobbler_config.py | Python | gpl-3.0 | 3,726 | 0.021202 |
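# A minimal sketch of the data dict the checks above consume, assuming the
# satellite-sanity framework passes each captured file as a list of lines
# (the keys mirror what main() reads); the import path follows the repo layout.
from satellite_sanity_lib.rules.sat5_cobbler_config import main, text
sample_data = {
    'hostname': ['satellite.example.com'],
    'etc_cobbler_settings': [
        'redhat_management_type: "site"',
        'redhat_management_server: "satellite.example.com"',
        'server: satellite.example.com',
    ],
    'etc_cobbler_modules_conf': [
        '[authentication]',
        'module = authn_spacewalk',
    ],
}
result = main(sample_data)  # None when everything is configured as expected
if result:
    print(text(result))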
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Thomas Amland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import json
import pykka
from models import Movie, Episode
logger = logging.getLogger(__name__)
class XBMCLibrary(pykka.ThreadingActor):
_movie_properties = ['title', 'year', 'imdbnumber', 'playcount']
def __init__(self):
pykka.ThreadingActor.__init__(self)
def movie(self, movieid):
params = {
'movieid': movieid,
'properties': self._movie_properties
}
response = jsonrpc('VideoLibrary.GetMovieDetails', params)
movie = response['result']['moviedetails']
return _load_movie(movie)
def episode(self, episodeid):
params = {
'episodeid': episodeid,
'properties': ['season', 'episode', 'playcount', 'tvshowid'],
}
episode = jsonrpc('VideoLibrary.GetEpisodeDetails', params)['result']['episodedetails']
params = {'tvshowid': episode['tvshowid'], 'properties': ['imdbnumber']}
tvshow = jsonrpc('VideoLibrary.GetTVShowDetails', params)['result']['tvshowdetails']
return _load_episode(episode, tvshow['imdbnumber'])
def movies(self):
params = {'properties': self._movie_properties}
response = jsonrpc('VideoLibrary.GetMovies', params)
movies = response['result'].get('movies', [])
movies = map(_load_movie, movies)
return [m for m in movies if m is not None]
def episodes(self):
params = {'properties': ['imdbnumber']}
tvshows = jsonrpc('VideoLibrary.GetTVShows', params)['result']\
.get('tvshows', [])
ret = []
for tvshow in tvshows:
params = {
'tvshowid': tvshow['tvshowid'],
'properties': ['season', 'episode', 'playcount', 'lastplayed']
}
episodes = jsonrpc('VideoLibrary.GetEpisodes', params)['result']\
.get('episodes', [])
episodes = [_load_episode(ep, tvshow['imdbnumber']) for ep in episodes]
ret.extend(episodes)
return ret
def update_movie_details(self, movie):
if not movie.xbmcid or movie.playcount <= 0:
return False
params = {'movieid': movie.xbmcid, 'playcount': movie.playcount}
r = jsonrpc('VideoLibrary.SetMovieDetails', params)
return r.get('result') == 'OK'
def update_episode_details(self, item):
if not item.xbmcid or item.playcount <= 0:
return False
params = {'episodeid': item.xbmcid, 'playcount': item.playcount}
r = jsonrpc('VideoLibrary.SetEpisodeDetails', params)
return r.get('result') == 'OK'
def _load_movie(r):
return Movie(
title=r['title'],
year=r['year'],
imdbid=r['imdbnumber'],
xbmcid=r['movieid'],
playcount=r['playcount'],
)
def _load_episode(r, tvshowid):
return Episode(
tvdbid=tvshowid,
season=r['season'],
episode=r['episode'],
xbmcid=r['episodeid'],
playcount=r['playcount'],
)
def jsonrpc(method, params=None):
if params is None:
params = {}
payload = {
'jsonrpc': '2.0',
'id': 1,
'method': method,
'params': params,
}
payload = json.dumps(payload, encoding='utf-8')
try:
import xbmc
except:
import requests
response = requests.post(
"http://localhost:8081/jsonrpc",
data=payload,
headers={'content-type': 'application/json'}).json()
else:
response = json.loads(xbmc.executeJSONRPC(payload), encoding='utf-8')
if 'error' in response:
logger.error("jsonrpc error: %r" % response)
return None
return response
| tamland/trakt-sync | xbmc_library.py | Python | gpl-3.0 | 4,438 | 0.001127 |
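# A short usage sketch for the XBMCLibrary actor above, assuming pykka is
# available, Kodi/XBMC's JSON-RPC video library is reachable, and the Movie
# model exposes title/year/playcount as attributes.
from xbmc_library import XBMCLibrary
actor_ref = XBMCLibrary.start()
library = actor_ref.proxy()
try:
    movies = library.movies().get()  # proxy call returns a future; get() blocks
    for movie in movies:
        print("%s (%s) playcount=%s" % (movie.title, movie.year, movie.playcount))
finally:
    actor_ref.stop()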
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit
class TestCallingConventionFromPython(unittest.TestCase):
def test_varargs(self):
@jit
def f(a, b, *args):
return [a, b, args[1]]
self.assertEqual(f(1, 2, 0, 3, 0), [1, 2, 3])
class TestCallingFlypyConvention(unittest.TestCase):
def test_varargs(self):
@jit
def g(a, b, *args):
return [a, b, args[1]]
@jit
def f(a, b, c, d, e):
return g(a, b, c, d, e)
self.assertEqual(f(1, 2, 0, 3, 0), [1, 2, 3])
def test_unpacking(self):
@jit
def g(a, b, c):
return [a, b, c]
@jit
def f(*args):
return g(*args)
self.assertEqual(f(1, 2, 3), [1, 2, 3])
def test_unpacking2(self):
raise unittest.SkipTest("unpacking with additional varargs")
@jit
def g(a, b, *args):
return [a, b, args[0]]
@jit
def f(*args):
return g(*args)
self.assertEqual(f(1, 2, 3), [1, 2, 3])
# TODO: Test unpacking with GenericTuple
if __name__ == '__main__':
unittest.main() | flypy/flypy | flypy/tests/test_calling_conv.py | Python | bsd-2-clause | 1,235 | 0.004049 |
""" Scrape yahoo industry database through YQL """
import mysql.connector
import stockretriever
import sys
cnx = mysql.connector.connect(user='root', password='root', database='yahoo')
cursor = cnx.cursor()
add_employee = ("INSERT INTO stocks "
"(symbol, name, industry) "
"VALUES (%s, %s, %s) "
"ON DUPLICATE KEY UPDATE industry=VALUES(industry)")
sectors = stockretriever.get_industry_ids()
for sector in sectors:
for industry in sector['industry']:
try:
print "\nProcessing", industry['name'], industry['id']
except TypeError as E:
print E
continue
industry_index = stockretriever.get_industry_index(industry['id'])
try:
industry_name = industry_index['name']
industry_companies = industry_index['company']
industry_id = industry_index['id']
except Exception, e:
print e
continue
for company in industry_companies:
try:
data_employee = (company['symbol'], company['name'], industry_id)
try:
cursor.execute(add_employee, data_employee)
except mysql.connector.errors.IntegrityError, e:
print(e)
continue
try:
print "Success adding", company['symbol'], company['name']
except UnicodeEncodeError as e:
print e
cnx.commit()
except OSError as err:
print(err)
except TypeError as err:
print(err)
except Exception as e:
print "Unknown error, error caught.", e
continue
cursor.close()
cnx.close()
| pettersoderlund/fondout | script/StockScraper-master/import_symbols_from_industry.py | Python | bsd-3-clause | 1,827 | 0.004379 |
# -*- coding: utf-8 -*-
import logging
import argparse
from .imdb import find_movies
logger = logging.getLogger('mrot')
def parse_args():
parser = argparse.ArgumentParser(prog='mrot', description='Show movie ratings over time.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('movie_name', help='the name of the movie')
# Optional arguments
parser.add_argument("-c", "--concurrency", type=int, default=2,
help="maximum number of concurrent requests to the wayback machine")
parser.add_argument("-d", "--delta", type=int, default=365, help="minimum number of days between two ratings")
parser.add_argument("-q", "--quiet", action="store_true", help="don't print progress")
args = parser.parse_args()
return args
def main():
args = parse_args()
logging.basicConfig(level=(logging.WARN if args.quiet else logging.INFO))
    # Don't allow more than 10 concurrent requests to the wayback machine
concurrency = min(args.concurrency, 10)
# Find the movies corresponding to the given movie name
imdb_movies = find_movies(args.movie_name)
if len(imdb_movies) > 0:
# Show rating for the first movie matching the given name
imdb_movie = imdb_movies[0]
imdb_movie.plot_ratings(concurrency, args.delta)
else:
logger.info('Movie not found')
| abrenaut/mrot | mrot/cli.py | Python | mit | 1,423 | 0.003514 |
'''
Created on Apr 30, 2017
@author: jamie
'''
import numpy as np
class CartState(object):
'''
    Defines Cartesian state information (position and velocity vectors)
'''
pos_I = np.array([0., 0., 0.])
vel_I = np.array([0., 0., 0.])
| jamielapointe/PyPassiveRangingFilters | pyPassiveRanging/dynamicsModels/cartState.py | Python | mit | 236 | 0.016949 |
short_name = "godot"
name = "Godot Engine"
major = 2
minor = 1
patch = 4
status = "beta"
| pixelpicosean/my-godot-2.1 | version.py | Python | mit | 89 | 0 |
from __future__ import division
import datetime
import pytz
from app.models import Patch
def get_match_patch(match_date):
utc = pytz.UTC
#pylint: disable=no-value-for-parameter
match_date = utc.localize(datetime.datetime.fromtimestamp(match_date))
for patch in Patch.objects.all().order_by('-start_date'):
if patch.start_date < match_date:
return patch
return None
#pylint: disable=invalid-name,too-many-arguments
def is_valid_match(gmd, public=None, league=None, team=None, solo=None, \
ranked=None, ap=None, cm=None, ar=None, rap=None):
return check_lobby_type(gmd, public, league, team, solo, ranked) is True \
and check_game_mode(gmd, ap, cm, ar, rap) is True \
and check_abandon(gmd) is False
#pylint: disable=invalid-name,too-many-arguments
def check_lobby_type(match, public=None, league=None, team=None, solo=None, ranked=None):
if public is None and league is None and team is None and solo is None and ranked is None:
public = league = team = solo = ranked = True
if match is not None:
match_type = match['lobby_type']
#pylint: disable=too-many-boolean-expressions
if (public and match_type == 0) or \
(league and match_type == 2) or \
(team and match_type == 5) or \
(solo and match_type == 6) or \
(ranked and match_type == 7):
return True
return False
else:
return None
def check_abandon(match_json):
if match_json is None:
return None
for player in match_json["players"]:
if player["leaver_status"] != 0 or player['hero_id'] is 0:
return True
return False
def check_game_mode(match, ap=None, cm=None, ar=None, rap=None):
if ap is None and cm is None and ar is None and rap is None:
ap = cm = ar = rap = True
game_mode = match["game_mode"]
#pylint: disable=too-many-boolean-expressions
if (ap and game_mode == 1) or \
(cm and game_mode == 2) or \
(ar and game_mode == 5) or \
(rap and game_mode == 22):
return True
return False
| lucashanke/houseofdota | app/util/match_util.py | Python | mit | 2,180 | 0.005046 |
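# A small illustration of the lobby-type and game-mode codes the helpers above
# check, using a made-up GetMatchDetails-style dict with only the fields they
# read (running this assumes a configured Django environment for the imports).
from app.util.match_util import check_lobby_type, check_game_mode, check_abandon
sample_match = {
    'lobby_type': 7,   # ranked matchmaking
    'game_mode': 22,   # ranked all pick
    'players': [{'leaver_status': 0, 'hero_id': 11}],
}
print(check_lobby_type(sample_match, ranked=True))  # True
print(check_game_mode(sample_match, rap=True))      # True
print(check_abandon(sample_match))                  # False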
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PushResult'
db.create_table(u'notos_pushresult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('response_code', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'notos', ['PushResult'])
# Adding model 'ScheduledPush'
db.create_table(u'notos_scheduledpush', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('scheduled_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('send_at', self.gf('django.db.models.fields.DateTimeField')()),
('canceled_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('registration_id', self.gf('django.db.models.fields.CharField')(max_length=4095)),
('result', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['notos.PushResult'], unique=True, null=True, blank=True)),
('attempt_no', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('data', self.gf('json_field.fields.JSONField')(default=u'null')),
))
db.send_create_signal(u'notos', ['ScheduledPush'])
def backwards(self, orm):
# Deleting model 'PushResult'
db.delete_table(u'notos_pushresult')
# Deleting model 'ScheduledPush'
db.delete_table(u'notos_scheduledpush')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'notos.pushresult': {
'Meta': {'object_name': 'PushResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'notos.scheduledpush': {
'Meta': {'object_name': 'ScheduledPush'},
'attempt_no': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'canceled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'data': ('json_field.fields.JSONField', [], {'default': "u'null'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'registration_id': ('django.db.models.fields.CharField', [], {'max_length': '4095'}),
'result': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['notos.PushResult']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'send_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['notos'] | Sigmapoint/notos | src/notos/migrations/0001_initial.py | Python | mit | 3,961 | 0.007321 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-03 10:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('domains', '0003_auto_20161103_1031'),
]
operations = [
migrations.RemoveField(
model_name='domain',
name='subtopics',
),
migrations.AddField(
model_name='subtopic',
name='dmain',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subtopics', to='domains.domain'),
),
]
| adithyabhatkajake/kompile | domains/migrations/0004_auto_20161103_1044.py | Python | gpl-2.0 | 678 | 0.001475 |
from paraview.simple import *
import os
import sys
import numpy as np
path = os.getcwd() + "/"
file_name = sys.argv[1]
inp = file_name + ".e"
outCSV = file_name + ".csv"
reader = ExodusIIReader(FileName=path+inp)
tsteps = reader.TimestepValues
writer = CreateWriter(path+file_name+"_Cells.csv", reader)
writer.FieldAssociation = "Cells" # or "Points"
writer.UpdatePipeline(time=tsteps[len(tsteps)-1])
del writer
writer = CreateWriter(path+file_name+"_Points.csv", reader)
writer.FieldAssociation = "Points" # or "Cells"
writer.UpdatePipeline(time=tsteps[len(tsteps)-1])
del writer
| jhaase1/zapdos | tests/reflections/low_initial/ToCSV.py | Python | lgpl-2.1 | 586 | 0.003413 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for SSH connections."""
import os
import warnings
from base64 import decodebytes
from io import StringIO
from typing import Dict, Optional, Tuple, Union
import paramiko
from paramiko.config import SSH_PORT
from sshtunnel import SSHTunnelForwarder
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
try:
from airflow.utils.platform import getuser
except ImportError:
from getpass import getuser
class SSHHook(BaseHook):
"""
Hook for ssh remote execution using Paramiko.
ref: https://github.com/paramiko/paramiko
This hook also lets you create ssh tunnel and serve as basis for SFTP file transfer
:param ssh_conn_id: :ref:`ssh connection id<howto/connection:ssh>` from airflow
Connections from where all the required parameters can be fetched like
username, password or key_file. Thought the priority is given to the
param passed during init
:type ssh_conn_id: str
:param remote_host: remote host to connect
:type remote_host: str
:param username: username to connect to the remote_host
:type username: str
:param password: password of the username to connect to the remote_host
:type password: str
:param key_file: path to key file to use to connect to the remote_host
:type key_file: str
:param port: port of remote host to connect (Default is paramiko SSH_PORT)
:type port: int
:param timeout: timeout for the attempt to connect to the remote_host.
:type timeout: int
:param keepalive_interval: send a keepalive packet to remote host every
keepalive_interval seconds
:type keepalive_interval: int
"""
# List of classes to try loading private keys as, ordered (roughly) by most common to least common
_pkey_loaders = (
paramiko.RSAKey,
paramiko.ECDSAKey,
paramiko.Ed25519Key,
paramiko.DSSKey,
)
_host_key_mappings = {
'rsa': paramiko.RSAKey,
'dss': paramiko.DSSKey,
'ecdsa': paramiko.ECDSAKey,
'ed25519': paramiko.Ed25519Key,
}
conn_name_attr = 'ssh_conn_id'
default_conn_name = 'ssh_default'
conn_type = 'ssh'
hook_name = 'SSH'
@staticmethod
def get_ui_field_behaviour() -> Dict:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['schema'],
"relabeling": {
'login': 'Username',
},
}
def __init__(
self,
ssh_conn_id: Optional[str] = None,
remote_host: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
key_file: Optional[str] = None,
port: Optional[int] = None,
timeout: int = 10,
keepalive_interval: int = 30,
) -> None:
super().__init__()
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.username = username
self.password = password
self.key_file = key_file
self.pkey = None
self.port = port
self.timeout = timeout
self.keepalive_interval = keepalive_interval
# Default values, overridable from Connection
self.compress = True
self.no_host_key_check = True
self.allow_host_key_change = False
self.host_proxy = None
self.host_key = None
self.look_for_keys = True
# Placeholder for deprecated __enter__
self.client = None
# Use connection to override defaults
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if self.username is None:
self.username = conn.login
if self.password is None:
self.password = conn.password
if self.remote_host is None:
self.remote_host = conn.host
if self.port is None:
self.port = conn.port
if conn.extra is not None:
extra_options = conn.extra_dejson
if "key_file" in extra_options and self.key_file is None:
self.key_file = extra_options.get("key_file")
private_key = extra_options.get('private_key')
private_key_passphrase = extra_options.get('private_key_passphrase')
if private_key:
self.pkey = self._pkey_from_private_key(private_key, passphrase=private_key_passphrase)
if "timeout" in extra_options:
self.timeout = int(extra_options["timeout"], 10)
if "compress" in extra_options and str(extra_options["compress"]).lower() == 'false':
self.compress = False
host_key = extra_options.get("host_key")
no_host_key_check = extra_options.get("no_host_key_check")
if no_host_key_check is not None:
no_host_key_check = str(no_host_key_check).lower() == "true"
if host_key is not None and no_host_key_check:
raise ValueError("Must check host key when provided")
self.no_host_key_check = no_host_key_check
if (
"allow_host_key_change" in extra_options
and str(extra_options["allow_host_key_change"]).lower() == 'true'
):
self.allow_host_key_change = True
if (
"look_for_keys" in extra_options
and str(extra_options["look_for_keys"]).lower() == 'false'
):
self.look_for_keys = False
if host_key is not None:
if host_key.startswith("ssh-"):
key_type, host_key = host_key.split(None)[:2]
key_constructor = self._host_key_mappings[key_type[4:]]
else:
key_constructor = paramiko.RSAKey
decoded_host_key = decodebytes(host_key.encode('utf-8'))
self.host_key = key_constructor(data=decoded_host_key)
self.no_host_key_check = False
if self.pkey and self.key_file:
raise AirflowException(
"Params key_file and private_key both provided. Must provide no more than one."
)
if not self.remote_host:
raise AirflowException("Missing required param: remote_host")
# Auto detecting username values from system
if not self.username:
self.log.debug(
"username to ssh to host: %s is not specified for connection id"
" %s. Using system's default provided by getpass.getuser()",
self.remote_host,
self.ssh_conn_id,
)
self.username = getuser()
user_ssh_config_filename = os.path.expanduser('~/.ssh/config')
if os.path.isfile(user_ssh_config_filename):
ssh_conf = paramiko.SSHConfig()
with open(user_ssh_config_filename) as config_fd:
ssh_conf.parse(config_fd)
host_info = ssh_conf.lookup(self.remote_host)
if host_info and host_info.get('proxycommand'):
self.host_proxy = paramiko.ProxyCommand(host_info.get('proxycommand'))
if not (self.password or self.key_file):
if host_info and host_info.get('identityfile'):
self.key_file = host_info.get('identityfile')[0]
self.port = self.port or SSH_PORT
def get_conn(self) -> paramiko.SSHClient:
"""
Opens a ssh connection to the remote host.
:rtype: paramiko.client.SSHClient
"""
self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
client = paramiko.SSHClient()
if not self.allow_host_key_change:
self.log.warning(
'Remote Identification Change is not verified. '
                "This won't protect against Man-In-The-Middle attacks"
)
client.load_system_host_keys()
if self.no_host_key_check:
            self.log.warning("No Host Key Verification. This won't protect against Man-In-The-Middle attacks")
# Default is RejectPolicy
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
else:
if self.host_key is not None:
client_host_keys = client.get_host_keys()
if self.port == SSH_PORT:
client_host_keys.add(self.remote_host, self.host_key.get_name(), self.host_key)
else:
client_host_keys.add(
f"[{self.remote_host}]:{self.port}", self.host_key.get_name(), self.host_key
)
else:
pass # will fallback to system host keys if none explicitly specified in conn extra
connect_kwargs = dict(
hostname=self.remote_host,
username=self.username,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy,
look_for_keys=self.look_for_keys,
)
if self.password:
password = self.password.strip()
connect_kwargs.update(password=password)
if self.pkey:
connect_kwargs.update(pkey=self.pkey)
if self.key_file:
connect_kwargs.update(key_filename=self.key_file)
client.connect(**connect_kwargs)
if self.keepalive_interval:
client.get_transport().set_keepalive(self.keepalive_interval)
self.client = client
return client
def __enter__(self) -> 'SSHHook':
warnings.warn(
            'The contextmanager of SSHHook is deprecated. '
            'Please use get_conn() as a contextmanager instead. '
            'This method will be removed in Airflow 2.0',
category=DeprecationWarning,
)
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
if self.client is not None:
self.client.close()
self.client = None
def get_tunnel(
self, remote_port: int, remote_host: str = "localhost", local_port: Optional[int] = None
) -> SSHTunnelForwarder:
"""
Creates a tunnel between two hosts. Like ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>.
:param remote_port: The remote port to create a tunnel to
:type remote_port: int
:param remote_host: The remote host to create a tunnel to (default localhost)
:type remote_host: str
:param local_port: The local port to attach the tunnel to
:type local_port: int
:return: sshtunnel.SSHTunnelForwarder object
"""
if local_port:
local_bind_address: Union[Tuple[str, int], Tuple[str]] = ('localhost', local_port)
else:
local_bind_address = ('localhost',)
tunnel_kwargs = dict(
ssh_port=self.port,
ssh_username=self.username,
ssh_pkey=self.key_file or self.pkey,
ssh_proxy=self.host_proxy,
local_bind_address=local_bind_address,
remote_bind_address=(remote_host, remote_port),
logger=self.log,
)
if self.password:
password = self.password.strip()
tunnel_kwargs.update(
ssh_password=password,
)
else:
tunnel_kwargs.update(
host_pkey_directories=[],
)
client = SSHTunnelForwarder(self.remote_host, **tunnel_kwargs)
return client
def create_tunnel(
self, local_port: int, remote_port: int, remote_host: str = "localhost"
) -> SSHTunnelForwarder:
"""
Creates tunnel for SSH connection [Deprecated].
:param local_port: local port number
:param remote_port: remote port number
:param remote_host: remote host
:return:
"""
warnings.warn(
            'SSHHook.create_tunnel is deprecated. Please '
            'use get_tunnel() instead, but note that the '
            'order of the parameters has changed. '
            'This method will be removed in Airflow 2.0',
category=DeprecationWarning,
)
return self.get_tunnel(remote_port, remote_host, local_port)
def _pkey_from_private_key(self, private_key: str, passphrase: Optional[str] = None) -> paramiko.PKey:
"""
Creates appropriate paramiko key for given private key
:param private_key: string containing private key
:return: ``paramiko.PKey`` appropriate for given key
:raises AirflowException: if key cannot be read
"""
for pkey_class in self._pkey_loaders:
try:
key = pkey_class.from_private_key(StringIO(private_key), password=passphrase)
                # Test it actually works. If Paramiko loads an openssh generated key, sometimes it will
# happily load it as the wrong type, only to fail when actually used.
key.sign_ssh_data(b'')
return key
except (paramiko.ssh_exception.SSHException, ValueError):
continue
raise AirflowException(
            'Private key provided cannot be read by paramiko. '
            'Ensure the key provided is valid for one of the following '
            'key formats: RSA, DSS, ECDSA, or Ed25519'
)
| dhuang/incubator-airflow | airflow/providers/ssh/hooks/ssh.py | Python | apache-2.0 | 14,364 | 0.001601 |
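# A short usage sketch for the SSHHook above. The host, user and key path are
# assumptions for illustration; exec_command() is the standard paramiko
# SSHClient API returned by get_conn(), and get_tunnel() returns an
# SSHTunnelForwarder that is started and stopped explicitly.
from airflow.providers.ssh.hooks.ssh import SSHHook
hook = SSHHook(remote_host='example.com', username='deploy', key_file='/home/deploy/.ssh/id_rsa')
client = hook.get_conn()
try:
    stdin, stdout, stderr = client.exec_command('uptime')
    print(stdout.read().decode('utf-8'))
finally:
    client.close()
tunnel = hook.get_tunnel(remote_port=5432, remote_host='db.internal', local_port=15432)
tunnel.start()
try:
    pass  # talk to the forwarded service on localhost:15432 here
finally:
    tunnel.stop()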
import os.path
import argparse
import arrow
import iso8601
import requests
import json
import re
import datetime
from yaml import load
from repoze.lru import lru_cache
from dateutil.parser import parse
from time import sleep
from retrying import retry
from logging import getLogger
RE = re.compile(r'(^.*)@(\d{4}-\d{2}-\d{2}--\d{4}-\d{2}-\d{2})?-([a-z\-]*)\.zip')
LOGGER = getLogger("BILLING")
def get_arguments_parser():
parser = argparse.ArgumentParser(
description="Openprocurement Billing"
)
report = parser.add_argument_group('Report', 'Report parameters')
report.add_argument(
'-c',
'--config',
dest='config',
required=True,
help="Path to config file. Required"
)
report.add_argument(
'-b',
'--broker',
dest='broker',
required=True,
help='Broker name. Required'
)
report.add_argument(
'-p',
'--period',
nargs='+',
dest='period',
default=[],
help='Specifies period for billing report.\n '
'By default report will be generated from all database'
)
report.add_argument(
'-t',
'--timezone',
dest='timezone',
default='Europe/Kiev',
help='Timezone. Default "Europe/Kiev"'
)
return parser
def thresholds_headers(cthresholds):
prev_threshold = None
result = []
thresholds = [str(t / 1000) for t in cthresholds]
for t in thresholds:
if not prev_threshold:
result.append("<= " + t)
else:
result.append(">" + prev_threshold + "<=" + t)
prev_threshold = t
result.append(">" + thresholds[-1])
return result
@lru_cache(10000)
@retry(wait_exponential_multiplier=1000, stop_max_attempt_number=5)
def get_rate(currency, date, proxy_address=None):
base_url = 'http://bank.gov.ua/NBUStatService'\
'/v1/statdirectory/exchange?date={}&json'.format(
iso8601.parse_date(date).strftime('%Y%m%d')
)
if proxy_address:
resp = requests.get(base_url, proxies={'http': proxy_address}).text.encode('utf-8')
else:
resp = requests.get(base_url).text.encode('utf-8')
doc = json.loads(resp)
if currency == u'RUR':
currency = u'RUB'
rate = filter(lambda x: x[u'cc'] == currency, doc)[0][u'rate']
sleep(15)
return rate
def value_currency_normalize(value, currency, date, proxy_address=None):
if not isinstance(value, (float, int)):
raise ValueError
rate = get_rate(currency, date, proxy_address)
return value * rate, rate
def create_db_url(host, port, user, passwd, db_name=''):
up = ''
if user and passwd:
up = '{}:{}@'.format(user, passwd)
url = 'http://{}{}:{}'.format(up, host, port)
if db_name:
url += '/{}'.format(db_name)
return url
class Kind(argparse.Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.kinds = set(['general', 'special', 'defense', '_kind'])
super(Kind, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=self.kinds,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(
self, parser, args, values, option_string=None):
options = values.split('=')
self.parser = parser
if len(options) < 2:
parser.error("usage <option>=<kind>")
action = options[0]
kinds = options[1].split(',')
try:
getattr(self, action)(kinds)
except AttributeError:
self.parser.error("<option> should be one from [include, exclude, one]")
setattr(args, self.dest, self.kinds)
def include(self, kinds):
for kind in kinds:
self.kinds.add(kind)
def exclude(self, kinds):
for kind in kinds:
if kind in self.kinds:
self.kinds.remove(kind)
def one(self, kinds):
for kind in kinds:
if kind not in ['general', 'special', 'defense', 'other', '_kind']:
self.parser.error('Allowed only general, special, defense, other and _kind')
self.kinds = set(kinds)
class Status(argparse.Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.statuses = {'action': '', 'statuses': set([u'active',
u'complete',
u'active.awarded',
u'cancelled',
u'unsuccessful'
])}
super(Status, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=self.statuses,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(
self, parser, args, values, option_string=None):
options = values.split('=')
self.parser = parser
if len(options) < 2:
parser.error("usage <option>=<kind>")
action = options[0]
statuses = options[1].split(',')
try:
getattr(self, action)(statuses)
except AttributeError:
self.parser.error("<option> should be one from [include, exclude, one]")
setattr(args, self.dest, self.statuses)
def include(self, sts):
self.statuses['action'] = 'include'
for status in sts:
self.statuses['statuses'].add(status)
def exclude(self, sts):
self.statuses['action'] = 'exclude'
for status in sts:
            if status in self.statuses['statuses']:
self.statuses['statuses'].remove(status)
def one(self, sts):
self.statuses['action'] = 'one'
self.statuses['statuses'] = set(sts)
def convert_date(
date, timezone="Europe/Kiev",
to="UTC", format="%Y-%m-%dT%H:%M:%S.%f"
):
date = arrow.get(parse(date), timezone)
return date.to(to).strftime(format)
def prepare_report_interval(period=None):
if not period:
return ("", "9999-12-30T00:00:00.000000")
if len(period) == 1:
return (convert_date(period[0]), "9999-12-30T00:00:00.000000")
if len(period) == 2:
return (convert_date(period[0]), convert_date(period[1]))
raise ValueError("Invalid period")
def prepare_result_file_name(utility):
start, end = "", ""
if utility.start_date:
start = convert_date(
utility.start_date,
timezone="UTC",
to="Europe/Kiev",
format="%Y-%m-%d"
)
if not utility.end_date.startswith("9999"):
end = convert_date(
utility.end_date,
timezone="UTC",
to="Europe/Kiev",
format="%Y-%m-%d"
)
return os.path.join(
utility.config.out_path,
"{}@{}--{}-{}.csv".format(
utility.broker,
start,
end,
utility.operation
)
)
def parse_period_string(period):
if period:
dates = period.split('--')
if len(dates) > 2:
raise ValueError("Invalid date string")
start, end = [parse(date) for date in period.split('--')]
else:
end = datetime.date.today().replace(day=1)
start = (end - datetime.timedelta(days=1)).replace(day=1)
return start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d")
def get_out_name(files):
broker = os.path.basename(files[0].split('@')[0])
date = os.path.basename(files[0].split('@')[1]).split('-')[:-1]
operations = set(
[os.path.basename(f).split('-')[-1].split('.')[0] for f in files]
)
out_name = '{}@{}-{}.zip'.format(
broker, '-'.join(date), '-'.join(operations)
)
return out_name
def create_email_context_from_filename(file_name):
broker, period, ops = next(iter(re.findall(RE, file_name)))
if ops:
ops = ops.split('-')
type = ' and '.join(ops) if len(ops) == 2 else ', '.join(ops)
return {
'type': type,
'broker': broker,
'encrypted': bool('bids' in ops),
'period': period
}
def read_config(path):
with open(path) as _in:
return load(_in)
| openprocurement/reports | reports/helpers.py | Python | apache-2.0 | 9,179 | 0.000545 |
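# A minimal sketch of the period helpers above: parse_period_string() defaults
# to the previous calendar month when no period is given,
# prepare_report_interval() converts Europe/Kiev dates to the UTC bounds used
# for querying, and thresholds_headers() renders each threshold divided by 1000.
from reports.helpers import parse_period_string, prepare_report_interval, thresholds_headers
start, end = parse_period_string('2016-01-01--2016-02-01')
print('%s -- %s' % (start, end))              # 2016-01-01 -- 2016-02-01
print(prepare_report_interval([start, end]))  # UTC ISO timestamps
print(thresholds_headers([20000, 50000]))     # ['<= 20.0', '>20.0<=50.0', '>50.0']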
#!/usr/bin/env python3
########################################################################
# Solves problem 142 from projectEuler.net.
# ???
# Copyright (C) 2011 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.is-a-geek.com.ar
########################################################################
# x + y = a
# x + z = b
# y + z = c
# x - y = d
# x - z = e
# y - z = f
# e = a - c
# f = a - b
| sanSS/programming-contests | project-euler/problem142.py | Python | gpl-3.0 | 1,119 | 0.003575 |
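# The identities sketched above (a = x+y, b = x+z, c = y+z, d = x-y, e = x-z,
# f = y-z) reduce the search to square sums and differences. Below is only a
# naive checker illustrating the condition being targeted (all six values are
# perfect squares for x > y > z > 0); it is far too slow to find the answer.
from math import sqrt
def is_square(n):
    if n <= 0:
        return False
    r = int(sqrt(n))
    return r * r == n or (r + 1) * (r + 1) == n
def all_six_square(x, y, z):
    return all(is_square(v) for v in
               (x + y, x + z, y + z, x - y, x - z, y - z))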
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" ISO metadata parser """
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["gco","gmd","gml","gml32","gmx","gts","srv","xlink"])
ns[None] = n.get_namespace("gmd")
return ns
namespaces = get_namespaces()
class MD_Metadata(object):
""" Process gmd:MD_Metadata """
def __init__(self, md=None):
if md is None:
self.xml = None
self.identifier = None
self.parentidentifier = None
self.language = None
self.dataseturi = None
self.languagecode = None
self.datestamp = None
self.charset = None
self.hierarchy = None
self.contact = []
self.datetimestamp = None
self.stdname = None
self.stdver = None
self.referencesystem = None
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
self.distribution = None
self.dataquality = None
else:
if hasattr(md, 'getroot'): # standalone document
self.xml = etree.tostring(md.getroot())
else: # part of a larger document
self.xml = etree.tostring(md)
val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
self.identifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
self.parentidentifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
self.language = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
self.dataseturi = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
self.languagecode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
self.datestamp = util.testXMLValue(val)
if not self.datestamp:
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datestamp = util.testXMLValue(val)
self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datetimestamp = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
self.stdname = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
self.stdver = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
if val is not None:
self.referencesystem = MD_ReferenceSystem(val)
else:
self.referencesystem = None
# TODO: merge .identificationinfo into .identification
#warnings.warn(
# 'the .identification and .serviceidentification properties will merge into '
# '.identification being a list of properties. This is currently implemented '
# 'in .identificationinfo. '
# 'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
# FutureWarning)
val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
if val is not None:
self.identification = MD_DataIdentification(val, 'dataset')
self.serviceidentification = None
elif val2 is not None:
self.identification = MD_DataIdentification(val2, 'service')
self.serviceidentification = SV_ServiceIdentification(val2)
else:
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
val = list(idinfo)[0]
tagval = util.xmltag_split(val.tag)
if tagval == 'MD_DataIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
elif tagval == 'MD_ServiceIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'service'))
elif tagval == 'SV_ServiceIdentification':
self.identificationinfo.append(SV_ServiceIdentification(val))
val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
if val is not None:
self.distribution = MD_Distribution(val)
else:
self.distribution = None
val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
if val is not None:
self.dataquality = DQ_DataQuality(val)
else:
self.dataquality = None
class CI_Date(object):
""" process CI_Date """
def __init__(self, md=None):
if md is None:
self.date = None
self.type = None
else:
val = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
val = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
self.date = None
val = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.type = _testCodeListValue(val)
class CI_ResponsibleParty(object):
""" process CI_ResponsibleParty """
def __init__(self, md=None):
if md is None:
self.name = None
self.organization = None
self.position = None
self.phone = None
self.fax = None
self.address = None
self.city = None
self.region = None
self.postcode = None
self.country = None
self.email = None
self.onlineresource = None
self.role = None
else:
val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
self.organization = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
self.position = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString', namespaces))
self.phone = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString', namespaces))
self.fax = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString', namespaces))
self.address = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString', namespaces))
self.city = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString', namespaces))
self.region = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString', namespaces))
self.postcode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString', namespaces))
self.country = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString', namespaces))
self.email = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
if val is not None:
self.onlineresource = CI_OnlineResource(val)
else:
self.onlineresource = None
self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
""" process MD_DataIdentification """
def __init__(self, md=None, identtype=None):
if md is None:
self.identtype = None
self.title = None
self.alternatetitle = None
self.aggregationinfo = None
self.uricode = []
self.uricodespace = []
self.date = []
self.datetype = []
self.uselimitation = []
self.accessconstraints = []
self.classification = []
self.otherconstraints = []
self.securityconstraints = []
self.useconstraints = []
self.denominators = []
self.distance = []
self.uom = []
self.resourcelanguage = []
self.creator = None
self.publisher = None
self.originator = None
self.edition = None
self.abstract = None
self.purpose = None
self.status = None
self.contact = []
self.keywords = []
self.topiccategory = []
self.supplementalinformation = None
self.extent = None
self.bbox = None
self.temporalextent_start = None
self.temporalextent_end = None
else:
self.identtype = identtype
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.title = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
self.alternatetitle = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
self.aggregationinfo = util.testXMLValue(val)
self.uricode = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricode.append(val)
self.uricodespace = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricodespace.append(val)
self.date = []
self.datetype = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
self.date.append(CI_Date(i))
self.uselimitation = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uselimitation.append(val)
self.accessconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.accessconstraints.append(val)
self.classification = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.classification.append(val)
self.otherconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.otherconstraints.append(val)
self.securityconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.securityconstraints.append(val)
self.useconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.useconstraints.append(val)
self.denominators = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.denominators.append(val)
self.distance = []
self.uom = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.distance.append(val)
self.uom.append(i.get("uom"))
self.resourcelanguage = []
for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.resourcelanguage.append(val)
val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
if val is not None:
val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
if val2 is not None:
clv = _testCodeListValue(val)
if clv == 'originator':
self.creator = util.testXMLValue(val)
elif clv == 'publisher':
self.publisher = util.testXMLValue(val)
elif clv == 'contributor':
self.originator = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
self.edition = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
self.abstract = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
self.purpose = util.testXMLValue(val)
self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
self.keywords = []
for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
mdkw = {}
mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
mdkw['thesaurus'] = {}
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
mdkw['thesaurus']['title'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
mdkw['thesaurus']['date'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
mdkw['keywords'] = []
for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
if val is not None:
val2 = util.testXMLValue(val)
if val2 is not None:
mdkw['keywords'].append(val2)
self.keywords.append(mdkw)
self.topiccategory = []
for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.topiccategory.append(val)
val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
self.supplementalinformation = util.testXMLValue(val)
# There may be multiple geographicElement, create an extent
# from the one containing either an EX_GeographicBoundingBox or EX_BoundingPolygon.
# The schema also specifies an EX_GeographicDescription. This is not implemented yet.
val = None
val2 = None
val3 = None
extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
for extent in extents:
if val is None:
for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
val = e
break
self.extent = EX_Extent(val)
self.bbox = self.extent.boundingBox # for backwards compatibility
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
self.temporalextent_start = util.testXMLValue(val2)
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
""" process MD_Distributor """
def __init__(self, md=None):
if md is None:
self.contact = None
self.online = []
else:
self.contact = None
val = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
if val is not None:
self.contact = CI_ResponsibleParty(val)
self.online = []
for ol in md.findall(util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class MD_Distribution(object):
""" process MD_Distribution """
def __init__(self, md=None):
if md is None:
self.format = None
self.version = None
self.distributor = []
self.online = []
else:
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces))
self.format = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
self.distributor = []
for dist in md.findall(util.nspath_eval('gmd:distributor', namespaces)):
self.distributor.append(MD_Distributor(dist))
self.online = []
for ol in md.findall(util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class DQ_DataQuality(object):
''' process DQ_DataQuality'''
def __init__(self, md=None):
if md is None:
self.conformancetitle = []
self.conformancedate = []
self.conformancedatetype = []
self.conformancedegree = []
self.lineage = None
self.specificationtitle = None
self.specificationdate = []
else:
self.conformancetitle = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancetitle.append(val)
self.conformancedate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedate.append(val)
self.conformancedatetype = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.conformancedatetype.append(val)
self.conformancedegree = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedegree.append(val)
val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
self.lineage = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.specificationtitle = util.testXMLValue(val)
self.specificationdate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.specificationdate.append(val)
class SV_ServiceIdentification(object):
""" process SV_ServiceIdentification """
def __init__(self, md=None):
if md is None:
self.identtype = 'service'
self.type = None
self.version = None
self.fees = None
self.bbox = None
            self.couplingtype = None
self.operations = []
self.operateson = []
else:
self.identtype = 'service'
val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
self.type = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
self.fees = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
if val is not None:
self.bbox = EX_Extent(val)
else:
self.bbox = None
self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
self.operations = []
for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
tmp = {}
val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
tmp['name'] = util.testXMLValue(val)
tmp['dcplist'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
tmp['dcplist'].append(tmp2)
tmp['connectpoint'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
tmp['connectpoint'].append(CI_OnlineResource(tmp3))
self.operations.append(tmp)
self.operateson = []
for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
tmp = {}
tmp['uuidref'] = i.attrib.get('uuidref')
tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
self.operateson.append(tmp)
class CI_OnlineResource(object):
""" process CI_OnlineResource """
    def __init__(self, md=None):
if md is None:
self.url = None
self.protocol = None
self.name = None
self.description = None
self.function = None
else:
val = md.find(util.nspath_eval('gmd:linkage/gmd:URL', namespaces))
self.url = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:protocol/gco:CharacterString', namespaces))
self.protocol = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:description/gco:CharacterString', namespaces))
self.description = util.testXMLValue(val)
self.function = _testCodeListValue(md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
def __init__(self, md=None):
if md is None:
self.minx = None
self.maxx = None
self.miny = None
self.maxy = None
else:
val = md.find(util.nspath_eval('gmd:westBoundLongitude/gco:Decimal', namespaces))
self.minx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:eastBoundLongitude/gco:Decimal', namespaces))
self.maxx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:southBoundLatitude/gco:Decimal', namespaces))
self.miny = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:northBoundLatitude/gco:Decimal', namespaces))
self.maxy = util.testXMLValue(val)
class EX_Polygon(object):
def __init__(self, md=None):
if md is None:
self.exterior_ring = None
self.interior_rings = []
else:
            linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
            if linear_ring is not None:
                self.exterior_ring = self._coordinates_for_ring(linear_ring)
            else:
                self.exterior_ring = None
interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
self.interior_rings = []
for iring_element in interior_ring_elements:
linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
self.interior_rings.append(self._coordinates_for_ring(linear_ring))
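    # Helper: a gml32:LinearRing stores its vertices as gml32:pos elements;
    # each space-separated position is converted into a tuple of floats.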
def _coordinates_for_ring(self, linear_ring):
coordinates = []
positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
for pos in positions:
tokens = pos.text.split()
coords = tuple([float(t) for t in tokens])
coordinates.append(coords)
return coordinates
class EX_GeographicBoundingPolygon(object):
def __init__(self, md=None):
if md is None:
            self.is_extent = None
self.polygons = []
else:
val = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
self.is_extent = util.testXMLValue(val)
md_polygons = md.findall(util.nspath_eval('gmd:polygon', namespaces))
self.polygons = []
for val in md_polygons:
self.polygons.append(EX_Polygon(val))
class EX_Extent(object):
""" process EX_Extent """
def __init__(self, md=None):
if md is None:
self.boundingBox = None
self.boundingPolygon = None
self.description_code = None
else:
self.boundingBox = None
self.boundingPolygon = None
            bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
            if bboxElement is not None:
                self.boundingBox = EX_GeographicBoundingBox(bboxElement)
            polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
            if polygonElement is not None:
                self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)
            val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
            self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
""" process MD_ReferenceSystem """
def __init__(self, md):
if md is None:
            self.code = None
else:
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
self.code = util.testXMLValue(val)
def _testCodeListValue(elpath):
""" get gco:CodeListValue_Type attribute, else get text content """
if elpath is not None: # try to get @codeListValue
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else: # see if there is element text
return util.testXMLValue(elpath)
else:
return None
class CodelistCatalogue(object):
""" process CT_CodelistCatalogue """
def __init__(self, ct):
val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
self.scope = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
self.fieldapp = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
self.date = util.testXMLValue(val)
self.dictionaries = {}
for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id] = {}
val = i.find(util.nspath_eval('gml32:description', namespaces))
self.dictionaries[id]['description'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gml32:identifier', namespaces))
self.dictionaries[id]['identifier'] = util.testXMLValue(val)
self.dictionaries[id]['entries'] = {}
for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id]['entries'][id2] = {}
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)
def getcodelistdictionaries(self):
return self.dictionaries.keys()
def getcodedefinitionidentifiers(self, cdl):
        if cdl in self.dictionaries:
ids = []
for i in self.dictionaries[cdl]['entries']:
ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
return ids
else:
return None
| Gaia3D/QGIS | python/ext-libs/owslib/iso.py | Python | gpl-2.0 | 36,451 | 0.005816 |
import lie_group_diffeo as lgd
import odl
import numpy as np
# Select space and interpolation
space = odl.uniform_discr([-1, -1], [1, 1], [200, 200], interp='linear')
# Select template and target as gaussians
template = space.element(lambda x: np.exp(-(5 * x[0]**2 + x[1]**2) / 0.4**2))
target = space.element(lambda x: np.exp(-(1 * (x[0] + 0.2)**2 + x[1]**2) / 0.4**2))
# Define data matching functional
data_matching = odl.solvers.L2NormSquared(space).translated(target)
# Define the lie group to use.
lie_grp_type = 'affine'
if lie_grp_type == 'gln':
lie_grp = lgd.GLn(space.ndim)
deform_action = lgd.MatrixImageAction(lie_grp, space)
elif lie_grp_type == 'son':
lie_grp = lgd.SOn(space.ndim)
deform_action = lgd.MatrixImageAction(lie_grp, space)
elif lie_grp_type == 'sln':
lie_grp = lgd.SLn(space.ndim)
deform_action = lgd.MatrixImageAction(lie_grp, space)
elif lie_grp_type == 'affine':
lie_grp = lgd.AffineGroup(space.ndim)
deform_action = lgd.MatrixImageAffineAction(lie_grp, space)
elif lie_grp_type == 'rigid':
lie_grp = lgd.EuclideanGroup(space.ndim)
deform_action = lgd.MatrixImageAffineAction(lie_grp, space)
else:
assert False
# Define what regularizer to use
regularizer = 'determinant'
if regularizer == 'image':
# Create set of all points in space
W = space.tangent_bundle
w = W.element(space.points().T)
# Create regularizing functional
regularizer = 0.01 * odl.solvers.L2NormSquared(W).translated(w)
# Create action
regularizer_action = lgd.ProductSpaceAction(deform_action, W.size)
elif regularizer == 'point':
W = odl.ProductSpace(odl.rn(space.ndim), 3)
w = W.element([[0, 0],
[0, 1],
[1, 0]])
# Create regularizing functional
regularizer = 0.01 * odl.solvers.L2NormSquared(W).translated(w)
# Create action
if lie_grp_type == 'affine' or lie_grp_type == 'rigid':
point_action = lgd.MatrixVectorAffineAction(lie_grp, W[0])
else:
point_action = lgd.MatrixVectorAction(lie_grp, W[0])
regularizer_action = lgd.ProductSpaceAction(point_action, W.size)
elif regularizer == 'determinant':
W = odl.rn(1)
w = W.element([1])
# Create regularizing functional
regularizer = 0.2 * odl.solvers.L2NormSquared(W).translated(w)
# Create action
regularizer_action = lgd.MatrixDeterminantAction(lie_grp, W)
else:
assert False
# Initial guess
g = lie_grp.identity
# Combine action and functional into single object.
action = lgd.ProductSpaceAction(deform_action, regularizer_action)
x = action.domain.element([template, w]).copy()
f = odl.solvers.SeparableSum(data_matching, regularizer)
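# `g` is the current group element, `x` bundles the deformed template with the
# regularization target, and `f` is the separable objective (data matching on
# the image plus the regularizer on the second component).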
# Show some results, reuse the plot
template.show('template')
target.show('target')
# Create callback that displays the current iterate and prints the function
# value
callback = odl.solvers.CallbackShow(lie_grp_type, step=10, indices=0)
callback &= odl.solvers.CallbackPrint(f)
# Solve via gradient flow
lgd.gradient_flow_solver(x, f, g, action,
niter=500, line_search=0.2, callback=callback)
| adler-j/lie_grp_diffeo | examples/deformation_closest_pt_2d.py | Python | gpl-3.0 | 3,110 | 0.000322 |
from radar.auth.passwords import (
check_password_hash,
generate_password,
generate_password_hash,
get_password_length,
is_strong_password,
password_to_nato_str,
)
from radar.models.users import User
def test_password_to_nato_str():
password = 'aAzZ123'
assert password_to_nato_str(password) == 'lower alfa, UPPER ALFA, lower zulu, UPPER ZULU, ONE, TWO, THREE'
def test_password_hash():
password = 'password123'
password_hash = generate_password_hash('password123')
assert password_hash != password
assert check_password_hash(password_hash, password)
def test_generate_password(app):
with app.app_context():
password = generate_password()
assert len(password) == get_password_length()
def test_weak_passwords(app):
with app.app_context():
assert not is_strong_password('password123')
def test_strong_passwords(app):
with app.app_context():
assert is_strong_password('besiderisingwoodennearer')
assert is_strong_password('7pJnW4yUWx')
def test_weak_passwords_for_user(app):
user = User()
user.username = 'dtclihbswm'
user.email = 'rihylunxov@example.org'
user.first_name = 'fvgmptirzl'
user.last_name = 'uehnpqjarf'
suffix = 'hello418'
username_password = user.username + suffix
email_password = user.email + suffix
first_name_password = user.first_name + suffix
last_name_password = user.last_name + suffix
with app.app_context():
assert is_strong_password(username_password)
assert is_strong_password(email_password)
assert is_strong_password(first_name_password)
assert is_strong_password(last_name_password)
| renalreg/radar | tests/auth/test_passwords.py | Python | agpl-3.0 | 1,704 | 0.000587 |
# This file is autogenerated by the get_windows_info.py script
# Do not edit.
win_tz = {
'AUS Central Standard Time': 'Australia/Darwin',
'AUS Eastern Standard Time': 'Australia/Sydney',
'Afghanistan Standard Time': 'Asia/Kabul',
'Alaskan Standard Time': 'America/Anchorage',
'Arab Standard Time': 'Asia/Riyadh',
'Arabian Standard Time': 'Asia/Dubai',
'Arabic Standard Time': 'Asia/Baghdad',
'Argentina Standard Time': 'America/Buenos_Aires',
'Atlantic Standard Time': 'America/Halifax',
'Azerbaijan Standard Time': 'Asia/Baku',
'Azores Standard Time': 'Atlantic/Azores',
'Bahia Standard Time': 'America/Bahia',
'Bangladesh Standard Time': 'Asia/Dhaka',
'Canada Central Standard Time': 'America/Regina',
'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
'Caucasus Standard Time': 'Asia/Yerevan',
'Cen. Australia Standard Time': 'Australia/Adelaide',
'Central America Standard Time': 'America/Guatemala',
'Central Asia Standard Time': 'Asia/Almaty',
'Central Brazilian Standard Time': 'America/Cuiaba',
'Central Europe Standard Time': 'Europe/Budapest',
'Central European Standard Time': 'Europe/Warsaw',
'Central Pacific Standard Time': 'Pacific/Guadalcanal',
'Central Standard Time': 'America/Chicago',
'Central Standard Time (Mexico)': 'America/Mexico_City',
'China Standard Time': 'Asia/Shanghai',
'Dateline Standard Time': 'Etc/GMT+12',
'E. Africa Standard Time': 'Africa/Nairobi',
'E. Australia Standard Time': 'Australia/Brisbane',
'E. Europe Standard Time': 'Asia/Nicosia',
'E. South America Standard Time': 'America/Sao_Paulo',
'Eastern Standard Time': 'America/New_York',
'Egypt Standard Time': 'Africa/Cairo',
'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
'FLE Standard Time': 'Europe/Kiev',
'Fiji Standard Time': 'Pacific/Fiji',
'GMT Standard Time': 'Europe/London',
'GTB Standard Time': 'Europe/Bucharest',
'Georgian Standard Time': 'Asia/Tbilisi',
'Greenland Standard Time': 'America/Godthab',
'Greenwich Standard Time': 'Atlantic/Reykjavik',
'Hawaiian Standard Time': 'Pacific/Honolulu',
'India Standard Time': 'Asia/Calcutta',
'Iran Standard Time': 'Asia/Tehran',
'Israel Standard Time': 'Asia/Jerusalem',
'Jordan Standard Time': 'Asia/Amman',
'Kaliningrad Standard Time': 'Europe/Kaliningrad',
'Korea Standard Time': 'Asia/Seoul',
'Libya Standard Time': 'Africa/Tripoli',
'Magadan Standard Time': 'Asia/Magadan',
'Mauritius Standard Time': 'Indian/Mauritius',
'Middle East Standard Time': 'Asia/Beirut',
'Montevideo Standard Time': 'America/Montevideo',
'Morocco Standard Time': 'Africa/Casablanca',
'Mountain Standard Time': 'America/Denver',
'Mountain Standard Time (Mexico)': 'America/Chihuahua',
'Myanmar Standard Time': 'Asia/Rangoon',
'N. Central Asia Standard Time': 'Asia/Novosibirsk',
'Namibia Standard Time': 'Africa/Windhoek',
'Nepal Standard Time': 'Asia/Katmandu',
'New Zealand Standard Time': 'Pacific/Auckland',
'Newfoundland Standard Time': 'America/St_Johns',
'North Asia East Standard Time': 'Asia/Irkutsk',
'North Asia Standard Time': 'Asia/Krasnoyarsk',
'Pacific SA Standard Time': 'America/Santiago',
'Pacific Standard Time': 'America/Los_Angeles',
'Pacific Standard Time (Mexico)': 'America/Santa_Isabel',
'Pakistan Standard Time': 'Asia/Karachi',
'Paraguay Standard Time': 'America/Asuncion',
'Romance Standard Time': 'Europe/Paris',
'Russian Standard Time': 'Europe/Moscow',
'SA Eastern Standard Time': 'America/Cayenne',
'SA Pacific Standard Time': 'America/Bogota',
'SA Western Standard Time': 'America/La_Paz',
'SE Asia Standard Time': 'Asia/Bangkok',
'Samoa Standard Time': 'Pacific/Apia',
'Singapore Standard Time': 'Asia/Singapore',
'South Africa Standard Time': 'Africa/Johannesburg',
'Sri Lanka Standard Time': 'Asia/Colombo',
'Syria Standard Time': 'Asia/Damascus',
'Taipei Standard Time': 'Asia/Taipei',
'Tasmania Standard Time': 'Australia/Hobart',
'Tokyo Standard Time': 'Asia/Tokyo',
'Tonga Standard Time': 'Pacific/Tongatapu',
'Turkey Standard Time': 'Europe/Istanbul',
'US Eastern Standard Time': 'America/Indianapolis',
'US Mountain Standard Time': 'America/Phoenix',
'UTC': 'Etc/GMT',
'UTC+12': 'Etc/GMT-12',
'UTC-02': 'Etc/GMT+2',
'UTC-11': 'Etc/GMT+11',
'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
'Venezuela Standard Time': 'America/Caracas',
'Vladivostok Standard Time': 'Asia/Vladivostok',
'W. Australia Standard Time': 'Australia/Perth',
'W. Central Africa Standard Time': 'Africa/Lagos',
'W. Europe Standard Time': 'Europe/Berlin',
'West Asia Standard Time': 'Asia/Tashkent',
'West Pacific Standard Time': 'Pacific/Port_Moresby',
'Yakutsk Standard Time': 'Asia/Yakutsk'
}
# Old name for the win_tz variable:
tz_names = win_tz
tz_win = {
'Africa/Abidjan': 'Greenwich Standard Time',
'Africa/Accra': 'Greenwich Standard Time',
'Africa/Addis_Ababa': 'E. Africa Standard Time',
'Africa/Algiers': 'W. Central Africa Standard Time',
'Africa/Asmera': 'E. Africa Standard Time',
'Africa/Bamako': 'Greenwich Standard Time',
'Africa/Bangui': 'W. Central Africa Standard Time',
'Africa/Banjul': 'Greenwich Standard Time',
'Africa/Bissau': 'Greenwich Standard Time',
'Africa/Blantyre': 'South Africa Standard Time',
'Africa/Brazzaville': 'W. Central Africa Standard Time',
'Africa/Bujumbura': 'South Africa Standard Time',
'Africa/Cairo': 'Egypt Standard Time',
'Africa/Casablanca': 'Morocco Standard Time',
'Africa/Ceuta': 'Romance Standard Time',
'Africa/Conakry': 'Greenwich Standard Time',
'Africa/Dakar': 'Greenwich Standard Time',
'Africa/Dar_es_Salaam': 'E. Africa Standard Time',
'Africa/Djibouti': 'E. Africa Standard Time',
'Africa/Douala': 'W. Central Africa Standard Time',
'Africa/El_Aaiun': 'Morocco Standard Time',
'Africa/Freetown': 'Greenwich Standard Time',
'Africa/Gaborone': 'South Africa Standard Time',
'Africa/Harare': 'South Africa Standard Time',
'Africa/Johannesburg': 'South Africa Standard Time',
'Africa/Juba': 'E. Africa Standard Time',
'Africa/Kampala': 'E. Africa Standard Time',
'Africa/Khartoum': 'E. Africa Standard Time',
'Africa/Kigali': 'South Africa Standard Time',
'Africa/Kinshasa': 'W. Central Africa Standard Time',
'Africa/Lagos': 'W. Central Africa Standard Time',
'Africa/Libreville': 'W. Central Africa Standard Time',
'Africa/Lome': 'Greenwich Standard Time',
'Africa/Luanda': 'W. Central Africa Standard Time',
'Africa/Lubumbashi': 'South Africa Standard Time',
'Africa/Lusaka': 'South Africa Standard Time',
'Africa/Malabo': 'W. Central Africa Standard Time',
'Africa/Maputo': 'South Africa Standard Time',
'Africa/Maseru': 'South Africa Standard Time',
'Africa/Mbabane': 'South Africa Standard Time',
'Africa/Mogadishu': 'E. Africa Standard Time',
'Africa/Monrovia': 'Greenwich Standard Time',
'Africa/Nairobi': 'E. Africa Standard Time',
'Africa/Ndjamena': 'W. Central Africa Standard Time',
'Africa/Niamey': 'W. Central Africa Standard Time',
'Africa/Nouakchott': 'Greenwich Standard Time',
'Africa/Ouagadougou': 'Greenwich Standard Time',
'Africa/Porto-Novo': 'W. Central Africa Standard Time',
'Africa/Sao_Tome': 'Greenwich Standard Time',
'Africa/Tripoli': 'Libya Standard Time',
'Africa/Tunis': 'W. Central Africa Standard Time',
'Africa/Windhoek': 'Namibia Standard Time',
'America/Anchorage': 'Alaskan Standard Time',
'America/Anguilla': 'SA Western Standard Time',
'America/Antigua': 'SA Western Standard Time',
'America/Araguaina': 'SA Eastern Standard Time',
'America/Argentina/La_Rioja': 'Argentina Standard Time',
'America/Argentina/Rio_Gallegos': 'Argentina Standard Time',
'America/Argentina/Salta': 'Argentina Standard Time',
'America/Argentina/San_Juan': 'Argentina Standard Time',
'America/Argentina/San_Luis': 'Argentina Standard Time',
'America/Argentina/Tucuman': 'Argentina Standard Time',
'America/Argentina/Ushuaia': 'Argentina Standard Time',
'America/Aruba': 'SA Western Standard Time',
'America/Asuncion': 'Paraguay Standard Time',
'America/Bahia': 'Bahia Standard Time',
'America/Bahia_Banderas': 'Central Standard Time (Mexico)',
'America/Barbados': 'SA Western Standard Time',
'America/Belem': 'SA Eastern Standard Time',
'America/Belize': 'Central America Standard Time',
'America/Blanc-Sablon': 'SA Western Standard Time',
'America/Boa_Vista': 'SA Western Standard Time',
'America/Bogota': 'SA Pacific Standard Time',
'America/Boise': 'Mountain Standard Time',
'America/Buenos_Aires': 'Argentina Standard Time',
'America/Cambridge_Bay': 'Mountain Standard Time',
'America/Campo_Grande': 'Central Brazilian Standard Time',
'America/Cancun': 'Central Standard Time (Mexico)',
'America/Caracas': 'Venezuela Standard Time',
'America/Catamarca': 'Argentina Standard Time',
'America/Cayenne': 'SA Eastern Standard Time',
'America/Cayman': 'SA Pacific Standard Time',
'America/Chicago': 'Central Standard Time',
'America/Chihuahua': 'Mountain Standard Time (Mexico)',
'America/Coral_Harbour': 'SA Pacific Standard Time',
'America/Cordoba': 'Argentina Standard Time',
'America/Costa_Rica': 'Central America Standard Time',
'America/Creston': 'US Mountain Standard Time',
'America/Cuiaba': 'Central Brazilian Standard Time',
'America/Curacao': 'SA Western Standard Time',
'America/Danmarkshavn': 'UTC',
'America/Dawson': 'Pacific Standard Time',
'America/Dawson_Creek': 'US Mountain Standard Time',
'America/Denver': 'Mountain Standard Time',
'America/Detroit': 'Eastern Standard Time',
'America/Dominica': 'SA Western Standard Time',
'America/Edmonton': 'Mountain Standard Time',
'America/Eirunepe': 'SA Pacific Standard Time',
'America/El_Salvador': 'Central America Standard Time',
'America/Fortaleza': 'SA Eastern Standard Time',
'America/Glace_Bay': 'Atlantic Standard Time',
'America/Godthab': 'Greenland Standard Time',
'America/Goose_Bay': 'Atlantic Standard Time',
'America/Grand_Turk': 'Eastern Standard Time',
'America/Grenada': 'SA Western Standard Time',
'America/Guadeloupe': 'SA Western Standard Time',
'America/Guatemala': 'Central America Standard Time',
'America/Guayaquil': 'SA Pacific Standard Time',
'America/Guyana': 'SA Western Standard Time',
'America/Halifax': 'Atlantic Standard Time',
'America/Havana': 'Eastern Standard Time',
'America/Hermosillo': 'US Mountain Standard Time',
'America/Indiana/Knox': 'Central Standard Time',
'America/Indiana/Marengo': 'US Eastern Standard Time',
'America/Indiana/Petersburg': 'Eastern Standard Time',
'America/Indiana/Tell_City': 'Central Standard Time',
'America/Indiana/Vevay': 'US Eastern Standard Time',
'America/Indiana/Vincennes': 'Eastern Standard Time',
'America/Indiana/Winamac': 'Eastern Standard Time',
'America/Indianapolis': 'US Eastern Standard Time',
'America/Inuvik': 'Mountain Standard Time',
'America/Iqaluit': 'Eastern Standard Time',
'America/Jamaica': 'SA Pacific Standard Time',
'America/Jujuy': 'Argentina Standard Time',
'America/Juneau': 'Alaskan Standard Time',
'America/Kentucky/Monticello': 'Eastern Standard Time',
'America/Kralendijk': 'SA Western Standard Time',
'America/La_Paz': 'SA Western Standard Time',
'America/Lima': 'SA Pacific Standard Time',
'America/Los_Angeles': 'Pacific Standard Time',
'America/Louisville': 'Eastern Standard Time',
'America/Lower_Princes': 'SA Western Standard Time',
'America/Maceio': 'SA Eastern Standard Time',
'America/Managua': 'Central America Standard Time',
'America/Manaus': 'SA Western Standard Time',
'America/Marigot': 'SA Western Standard Time',
'America/Martinique': 'SA Western Standard Time',
'America/Matamoros': 'Central Standard Time',
'America/Mazatlan': 'Mountain Standard Time (Mexico)',
'America/Mendoza': 'Argentina Standard Time',
'America/Menominee': 'Central Standard Time',
'America/Merida': 'Central Standard Time (Mexico)',
'America/Mexico_City': 'Central Standard Time (Mexico)',
'America/Moncton': 'Atlantic Standard Time',
'America/Monterrey': 'Central Standard Time (Mexico)',
'America/Montevideo': 'Montevideo Standard Time',
'America/Montreal': 'Eastern Standard Time',
'America/Montserrat': 'SA Western Standard Time',
'America/Nassau': 'Eastern Standard Time',
'America/New_York': 'Eastern Standard Time',
'America/Nipigon': 'Eastern Standard Time',
'America/Nome': 'Alaskan Standard Time',
'America/Noronha': 'UTC-02',
'America/North_Dakota/Beulah': 'Central Standard Time',
'America/North_Dakota/Center': 'Central Standard Time',
'America/North_Dakota/New_Salem': 'Central Standard Time',
'America/Ojinaga': 'Mountain Standard Time',
'America/Panama': 'SA Pacific Standard Time',
'America/Pangnirtung': 'Eastern Standard Time',
'America/Paramaribo': 'SA Eastern Standard Time',
'America/Phoenix': 'US Mountain Standard Time',
'America/Port-au-Prince': 'Eastern Standard Time',
'America/Port_of_Spain': 'SA Western Standard Time',
'America/Porto_Velho': 'SA Western Standard Time',
'America/Puerto_Rico': 'SA Western Standard Time',
'America/Rainy_River': 'Central Standard Time',
'America/Rankin_Inlet': 'Central Standard Time',
'America/Recife': 'SA Eastern Standard Time',
'America/Regina': 'Canada Central Standard Time',
'America/Resolute': 'Central Standard Time',
'America/Rio_Branco': 'SA Pacific Standard Time',
'America/Santa_Isabel': 'Pacific Standard Time (Mexico)',
'America/Santarem': 'SA Eastern Standard Time',
'America/Santiago': 'Pacific SA Standard Time',
'America/Santo_Domingo': 'SA Western Standard Time',
'America/Sao_Paulo': 'E. South America Standard Time',
'America/Scoresbysund': 'Azores Standard Time',
'America/Shiprock': 'Mountain Standard Time',
'America/Sitka': 'Alaskan Standard Time',
'America/St_Barthelemy': 'SA Western Standard Time',
'America/St_Johns': 'Newfoundland Standard Time',
'America/St_Kitts': 'SA Western Standard Time',
'America/St_Lucia': 'SA Western Standard Time',
'America/St_Thomas': 'SA Western Standard Time',
'America/St_Vincent': 'SA Western Standard Time',
'America/Swift_Current': 'Canada Central Standard Time',
'America/Tegucigalpa': 'Central America Standard Time',
'America/Thule': 'Atlantic Standard Time',
'America/Thunder_Bay': 'Eastern Standard Time',
'America/Tijuana': 'Pacific Standard Time',
'America/Toronto': 'Eastern Standard Time',
'America/Tortola': 'SA Western Standard Time',
'America/Vancouver': 'Pacific Standard Time',
'America/Whitehorse': 'Pacific Standard Time',
'America/Winnipeg': 'Central Standard Time',
'America/Yakutat': 'Alaskan Standard Time',
'America/Yellowknife': 'Mountain Standard Time',
'Antarctica/Casey': 'W. Australia Standard Time',
'Antarctica/Davis': 'SE Asia Standard Time',
'Antarctica/DumontDUrville': 'West Pacific Standard Time',
'Antarctica/Macquarie': 'Central Pacific Standard Time',
'Antarctica/Mawson': 'West Asia Standard Time',
'Antarctica/McMurdo': 'New Zealand Standard Time',
'Antarctica/Palmer': 'Pacific SA Standard Time',
'Antarctica/Rothera': 'SA Eastern Standard Time',
'Antarctica/South_Pole': 'New Zealand Standard Time',
'Antarctica/Syowa': 'E. Africa Standard Time',
'Antarctica/Vostok': 'Central Asia Standard Time',
'Arctic/Longyearbyen': 'W. Europe Standard Time',
'Asia/Aden': 'Arab Standard Time',
'Asia/Almaty': 'Central Asia Standard Time',
'Asia/Amman': 'Jordan Standard Time',
'Asia/Anadyr': 'Magadan Standard Time',
'Asia/Aqtau': 'West Asia Standard Time',
'Asia/Aqtobe': 'West Asia Standard Time',
'Asia/Ashgabat': 'West Asia Standard Time',
'Asia/Baghdad': 'Arabic Standard Time',
'Asia/Bahrain': 'Arab Standard Time',
'Asia/Baku': 'Azerbaijan Standard Time',
'Asia/Bangkok': 'SE Asia Standard Time',
'Asia/Beirut': 'Middle East Standard Time',
'Asia/Bishkek': 'Central Asia Standard Time',
'Asia/Brunei': 'Singapore Standard Time',
'Asia/Calcutta': 'India Standard Time',
'Asia/Choibalsan': 'Ulaanbaatar Standard Time',
'Asia/Chongqing': 'China Standard Time',
'Asia/Colombo': 'Sri Lanka Standard Time',
'Asia/Damascus': 'Syria Standard Time',
'Asia/Dhaka': 'Bangladesh Standard Time',
'Asia/Dili': 'Tokyo Standard Time',
'Asia/Dubai': 'Arabian Standard Time',
'Asia/Dushanbe': 'West Asia Standard Time',
'Asia/Harbin': 'China Standard Time',
'Asia/Hong_Kong': 'China Standard Time',
'Asia/Hovd': 'SE Asia Standard Time',
'Asia/Irkutsk': 'North Asia East Standard Time',
'Asia/Jakarta': 'SE Asia Standard Time',
'Asia/Jayapura': 'Tokyo Standard Time',
'Asia/Jerusalem': 'Israel Standard Time',
'Asia/Kabul': 'Afghanistan Standard Time',
'Asia/Kamchatka': 'Magadan Standard Time',
'Asia/Karachi': 'Pakistan Standard Time',
'Asia/Kashgar': 'China Standard Time',
'Asia/Katmandu': 'Nepal Standard Time',
'Asia/Khandyga': 'Yakutsk Standard Time',
'Asia/Krasnoyarsk': 'North Asia Standard Time',
'Asia/Kuala_Lumpur': 'Singapore Standard Time',
'Asia/Kuching': 'Singapore Standard Time',
'Asia/Kuwait': 'Arab Standard Time',
'Asia/Macau': 'China Standard Time',
'Asia/Magadan': 'Magadan Standard Time',
'Asia/Makassar': 'Singapore Standard Time',
'Asia/Manila': 'Singapore Standard Time',
'Asia/Muscat': 'Arabian Standard Time',
'Asia/Nicosia': 'E. Europe Standard Time',
'Asia/Novokuznetsk': 'N. Central Asia Standard Time',
'Asia/Novosibirsk': 'N. Central Asia Standard Time',
'Asia/Omsk': 'N. Central Asia Standard Time',
'Asia/Oral': 'West Asia Standard Time',
'Asia/Phnom_Penh': 'SE Asia Standard Time',
'Asia/Pontianak': 'SE Asia Standard Time',
'Asia/Pyongyang': 'Korea Standard Time',
'Asia/Qatar': 'Arab Standard Time',
'Asia/Qyzylorda': 'Central Asia Standard Time',
'Asia/Rangoon': 'Myanmar Standard Time',
'Asia/Riyadh': 'Arab Standard Time',
'Asia/Saigon': 'SE Asia Standard Time',
'Asia/Sakhalin': 'Vladivostok Standard Time',
'Asia/Samarkand': 'West Asia Standard Time',
'Asia/Seoul': 'Korea Standard Time',
'Asia/Shanghai': 'China Standard Time',
'Asia/Singapore': 'Singapore Standard Time',
'Asia/Taipei': 'Taipei Standard Time',
'Asia/Tashkent': 'West Asia Standard Time',
'Asia/Tbilisi': 'Georgian Standard Time',
'Asia/Tehran': 'Iran Standard Time',
'Asia/Thimphu': 'Bangladesh Standard Time',
'Asia/Tokyo': 'Tokyo Standard Time',
'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time',
'Asia/Urumqi': 'China Standard Time',
'Asia/Ust-Nera': 'Vladivostok Standard Time',
'Asia/Vientiane': 'SE Asia Standard Time',
'Asia/Vladivostok': 'Vladivostok Standard Time',
'Asia/Yakutsk': 'Yakutsk Standard Time',
'Asia/Yekaterinburg': 'Ekaterinburg Standard Time',
'Asia/Yerevan': 'Caucasus Standard Time',
'Atlantic/Azores': 'Azores Standard Time',
'Atlantic/Bermuda': 'Atlantic Standard Time',
'Atlantic/Canary': 'GMT Standard Time',
'Atlantic/Cape_Verde': 'Cape Verde Standard Time',
'Atlantic/Faeroe': 'GMT Standard Time',
'Atlantic/Madeira': 'GMT Standard Time',
'Atlantic/Reykjavik': 'Greenwich Standard Time',
'Atlantic/South_Georgia': 'UTC-02',
'Atlantic/St_Helena': 'Greenwich Standard Time',
'Atlantic/Stanley': 'SA Eastern Standard Time',
'Australia/Adelaide': 'Cen. Australia Standard Time',
'Australia/Brisbane': 'E. Australia Standard Time',
'Australia/Broken_Hill': 'Cen. Australia Standard Time',
'Australia/Currie': 'Tasmania Standard Time',
'Australia/Darwin': 'AUS Central Standard Time',
'Australia/Hobart': 'Tasmania Standard Time',
'Australia/Lindeman': 'E. Australia Standard Time',
'Australia/Melbourne': 'AUS Eastern Standard Time',
'Australia/Perth': 'W. Australia Standard Time',
'Australia/Sydney': 'AUS Eastern Standard Time',
'CST6CDT': 'Central Standard Time',
'EST5EDT': 'Eastern Standard Time',
'Etc/GMT': 'UTC',
'Etc/GMT+1': 'Cape Verde Standard Time',
'Etc/GMT+10': 'Hawaiian Standard Time',
'Etc/GMT+11': 'UTC-11',
'Etc/GMT+12': 'Dateline Standard Time',
'Etc/GMT+2': 'UTC-02',
'Etc/GMT+3': 'SA Eastern Standard Time',
'Etc/GMT+4': 'SA Western Standard Time',
'Etc/GMT+5': 'SA Pacific Standard Time',
'Etc/GMT+6': 'Central America Standard Time',
'Etc/GMT+7': 'US Mountain Standard Time',
'Etc/GMT-1': 'W. Central Africa Standard Time',
'Etc/GMT-10': 'West Pacific Standard Time',
'Etc/GMT-11': 'Central Pacific Standard Time',
'Etc/GMT-12': 'UTC+12',
'Etc/GMT-13': 'Tonga Standard Time',
'Etc/GMT-2': 'South Africa Standard Time',
'Etc/GMT-3': 'E. Africa Standard Time',
'Etc/GMT-4': 'Arabian Standard Time',
'Etc/GMT-5': 'West Asia Standard Time',
'Etc/GMT-6': 'Central Asia Standard Time',
'Etc/GMT-7': 'SE Asia Standard Time',
'Etc/GMT-8': 'Singapore Standard Time',
'Etc/GMT-9': 'Tokyo Standard Time',
'Etc/UTC': 'UTC',
'Europe/Amsterdam': 'W. Europe Standard Time',
'Europe/Andorra': 'W. Europe Standard Time',
'Europe/Athens': 'GTB Standard Time',
'Europe/Belgrade': 'Central Europe Standard Time',
'Europe/Berlin': 'W. Europe Standard Time',
'Europe/Bratislava': 'Central Europe Standard Time',
'Europe/Brussels': 'Romance Standard Time',
'Europe/Bucharest': 'GTB Standard Time',
'Europe/Budapest': 'Central Europe Standard Time',
'Europe/Busingen': 'W. Europe Standard Time',
'Europe/Chisinau': 'GTB Standard Time',
'Europe/Copenhagen': 'Romance Standard Time',
'Europe/Dublin': 'GMT Standard Time',
'Europe/Gibraltar': 'W. Europe Standard Time',
'Europe/Guernsey': 'GMT Standard Time',
'Europe/Helsinki': 'FLE Standard Time',
'Europe/Isle_of_Man': 'GMT Standard Time',
'Europe/Istanbul': 'Turkey Standard Time',
'Europe/Jersey': 'GMT Standard Time',
'Europe/Kaliningrad': 'Kaliningrad Standard Time',
'Europe/Kiev': 'FLE Standard Time',
'Europe/Lisbon': 'GMT Standard Time',
'Europe/Ljubljana': 'Central Europe Standard Time',
'Europe/London': 'GMT Standard Time',
'Europe/Luxembourg': 'W. Europe Standard Time',
'Europe/Madrid': 'Romance Standard Time',
'Europe/Malta': 'W. Europe Standard Time',
'Europe/Mariehamn': 'FLE Standard Time',
'Europe/Minsk': 'Kaliningrad Standard Time',
'Europe/Monaco': 'W. Europe Standard Time',
'Europe/Moscow': 'Russian Standard Time',
'Europe/Oslo': 'W. Europe Standard Time',
'Europe/Paris': 'Romance Standard Time',
'Europe/Podgorica': 'Central Europe Standard Time',
'Europe/Prague': 'Central Europe Standard Time',
'Europe/Riga': 'FLE Standard Time',
'Europe/Rome': 'W. Europe Standard Time',
'Europe/Samara': 'Russian Standard Time',
'Europe/San_Marino': 'W. Europe Standard Time',
'Europe/Sarajevo': 'Central European Standard Time',
'Europe/Simferopol': 'FLE Standard Time',
'Europe/Skopje': 'Central European Standard Time',
'Europe/Sofia': 'FLE Standard Time',
'Europe/Stockholm': 'W. Europe Standard Time',
'Europe/Tallinn': 'FLE Standard Time',
'Europe/Tirane': 'Central Europe Standard Time',
'Europe/Uzhgorod': 'FLE Standard Time',
'Europe/Vaduz': 'W. Europe Standard Time',
'Europe/Vatican': 'W. Europe Standard Time',
'Europe/Vienna': 'W. Europe Standard Time',
'Europe/Vilnius': 'FLE Standard Time',
'Europe/Volgograd': 'Russian Standard Time',
'Europe/Warsaw': 'Central European Standard Time',
'Europe/Zagreb': 'Central European Standard Time',
'Europe/Zaporozhye': 'FLE Standard Time',
'Europe/Zurich': 'W. Europe Standard Time',
'Indian/Antananarivo': 'E. Africa Standard Time',
'Indian/Chagos': 'Central Asia Standard Time',
'Indian/Christmas': 'SE Asia Standard Time',
'Indian/Cocos': 'Myanmar Standard Time',
'Indian/Comoro': 'E. Africa Standard Time',
'Indian/Kerguelen': 'West Asia Standard Time',
'Indian/Mahe': 'Mauritius Standard Time',
'Indian/Maldives': 'West Asia Standard Time',
'Indian/Mauritius': 'Mauritius Standard Time',
'Indian/Mayotte': 'E. Africa Standard Time',
'Indian/Reunion': 'Mauritius Standard Time',
'MST7MDT': 'Mountain Standard Time',
'PST8PDT': 'Pacific Standard Time',
'Pacific/Apia': 'Samoa Standard Time',
'Pacific/Auckland': 'New Zealand Standard Time',
'Pacific/Efate': 'Central Pacific Standard Time',
'Pacific/Enderbury': 'Tonga Standard Time',
'Pacific/Fakaofo': 'Tonga Standard Time',
'Pacific/Fiji': 'Fiji Standard Time',
'Pacific/Funafuti': 'UTC+12',
'Pacific/Galapagos': 'Central America Standard Time',
'Pacific/Guadalcanal': 'Central Pacific Standard Time',
'Pacific/Guam': 'West Pacific Standard Time',
'Pacific/Honolulu': 'Hawaiian Standard Time',
'Pacific/Johnston': 'Hawaiian Standard Time',
'Pacific/Kosrae': 'Central Pacific Standard Time',
'Pacific/Kwajalein': 'UTC+12',
'Pacific/Majuro': 'UTC+12',
'Pacific/Midway': 'UTC-11',
'Pacific/Nauru': 'UTC+12',
'Pacific/Niue': 'UTC-11',
'Pacific/Noumea': 'Central Pacific Standard Time',
'Pacific/Pago_Pago': 'UTC-11',
'Pacific/Palau': 'Tokyo Standard Time',
'Pacific/Ponape': 'Central Pacific Standard Time',
'Pacific/Port_Moresby': 'West Pacific Standard Time',
'Pacific/Rarotonga': 'Hawaiian Standard Time',
'Pacific/Saipan': 'West Pacific Standard Time',
'Pacific/Tahiti': 'Hawaiian Standard Time',
'Pacific/Tarawa': 'UTC+12',
'Pacific/Tongatapu': 'Tonga Standard Time',
'Pacific/Truk': 'West Pacific Standard Time',
'Pacific/Wake': 'UTC+12',
'Pacific/Wallis': 'UTC+12'
}
| immanetize/nikola | nikola/packages/tzlocal/windows_tz.py | Python | mit | 26,452 | 0 |
# -----------------------------------------------------------------------------
# Download data:
# - Browser:
# http://midas3.kitware.com/midas/folder/10409 => VisibleMale/vm_head_frozenct.mha
# - Terminal
# curl "http://midas3.kitware.com/midas/download?folders=&items=235235" -o vm_head_frozenct.mha
# -----------------------------------------------------------------------------
from vtk import *
from vtk.web.query_data_model import *
from vtk.web.dataset_builder import *
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
dataset_destination_path = '/Users/seb/Desktop/vm_head_frozenct_vi_%s_%s_%s'
file_path = '/Users/seb/Downloads/vm_head_frozenct.mha'
field = 'MetaImage'
fieldRange = [0.0, 4095.0]
nbSteps = 4
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
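# Rebuild the scalar opacity transfer function as a "tent" around `center`:
# opacity is 0 outside [center - halfSpread, center + halfSpread], rises to 1
# at `center`, and is clamped to the data range at both ends.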
def updatePieceWise(pwf, dataRange, center, halfSpread):
    pwf.RemoveAllPoints()
    if (center - halfSpread) <= dataRange[0]:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center, 1.0)
    else:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center - halfSpread, 0.0)
        pwf.AddPoint(center, 1.0)
    if (center + halfSpread) >= dataRange[1]:
        pwf.AddPoint(dataRange[1], 0.0)
    else:
        pwf.AddPoint(center + halfSpread, 0.0)
        pwf.AddPoint(dataRange[1], 0.0)
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
reader = vtkMetaImageReader()
reader.SetFileName(file_path)
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(reader.GetOutputPort())
mapper.RenderToImageOn()
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(fieldRange[0], 1.0, 1.0, 1.0)
colorFunction.AddRGBPoint(fieldRange[1], 1.0, 1.0, 1.0)
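# Split the scalar range into nbSteps equal bands: halfSpread is half of one
# band's width and each band center drives one rendered opacity layer below.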
halfSpread = (fieldRange[1] - fieldRange[0]) / float(2*nbSteps)
centers = [ fieldRange[0] + halfSpread*float(2*i+1) for i in range(nbSteps)]
scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)
volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)
window = vtkRenderWindow()
window.SetSize(499, 400)
renderer = vtkRenderer()
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()
# Camera setting
camera = {
'position': [-0.264, -890.168, -135.0],
'focalPoint': [-0.264, -30.264, -135.0],
'viewUp': [0,0,1]
}
update_camera(renderer, camera)
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
vcdsb = SortedCompositeDataSetBuilder(dataset_destination_path % (nbSteps, halfSpread, window.GetSize()[0]), {'type': 'spherical', 'phi': [0], 'theta': [0]})
idx = 0
vcdsb.start(window, renderer)
for center in centers:
idx += 1
updatePieceWise(scalarOpacity, fieldRange, center, halfSpread)
# Capture layer
vcdsb.activateLayer(field, center)
# Write data
vcdsb.writeData(mapper)
vcdsb.stop()
| Kitware/arctic-viewer | scripts/examples/vtk/medical/head-ct-volume.py | Python | bsd-3-clause | 3,622 | 0.002485 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import sys
import json
from prjxray.xjson import pprint
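# Usage: `python xjson.py <file.json>` pretty-prints the JSON document to
# stdout; with no argument the module's doctests are run instead.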
if __name__ == "__main__":
if len(sys.argv) == 1:
import doctest
doctest.testmod()
else:
assert len(sys.argv) == 2
d = json.load(open(sys.argv[1]))
pprint(sys.stdout, d)
| SymbiFlow/prjxray | utils/xjson.py | Python | isc | 558 | 0 |
#!/usr/bin/env python2
# Copyright (C) 2013:
# Gabes Jean, naparuba@gmail.com
# Pasche Sebastien, sebastien.pasche@leshop.ch
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
'''
This script checks NTP (or chrony) time synchronization over ssh without
having an agent on the other side
'''
import os
import sys
import optparse
# Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"
DEFAULT_WARNING = '10'
DEFAULT_CRITICAL = '60'
NTPQ_PATH=r"""ntpq"""
DEFAULT_DELAY_WARNING = '0.100' # 100 ms
DEFAULT_DELAY_CRITICAL = '0.150' # 150 ms
DEFAULT_OFFSET_WARNING = '0.0025' # 2.5 ms
DEFAULT_OFFSET_CRITICAL = '0.005' # 5ms
def get_ntp_sync(client):
# We are looking for a line like
# remote refid st t when poll reach delay offset jitter
#==============================================================================
# 127.127.1.0 .LOCL. 10 l 53 64 377 0.000 0.000 0.001
# *blabla blabla 3 u 909 1024 377 0.366 -3.200 5.268
#raw = r"""/usr/sbin/ntpq -p"""
raw = "%s -p" % NTPQ_PATH
stdin, stdout, stderr = client.exec_command("export LC_LANG=C && unset LANG && export PATH=$PATH:/usr/bin:/usr/sbin && %s" % raw)
errs = ''.join(l for l in stderr)
if errs:
print "Error: %s" % errs.strip()
client.close()
sys.exit(2)
ref_delay = None
for line in stdout:
line = line.strip()
# We want the line of the reference only
if not line or not line.startswith('*'):
continue
tmp = [e for e in line.split(' ') if e]
ref_delay = abs(float(tmp[8])) / 1000
# Before return, close the client
client.close()
return ref_delay
def get_chrony_sync(client):
# We are looking for a line like
#Reference ID : 195.141.190.190 (time.sunrise.net)
#Stratum : 3
#Ref time (UTC) : Fri Jun 28 09:03:22 2013
#System time : 0.000147811 seconds fast of NTP time
#Last offset : 0.000177244 seconds
#RMS offset : 0.000363876 seconds
#Frequency : 26.497 ppm slow
#Residual freq : 0.024 ppm
#Skew : 0.146 ppm
#Root delay : 0.008953 seconds
#Root dispersion : 0.027807 seconds
#Update interval : 1024.1 seconds
#Leap status : Normal
raw = r"""chronyc tracking"""
stdin, stdout, stderr = client.exec_command("export LC_LANG=C && unset LANG && %s" % raw)
errs = ''.join(l for l in stderr)
if errs:
print "Error: %s" % errs.strip()
client.close()
sys.exit(2)
delay = offset = None
for line in stdout:
line = line.strip()
tmp = line.split(':')
if len(tmp) != 2:
continue
if line.startswith('RMS offset'):
offset = float(tmp[1].strip().split(' ')[0])
if line.startswith('Root delay'):
delay = float(tmp[1].strip().split(' ')[0])
# Before return, close the client
client.close()
return delay, offset
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-H', '--hostname',
dest="hostname", help='Hostname to connect to')
parser.add_option('-p', '--port',
dest="port", type="int", default=22,
help='SSH port to connect to. Default : 22')
parser.add_option('-i', '--ssh-key',
dest="ssh_key_file",
help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('-u', '--user',
dest="user", help='remote use to use. By default shinken.')
parser.add_option('-P', '--passphrase',
dest="passphrase", help='SSH key passphrase. By default will use void')
parser.add_option('-w', '--warning',
dest="warning",
                  help='Warning threshold: a single ntp delay value, like 10, or a '
                  'delay,offset pair for chrony, like 0.100,0.0025')
parser.add_option('-c', '--critical',
dest="critical",
                  help='Critical threshold: a single ntp delay value, like 60, or a '
                  'delay,offset pair for chrony, like 0.150,0.005')
parser.add_option('-C', '--chrony', action='store_true',
dest="chrony", help='check Chrony instead of ntpd')
parser.add_option('-n', '--ntpq',
dest="ntpq", help="remote ntpq bianry path")
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
port = opts.port
hostname = opts.hostname or ''
ntpq = opts.ntpq
if ntpq:
NTPQ_PATH=ntpq
ssh_key_file = opts.ssh_key_file or os.path.expanduser('~/.ssh/id_rsa')
user = opts.user or 'shinken'
passphrase = opts.passphrase or ''
chrony = opts.chrony
if not chrony:
# Try to get numeic warning/critical values
s_warning = opts.warning or DEFAULT_WARNING
s_critical = opts.critical or DEFAULT_CRITICAL
warning, critical = schecks.get_warn_crit(s_warning, s_critical)
else:
if opts.warning:
warning_delay = float(opts.warning.split(',')[0])
warning_offset = float(opts.warning.split(',')[1])
else:
warning_delay = float(DEFAULT_DELAY_WARNING)
warning_offset = float(DEFAULT_OFFSET_WARNING)
if opts.critical:
critical_delay = float(opts.critical.split(',')[0])
critical_offset = float(opts.critical.split(',')[1])
else:
critical_delay = float(DEFAULT_DELAY_CRITICAL)
critical_offset = float(DEFAULT_OFFSET_CRITICAL)
# Ok now connect, and try to get values for memory
client = schecks.connect(hostname, port, ssh_key_file, passphrase, user)
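    # Nagios plugin exit code convention: 0 = OK, 1 = WARNING, 2 = CRITICAL.
    # The threshold checks below print a status line plus perfdata and exit
    # with the matching code.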
if not chrony:
ref_delay = get_ntp_sync(client)
if ref_delay is None:
print "Warning : There is no sync ntp server"
sys.exit(1)
perfdata = "delay=%.2fs;%.2fs;%.2fs;;" % (ref_delay, warning, critical)
if ref_delay > critical:
print "Critical: ntp delay is %.2fs | %s" %(ref_delay, perfdata)
sys.exit(2)
        if ref_delay > warning:
            print "Warning: ntp delay is %.2fs | %s" %(ref_delay, perfdata)
            sys.exit(1)
print "OK: ntp delay is %.2fs | %s" %(ref_delay, perfdata)
sys.exit(0)
else:
delay, offset = get_chrony_sync(client)
if delay is None or offset is None:
print "Warning : cannot get delay or offset value"
sys.exit(1)
perfdata = "delay=%.2fs;%.2fs;%.2fs;;" % (delay, warning_delay, critical_delay)
perfdata += "offset=%.4fs;%.4fs;%.4fs;;" % (offset, warning_offset, critical_offset)
if delay > critical_delay:
print "Critical: ntp/chrony delay is %.2fs | %s" % (delay, perfdata)
sys.exit(2)
if offset > critical_offset:
print "Critical: ntp/chrony offset is %.4fs | %s" % (offset, perfdata)
sys.exit(2)
        if delay > warning_delay:
            print "Warning: ntp/chrony delay is %.2fs | %s" % (delay, perfdata)
            sys.exit(1)
        if offset > warning_offset:
            print "Warning: ntp/chrony offset is %.4fs | %s" % (offset, perfdata)
            sys.exit(1)
print "OK: ntp delay is %.2fs offset is %.4fs | %s" %(delay, offset, perfdata)
sys.exit(0)
| naparuba/check-linux-by-ssh | check_ntp_sync_by_ssh.py | Python | mit | 8,784 | 0.006717 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from abc import ABCMeta
import csv
from performance_tools.exceptions import ProgressBarException, ElasticsearchException
from performance_tools.utils.progress_bar import create_progress_bar
class BaseURLFlowBackend(object):
"""Collect URL flow from backend. URL Flow: Referrer, Request, Time.
It's necessary to implement extract_url_from_result and __iter__ methods.
"""
__metaclass__ = ABCMeta
def __init__(self):
self._total_hits = 0
def extract_url_from_result(self, result, regex=None):
"""Extract origin url and destination url for each entry in result and construct a list with them.
:param result: results obtained from backend in each iteration.
:type result: object
:param regex: Regular expression to normalize id's in URL.
:type regex: re
:return: List of origin urls and destination urls.
:rtype: list
"""
raise NotImplementedError
def to_csv(self, filename, regex=None, verbose=2):
"""Save results as a CSV file.
:param filename: CSV output file.
:type filename: str
:raise: ValueError if not found any result.
"""
progress = None
try:
with open(filename, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['Referrer', 'Request', 'Time'])
count = 0
for result in self:
# Create progress bar or down verbose level
if verbose == 2 and progress is None:
try:
progress = create_progress_bar(self._total_hits, 'Extract URLs', 'url')
except ProgressBarException:
verbose = 1
# Write results to csv
rows = self.extract_url_from_result(result, regex)
writer.writerows(rows)
# Update progress
count += len(rows)
if verbose == 2:
progress.update(count if count < self._total_hits else self._total_hits)
elif verbose == 1:
print "{:d}/{:d} ({:d}%)".format(count, self._total_hits, count * 100 / self._total_hits)
except ZeroDivisionError:
raise ElasticsearchException("Search doesn't return any result")
except KeyError:
raise ElasticsearchException("Invalid result")
def __iter__(self):
"""Iterate over each result.
"""
raise NotImplementedError
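# --- Illustrative sketch (not part of the original module) ---
# A minimal in-memory backend showing how the two abstract hooks cooperate with
# to_csv(); the batch layout used here is an assumption for demonstration only.
class InMemoryURLFlowBackend(BaseURLFlowBackend):
    """Toy backend yielding pre-built batches of (referrer, request, time) rows."""
    def __init__(self, batches):
        super(InMemoryURLFlowBackend, self).__init__()
        self._batches = batches
        self._total_hits = sum(len(batch) for batch in batches)
    def extract_url_from_result(self, result, regex=None):
        # Each entry is already a (referrer, request, time) triple.
        return [(referrer, request, time) for referrer, request, time in result]
    def __iter__(self):
        return iter(self._batches)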
| PeRDy/performance-tools | performance_tools/urls_flow/backends/base.py | Python | gpl-2.0 | 2,681 | 0.001865 |
"""
Python script 'process_NCEI_03_prcp_180d.py'
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
matt.e.garcia@gmail.com
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to matt.e.garcia@gmail.com
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Temporal calculation of PRCP 180-day accumulation
DEPENDENCIES: h5py, numpy
'process_NCEI_03_aux' module has its own requirements
USAGE: '$ python process_NCEI_03_prcp_180d.py NCEI_WLS_1983 1983 ./grids'
INPUT: copied '.h5' file from process_NCEI_03_preprocess.py
(with the naming convention 'grids/[YYYYMMDD]_NCEI_grids_2.h5')
OUTPUT: updated daily '.h5' file with new accumulation grid
(with the naming convention 'grids/[YYYYMMDD]_NCEI_grids_2.h5')
year-end '.h5' and '.pickle' files with rolling accounted variable
"""
import sys
import datetime
import glob
import h5py as hdf
import numpy as np
from process_NCEI_03_aux import get_stn_lists, write_stn_lists, \
write_to_file, cube_sum
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
message(' ')
message('process_NCEI_03_prcp_180d.py started at %s' %
datetime.datetime.now().isoformat())
message(' ')
#
if len(sys.argv) < 4:
    message('input warning: no input directory indicated, using ./grids')
path = './grids'
else:
path = sys.argv[3]
#
if len(sys.argv) < 3:
message('input error: need year to process')
sys.exit(1)
else:
this_year = int(sys.argv[2])
#
if len(sys.argv) < 2:
message('input error: need prefix for weather data h5 file')
sys.exit(1)
else:
NCEIfname = sys.argv[1]
h5infname = '%s/../data/%s_processed.h5' % (path, NCEIfname)
#
message('reading dates information from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
all_dates = np.copy(h5infile['dates'])
message('- information for %d total dates found' % len(all_dates))
dates = sorted([j for j in all_dates if int(j // 1E4) == this_year])
message('- processing %d dates in %d' % (len(dates), this_year))
message(' ')
#
prev_year = this_year - 1
vars_files = sorted(glob.glob('%s/*_year_end_prcp_180d.h5' % path))
use_vars_file = False
if len(vars_files) > 0:
for vars_file in vars_files:
if str(prev_year) in vars_file:
use_vars_file = True
varfname = vars_file
break
#
# if rolling accounting variable files exist to be carried over
# from previous year
if use_vars_file:
message('extracting prcp_180d datacube from %s' % varfname)
with hdf.File(varfname, 'r') as h5infile:
nrows = np.copy(h5infile['nrows'])
ncols = np.copy(h5infile['ncols'])
prcp_180d = np.copy(h5infile['prcp_180d'])
message('extracting station lists')
prcp_180d_stns = get_stn_lists(path, prev_year, 'prcp_180d_stns')
else: # otherwise, initialize the variable space(s)
h5infname = '%s/%d_NCEI_grids_2.h5' % (path, dates[0])
message('extracting grid information from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
nrows = np.copy(h5infile['grid/nrows'])
ncols = np.copy(h5infile['grid/ncols'])
message('establishing prcp_180d datacube')
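    # datacube with one nrows x ncols layer for each day of the 180-day accumulation window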
prcp_180d = np.zeros((180, nrows, ncols))
prcp_180d_stns = []
message(' ')
#
for date in dates:
h5infname = '%s/%d_NCEI_grids_2.h5' % (path, date)
message('extracting PRCP grid from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
prcp_stns = np.copy(h5infile['stns/prcp_stns'])
prcp = np.copy(h5infile['grid_prcp'])
#
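    # decompose the integer date YYYYMMDD into year, month, and day components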
year = date // 10000
month = (date - (year * 10000)) // 100
day = date - (year * 10000) - (month * 100)
#
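    # cube_sum (from process_NCEI_03_aux) updates the rolling 180-day datacube with
    # today's PRCP grid and returns the new running-total grid along with the
    # corresponding station lists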
grid_prcp_180d, prcp_180d_stns_all, prcp_180d, prcp_180d_stns = \
cube_sum(180, prcp_180d, prcp, prcp_180d_stns, prcp_stns)
message('- calculated updated 180-day running precipitation total, \
mean %.1f' % np.mean(grid_prcp_180d))
#
h5outfname = '%s/%d_NCEI_grids_2.h5' % (path, date)
message('saving grids to %s' % h5outfname)
with hdf.File(h5outfname, 'r+') as h5outfile:
del h5outfile['meta/last_updated']
h5outfile.create_dataset('meta/last_updated',
data=datetime.datetime.now().isoformat())
del h5outfile['meta/at']
outstr = 'prcp_180d'
h5outfile.create_dataset('meta/at', data=outstr)
write_to_file(h5outfile, 'prcp_180d_sum', grid_prcp_180d,
'prcp_180d_stns', prcp_180d_stns_all)
message(' ')
#
# save rolling accounting variable for next year's run
varfname = '%s/%d_year_end_prcp_180d.h5' % (path, this_year)
message('saving variable datacube to %s' % varfname)
with hdf.File(varfname, 'w') as h5outfile:
h5outfile.create_dataset('nrows', data=nrows)
h5outfile.create_dataset('ncols', data=ncols)
h5outfile.create_dataset('prcp_180d', data=prcp_180d,
dtype=np.float32, compression='gzip')
message('saving station lists')
write_stn_lists(path, this_year, 'prcp_180d_stns', prcp_180d_stns)
#
message('process_NCEI_03_prcp_180d.py completed at %s' %
datetime.datetime.now().isoformat())
message(' ')
sys.exit(0)
# end process_NCEI_03_prcp_180d.py
| megarcia/GT16_JGRA | source/process_NCEI_03_prcp_180d.py | Python | gpl-3.0 | 5,554 | 0 |
# Copyright 2012 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | leopoul/mupy | muparse/__init__.py | Python | apache-2.0 | 582 | 0.001718 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs linker tests on a particular device."""
import logging
import os.path
import sys
import traceback
from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.linker import test_case
from pylib.utils import apk_helper
# Name of the Android package to install for this to work.
_PACKAGE_NAME = 'ChromiumLinkerTest'
class LinkerExceptionTestResult(base_test_result.BaseTestResult):
"""Test result corresponding to a python exception in a host-custom test."""
def __init__(self, test_name, exc_info):
"""Constructs a LinkerExceptionTestResult object.
Args:
test_name: name of the test which raised an exception.
exc_info: exception info, ostensibly from sys.exc_info().
"""
exc_type, exc_value, exc_traceback = exc_info
trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
log_msg = 'Exception:\n' + trace_info
super(LinkerExceptionTestResult, self).__init__(
test_name,
base_test_result.ResultType.FAIL,
log = "%s %s" % (exc_type, log_msg))
class LinkerTestRunner(base_test_runner.BaseTestRunner):
"""Orchestrates running a set of linker tests.
Any Python exceptions in the tests are caught and translated into a failed
result, rather than being re-raised on the main thread.
"""
#override
def __init__(self, device, tool, push_deps, cleanup_test_files):
"""Creates a new LinkerTestRunner.
Args:
device: Attached android device.
tool: Name of the Valgrind tool.
push_deps: If True, push all dependencies to the device.
cleanup_test_files: Whether or not to cleanup test files on device.
"""
super(LinkerTestRunner, self).__init__(device, tool, push_deps,
cleanup_test_files)
#override
def InstallTestPackage(self):
apk_path = os.path.join(
constants.GetOutDirectory(), 'apks', '%s.apk' % _PACKAGE_NAME)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
package_name = apk_helper.GetPackageName(apk_path)
self.adb.ManagedInstall(apk_path, package_name)
#override
def RunTest(self, test):
"""Sets up and runs a test case.
Args:
test: An object which is ostensibly a subclass of LinkerTestCaseBase.
Returns:
A TestRunResults object which contains the result produced by the test
and, in the case of a failure, the test that should be retried.
"""
assert isinstance(test, test_case.LinkerTestCaseBase)
try:
results = test.Run(self.device)
except Exception:
logging.exception('Caught exception while trying to run test: ' +
test.tagged_name)
exc_info = sys.exc_info()
results = base_test_result.TestRunResults()
results.AddResult(LinkerExceptionTestResult(
test.tagged_name, exc_info))
if not results.DidRunPass():
return results, test
else:
return results, None
| patrickm/chromium.src | build/android/pylib/linker/test_runner.py | Python | bsd-3-clause | 3,271 | 0.007031 |
def linear_search(lst,size,value):
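    # sequentially scan lst; return the index of the first element equal to value, or -1 if absent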
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
| EverythingAbout/Python | Searches/linear_search.py | Python | mit | 775 | 0.009032 |
import pytest
import numpy as np
import noisily as ns
# FIXME This has got to be an abuse of fixtures, right?
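# Transposing np.indices(shape) moves the coordinate components to the last axis,
# so each fixture below yields one integer index vector per grid point.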
@pytest.fixture(scope='module', params=[(1, 1), (37, 57), (128, 128)])
def indices2D(request):
shape = request.param
return np.transpose(np.indices(shape))
@pytest.fixture(scope='module', params=[(1, 1, 1), (29, 13, 31), (64, 64, 64)])
def indices3D(request):
shape = request.param
return np.transpose(np.indices(shape))
@pytest.fixture(scope='module', params=[(1, 1, 1, 1), (7, 11, 17, 13), (32, 32, 32, 32)])
def indices4D(request):
shape = request.param
return np.transpose(np.indices(shape))
@pytest.fixture(scope='module', params=[ns.perlin2D, ns.value2D, ns.open_simplex2D, ns.cell2D_range, ns.cell2D_range_inv, ns.cell2D_value, ns.cell2D_manhattan, ns.cell2D_manhattan_inv, ns.cell2D_manhattan_value])
def noise2D(request):
return request.param
@pytest.fixture(scope='module', params=[ns.perlin3D, ns.value3D, ns.open_simplex3D, ns.cell3D_range, ns.cell3D_range_inv, ns.cell3D_value, ns.cell3D_manhattan, ns.cell3D_manhattan_inv, ns.cell3D_manhattan_value])
def noise3D(request):
return request.param
@pytest.fixture(scope='module', params=[ns.perlin4D, ns.value4D, ns.open_simplex4D, ns.cell4D_range, ns.cell4D_range_inv, ns.cell4D_value, ns.cell4D_manhattan, ns.cell4D_manhattan_inv, ns.cell4D_manhattan_value])
def noise4D(request):
return request.param
@pytest.fixture(scope='module', params=[{'seed': 123}, {'period': 64}, {'seed': 12345, 'period': 16}])
def generator2D(request, noise2D):
return ns.generator(noise2D, **request.param)
@pytest.fixture(scope='module', params=[{'seed': 123}, {'period': 64}, {'seed': 12345, 'period': 16}])
def generator3D(request, noise3D):
return ns.generator(noise3D, **request.param)
@pytest.fixture(scope='module', params=[{'seed': 123}, {'period': 64}, {'seed': 12345, 'period': 16}])
def generator4D(request, noise4D):
return ns.generator(noise4D, **request.param)
def test_output2D(generator2D, indices2D):
output = generator2D(indices2D)
assert output.shape == indices2D.shape[:-1]
assert output.size == indices2D.size // 2
assert np.array_equal(output, generator2D(indices2D))
def test_output3D(generator3D, indices3D):
output = generator3D(indices3D)
assert output.shape == indices3D.shape[:-1]
assert output.size == indices3D.size // 3
assert np.array_equal(output, generator3D(indices3D))
def test_output4D(generator4D, indices4D):
output = generator4D(indices4D)
assert output.shape == indices4D.shape[:-1]
assert output.size == indices4D.size // 4
assert np.array_equal(output, generator4D(indices4D))
| tocubed/noisily | tests/test_noise.py | Python | apache-2.0 | 2,684 | 0.007079 |