code | repo_name | path | language | license | size
stringlengths 3 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int64 3 to 1.05M
---|---|---|---|---|---
#!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def pretty_mean_std(data):
return uncertain_number_string(my_mean(data), my_means_std(data))
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
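    # Start a new data group whenever the packing fraction or particle count changes
    # from the previous file (assumes the input files are given in sorted order).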
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
"""
stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
])
| macioosch/dynamo-hard-spheres-sim | to_csv_pretty.py | Python | gpl-3.0 | 3,184 |
#!/usr/bin/env python
"""Module for making tests on small molecules in GPAW.
One molecule test to rule them all
One molecule test to run them
One molecule test to save them all
And on the webpage plot them (implementation pending)
"""
from gpaw import GPAW, ConvergenceError
from ase.structure import molecule
from ase.data.g2_1 import atom_names as atoms
from ase.data.g2_1 import molecule_names as g1
from ase.utils.molecule_test import MoleculeTest, EnergyTest, BondLengthTest,\
BatchTest
class GPAWMoleculeTest(MoleculeTest):
def __init__(self, name='gpaw', vacuum=6.0, h=0.17, xc='LDA',
setups='paw', mode='fd', basis=None,
exceptions=(RuntimeError, ConvergenceError)):
MoleculeTest.__init__(self, name=name, vacuum=vacuum,
exceptions=exceptions)
if basis is None:
basis = {}
self.basis = basis
self.mode = mode
self.setups = setups
self.h = h
self.xc = xc
self.bad_formulas = ['NO', 'ClO', 'CH']
def setup_calculator(self, system, formula):
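        # Single free atoms get Hund's-rule occupations (hund=True); molecules do not.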
hund = (len(system) == 1)
cell = system.get_cell()
h = self.h
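        # Round each cell vector to the nearest multiple of 4*h so the grid spacing
        # h divides the box into a whole (multiple-of-4) number of grid points.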
system.set_cell((cell / (4 * h)).round() * 4 * h)
system.center()
calc = GPAW(xc=self.xc,
h=h,
hund=hund,
fixmom=True,
setups=self.setups,
txt=self.get_filename(formula, extension='txt'),
mode=self.mode,
basis=self.basis
)
# Special cases
if formula == 'BeH':
calc.set(idiotproof=False)
#calc.initialize(system)
#calc.nuclei[0].f_si = [(1, 0, 0.5, 0, 0),
# (0.5, 0, 0, 0, 0)]
if formula in self.bad_formulas:
system.positions[:, 1] += h * 1.5
return calc
class GPAWEnergyTest(EnergyTest, GPAWMoleculeTest):
pass
class GPAWBondLengthTest(BondLengthTest, GPAWMoleculeTest):
pass
def main():
formulas = g1 + atoms
dimers = [formula for formula in g1 if len(molecule(formula)) == 2]
kwargs = dict(vacuum=3.0,
mode='lcao',
basis='dzp')
etest = BatchTest(GPAWEnergyTest('test/energy', **kwargs))
btest = BatchTest(GPAWBondLengthTest('test/bonds', **kwargs))
etest.run(formulas)
btest.run(dimers)
if __name__ == '__main__':
main()
| ajylee/gpaw-rtxs | gpaw/testing/molecule_test.py | Python | gpl-3.0 | 2,509 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.utils.importing import cached_load
def get_basket_order_creator(request=None):
return cached_load("SHUUP_BASKET_ORDER_CREATOR_SPEC")(request=request)
def get_basket_view():
view = cached_load("SHUUP_BASKET_VIEW_SPEC")
if hasattr(view, "as_view"): # pragma: no branch
view = view.as_view()
return view
def get_basket_command_dispatcher(request):
"""
:type request: django.http.request.HttpRequest
:rtype: shuup.front.basket.command_dispatcher.BasketCommandDispatcher
"""
return cached_load("SHUUP_BASKET_COMMAND_DISPATCHER_SPEC")(request=request)
def get_basket(request):
"""
:type request: django.http.request.HttpRequest
:rtype: shuup.front.basket.objects.BaseBasket
"""
if not hasattr(request, "basket"):
basket_class = cached_load("SHUUP_BASKET_CLASS_SPEC")
# This is a little weird in that this is likely to be called from `BasketMiddleware`,
# which would do the following assignment anyway. However, in case it's _not_ called
# from there, for some reason, we want to still be able to cache the basket.
request.basket = basket_class(request)
return request.basket
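# Illustrative caller (hypothetical view; as the comment above notes, BasketMiddleware
# normally performs this assignment):
#
#     def cart_summary(request):
#         basket = get_basket(request)
#         ...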
| hrayr-artunyan/shuup | shuup/front/basket/__init__.py | Python | agpl-3.0 | 1,442 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import time
from quodlibet import config
from quodlibet.formats import AudioFile
from quodlibet.plugins import Plugin
from quodlibet.plugins.query import QueryPlugin, QUERY_HANDLER
from quodlibet.query import Query, QueryType
from quodlibet.query import _match as match
from senf import fsnative
from tests import TestCase, skip
class TQuery_is_valid(TestCase):
def test_re(self):
self.failUnless(Query('t = /an re/').valid)
self.failUnless(Query('t = /an re/c').valid)
self.failUnless(Query('t = /an\\/re/').valid)
self.failIf(Query('t = /an/re/').valid)
self.failUnless(Query('t = /aaa/lsic').valid)
self.failIf(Query('t = /aaa/icslx').valid)
def test_str(self):
self.failUnless(Query('t = "a str"').valid)
self.failUnless(Query('t = "a str"c').valid)
self.failUnless(Query('t = "a\\"str"').valid)
# there's no equivalent failure for strings since 'str"' would be
# read as a set of modifiers
def test_tag(self):
self.failUnless(Query('t = tag').valid)
self.failUnless(Query('t = !tag').valid)
self.failUnless(Query('t = |(tag, bar)').valid)
self.failUnless(Query('t = a"tag"').valid)
self.failIf(Query('t = a, tag').valid)
self.failUnless(Query('tag with spaces = tag').valid)
def test_empty(self):
self.failUnless(Query('').valid)
self.failUnless(Query('').is_parsable)
self.failUnless(Query(''))
def test_emptylist(self):
self.failIf(Query("a = &()").valid)
self.failIf(Query("a = |()").valid)
self.failIf(Query("|()").valid)
self.failIf(Query("&()").valid)
def test_nonsense(self):
self.failIf(Query('a string').valid)
self.failIf(Query('t = #(a > b)').valid)
self.failIf(Query("=a= = /b/").valid)
self.failIf(Query("a = &(/b//").valid)
self.failIf(Query("(a = &(/b//)").valid)
def test_trailing(self):
self.failIf(Query('t = /an re/)').valid)
self.failIf(Query('|(a, b = /a/, c, d = /q/) woo').valid)
def test_not(self):
self.failUnless(Query('t = !/a/').valid)
self.failUnless(Query('t = !!/a/').valid)
self.failUnless(Query('!t = "a"').valid)
self.failUnless(Query('!!t = "a"').valid)
self.failUnless(Query('t = !|(/a/, !"b")').valid)
self.failUnless(Query('t = !!|(/a/, !"b")').valid)
self.failUnless(Query('!|(t = /a/)').valid)
def test_taglist(self):
self.failUnless(Query('a, b = /a/').valid)
self.failUnless(Query('a, b, c = |(/a/)').valid)
self.failUnless(Query('|(a, b = /a/, c, d = /q/)').valid)
self.failIf(Query('a = /a/, b').valid)
def test_andor(self):
self.failUnless(Query('a = |(/a/, /b/)').valid)
self.failUnless(Query('a = |(/b/)').valid)
self.failUnless(Query('|(a = /b/, c = /d/)').valid)
self.failUnless(Query('a = &(/a/, /b/)').valid)
self.failUnless(Query('a = &(/b/)').valid)
self.failUnless(Query('&(a = /b/, c = /d/)').valid)
def test_numcmp(self):
self.failUnless(Query("#(t < 3)").valid)
self.failUnless(Query("#(t <= 3)").valid)
self.failUnless(Query("#(t > 3)").valid)
self.failUnless(Query("#(t >= 3)").valid)
self.failUnless(Query("#(t = 3)").valid)
self.failUnless(Query("#(t != 3)").valid)
self.failIf(Query("#(t !> 3)").valid)
self.failIf(Query("#(t >> 3)").valid)
def test_numcmp_func(self):
self.assertTrue(Query("#(t:min < 3)").valid)
self.assertTrue(
Query("&(#(playcount:min = 0), #(added < 1 month ago))").valid)
def test_trinary(self):
self.failUnless(Query("#(2 < t < 3)").valid)
self.failUnless(Query("#(2 >= t > 3)").valid)
# useless, but valid
self.failUnless(Query("#(5 > t = 2)").valid)
def test_list(self):
self.failUnless(Query("#(t < 3, t > 9)").valid)
self.failUnless(Query("t = &(/a/, /b/)").valid)
self.failUnless(Query("s, t = |(/a/, /b/)").valid)
self.failUnless(Query("|(t = /a/, s = /b/)").valid)
def test_nesting(self):
self.failUnless(Query("|(s, t = &(/a/, /b/),!#(2 > q > 3))").valid)
class FakeQueryPlugin(QueryPlugin):
PLUGIN_NAME = "name"
def search(self, song, body):
return body and "DIE" not in body.upper()
def test_extension(self):
plugin = Plugin(self.FakeQueryPlugin)
QUERY_HANDLER.plugin_enable(plugin)
try:
assert Query("@(name)").valid
assert not Query("@(name: DIE)").search("foo")
assert Query("@(name: extension body)").valid
assert Query("@(name: body (with (nested) parens))").valid
assert Query(r"@(name: body \\ with \) escapes)").valid
finally:
QUERY_HANDLER.plugin_disable(plugin)
def test_extension_search(self):
plugin = Plugin(self.FakeQueryPlugin)
QUERY_HANDLER.plugin_enable(plugin)
song = AudioFile({"~filename": "/dev/null"})
try:
assert Query("@(name: LIVE)").search(song)
assert not Query("@(name: DIE)").search(song)
finally:
QUERY_HANDLER.plugin_disable(plugin)
def test_invalid_extension(self):
assert not Query("@(name)").valid, "Unregistered plugin is valid"
assert not Query("@()").valid
assert not Query(r"@(invalid %name!\\)").valid
assert not Query("@(name: mismatched ( parenthesis)").valid
assert not Query(r"@(\()").valid
assert not Query("@(name:unclosed body").valid
assert not Query("@ )").valid
def test_numexpr(self):
self.failUnless(Query("#(t < 3*4)").valid)
self.failUnless(Query("#(t * (1+r) < 7)").valid)
self.failUnless(Query("#(0 = t)").valid)
self.failUnless(Query("#(t < r < 9)").valid)
self.failUnless(Query("#((t-9)*r < -(6*2) = g*g-1)").valid)
self.failUnless(Query("#(t + 1 + 2 + -4 * 9 > g*(r/4 + 6))").valid)
self.failUnless(Query("#(date < 2010-4)").valid)
self.failUnless(Query("#(date < 2010 - 4)").valid)
self.failUnless(Query("#(date > 0000)").valid)
self.failUnless(Query("#(date > 00004)").valid)
self.failUnless(Query("#(t > 3 minutes)").valid)
self.failUnless(Query("#(added > today)").valid)
self.failUnless(Query("#(length < 5:00)").valid)
self.failUnless(Query("#(filesize > 5M)").valid)
self.failUnless(Query("#(added < 7 days ago)").valid)
self.failIf(Query("#(3*4)").valid)
self.failIf(Query("#(t = 3 + )").valid)
self.failIf(Query("#(t = -)").valid)
self.failIf(Query("#(-4 <)").valid)
self.failIf(Query("#(t < ()").valid)
self.failIf(Query("#((t +) - 1 > 8)").valid)
self.failIf(Query("#(t += 8)").valid)
class TQuery(TestCase):
def setUp(self):
config.init()
self.s1 = AudioFile(
{"album": u"I Hate: Tests", "artist": u"piman", "title": u"Quuxly",
"version": u"cake mix",
"~filename": fsnative(u"/dir1/foobar.ogg"),
"~#length": 224, "~#skipcount": 13, "~#playcount": 24,
"date": u"2007-05-24"})
self.s2 = AudioFile(
{"album": u"Foo the Bar", "artist": u"mu", "title": u"Rockin' Out",
"~filename": fsnative(u"/dir2/something.mp3"),
"tracknumber": u"12/15"})
self.s3 = AudioFile({
"artist": u"piman\nmu",
"~filename": fsnative(u"/test/\xf6\xe4\xfc/fo\xfc.ogg"),
"~mountpoint": fsnative(u"/bla/\xf6\xe4\xfc/fo\xfc"),
})
self.s4 = AudioFile({"title": u"Ångström", "utf8": u"Ångström"})
self.s5 = AudioFile({"title": u"oh&blahhh", "artist": u"!ohno"})
def tearDown(self):
config.quit()
def test_basic_tag(self):
assert Query("album=foo").search(self.s2)
assert not Query("album=.").search(self.s2)
assert Query("album=/./").search(self.s2)
def test_inequality(self):
self.failUnless(Query("album!=foo").search(self.s1))
self.failIf(Query("album!=foo").search(self.s2))
@skip("Enable for basic benchmarking of Query")
def test_inequality_performance(self):
t = time.time()
for i in range(500):
# Native assert is a bit lighter...
assert Query("album!=foo the bar").search(self.s1)
assert Query("album=foo the bar").search(self.s2)
assert Query("foo the bar").search(self.s2)
assert not Query("foo the bar").search(self.s1)
us = (time.time() - t) * 1000000 / ((i + 1) * 4)
print("Blended Query searches average %.0f μs" % us)
@skip("Enable for basic benchmarking of Query")
def test_inequality_equalish_performance(self):
t0 = time.time()
repeats = 2000
for i in range(repeats):
assert Query("album!=foo the bar").search(self.s1)
ineq_time = (time.time() - t0)
t1 = time.time()
for i in range(repeats):
assert Query("album=!foo the bar").search(self.s1)
not_val_time = (time.time() - t1)
self.assertAlmostEqual(ineq_time, not_val_time, places=1)
def test_repr(self):
query = Query("foo = bar", [])
self.assertEqual(
repr(query).replace("u'", "'"),
"<Query string='foo = bar' type=VALID star=[]>")
query = Query("bar", ["foo"])
self.assertEqual(
repr(query).replace("u'", "'"),
"<Query string='&(/bar/d)' type=TEXT star=['foo']>")
def test_2007_07_27_synth_search(self):
song = AudioFile({"~filename": fsnative(u"foo/64K/bar.ogg")})
query = Query("~dirname = !64K")
self.failIf(query.search(song), "%r, %r" % (query, song))
def test_empty(self):
self.failIf(Query("foobar = /./").search(self.s1))
def test_gte(self):
self.failUnless(Query("#(track >= 11)").search(self.s2))
def test_re(self):
for s in ["album = /i hate/", "artist = /pi*/", "title = /x.y/"]:
self.failUnless(Query(s).search(self.s1))
self.failIf(Query(s).search(self.s2))
f = Query("artist = /mu|piman/").search
self.failUnless(f(self.s1))
self.failUnless(f(self.s2))
def test_re_escape(self):
af = AudioFile({"foo": "\""})
assert Query('foo="\\""').search(af)
af = AudioFile({"foo": "/"})
assert Query('foo=/\\//').search(af)
def test_not(self):
for s in ["album = !hate", "artist = !pi"]:
self.failIf(Query(s).search(self.s1))
self.failUnless(Query(s).search(self.s2))
def test_abbrs(self):
for s in ["b = /i hate/", "a = /pi*/", "t = /x.y/"]:
self.failUnless(Query(s).search(self.s1))
self.failIf(Query(s).search(self.s2))
def test_str(self):
for k in self.s2.keys():
v = self.s2[k]
self.failUnless(Query('%s = "%s"' % (k, v)).search(self.s2))
self.failIf(Query('%s = !"%s"' % (k, v)).search(self.s2))
def test_numcmp(self):
self.failIf(Query("#(track = 0)").search(self.s1))
self.failIf(Query("#(notatag = 0)").search(self.s1))
self.failUnless(Query("#(track = 12)").search(self.s2))
def test_trinary(self):
self.failUnless(Query("#(11 < track < 13)").search(self.s2))
self.failUnless(Query("#(11 < track <= 12)").search(self.s2))
self.failUnless(Query("#(12 <= track <= 12)").search(self.s2))
self.failUnless(Query("#(12 <= track < 13)").search(self.s2))
self.failUnless(Query("#(13 > track > 11)").search(self.s2))
self.failUnless(Query("#(20 > track < 20)").search(self.s2))
def test_not_2(self):
for s in ["album = !/i hate/", "artist = !/pi*/", "title = !/x.y/"]:
self.failUnless(Query(s).search(self.s2))
self.failIf(Query(s).search(self.s1))
def test_case(self):
self.failUnless(Query("album = /i hate/").search(self.s1))
self.failUnless(Query("album = /I Hate/").search(self.s1))
self.failUnless(Query("album = /i Hate/").search(self.s1))
self.failUnless(Query("album = /i Hate/i").search(self.s1))
self.failUnless(Query(u"title = /ångström/").search(self.s4))
self.failIf(Query("album = /i hate/c").search(self.s1))
self.failIf(Query(u"title = /ångström/c").search(self.s4))
def test_re_and(self):
self.failUnless(Query("album = &(/ate/,/est/)").search(self.s1))
self.failIf(Query("album = &(/ate/, /ets/)").search(self.s1))
self.failIf(Query("album = &(/tate/, /ets/)").search(self.s1))
def test_re_or(self):
self.failUnless(Query("album = |(/ate/,/est/)").search(self.s1))
self.failUnless(Query("album = |(/ate/,/ets/)").search(self.s1))
self.failIf(Query("album = |(/tate/, /ets/)").search(self.s1))
def test_newlines(self):
self.failUnless(Query("a = /\n/").search(self.s3))
self.failUnless(Query("a = /\\n/").search(self.s3))
self.failIf(Query("a = /\n/").search(self.s2))
self.failIf(Query("a = /\\n/").search(self.s2))
def test_exp_and(self):
self.failUnless(Query("&(album = ate, artist = man)").search(self.s1))
self.failIf(Query("&(album = ate, artist = nam)").search(self.s1))
self.failIf(Query("&(album = tea, artist = nam)").search(self.s1))
def test_exp_or(self):
self.failUnless(Query("|(album = ate, artist = man)").search(self.s1))
self.failUnless(Query("|(album = ate, artist = nam)").search(self.s1))
self.failIf(Query("&(album = tea, artist = nam)").search(self.s1))
def test_dumb_search(self):
self.failUnless(Query("ate man").search(self.s1))
self.failUnless(Query("Ate man").search(self.s1))
self.failIf(Query("woo man").search(self.s1))
self.failIf(Query("not crazy").search(self.s1))
def test_dumb_search_value(self):
self.failUnless(Query("|(ate, foobar)").search(self.s1))
self.failUnless(Query("!!|(ate, foobar)").search(self.s1))
self.failUnless(Query("&(ate, te)").search(self.s1))
self.failIf(Query("|(foo, bar)").search(self.s1))
self.failIf(Query("&(ate, foobar)").search(self.s1))
self.failIf(Query("! !&(ate, foobar)").search(self.s1))
self.failIf(Query("&blah").search(self.s1))
self.failUnless(Query("&blah oh").search(self.s5))
self.failUnless(Query("!oh no").search(self.s5))
self.failIf(Query("|blah").search(self.s1))
# https://github.com/quodlibet/quodlibet/issues/1056
self.failUnless(Query("&(ate, piman)").search(self.s1))
def test_dumb_search_value_negate(self):
self.failUnless(Query("!xyz").search(self.s1))
self.failUnless(Query("!!!xyz").search(self.s1))
self.failUnless(Query(" !!!&(xyz, zyx)").search(self.s1))
self.failIf(Query("!man").search(self.s1))
self.failUnless(Query("&(tests,piman)").search(self.s1))
self.failUnless(Query("&(tests,!nope)").search(self.s1))
self.failIf(Query("&(tests,!!nope)").search(self.s1))
self.failIf(Query("&(tests,!piman)").search(self.s1))
self.failUnless(Query("&(tests,|(foo,&(pi,!nope)))").search(self.s1))
def test_dumb_search_regexp(self):
self.failUnless(Query("/(x|H)ate/").search(self.s1))
self.failUnless(Query("'PiMan'").search(self.s1))
self.failIf(Query("'PiMan'c").search(self.s1))
self.failUnless(Query("!'PiMan'c").search(self.s1))
self.failIf(Query("!/(x|H)ate/").search(self.s1))
def test_unslashed_search(self):
self.failUnless(Query("artist=piman").search(self.s1))
self.failUnless(Query(u"title=ång").search(self.s4))
self.failIf(Query("artist=mu").search(self.s1))
self.failIf(Query(u"title=äng").search(self.s4))
def test_synth_search(self):
self.failUnless(Query("~dirname=/dir1/").search(self.s1))
self.failUnless(Query("~dirname=/dir2/").search(self.s2))
self.failIf(Query("~dirname=/dirty/").search(self.s1))
self.failIf(Query("~dirname=/dirty/").search(self.s2))
def test_search_almostequal(self):
a, b = AudioFile({"~#rating": 0.771}), AudioFile({"~#rating": 0.769})
self.failUnless(Query("#(rating = 0.77)").search(a))
self.failUnless(Query("#(rating = 0.77)").search(b))
def test_and_or_neg_operator(self):
union = Query("|(foo=bar,bar=foo)")
inter = Query("&(foo=bar,bar=foo)")
neg = Query("!foo=bar")
numcmp = Query("#(bar = 0)")
tag = Query("foo=bar")
tests = [inter | tag, tag | tag, neg | neg, tag | inter, neg | union,
union | union, inter | inter, numcmp | numcmp, numcmp | union]
self.failIf(
list(filter(lambda x: not isinstance(x, match.Union), tests)))
tests = [inter & tag, tag & tag, neg & neg, tag & inter, neg & union,
union & union, inter & inter, numcmp & numcmp, numcmp & inter]
self.failIf(
list(filter(lambda x: not isinstance(x, match.Inter), tests)))
self.assertTrue(isinstance(-neg, match.Tag))
true = Query("")
self.assertTrue(isinstance(true | inter, match.True_))
self.assertTrue(isinstance(inter | true, match.True_))
self.assertTrue(isinstance(true & inter, match.Inter))
self.assertTrue(isinstance(inter & true, match.Inter))
self.assertTrue(isinstance(true & true, match.True_))
self.assertTrue(isinstance(true | true, match.True_))
self.assertTrue(isinstance(-true, match.Neg))
def test_filter(self):
q = Query("artist=piman")
self.assertEqual(q.filter([self.s1, self.s2]), [self.s1])
self.assertEqual(q.filter(iter([self.s1, self.s2])), [self.s1])
q = Query("")
self.assertEqual(q.filter([self.s1, self.s2]), [self.s1, self.s2])
self.assertEqual(
q.filter(iter([self.s1, self.s2])), [self.s1, self.s2])
def test_match_all(self):
self.failUnless(Query("").matches_all)
self.failUnless(Query(" ").matches_all)
self.failIf(Query("foo").matches_all)
def test_utf8(self):
# also handle undecoded values
self.assertTrue(Query(u"utf8=Ångström").search(self.s4))
def test_fs_utf8(self):
self.failUnless(Query(u"~filename=foü.ogg").search(self.s3))
self.failUnless(Query(u"~filename=öä").search(self.s3))
self.failUnless(Query(u"~dirname=öäü").search(self.s3))
self.failUnless(Query(u"~basename=ü.ogg").search(self.s3))
def test_filename_utf8_fallback(self):
self.failUnless(Query(u"filename=foü.ogg").search(self.s3))
self.failUnless(Query(u"filename=öä").search(self.s3))
def test_mountpoint_utf8_fallback(self):
self.failUnless(Query(u"mountpoint=foü").search(self.s3))
self.failUnless(Query(u"mountpoint=öä").search(self.s3))
def test_mountpoint_no_value(self):
af = AudioFile({"~filename": fsnative(u"foo")})
assert not Query(u"~mountpoint=bla").search(af)
def test_star_numeric(self):
self.assertRaises(ValueError, Query, u"foobar", star=["~#mtime"])
def test_match_diacriticals_explcit(self):
assert Query(u'title=angstrom').search(self.s4)
self.failIf(Query(u'title="Ångstrom"').search(self.s4))
self.failUnless(Query(u'title="Ångstrom"d').search(self.s4))
self.failUnless(Query(u'title=Ångström').search(self.s4))
self.failUnless(Query(u'title="Ångström"').search(self.s4))
self.failUnless(Query(u'title=/Ångström/').search(self.s4))
self.failUnless(Query(u'title="Ångstrom"d').search(self.s4))
self.failUnless(Query(u'title=/Angstrom/d').search(self.s4))
self.failUnless(Query(u'""d').search(self.s4))
def test_match_diacriticals_dumb(self):
self.assertTrue(Query(u'Angstrom').search(self.s4))
self.assertTrue(Query(u'Ångström').search(self.s4))
self.assertTrue(Query(u'Ångstrom').search(self.s4))
self.assertFalse(Query(u'Ängström').search(self.s4))
def test_match_diacriticals_invalid_or_unsupported(self):
# these fall back to test dumb searches:
# invalid regex
Query(u'/Sigur [r-zos/d')
# group refs unsupported for diacritic matching
Query(u'/(<)?(\\w+@\\w+(?:\\.\\w+)+)(?(1)>)/d')
def test_numexpr(self):
self.failUnless(Query("#(length = 224)").search(self.s1))
self.failUnless(Query("#(length = 3:44)").search(self.s1))
self.failUnless(Query("#(length = 3 minutes + 44 seconds)")
.search(self.s1))
self.failUnless(Query("#(playcount > skipcount)").search(self.s1))
self.failUnless(Query("#(playcount < 2 * skipcount)").search(self.s1))
self.failUnless(Query("#(length > 3 minutes)").search(self.s1))
self.failUnless(Query("#(3:00 < length < 4:00)").search(self.s1))
self.failUnless(Query("#(40 seconds < length/5 < 1 minute)")
.search(self.s1))
self.failUnless(Query("#(2+3 * 5 = 17)").search(self.s1))
self.failUnless(Query("#(playcount / 0 > 0)").search(self.s1))
self.failIf(Query("#(track + 1 != 13)").search(self.s2))
def test_numexpr_date(self):
self.failUnless(Query("#(length < 2005-07-19)").search(self.s1))
self.failUnless(Query("#(date > 2005-07-19)").search(self.s1))
self.failUnless(Query("#(2005-11-24 < 2005-07-19)").search(self.s1))
self.failUnless(Query("#(date = (2007-05-19) + 5 days)")
.search(self.s1))
self.failUnless(Query("#(date - 5 days = 2007-05-19)").search(self.s1))
self.failUnless(Query("#(2010-02-18 > date)").search(self.s1))
self.failUnless(Query("#(2010 > date)").search(self.s1))
self.failUnless(Query("#(date > 4)").search(self.s1))
self.failUnless(Query("#(date > 0004)").search(self.s1))
self.failUnless(Query("#(date > 0000)").search(self.s1))
def test_ignore_characters(self):
try:
config.set("browsers", "ignored_characters", "-")
self.failUnless(Query("Foo the Bar - mu").search(self.s2))
config.set("browsers", "ignored_characters", "1234")
self.failUnless(Query("4Fo13o 2th2e3 4Bar4").search(self.s2))
finally:
config.reset("browsers", "ignored_characters")
class TQuery_get_type(TestCase):
def test_red(self):
for p in ["a = /w", "|(sa#"]:
self.failUnlessEqual(QueryType.INVALID, Query(p).type)
def test_black(self):
for p in ["a test", "more test hooray"]:
self.failUnlessEqual(QueryType.TEXT, Query(p).type)
def test_green(self):
for p in ["a = /b/", "&(a = b, c = d)", "/abc/", "!x", "!&(abc, def)"]:
self.failUnlessEqual(QueryType.VALID, Query(p).type)
| Mellthas/quodlibet | tests/test_query.py | Python | gpl-2.0 | 23,503 |
System:
------
1.) CLI to take an employee onboard (a rough sketch of this step follows below these notes).
name:
age:
location:
generate emp_id
print(emp_id)
save to file
emp_id,name,age,location
2.) swipe in, swipe out
3.) first swipe-in and last swipe-out for a day.
total hours spent at office
4.) save it into a file
attendance_2017_01_18.csv
emp_id in out total_time
1 10:10:10 12:10:10 5
2 10:11:11 12:10:10 4
4.) {emp_id}_{month}.csv
date in out total_time
2017-01-10 10:10 05:10 4
2017-01-11 10:10 05:10 4
2017-01-12 10:16 05:10 4
2017-01-13 10:18 05:10 4
8.) report
deficit report :
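Rough sketch of step 1 (illustrative only; the file name, id scheme and field order are assumptions):

import csv
import os

def onboard_employee(name, age, location, path="employees.csv"):
    # next emp_id = number of existing rows + 1 (assumes sequential ids)
    emp_id = 1
    if os.path.exists(path):
        with open(path) as f:
            emp_id = sum(1 for _ in f) + 1
    with open(path, "a", newline="") as f:
        csv.writer(f).writerow([emp_id, name, age, location])
    print(emp_id)
    return emp_id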
Emulation:
----------
| MortalViews/python-notes | exerices.py | Python | apache-2.0 | 595 |
from matplotlib.patches import Circle
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.anchored_artists import AnchoredDrawingArea
fig=plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
ada = AnchoredDrawingArea(40, 20, 0, 0,
loc=1, pad=0., frameon=False)
p1 = Circle((10, 10), 10)
ada.drawing_area.add_artist(p1)
p2 = Circle((30, 10), 5, fc="r")
ada.drawing_area.add_artist(p2)
ax.add_artist(ada)
plt.show()
| lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/users/plotting/examples/anchored_box02.py | Python | mit | 451 |
from __future__ import absolute_import, print_function
import datetime
import mimetypes
import os
import posixpath
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db import transaction
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
import pysvn
from cobra.models import Model
from cobra.models import fields
from cobra.models import sane_repr
from cobra.core.loading import get_class, get_model
from cobra.core.strings import strip, truncatechars
from . import choices
from cobra.core.constants import README_MARKUPS
from .exceptions import map_svn_exceptions
from .markup.hightlighter import make_html
from .utils.binaryornot import get_starting_chunk
from .utils.binaryornot import is_binary_string
NodeManager = get_class('svnkit.managers', 'NodeManager')
@python_2_unicode_compatible
class AbstractRepository(Model):
"""
Meta data for a subversion repository.
"""
project = models.OneToOneField('project.Project')
uuid = models.CharField(max_length=128, editable=False)
root = models.CharField(
help_text=_('Example: svn://example.com or file:///svn/ or http://host:port'),
max_length=512)
prefix = models.CharField(
help_text=_('<strong class="text-danger">Important!</strong> You may run into this situation: the svn url you supply is not the '
'root of the repository, and you do not have permission '
'to access the real root of the repository. Enter the correct repository prefix '
'and we will strip it for you automatically.<br><strong class="text-danger">If you do not have this problem, please ignore this notice.</strong>'),
max_length=512, blank=True)
uri = models.CharField(
help_text=_('Externally facing URI for the repository, if available'),
max_length=512, blank=True)
is_private = models.BooleanField(default=False)
username = models.CharField(max_length=512, blank=True)
password = models.CharField(max_length=512, blank=True)
last_synced = models.DateTimeField(
default=datetime.datetime.fromtimestamp(0, timezone.utc),
editable=False)
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_repository'
verbose_name_plural = _('repositories')
__repr__ = sane_repr('project_id', 'root')
def __str__(self):
return '%s (%s)' % (self.project.name, self.root)
def _get_login(self, realm, username, may_save):
if not (self.username and self.password):
raise ImproperlyConfigured(_(
'repository requires authentication, '
'but no username and password available'))
return (True, self.username, self.password, True)
def get_svn_client(self):
"""
Return a subversion client for this repository. The
authentication information stored with the repository is bound
with the client. The client can be instantiated with a
subversion config file with the COBRA_SVNKIT_SVN_CONFIG_PATH
settings variable.
"""
if settings.COBRA_SVNKIT_SVN_CONFIG_PATH is not None:
client = pysvn.Client(settings.COBRA_SVNKIT_SVN_CONFIG_PATH)
else:
client = pysvn.Client()
# set the exceptions to be more granular
client.exception_style = 1
# hook for cancelling an api call thats taking too long
started_dt = timezone.now()
def _cancel():
current_dt = timezone.now()
delta = (current_dt - started_dt).seconds
if delta > settings.COBRA_SVNKIT_CLIENT_TIMEOUT:
return True
return False
client.callback_cancel = _cancel
# bind the username and password that might be stored with the
# repository model object in case a login is required.
client.callback_get_login = self._get_login
return client
def sync(self):
"""
Update the model object representations of the given repository.
If the UUID has not been obtained for a repository, it is
obtained from the api. New changesets committed to the
repository, since the last time the repository was synced, are
also collected. If no previous sync has been run, all
changesets are collected.
"""
self.last_synced = timezone.now()
if not self.uuid:
self.sync_uuid()
self.sync_changesets()
self.save()
# @map_svn_exceptions
def sync_uuid(self):
"""Get the UUID of the given repository."""
c = self.get_svn_client()
info = c.info2(self.root, recurse=False)
self.uuid = info[0][1]['repos_UUID']
sync_uuid = map_svn_exceptions(sync_uuid)
def sync_changesets(self):
"""
Get new changesets committed to the repository since the last
time they were collected.
"""
Changeset = get_model('svnkit', 'Changeset')
Change = get_model('svnkit', 'Change')
revision = self.get_latest_revision()
c = self.get_svn_client()
log = c.log(
self.root,
revision_end=pysvn.Revision(
pysvn.opt_revision_kind.number, revision),
discover_changed_paths=True)
for item in log:
# ignore the overlap, the changeset is already stored locally
if item['revision'].number == revision:
continue
changeset = Changeset.objects.create(
repository=self,
date=datetime.datetime.fromtimestamp(item['date'], timezone.utc),
revision=item['revision'].number,
author=item.get('author', ''),
message=item.get('message', '') # Normally a message exists, but some revisions have no message.
)
for changed_path in item['changed_paths']:
copyfrom_revision = None
if changed_path['copyfrom_revision']:
copyfrom_revision = changed_path[
'copyfrom_revision'].number
change = Change.objects.create(
changeset=changeset,
path=changed_path['path'],
action=changed_path['action'],
copied_from_path=changed_path['copyfrom_path'],
copied_from_revision=copyfrom_revision)
sync_changesets = transaction.atomic(
map_svn_exceptions(sync_changesets))
def get_latest_revision(self):
"""
Get the latest revision of the repository.
"""
revision = 0
if self.changesets.count():
revision = self.changesets.all()[0].revision
return revision
def get_node(self, path, revision=None):
"""
Get a `svnkit.models.Node` object at the given
path. Optionally specify a revision.
"""
Node = get_model('svnkit', 'Node')
return Node.objects.get_or_sync(self, path, revision)
@python_2_unicode_compatible
class AbstractChangeset(Model):
"""
The meta data about a revision in a subversion repository.
"""
repository = fields.FlexibleForeignKey('svnkit.Repository', related_name='changesets')
date = models.DateTimeField()
revision = models.PositiveIntegerField(db_index=True)
author = models.CharField(max_length=512)
message = models.TextField()
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_changeset'
unique_together = (('repository', 'revision'),)
ordering = ('-revision',)
__repr__ = sane_repr('repository_id', 'revision')
def __str__(self):
return 'r%s' % self.revision
@property
def title(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 40)
return message
@property
def rest_message(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
split_msgs = message.splitlines()
first_line_msg = split_msgs[0]
if len(first_line_msg) > 40:
split_msgs[0] = '...'+first_line_msg[37:]
else:
del split_msgs[0]
message = '\n'.join(split_msgs)
return message
@models.permalink
def get_absolute_url(self):
return ('svnkit:changeset', (self.repository.project.organization.slug, self.repository.project.slug, self.revision))
def get_previous(self):
"""Get the previous changeset in the repository."""
return self.repository.changesets.filter(revision__lte=self.revision - 1).first()
def get_next(self):
"""Get the next changeset in the repository."""
return self.repository.changesets.filter(revision__gte=self.revision + 1).last()
@python_2_unicode_compatible
class AbstractChange(Model):
"""
A changed path in a changeset, including the action taken.
"""
changeset = fields.FlexibleForeignKey('svnkit.Changeset', related_name='changes')
path = models.CharField(max_length=2048, db_index=True)
action = models.CharField(max_length=1)
copied_from_path = models.CharField(max_length=2048, null=True)
copied_from_revision = models.PositiveIntegerField(null=True)
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_change'
unique_together = (('changeset', 'path'),)
ordering = ('changeset', 'path')
__repr__ = sane_repr('action', 'path')
def __str__(self):
return '%s %s' % (self.action, self.path)
def _get_base_change(self):
if hasattr(self, '_base_change'):
return self._base_change
if self.copied_from_revision is not None:
self._base_change = self.__class__.objects.get(
changeset__repository=self.changeset.repository,
revision=self.copied_from_revision
)
return self._base_change
@property
def relative_path(self):
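        # Illustrative example: with repository.prefix '/svn/project', a change at
        # '/svn/project/trunk/a.py' is reported as '/trunk/a.py'.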
if self.changeset.repository.prefix:
repo_prefix = self.changeset.repository.prefix
if repo_prefix.endswith(posixpath.sep):
repo_prefix = repo_prefix[:-1]
return self.path.replace(repo_prefix, '', 1)
else:
return self.path
def is_addition(self):
return self.action == 'A'
def is_modification(self):
return self.action == 'M'
def is_deletion(self):
return self.action == 'D'
@python_2_unicode_compatible
class AbstractNode(Model):
"""
The meta data for a path at a revision in a repository.
Nodes can be understood as 'views' of a particular path in a
repository at a particular revision number (a revision that may or
may not have made changes at that path/revision). A node's actual
content is stored in a separate model object, since the content
may remain unchanged across a number of revisions at a particular
path. The `get_last_changeset` method can be used to obtain the
changeset and revision in which the node's path was last changed.
This model largely reflects the information available through the
subversion api. The field `cached` indicates when the data was
retrieved from the api, and `cached_indirectly` indicates whether
or not the node was generated from an api call for the node or
from a related node (parent or one of its possible
children). Indirectly cached nodes (which are usually nodes
created as placeholders for heirarchical connections instead of
through a direct api call) require another api call to collect the
remaining missing information. Nodes can be optionally be included
in a regular cleanup.
"""
repository = fields.FlexibleForeignKey('svnkit.Repository', related_name='nodes')
parent = fields.FlexibleForeignKey('svnkit.Node', related_name='children', null=True)
path = models.CharField(max_length=2048, db_index=True)
node_type = models.CharField(max_length=1)
size = models.PositiveIntegerField(default=0)
last_changed = models.DateTimeField(null=True)
revision = models.PositiveIntegerField()
cached = models.DateTimeField(default=timezone.now)
cached_indirectly = models.BooleanField(default=True)
content = fields.FlexibleForeignKey('svnkit.Content', related_name='nodes', null=True)
objects = NodeManager(cache_fields=(
'pk',
))
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_node'
unique_together = (('repository', 'path', 'revision'),)
ordering = ('node_type', 'path')
__repr__ = sane_repr('path', 'revision')
def __str__(self):
return '%s@%s' % (self.path, self.revision)
def iter_path(self):
"""
Returns a generator that 'walks' up the node hierarchy,
yielding each parent path until the root node is reached ('/').
"""
path = self.path
yield path
while path != posixpath.sep:
path = posixpath.split(path)[0]
yield path
def iter_path_basename(self):
"""
Returns a generator that 'walks' up the node hierarchy,
yielding a tuple of the path, and the basename of the path for
each parent node until the root node is reached ('/').
"""
for path in self.iter_path():
basename = posixpath.basename(path)
if not basename:
# basename = self.repository.label
basename = self.repository.project.name
yield (path, basename)
def get_last_changeset(self):
"""Get the latest `Changeset` object that affected this node."""
c = self.repository.changesets.filter(
date__lte=self.last_changed)#.exclude(revision=self.revision)
if c.count():
return c[0]
else:
return self.repository.changesets.get(date=self.last_changed)
@models.permalink
def get_absolute_url(self):
repository = self.repository
if self.revision != repository.get_latest_revision():
return (
'svnkit:node-revision', (
repository.project.organization.slug, repository.project.slug, self.revision, self.path))
else:
return ('svnkit:node', (repository.project.organization.slug, repository.project.slug, self.path))
def get_basename(self):
"""
The basename of the node, either a file name or a directory
name.
"""
basename = posixpath.basename(self.path)
return basename
def is_directory(self):
"""Whether the node is a directory."""
return self.node_type == choices.NODE_TYPE_DIR
def is_file(self):
"""Whether the node is a file."""
return self.node_type == choices.NODE_TYPE_FILE
def is_root(self):
"""Whether the node is the root node ('/')."""
return self.is_directory() and self.path == posixpath.sep
def has_properties(self):
"""Whether the node has subversion properties set."""
if self.properties.count():
return True
return False
@python_2_unicode_compatible
class AbstractProperty(Model):
"""
A property that has been set on a node.
"""
node = fields.FlexibleForeignKey('svnkit.Node', related_name='properties')
key = models.CharField(max_length=512, db_index=True)
value = models.TextField()
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_property'
unique_together = (('node', 'key'),)
verbose_name_plural = 'properties'
__repr__ = sane_repr('path', 'revision')
def __str__(self):
return '%s: %s' % (self.key, self.value)
@python_2_unicode_compatible
class AbstractContent(Model):
"""
The contents of a node at a revision.
The data is base64 encoded in the database to allow storage of
binary data. The `set_data` and `get_data` methods should be used
to manipulate a node's data. `cached` indicates when the contents
were retrieved from the api. Content objects can optionally be
part of a regular cleanup.
"""
repository = fields.FlexibleForeignKey('svnkit.Repository', related_name='content')
path = models.CharField(max_length=2048)
last_changed = models.DateTimeField()
cached = models.DateTimeField(default=timezone.now)
size = models.PositiveIntegerField(default=0)
data = models.TextField()
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_content'
unique_together = (('repository', 'path', 'last_changed'),)
__repr__ = sane_repr('path', 'repository_id')
def __str__(self):
return '%s@%s' % (self.path, self.get_last_changeset())
def set_data(self, data):
self.size = len(data)
self.data = data.encode('base64')
def get_data(self):
if hasattr(self, '_decoded_data'):
return self._decoded_data
self._decoded_data = self.data.decode('base64')
return self._decoded_data
@property
def lines(self):
return self.get_data().count('\n')
def get_last_changeset(self):
"""Get the changeset in which this content was committed."""
return self.repository.changesets.get(date=self.last_changed)
def get_mimetype(self):
"""
Get the mimetype of the content. This is determined by the
extension of the basename of the path. Defaults to
application/octet-stream if the mimetype cannot be determined.
"""
mtype = mimetypes.guess_type(self.path)[0]
if mtype is None:
return 'application/octet-stream'
return mtype
def get_maintype(self):
"""
Get the maintype of the mimetype, i.e. 'image' in 'image/png'.
"""
return self.get_mimetype().split('/')[0]
def get_subtype(self):
"""
Get the subtype of the mimetype, i.e. 'png' in 'image/png'.
"""
return self.get_mimetype().split('/')[-1]
@models.permalink
def get_absolute_url(self):
return ('svnkit:content', (
self.repository.project.organization.slug, self.repository.project.slug, self.pk, self.get_basename()))
def is_binary(self):
"""
Whether or not the content is binary. This is determined in
part by the mimetype, but if the mimetype is not available,
then if the data cannot be decoded into ascii it will be
presumed a binary format.
"""
# mtype = mimetypes.guess_type(self.path)[0]
# if mtype is None:
# try:
# self.get_data().decode('gbk')
# except UnicodeDecodeError:
# return True
# return False
chunk = get_starting_chunk(self.get_data())
return is_binary_string(chunk)
# if not mtype.startswith('text'):
# return True
# return False
def get_basename(self):
"""Get the basename of the node's full path (the filename)."""
basename = posixpath.basename(self.path)
return basename
def get_data_display(self):
"""
Get the content for display in text. Binary formats are just
shown as '(binary)'. Plain text formats get run through the
appropriate pygments lexer if the package is available.
"""
if self.is_binary():
return _('<pre>(binary)</pre>')
try:
txt = self.get_data().decode('utf-8')
except UnicodeDecodeError:
txt = self.get_data().decode('gbk')
        return make_html(txt, self.get_basename())
| lyoniionly/django-cobra | src/cobra/apps/svnkit/abstract_models.py | Python | apache-2.0 | 20,237 |
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class ArchJsonConf(object):
""" The Base Json Config Class with UAI
"""
def __init__(self, platform, parser):
self.platform = platform
self.parser = parser
self.conf_params = {}
if len(parser._actions) == 1:
self._add_args()
#self._load_args()
def _add_args(self):
""" Add common args here
Child class can add its own args in _add_args
"""
subparsers = self.parser.add_subparsers(dest='commands', help='commands')
# A pack command
pack_parser = subparsers.add_parser('pack', help='Pack local code into docker and upload to uhub')
self._add_account_args(pack_parser)
self._add_pack_args(pack_parser)
# A create command
create_parser = subparsers.add_parser('create', help='Create uai train job')
self._add_account_args(create_parser)
self._add_create_args(create_parser)
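        # Illustrative command lines (assuming a driver script exposes this parser):
        #   <tool> pack   --public_key K --private_key S --code_path ./code --mainfile_path train.py ...
        #   <tool> create --public_key K --private_key S --job_name demo --uhub_path ... --docker_cmd ...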
def load_params(self):
self.params = vars(self.parser.parse_args())
#set default / solid params
# if not self.params["accelerator"]:
# self.params["accelerator"] = "gpu"
def _add_account_args(self, parser):
args_parser = parser.add_argument_group(
'User-Params', 'User Authentication Parameters')
args_parser.add_argument(
'--public_key',
type=str,
required=True,
help='the public key of the user')
args_parser.add_argument(
'--private_key',
type=str,
required=True,
help='the private key of the user')
args_parser.add_argument(
'--project_id',
type=str,
required=False,
help='the project id of ucloud, could be null')
def _add_create_args(self, parser):
args_parser = parser.add_argument_group(
'Uai-Params', 'Uai TrainJob Managment Parameters')
args_parser.add_argument(
'--job_name',
type=str,
required=True,
help='the uai job name')
args_parser.add_argument(
'--worker_id',
type=int,
default=1860001,
help='the type of worker server')
args_parser.add_argument(
'--uhub_path',
type=str,
required=True,
help='uhubpath')
args_parser.add_argument(
'--ufile_datapath',
type = str,
required = True,
help = 'ufile_datapath')
args_parser.add_argument(
'--ufile_outputpath',
type = str,
required = True,
help = 'ufile_outputpath')
args_parser.add_argument(
'--docker_cmd',
type = str,
required = True,
help = 'docker_cmd')
args_parser.add_argument(
'--max_exectime',
type = int,
default = 6,
help = 'max execute time')
def _add_pack_args(self, parser):
code_parse = parser.add_argument_group(
'Code-Params', 'Code Parameters, help to pack user code into docker image')
code_parse.add_argument(
'--code_path',
type=str,
required=True,
help='the path of the user program containing all code files')
code_parse.add_argument(
'--mainfile_path',
type=str,
required=True,
help='the related path of main python file considering code_path as root')
#todo: add cmd line parameters outside baseconfig, according to different aiframe
self.pack_parser = code_parse
uhub_parse = parser.add_argument_group(
'Docker-Params', 'Docker Parameters, help to upload docker image automatically')
uhub_parse.add_argument(
'--uhub_username',
type=str,
required=True,
help='username to login uhub, which is also username to ucloud console')
uhub_parse.add_argument(
'--uhub_password',
type=str,
required=True,
help='password to login uhub, which is also password to ucloud console')
uhub_parse.add_argument(
'--uhub_registry',
type=str,
required=True,
help='the name of registry owned by user on ucloud console')
uhub_parse.add_argument(
'--uhub_imagename',
type=str,
required=True,
help='the name of user docker image')
uhub_parse.add_argument(
'--uhub_imagetag',
type=str,
required=False,
help='the tag of user docker image')
container_parse = parser.add_argument_group(
'Container-Params', 'Container Enviroment Parameters')
container_parse.add_argument(
'--os',
type=str,
default='ubuntu',
help='the type of the docker os')
container_parse.add_argument(
'--language',
type=str,
default='python-2.7.6',
help='the language of the docker')
container_parse.add_argument(
'--ai_arch_v',
type=str,
# default='tensorflow-1.1.0',
required=True,
help='AI architecture and specific version')
container_parse.add_argument(
'--accelerator',
type=str,
default='gpu',
help='the accelerator to run on, e.g. gpu or cpu')
cmd_gen_parse = parser.add_argument_group(
'Cmd-Gen-Params', 'Cmd generate params')
cmd_gen_parse.add_argument(
'--test_data_path',
type=str,
required=True,
help='the data dir for local test')
cmd_gen_parse.add_argument(
'--test_output_path',
type=str,
required=True,
help='the output dir for local test')
cmd_gen_parse.add_argument(
'--train_params',
type=str,
required=True,
help='the train related params')
def _load_conf_params(self):
pass
def _load_args(self):
""" Json param loader
"""
self._load_conf_params()
| ucloud/uai-sdk | uaitrain/arch_conf/base_conf.py | Python | apache-2.0 | 7,027 |
from mucomic.core.models import Issue, Series
from mucomic.core import DB, Api
from mucomic import paths
import shutil
import os.path
from urllib.request import urlopen
import zipfile
import re
class Connector:
def __init__(self, config):
self.config = config
self.db = DB(paths.dbfile)
self.api = Api(self.config['MUComicLoad']['username'], self.config['MUComicLoad']['password'])
def updateSeries(self):
jsonseries = self.api.get_all_series()
reg = "(?P<title>.*) \((?P<start>[^ ]*)(?: - (?P<end>[^ ]*))?\)"
parser = re.compile(reg)
series = []
for json in jsonseries:
id = json['id']
parsedTitle = parser.match(json['title'])
if parsedTitle:
title = parsedTitle.group('title')
start = parsedTitle.group('start')
end = parsedTitle.group('end')
else:
print("Could not parse Title %s" % json['title'])
title = json['title']
start = None
end = None
series.append(Series(id, title, start, end))
self.db.add_series(series)
def getIssues(self, series):
issues = self.db.get_issue_list(series.id)
return issues
def get_series(self, series_id):
series = self.db.get_series(series_id)
return series
def get_added_series(self):
series = self.db.get_added_series()
return series
def updateIssues(self, series):
jsonissues = self.api.get_series_by_id(series.id)
issues = [
Issue(
id = json['digital_id'],
series_id = series.id,
title = series.title,
issue_number = json['issue_number'],
cover_url =
"https://i.annihil.us/u/prod/marvel%s/portrait_incredible.jpg" % json['image_url']
)
for json in jsonissues
]
self.db.add_issues(issues)
return issues
def downloadIssue(self, issue):
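        # Pages are zipped into a temporary "<name>_" file first and only renamed to
        # the final .cbz once complete, so partial downloads never look finished.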
if not os.path.isdir(self.cbzpath(issue)):
os.makedirs(self.cbzpath(issue))
filename = "%s_" % self.cbzfile(issue)
comiczip = zipfile.ZipFile(filename, mode='w')
pages = self.api.get_issue_by_id(issue.id)['pages']
links = [page['cdnUrls']['jpg_75']['scalar'] for page in pages if page['cdnUrls']
!= {}]
for k, url in enumerate(links):
image = urlopen(url).read()
comiczip.writestr('img_%02d.jpg' % k, image)
comiczip.close()
shutil.move(filename, self.cbzfile(issue))
def getFirstCover(self, series):
maybeFirst = self.db.get_issue_list(series.id, 1)
if maybeFirst:
firstIssue = maybeFirst[0]
return firstIssue.cover()
else:
return None
def hasConfig(self):
return os.path.exists(paths.configfile)
def emptyDatabase(self):
return self.db.count_series() == 0
def updateConfig(self):
with open(paths.configfile, 'w') as config:
self.config.write(config)
self.api = Api(self.config['MUComicLoad']['username'], self.config['MUComicLoad']['password'])
def cbzpath(self, issue):
series = self.get_series(issue.series_id)
safetitle = re.sub('[^\w\-_\.\(\) ]', '',series.formatedseries)
return os.path.join(self.config['MUComicLoad']['downloaddir'],
safetitle)
def cbzfile(self, issue):
series = self.get_series(issue.series_id)
safetitle = re.sub('[^\w\-_\.\(\) ]', '',series.formatedseries)
return os.path.join(self.config['MUComicLoad']['downloaddir'], safetitle,
'%s %s.cbz' % (safetitle, issue.safe_nr))
def issueHasTemp(self, issue):
return os.path.isfile("%s_" % self.cbzfile(issue))
def issueHasLocal(self, issue):
return os.path.isfile(self.cbzfile(issue))
| christofsteel/mucomicload | src/mucomic/core/connector.py | Python | gpl-3.0 | 3,381 |
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008-2016 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Custom exception classes (some which are RPC transparent).
`PrivateError` and its subclasses are custom IPA exceptions that will *never* be
forwarded in a Remote Procedure Call (RPC) response.
On the other hand, `PublicError` and its subclasses can be forwarded in an RPC
response. These public errors each carry a unique integer error code as well as
a gettext translated error message (translated at the time the exception is
raised). The purpose of the public errors is to relay information about
*expected* user errors, service availability errors, and so on. They should
*never* be used for *unexpected* programmatic or run-time errors.
For security reasons it is *extremely* important that arbitrary exceptions *not*
be forwarded in an RPC response. Unexpected exceptions can easily contain
compromising information in their error messages. Any time the server catches
any exception that isn't a `PublicError` subclass, it should raise an
`InternalError`, which itself always has the same, static error message (and
therefore cannot be populated with information about the true exception).
The public errors are arranged into five main blocks of error code ranges:
============= ========================================
Error codes Exceptions
============= ========================================
1000 - 1999 `AuthenticationError` and its subclasses
2000 - 2999 `AuthorizationError` and its subclasses
3000 - 3999 `InvocationError` and its subclasses
4000 - 4999 `ExecutionError` and its subclasses
5000 - 5999 `GenericError` and its subclasses
============= ========================================
Within these five blocks some sub-ranges are already allocated for certain types
of error messages, while others are reserved for future use. Here are the
current block assignments:
- **900-5999** `PublicError` and its subclasses
- **901 - 907** Assigned to special top-level public errors
- **908 - 999** *Reserved for future use*
- **1000 - 1999** `AuthenticationError` and its subclasses
- **1001 - 1099** Open for general authentication errors
- **1100 - 1199** `KerberosError` and its subclasses
- **1200 - 1299** `SessionError` and its subclasses
- **1300 - 1999** *Reserved for future use*
- **2000 - 2999** `AuthorizationError` and its subclasses
- **2001 - 2099** Open for general authorization errors
- **2100 - 2199** `ACIError` and its subclasses
- **2200 - 2999** *Reserved for future use*
- **3000 - 3999** `InvocationError` and its subclasses
- **3001 - 3099** Open for general invocation errors
- **3100 - 3199** *Reserved for future use*
- **4000 - 4999** `ExecutionError` and its subclasses
- **4001 - 4099** Open for general execution errors
- **4100 - 4199** `BuiltinError` and its subclasses
- **4200 - 4299** `LDAPError` and its subclasses
- **4300 - 4399** `CertificateError` and its subclasses
- **4400 - 4499** `DNSError` and (some of) its subclasses
- **4500 - 4999** *Reserved for future use*
- **5000 - 5999** `GenericError` and its subclasses
- **5001 - 5099** Open for generic errors
- **5100 - 5999** *Reserved for future use*
"""
import six
from ipalib.text import ngettext as ungettext
from ipalib import messages
class PrivateError(Exception):
"""
Base class for exceptions that are *never* forwarded in an RPC response.
"""
format = ''
def __init__(self, **kw):
self.msg = self.format % kw
self.kw = kw
for (key, value) in kw.items():
assert not hasattr(self, key), 'conflicting kwarg %s.%s = %r' % (
self.__class__.__name__, key, value,
)
setattr(self, key, value)
Exception.__init__(self, self.msg)
if six.PY3:
@property
def message(self):
return str(self)
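# Illustrative only: every keyword passed to a PrivateError subclass is stored
# both in the .kw dict and as an instance attribute, next to the formatted
# message (using SkipPluginModule, defined below, purely as an example):
#
#   >>> e = SkipPluginModule(reason='not needed on clients')
#   >>> e.reason
#   'not needed on clients'
#   >>> e.msg
#   'not needed on clients'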
class SubprocessError(PrivateError):
"""
Raised when ``subprocess.call()`` returns a non-zero exit status.
This custom exception is needed because Python 2.4 doesn't have the
``subprocess.CalledProcessError`` exception (which was added in Python 2.5).
For example:
>>> raise SubprocessError(returncode=2, argv=('ls', '-lh', '/no-foo/'))
Traceback (most recent call last):
...
SubprocessError: return code 2 from ('ls', '-lh', '/no-foo/')
The exit code of the sub-process is available via the ``returncode``
instance attribute. For example:
>>> e = SubprocessError(returncode=1, argv=('/bin/false',))
>>> e.returncode
1
>>> e.argv # argv is also available
('/bin/false',)
"""
format = 'return code %(returncode)d from %(argv)r'
class PluginSubclassError(PrivateError):
"""
Raised when a plugin doesn't subclass from an allowed base.
For example:
>>> raise PluginSubclassError(plugin='bad', bases=('base1', 'base2'))
Traceback (most recent call last):
...
PluginSubclassError: 'bad' not subclass of any base in ('base1', 'base2')
"""
format = '%(plugin)r not subclass of any base in %(bases)r'
class PluginDuplicateError(PrivateError):
"""
Raised when the same plugin class is registered more than once.
For example:
>>> raise PluginDuplicateError(plugin='my_plugin')
Traceback (most recent call last):
...
PluginDuplicateError: 'my_plugin' was already registered
"""
format = '%(plugin)r was already registered'
class PluginOverrideError(PrivateError):
"""
Raised when a plugin overrides another without using ``override=True``.
For example:
>>> raise PluginOverrideError(base='Command', name='env', plugin='my_env')
Traceback (most recent call last):
...
PluginOverrideError: unexpected override of Command.env with 'my_env'
"""
format = 'unexpected override of %(base)s.%(name)s with %(plugin)r'
class PluginMissingOverrideError(PrivateError):
"""
Raised when a plugin overrides another that has not been registered.
For example:
>>> raise PluginMissingOverrideError(base='Command', name='env', plugin='my_env')
Traceback (most recent call last):
...
PluginMissingOverrideError: Command.env not registered, cannot override with 'my_env'
"""
format = '%(base)s.%(name)s not registered, cannot override with %(plugin)r'
class SkipPluginModule(PrivateError):
"""
Raised to abort the loading of a plugin module.
"""
format = '%(reason)s'
class PluginsPackageError(PrivateError):
"""
Raised when ``package.plugins`` is a module instead of a sub-package.
"""
format = '%(name)s must be sub-package, not module: %(file)r'
class PluginModuleError(PrivateError):
"""
Raised when a module is not a valid plugin module.
"""
format = '%(name)s is not a valid plugin module'
##############################################################################
# Public errors:
_texts = []
def _(message):
_texts.append(message)
return message
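# The _() helper above only records each format string in _texts so message
# extraction tools can pick them up; the actual gettext translation happens
# when an error instance is created. Rough sketch of the flow (names from
# this file):
#
#   fmt = _("unknown command '%(name)s'")   # registered, returned untranslated
#   # at raise time the (possibly translated) text is interpolated:
#   #   CommandError(name='foobar')  ->  "unknown command 'foobar'"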
class PublicError(Exception):
"""
**900** Base class for exceptions that can be forwarded in an RPC response.
"""
def __init__(self, format=None, message=None, **kw):
messages.process_message_arguments(self, format, message, **kw)
super(PublicError, self).__init__(self.msg)
errno = 900
rval = 1
format = None
if six.PY3:
@property
def message(self):
return str(self)
class VersionError(PublicError):
"""
**901** Raised when client and server versions are incompatible.
For example:
>>> raise VersionError(cver='2.0', sver='2.1', server='https://localhost')
Traceback (most recent call last):
...
VersionError: 2.0 client incompatible with 2.1 server at 'https://localhost'
"""
errno = 901
format = _("%(cver)s client incompatible with %(sver)s server at '%(server)s'")
class UnknownError(PublicError):
"""
    **902** Raised when the client does not recognize an error it caught from the server.
For example:
>>> raise UnknownError(code=57, server='localhost', error=u'a new error')
...
Traceback (most recent call last):
...
UnknownError: unknown error 57 from localhost: a new error
"""
errno = 902
format = _('unknown error %(code)d from %(server)s: %(error)s')
class InternalError(PublicError):
"""
**903** Raised to conceal a non-public exception.
For example:
>>> raise InternalError()
Traceback (most recent call last):
...
InternalError: an internal error has occurred
"""
errno = 903
format = _('an internal error has occurred')
def __init__(self, message=None):
"""
Security issue: ignore any information given to constructor.
"""
PublicError.__init__(self)
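# A hedged sketch of how server-side code is expected to use InternalError, per
# the module docstring (run_command below is a hypothetical handler, not part
# of this module):
#
#   def run_command(command, **options):
#       try:
#           return command(**options)
#       except PublicError:
#           raise                   # safe to forward to the client
#       except Exception:
#           raise InternalError()   # conceal the real error message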
class ServerInternalError(PublicError):
"""
**904** Raised when client catches an `InternalError` from server.
For example:
>>> raise ServerInternalError(server='https://localhost')
Traceback (most recent call last):
...
ServerInternalError: an internal error has occurred on server at 'https://localhost'
"""
errno = 904
format = _("an internal error has occurred on server at '%(server)s'")
class CommandError(PublicError):
"""
**905** Raised when an unknown command is called.
For example:
>>> raise CommandError(name='foobar')
Traceback (most recent call last):
...
CommandError: unknown command 'foobar'
"""
errno = 905
format = _("unknown command '%(name)s'")
class ServerCommandError(PublicError):
"""
**906** Raised when client catches a `CommandError` from server.
For example:
>>> e = CommandError(name='foobar')
>>> raise ServerCommandError(error=e.message, server='https://localhost')
Traceback (most recent call last):
...
ServerCommandError: error on server 'https://localhost': unknown command 'foobar'
"""
errno = 906
format = _("error on server '%(server)s': %(error)s")
class NetworkError(PublicError):
"""
**907** Raised when a network connection cannot be created.
For example:
>>> raise NetworkError(uri='ldap://localhost:389', error=_(u'Connection refused'))
Traceback (most recent call last):
...
NetworkError: cannot connect to 'ldap://localhost:389': Connection refused
"""
errno = 907
format = _("cannot connect to '%(uri)s': %(error)s")
class ServerNetworkError(PublicError):
"""
**908** Raised when client catches a `NetworkError` from server.
"""
errno = 908
format = _("error on server '%(server)s': %(error)s")
class JSONError(PublicError):
"""
    **909** Raised when the server receives a malformed JSON-RPC request.
"""
errno = 909
format = _('Invalid JSON-RPC request: %(error)s')
class XMLRPCMarshallError(PublicError):
"""
**910** Raised when the XML-RPC lib cannot marshall the request
For example:
>>> raise XMLRPCMarshallError(error=_('int exceeds XML-RPC limits'))
Traceback (most recent call last):
...
XMLRPCMarshallError: error marshalling data for XML-RPC transport: int exceeds XML-RPC limits
"""
errno = 910
format = _('error marshalling data for XML-RPC transport: %(error)s')
class RefererError(PublicError):
"""
**911** Raised when the request does not contain an HTTP referer
For example:
>>> raise RefererError(referer='referer')
Traceback (most recent call last):
...
RefererError: Missing or invalid HTTP Referer, referer
"""
errno = 911
format = _('Missing or invalid HTTP Referer, %(referer)s')
class EnvironmentError(PublicError):
"""
**912** Raised when a command is called with invalid environment settings
"""
errno = 912
##############################################################################
# 1000 - 1999: Authentication errors
class AuthenticationError(PublicError):
"""
**1000** Base class for authentication errors (*1000 - 1999*).
"""
errno = 1000
class KerberosError(AuthenticationError):
"""
**1100** Base class for Kerberos authentication errors (*1100 - 1199*).
For example:
>>> raise KerberosError(major=_('Unspecified GSS failure. Minor code may provide more information'), minor=_('No credentials cache found'))
Traceback (most recent call last):
...
KerberosError: Kerberos error: Unspecified GSS failure. Minor code may provide more information/No credentials cache found
"""
errno = 1100
format= _('Kerberos error: %(major)s/%(minor)s')
class CCacheError(KerberosError):
"""
    **1101** Raised when the server does not receive Kerberos credentials.
For example:
>>> raise CCacheError()
Traceback (most recent call last):
...
CCacheError: did not receive Kerberos credentials
"""
errno = 1101
format = _('did not receive Kerberos credentials')
class ServiceError(KerberosError):
"""
**1102** Raised when service is not found in Kerberos DB.
For example:
>>> raise ServiceError(service='HTTP@localhost')
Traceback (most recent call last):
...
ServiceError: Service 'HTTP@localhost' not found in Kerberos database
"""
errno = 1102
format = _("Service '%(service)s' not found in Kerberos database")
class NoCCacheError(KerberosError):
"""
**1103** Raised when a client attempts to use Kerberos without a ccache.
For example:
>>> raise NoCCacheError()
Traceback (most recent call last):
...
NoCCacheError: No credentials cache found
"""
errno = 1103
format = _('No credentials cache found')
class TicketExpired(KerberosError):
"""
**1104** Raised when a client attempts to use an expired ticket
For example:
>>> raise TicketExpired()
Traceback (most recent call last):
...
TicketExpired: Ticket expired
"""
errno = 1104
format = _('Ticket expired')
class BadCCachePerms(KerberosError):
"""
**1105** Raised when a client has bad permissions on their ccache
For example:
>>> raise BadCCachePerms()
Traceback (most recent call last):
...
BadCCachePerms: Credentials cache permissions incorrect
"""
errno = 1105
format = _('Credentials cache permissions incorrect')
class BadCCacheFormat(KerberosError):
"""
    **1106** Raised when a client has a misformatted ccache
For example:
>>> raise BadCCacheFormat()
Traceback (most recent call last):
...
BadCCacheFormat: Bad format in credentials cache
"""
errno = 1106
format = _('Bad format in credentials cache')
class CannotResolveKDC(KerberosError):
"""
**1107** Raised when the KDC can't be resolved
For example:
>>> raise CannotResolveKDC()
Traceback (most recent call last):
...
CannotResolveKDC: Cannot resolve KDC for requested realm
"""
errno = 1107
format = _('Cannot resolve KDC for requested realm')
class SessionError(AuthenticationError):
"""
**1200** Base class for Session errors (*1200 - 1299*).
For example:
"""
errno = 1200
format= _('Session error')
class InvalidSessionPassword(SessionError):
"""
**1201** Raised when we cannot obtain a TGT for a principal.
"""
errno = 1201
format= _('Principal %(principal)s cannot be authenticated: %(message)s')
class PasswordExpired(InvalidSessionPassword):
"""
**1202** Raised when we cannot obtain a TGT for a principal because the password is expired.
"""
errno = 1202
class KrbPrincipalExpired(SessionError):
"""
**1203** Raised when Kerberos Principal is expired.
"""
errno = 1203
class UserLocked(SessionError):
"""
**1204** Raised when a user account is locked.
"""
errno = 1204
##############################################################################
# 2000 - 2999: Authorization errors
class AuthorizationError(PublicError):
"""
**2000** Base class for authorization errors (*2000 - 2999*).
"""
errno = 2000
class ACIError(AuthorizationError):
"""
**2100** Base class for ACI authorization errors (*2100 - 2199*).
"""
errno = 2100
format = _('Insufficient access: %(info)s')
##############################################################################
# 3000 - 3999: Invocation errors
class InvocationError(PublicError):
"""
**3000** Base class for command invocation errors (*3000 - 3999*).
"""
errno = 3000
class EncodingError(InvocationError):
"""
**3001** Raised when received text is incorrectly encoded.
"""
errno = 3001
class BinaryEncodingError(InvocationError):
"""
**3002** Raised when received binary data is incorrectly encoded.
"""
errno = 3002
class ZeroArgumentError(InvocationError):
"""
**3003** Raised when a command is called with arguments but takes none.
For example:
>>> raise ZeroArgumentError(name='ping')
Traceback (most recent call last):
...
ZeroArgumentError: command 'ping' takes no arguments
"""
errno = 3003
format = _("command '%(name)s' takes no arguments")
class MaxArgumentError(InvocationError):
"""
**3004** Raised when a command is called with too many arguments.
For example:
>>> raise MaxArgumentError(name='user_add', count=2)
Traceback (most recent call last):
...
MaxArgumentError: command 'user_add' takes at most 2 arguments
"""
errno = 3004
def __init__(self, message=None, **kw):
if message is None:
format = ungettext(
"command '%(name)s' takes at most %(count)d argument",
"command '%(name)s' takes at most %(count)d arguments",
kw['count']
)
else:
format = None
InvocationError.__init__(self, format, message, **kw)
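# Illustrative only: the ungettext() call above selects the singular or plural
# format string based on kw['count'], so with the untranslated defaults:
#
#   >>> str(MaxArgumentError(name='user_show', count=1))
#   "command 'user_show' takes at most 1 argument"
#   >>> str(MaxArgumentError(name='user_add', count=2))
#   "command 'user_add' takes at most 2 arguments"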
class OptionError(InvocationError):
"""
**3005** Raised when a command is called with unknown options.
"""
errno = 3005
class OverlapError(InvocationError):
"""
**3006** Raised when arguments and options overlap.
For example:
>>> raise OverlapError(names=['givenname', 'login'])
Traceback (most recent call last):
...
OverlapError: overlapping arguments and options: ['givenname', 'login']
"""
errno = 3006
format = _("overlapping arguments and options: %(names)s")
class RequirementError(InvocationError):
"""
**3007** Raised when a required parameter is not provided.
For example:
>>> raise RequirementError(name='givenname')
Traceback (most recent call last):
...
RequirementError: 'givenname' is required
"""
errno = 3007
format = _("'%(name)s' is required")
class ConversionError(InvocationError):
"""
**3008** Raised when parameter value can't be converted to correct type.
For example:
>>> raise ConversionError(name='age', error=_(u'must be an integer'))
Traceback (most recent call last):
...
ConversionError: invalid 'age': must be an integer
"""
errno = 3008
format = _("invalid '%(name)s': %(error)s")
class ValidationError(InvocationError):
"""
**3009** Raised when a parameter value fails a validation rule.
For example:
>>> raise ValidationError(name='sn', error=_(u'can be at most 128 characters'))
Traceback (most recent call last):
...
ValidationError: invalid 'sn': can be at most 128 characters
"""
errno = 3009
format = _("invalid '%(name)s': %(error)s")
class NoSuchNamespaceError(InvocationError):
"""
**3010** Raised when an unknown namespace is requested.
For example:
>>> raise NoSuchNamespaceError(name='Plugins')
Traceback (most recent call last):
...
NoSuchNamespaceError: api has no such namespace: 'Plugins'
"""
errno = 3010
format = _("api has no such namespace: '%(name)s'")
class PasswordMismatch(InvocationError):
"""
    **3011** Raised when the password and password confirmation don't match.
"""
errno = 3011
format = _('Passwords do not match')
class NotImplementedError(InvocationError):
"""
    **3012** Raised when a function hasn't been implemented.
"""
errno = 3012
format = _('Command not implemented')
class NotConfiguredError(InvocationError):
"""
    **3013** Raised when there is no client configuration
"""
errno = 3013
format = _('Client is not configured. Run ipa-client-install.')
class PromptFailed(InvocationError):
"""
    **3014** Raised when an interactive prompt fails.
"""
errno = 3014
format = _('Could not get %(name)s interactively')
class DeprecationError(InvocationError):
"""
    **3015** Raised when a command has been deprecated
For example:
>>> raise DeprecationError(name='hbacrule_add_sourcehost')
Traceback (most recent call last):
...
DeprecationError: Command 'hbacrule_add_sourcehost' has been deprecated
"""
errno = 3015
format = _("Command '%(name)s' has been deprecated")
class NotAForestRootError(InvocationError):
"""
**3016** Raised when an attempt to establish trust is done against non-root domain
Forest root domain has the same name as the forest itself
For example:
>>> raise NotAForestRootError(forest='example.test', domain='jointops.test')
Traceback (most recent call last):
...
NotAForestRootError: Domain 'jointops.test' is not a root domain for forest 'example.test'
"""
errno = 3016
format = _("Domain '%(domain)s' is not a root domain for forest '%(forest)s'")
##############################################################################
# 4000 - 4999: Execution errors
class ExecutionError(PublicError):
"""
**4000** Base class for execution errors (*4000 - 4999*).
"""
errno = 4000
class NotFound(ExecutionError):
"""
**4001** Raised when an entry is not found.
For example:
>>> raise NotFound(reason='no such user')
Traceback (most recent call last):
...
NotFound: no such user
"""
errno = 4001
rval = 2
format = _('%(reason)s')
class DuplicateEntry(ExecutionError):
"""
**4002** Raised when an entry already exists.
For example:
>>> raise DuplicateEntry
Traceback (most recent call last):
...
DuplicateEntry: This entry already exists
"""
errno = 4002
format = _('This entry already exists')
class HostService(ExecutionError):
"""
**4003** Raised when a host service principal is requested
For example:
>>> raise HostService
Traceback (most recent call last):
...
HostService: You must enroll a host in order to create a host service
"""
errno = 4003
format = _('You must enroll a host in order to create a host service')
class MalformedServicePrincipal(ExecutionError):
"""
**4004** Raised when a service principal is not of the form: service/fully-qualified host name
For example:
>>> raise MalformedServicePrincipal(reason=_('missing service'))
Traceback (most recent call last):
...
MalformedServicePrincipal: Service principal is not of the form: service/fully-qualified host name: missing service
"""
errno = 4004
format = _('Service principal is not of the form: service/fully-qualified host name: %(reason)s')
class RealmMismatch(ExecutionError):
"""
**4005** Raised when the requested realm does not match the IPA realm
For example:
>>> raise RealmMismatch
Traceback (most recent call last):
...
RealmMismatch: The realm for the principal does not match the realm for this IPA server
"""
errno = 4005
format = _('The realm for the principal does not match the realm for this IPA server')
class RequiresRoot(ExecutionError):
"""
**4006** Raised when a command requires the unix super-user to run
For example:
>>> raise RequiresRoot
Traceback (most recent call last):
...
RequiresRoot: This command requires root access
"""
errno = 4006
format = _('This command requires root access')
class AlreadyPosixGroup(ExecutionError):
"""
**4007** Raised when a group is already a posix group
For example:
>>> raise AlreadyPosixGroup
Traceback (most recent call last):
...
AlreadyPosixGroup: This is already a posix group
"""
errno = 4007
format = _('This is already a posix group')
class MalformedUserPrincipal(ExecutionError):
"""
**4008** Raised when a user principal is not of the form: user@REALM
For example:
>>> raise MalformedUserPrincipal(principal='jsmith@@EXAMPLE.COM')
Traceback (most recent call last):
...
MalformedUserPrincipal: Principal is not of the form user@REALM: 'jsmith@@EXAMPLE.COM'
"""
errno = 4008
format = _("Principal is not of the form user@REALM: '%(principal)s'")
class AlreadyActive(ExecutionError):
"""
**4009** Raised when an entry is made active that is already active
For example:
>>> raise AlreadyActive()
Traceback (most recent call last):
...
AlreadyActive: This entry is already enabled
"""
errno = 4009
format = _('This entry is already enabled')
class AlreadyInactive(ExecutionError):
"""
**4010** Raised when an entry is made inactive that is already inactive
For example:
>>> raise AlreadyInactive()
Traceback (most recent call last):
...
AlreadyInactive: This entry is already disabled
"""
errno = 4010
format = _('This entry is already disabled')
class HasNSAccountLock(ExecutionError):
"""
**4011** Raised when an entry has the nsAccountLock attribute set
For example:
>>> raise HasNSAccountLock()
Traceback (most recent call last):
...
HasNSAccountLock: This entry cannot be enabled or disabled
"""
errno = 4011
format = _('This entry cannot be enabled or disabled')
class NotGroupMember(ExecutionError):
"""
    **4012** Raised when an attempt is made to remove a non-member from a group
For example:
>>> raise NotGroupMember()
Traceback (most recent call last):
...
NotGroupMember: This entry is not a member
"""
errno = 4012
format = _('This entry is not a member')
class RecursiveGroup(ExecutionError):
"""
**4013** Raised when a group is added as a member of itself
For example:
>>> raise RecursiveGroup()
Traceback (most recent call last):
...
RecursiveGroup: A group may not be a member of itself
"""
errno = 4013
format = _('A group may not be a member of itself')
class AlreadyGroupMember(ExecutionError):
"""
    **4014** Raised when an attempt is made to re-add an existing member to a group
For example:
>>> raise AlreadyGroupMember()
Traceback (most recent call last):
...
AlreadyGroupMember: This entry is already a member
"""
errno = 4014
format = _('This entry is already a member')
class Base64DecodeError(ExecutionError):
"""
    **4015** Raised when a base64-encoded blob cannot be decoded
For example:
>>> raise Base64DecodeError(reason=_('Incorrect padding'))
Traceback (most recent call last):
...
Base64DecodeError: Base64 decoding failed: Incorrect padding
"""
errno = 4015
format = _('Base64 decoding failed: %(reason)s')
class RemoteRetrieveError(ExecutionError):
"""
**4016** Raised when retrieving data from a remote server fails
For example:
>>> raise RemoteRetrieveError(reason=_("Failed to get certificate chain."))
Traceback (most recent call last):
...
RemoteRetrieveError: Failed to get certificate chain.
"""
errno = 4016
format = _('%(reason)s')
class SameGroupError(ExecutionError):
"""
**4017** Raised when adding a group as a member of itself
For example:
>>> raise SameGroupError()
Traceback (most recent call last):
...
SameGroupError: A group may not be added as a member of itself
"""
errno = 4017
format = _('A group may not be added as a member of itself')
class DefaultGroupError(ExecutionError):
"""
**4018** Raised when removing the default user group
For example:
>>> raise DefaultGroupError()
Traceback (most recent call last):
...
DefaultGroupError: The default users group cannot be removed
"""
errno = 4018
format = _('The default users group cannot be removed')
class ManagedGroupError(ExecutionError):
"""
**4020** Raised when a managed group is deleted
For example:
>>> raise ManagedGroupError()
Traceback (most recent call last):
...
ManagedGroupError: Deleting a managed group is not allowed. It must be detached first.
"""
errno = 4020
format = _('Deleting a managed group is not allowed. It must be detached first.')
class ManagedPolicyError(ExecutionError):
"""
**4021** Raised when password policy is assigned to a managed group
For example:
>>> raise ManagedPolicyError()
Traceback (most recent call last):
...
ManagedPolicyError: A managed group cannot have a password policy.
"""
errno = 4021
format = _('A managed group cannot have a password policy.')
class FileError(ExecutionError):
"""
**4022** Errors when dealing with files
For example:
>>> raise FileError(reason=_("cannot write file \'test\'"))
Traceback (most recent call last):
...
FileError: cannot write file 'test'
"""
errno = 4022
format = _('%(reason)s')
class NoCertificateError(ExecutionError):
"""
**4023** Raised when trying to retrieve a certificate that doesn't exist.
For example:
>>> raise NoCertificateError(entry='ipa.example.com')
Traceback (most recent call last):
...
NoCertificateError: 'ipa.example.com' doesn't have a certificate.
"""
errno = 4023
format = _('\'%(entry)s\' doesn\'t have a certificate.')
class ManagedGroupExistsError(ExecutionError):
"""
    **4024** Raised when adding a user whose managed (private) group already exists
For example:
>>> raise ManagedGroupExistsError(group=u'engineering')
Traceback (most recent call last):
...
ManagedGroupExistsError: Unable to create private group. A group 'engineering' already exists.
"""
errno = 4024
format = _('Unable to create private group. A group \'%(group)s\' already exists.')
class ReverseMemberError(ExecutionError):
"""
**4025** Raised when verifying that all reverse members have been added or removed.
For example:
>>> raise ReverseMemberError(verb=_('added'), exc=_("Group 'foo' not found."))
Traceback (most recent call last):
...
ReverseMemberError: A problem was encountered when verifying that all members were added: Group 'foo' not found.
"""
errno = 4025
format = _('A problem was encountered when verifying that all members were %(verb)s: %(exc)s')
class AttrValueNotFound(ExecutionError):
"""
**4026** Raised when an Attribute/Value pair is not found.
For example:
>>> raise AttrValueNotFound(attr='ipasudoopt', value='authenticate')
Traceback (most recent call last):
...
AttrValueNotFound: ipasudoopt does not contain 'authenticate'
"""
errno = 4026
rval = 1
format = _('%(attr)s does not contain \'%(value)s\'')
class SingleMatchExpected(ExecutionError):
"""
**4027** Raised when a search should return a single match
For example:
>>> raise SingleMatchExpected(found=9)
Traceback (most recent call last):
...
SingleMatchExpected: The search criteria was not specific enough. Expected 1 and found 9.
"""
errno = 4027
rval = 1
format = _('The search criteria was not specific enough. Expected 1 and found %(found)d.')
class AlreadyExternalGroup(ExecutionError):
"""
**4028** Raised when a group is already an external member group
For example:
>>> raise AlreadyExternalGroup
Traceback (most recent call last):
...
AlreadyExternalGroup: This group already allows external members
"""
errno = 4028
format = _('This group already allows external members')
class ExternalGroupViolation(ExecutionError):
"""
**4029** Raised when a group is already an external member group
    and an attempt is made to use it as a posix group
For example:
>>> raise ExternalGroupViolation
Traceback (most recent call last):
...
ExternalGroupViolation: This group cannot be posix because it is external
"""
errno = 4029
format = _('This group cannot be posix because it is external')
class PosixGroupViolation(ExecutionError):
"""
**4030** Raised when a group is already a posix group
    and cannot be converted to an external group
For example:
>>> raise PosixGroupViolation
Traceback (most recent call last):
...
PosixGroupViolation: This is already a posix group and cannot be converted to external one
"""
errno = 4030
format = _('This is already a posix group and cannot be converted to external one')
class EmptyResult(NotFound):
"""
**4031** Raised when a LDAP search returned no results.
For example:
>>> raise EmptyResult(reason='no matching entry found')
Traceback (most recent call last):
...
EmptyResult: no matching entry found
"""
errno = 4031
class InvalidDomainLevelError(ExecutionError):
"""
    **4032** Raised when an operation could not be completed due to an invalid
    domain level.
For example:
>>> raise InvalidDomainLevelError(reason='feature requires domain level 4')
Traceback (most recent call last):
...
InvalidDomainLevelError: feature requires domain level 4
"""
errno = 4032
format = _('%(reason)s')
class ServerRemovalError(ExecutionError):
"""
**4033** Raised when a removal of IPA server from managed topology fails
For example:
>>> raise ServerRemovalError(reason='Removal disconnects topology')
Traceback (most recent call last):
...
ServerRemovalError: Server removal aborted: Removal disconnects topology
"""
errno = 4033
format = _('Server removal aborted: %(reason)s.')
class OperationNotSupportedForPrincipalType(ExecutionError):
"""
**4034** Raised when an operation is not supported for a principal type
"""
errno = 4034
format = _(
'%(operation)s is not supported for %(principal_type)s principals')
class HTTPRequestError(RemoteRetrieveError):
"""
**4035** Raised when an HTTP request fails. Includes the response
status in the ``status`` attribute.
"""
errno = 4035
format = _('Request failed with status %(status)s: %(reason)s')
class RedundantMappingRule(SingleMatchExpected):
"""
**4036** Raised when more than one rule in a CSR generation ruleset matches
a particular helper.
For example:
>>> raise RedundantMappingRule(ruleset='syntaxSubject', helper='certutil')
Traceback (most recent call last):
...
RedundantMappingRule: Mapping ruleset "syntaxSubject" has more than one
rule for the certutil helper.
"""
errno = 4036
format = _('Mapping ruleset "%(ruleset)s" has more than one rule for the'
' %(helper)s helper')
class CSRTemplateError(ExecutionError):
"""
**4037** Raised when evaluation of a CSR generation template fails
"""
errno = 4037
format = _('%(reason)s')
class BuiltinError(ExecutionError):
"""
**4100** Base class for builtin execution errors (*4100 - 4199*).
"""
errno = 4100
class HelpError(BuiltinError):
"""
**4101** Raised when requesting help for an unknown topic.
For example:
>>> raise HelpError(topic='newfeature')
Traceback (most recent call last):
...
HelpError: no command nor help topic 'newfeature'
"""
errno = 4101
format = _("no command nor help topic '%(topic)s'")
class LDAPError(ExecutionError):
"""
**4200** Base class for LDAP execution errors (*4200 - 4299*).
"""
errno = 4200
class MidairCollision(ExecutionError):
"""
**4201** Raised when a change collides with another change
For example:
>>> raise MidairCollision()
Traceback (most recent call last):
...
MidairCollision: change collided with another change
"""
errno = 4201
format = _('change collided with another change')
class EmptyModlist(ExecutionError):
"""
**4202** Raised when an LDAP update makes no changes
For example:
>>> raise EmptyModlist()
Traceback (most recent call last):
...
EmptyModlist: no modifications to be performed
"""
errno = 4202
format = _('no modifications to be performed')
class DatabaseError(ExecutionError):
"""
**4203** Raised when an LDAP error is not otherwise handled
For example:
>>> raise DatabaseError(desc=_("Can't contact LDAP server"), info=_('Info goes here'))
Traceback (most recent call last):
...
DatabaseError: Can't contact LDAP server: Info goes here
"""
errno = 4203
format = _('%(desc)s: %(info)s')
class LimitsExceeded(ExecutionError):
"""
**4204** Raised when search limits are exceeded.
For example:
>>> raise LimitsExceeded()
Traceback (most recent call last):
...
LimitsExceeded: limits exceeded for this query
"""
errno = 4204
format = _('limits exceeded for this query')
class ObjectclassViolation(ExecutionError):
"""
**4205** Raised when an entry is missing a required attribute or objectclass
For example:
>>> raise ObjectclassViolation(info=_('attribute "krbPrincipalName" not allowed'))
Traceback (most recent call last):
...
ObjectclassViolation: attribute "krbPrincipalName" not allowed
"""
errno = 4205
format = _('%(info)s')
class NotAllowedOnRDN(ExecutionError):
"""
**4206** Raised when an RDN value is modified.
For example:
>>> raise NotAllowedOnRDN()
Traceback (most recent call last):
...
NotAllowedOnRDN: modifying primary key is not allowed
"""
errno = 4206
format = _('modifying primary key is not allowed')
class OnlyOneValueAllowed(ExecutionError):
"""
**4207** Raised when trying to set more than one value to single-value attributes
For example:
>> raise OnlyOneValueAllowed(attr='ipasearchtimelimit')
Traceback (most recent call last):
...
OnlyOneValueAllowed: ipasearchtimelimit: Only one value allowed.
"""
errno = 4207
format = _('%(attr)s: Only one value allowed.')
class InvalidSyntax(ExecutionError):
"""
    **4208** Raised when a value does not match the required syntax
For example:
>> raise InvalidSyntax(attr='ipahomesrootdir')
Traceback (most recent call last):
...
InvalidSyntax: ipahomesrootdir: Invalid syntax
"""
errno = 4208
format = _('%(attr)s: Invalid syntax.')
class BadSearchFilter(ExecutionError):
"""
**4209** Raised when an invalid LDAP search filter is used
For example:
>>> raise BadSearchFilter(info=_('invalid syntax'))
Traceback (most recent call last):
...
BadSearchFilter: Bad search filter invalid syntax
"""
errno = 4209
format = _('Bad search filter %(info)s')
class NotAllowedOnNonLeaf(ExecutionError):
"""
    **4210** Raised when an operation is not allowed on a non-leaf entry
For example:
>>> raise NotAllowedOnNonLeaf()
Traceback (most recent call last):
...
NotAllowedOnNonLeaf: Not allowed on non-leaf entry
"""
errno = 4210
format = _('Not allowed on non-leaf entry')
class DatabaseTimeout(DatabaseError):
"""
**4211** Raised when an LDAP call times out
For example:
>>> raise DatabaseTimeout()
Traceback (most recent call last):
...
DatabaseTimeout: LDAP timeout
"""
errno = 4211
format = _('LDAP timeout')
class TaskTimeout(DatabaseError):
"""
**4213** Raised when an LDAP task times out
For example:
>>> raise TaskTimeout(task='Automember', task_dn='')
Traceback (most recent call last):
...
TaskTimeout: Automember LDAP task timeout, Task DN: ''
"""
errno = 4213
format = _("%(task)s LDAP task timeout, Task DN: '%(task_dn)s'")
class TimeLimitExceeded(LimitsExceeded):
"""
**4214** Raised when time limit for the operation is exceeded.
"""
errno = 4214
format = _('Configured time limit exceeded')
class SizeLimitExceeded(LimitsExceeded):
"""
**4215** Raised when size limit for the operation is exceeded.
"""
errno = 4215
format = _('Configured size limit exceeded')
class AdminLimitExceeded(LimitsExceeded):
"""
    **4216** Raised when a server limit imposed by an administrative authority
    is exceeded
"""
errno = 4216
format = _('Configured administrative server limit exceeded')
class CertificateError(ExecutionError):
"""
**4300** Base class for Certificate execution errors (*4300 - 4399*).
"""
errno = 4300
class CertificateOperationError(CertificateError):
"""
**4301** Raised when a certificate operation cannot be completed
For example:
>>> raise CertificateOperationError(error=_(u'bad serial number'))
Traceback (most recent call last):
...
CertificateOperationError: Certificate operation cannot be completed: bad serial number
"""
errno = 4301
format = _('Certificate operation cannot be completed: %(error)s')
class CertificateFormatError(CertificateError):
"""
**4302** Raised when a certificate is badly formatted
For example:
    >>> raise CertificateFormatError(error=_(u'improperly formatted DER-encoded certificate'))
    Traceback (most recent call last):
    ...
    CertificateFormatError: Certificate format error: improperly formatted DER-encoded certificate
"""
errno = 4302
format = _('Certificate format error: %(error)s')
class MutuallyExclusiveError(ExecutionError):
"""
    **4303** Raised when an operation would result in setting two attributes which are mutually exclusive.
For example:
>>> raise MutuallyExclusiveError(reason=_(u'hosts may not be added when hostcategory=all'))
Traceback (most recent call last):
...
MutuallyExclusiveError: hosts may not be added when hostcategory=all
"""
errno = 4303
format = _('%(reason)s')
class NonFatalError(ExecutionError):
"""
**4304** Raised when part of an operation succeeds and the part that failed isn't critical.
For example:
>>> raise NonFatalError(reason=_(u'The host was added but the DNS update failed'))
Traceback (most recent call last):
...
NonFatalError: The host was added but the DNS update failed
"""
errno = 4304
format = _('%(reason)s')
class AlreadyRegisteredError(ExecutionError):
"""
**4305** Raised when registering a user that is already registered.
For example:
>>> raise AlreadyRegisteredError()
Traceback (most recent call last):
...
AlreadyRegisteredError: Already registered
"""
errno = 4305
format = _('Already registered')
class NotRegisteredError(ExecutionError):
"""
**4306** Raised when not registered and a registration is required
For example:
>>> raise NotRegisteredError()
Traceback (most recent call last):
...
NotRegisteredError: Not registered yet
"""
errno = 4306
format = _('Not registered yet')
class DependentEntry(ExecutionError):
"""
**4307** Raised when an entry being deleted has dependencies
For example:
>>> raise DependentEntry(label=u'SELinux User Map', key=u'test', dependent=u'test1')
Traceback (most recent call last):
...
DependentEntry: test cannot be deleted because SELinux User Map test1 requires it
"""
errno = 4307
format = _('%(key)s cannot be deleted because %(label)s %(dependent)s requires it')
class LastMemberError(ExecutionError):
"""
    **4308** Raised when an entry being deleted or disabled is the last member of a protected group
For example:
>>> raise LastMemberError(key=u'admin', label=u'group', container=u'admins')
Traceback (most recent call last):
...
LastMemberError: admin cannot be deleted or disabled because it is the last member of group admins
"""
errno = 4308
format = _('%(key)s cannot be deleted or disabled because it is the last member of %(label)s %(container)s')
class ProtectedEntryError(ExecutionError):
"""
    **4309** Raised when a protected entry is deleted or modified in a forbidden way
For example:
>>> raise ProtectedEntryError(label=u'group', key=u'admins', reason=_(u'privileged group'))
Traceback (most recent call last):
...
ProtectedEntryError: group admins cannot be deleted/modified: privileged group
"""
errno = 4309
format = _('%(label)s %(key)s cannot be deleted/modified: %(reason)s')
class CertificateInvalidError(CertificateError):
"""
**4310** Raised when a certificate is not valid
For example:
>>> raise CertificateInvalidError(name=_(u'CA'))
Traceback (most recent call last):
...
CertificateInvalidError: CA certificate is not valid
"""
errno = 4310
format = _('%(name)s certificate is not valid')
class SchemaUpToDate(ExecutionError):
"""
    **4311** Raised by the server when the client asks for metadata but
    already has the current version. The exception's 'fingerprint' attribute
    identifies the schema version to use. The 'ttl' attribute specifies how
    long (in seconds) the client should wait before checking for a schema update.
For example:
>>> raise SchemaUpToDate(fingerprint=u'deadbeef', ttl=3600)
Traceback (most recent call last):
...
SchemaUpToDate: Schema is up to date (FP 'deadbeef', TTL 3600 s)
"""
errno = 4311
format = _("Schema is up to date (FP '%(fingerprint)s', TTL %(ttl)s s)")
class DNSError(ExecutionError):
"""
**4400** Base class for DNS execution errors (*4400 - 4499*).
These are typically wrapper exceptions around dns.exception.DNSException.
"""
errno = 4400
class DNSNotARecordError(DNSError):
"""
**4019** Raised when a hostname is not a DNS A/AAAA record
For example:
>>> raise DNSNotARecordError(hostname='x')
Traceback (most recent call last):
...
DNSNotARecordError: Host 'x' does not have corresponding DNS A/AAAA record
"""
errno = 4019 # this exception was defined before DNSError
format = _(
'Host \'%(hostname)s\' does not have corresponding DNS A/AAAA record')
class DNSDataMismatch(DNSError):
"""
    **4212** Raised when a DNS query did not return the expected answer
    within the configured time limit.
For example:
>>> raise DNSDataMismatch(expected="zone3.test. 86400 IN A 192.0.2.1", \
got="zone3.test. 86400 IN A 192.168.1.1")
Traceback (most recent call last):
...
DNSDataMismatch: DNS check failed: Expected {zone3.test. 86400 IN A 192.0.2.1} got {zone3.test. 86400 IN A 192.168.1.1}
"""
errno = 4212 # this exception was defined before DNSError
format = _('DNS check failed: Expected {%(expected)s} got {%(got)s}')
class DNSResolverError(DNSError):
"""
**4401** Wrapper around dns.exception.DNSException.
    Raised when an error occurred in dns.resolver.
For example:
>>> raise DNSResolverError(exception=ValueError("this is bad"))
Traceback (most recent call last):
...
DNSResolverError: this is bad
"""
errno = 4401
format = _('%(exception)s')
class TrustError(ExecutionError):
"""
**4500** Base class for trust execution errors (*4500 - 4599*).
These are typically instantiated when there is an error in establishing or
modifying a trust to another forest.
"""
errno = 4500
class TrustTopologyConflictError(TrustError):
"""
**4501** Raised when an attempt to establish trust fails with a topology
conflict against another forest the target forest trusts
For example:
>>> raise TrustTopologyConflictError(forest='example.test',
conflict='my.ad.test',
domains=['ad.test'])
Traceback (most recent call last):
...
TrustTopologyConflictError: Forest 'example.test' has existing trust to forest(s) ['ad.test'] which prevents a trust to 'my.ad.test'
"""
errno = 4501
format = _("Forest '%(forest)s' has existing trust to forest(s) "
"%(domains)s which prevents a trust to '%(conflict)s'")
##############################################################################
# 5000 - 5999: Generic errors
class GenericError(PublicError):
"""
**5000** Base class for errors that don't fit elsewhere (*5000 - 5999*).
"""
errno = 5000
public_errors = tuple(sorted(
messages.iter_messages(globals(), PublicError), key=lambda E: E.errno))
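# Example (a sketch, not part of this module's API): the sorted public_errors
# tuple makes it easy to map a wire-level errno back to its exception class,
# e.g. when decoding an RPC fault on the client side:
#
#   errno_to_error = dict((E.errno, E) for E in public_errors)
#   # errno_to_error[4001] is NotFound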
if __name__ == '__main__':
messages.print_report('public errors', public_errors)
| redhatrises/freeipa | ipalib/errors.py | Python | gpl-3.0 | 50,733 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import mock
import unittest
import json
from hpOneView.common import make_server_dict
from hpOneView.common import make_ServerProfileTemplateV1
from hpOneView.common import uri
from hpOneView.common import make_FirmwareSettingsV3
from hpOneView.common import make_ProfileConnectionV4
from hpOneView.connection import connection
from hpOneView.servers import servers
from hpOneView.activity import activity
class ServersTest(unittest.TestCase):
def setUp(self):
super(ServersTest, self).setUp()
self.host = 'http://1.2.3.4'
self.connection = connection(self.host)
self.servers = servers(self.connection)
self.activity = activity(self.connection)
@mock.patch.object(connection, 'get')
def test_get_connections(self, mock_get):
# Testing with filter
filter = '?start=0&count=10'
self.servers.get_connections(filter=filter)
mock_get.assert_called_once_with(uri['conn'] + filter)
@mock.patch.object(connection, 'get')
def test_get_connection(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_connection(settings_test)
mock_get.assert_called_once_with(settings_test['uri'])
@mock.patch.object(connection, 'get')
def test_get_utilization(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_utilization(settings_test)
mock_get.assert_called_once_with(settings_test['uri'] + '/utilization')
@mock.patch.object(connection, 'get')
def test_get_bios(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_bios(settings_test)
mock_get.assert_called_once_with(settings_test['uri'] + '/bios')
@mock.patch.object(connection, 'get')
def test_get_envconf(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_env_conf(settings_test)
mock_get.assert_called_once_with(settings_test['uri'] + '/environmentalConfiguration')
@mock.patch.object(connection, 'get')
def test_get_ilo(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_ilo_sso_url(settings_test)
mock_get.assert_called_once_with(settings_test['uri'] + '/iloSsoUrl')
@mock.patch.object(connection, 'get')
def test_get_server_schema(self, mock_get):
self.servers.get_server_schema()
mock_get.assert_called_once_with(uri['servers'] + '/schema')
@mock.patch.object(connection, 'get')
def test_get_java_remote(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_java_remote_console_url(settings_test)
mock_get.assert_called_once_with(settings_test['uri'] + '/javaRemoteConsoleUrl')
@mock.patch.object(connection, 'get')
def test_get_remote_console(self, mock_get):
settings_test = {"uri": "/rest/servers/9b1380ee-a0bb-4388-af35-2c5a05e84c47"}
self.servers.get_remote_console_url(settings_test)
mock_get.assert_called_once_with(settings_test['uri'] + '/remoteConsoleUrl')
@mock.patch.object(connection, 'post')
@mock.patch.object(activity, 'wait4task')
def test_create_server_profile_template(self, mock_wait4task, mock_post):
name = 'spt'
description = 'description'
sht = '/rest/server-hardware-types/1234'
eg = '/rest/enclosure-groups/1'
affinity = 'Bay'
hide_flex = False
fw_settings = make_FirmwareSettingsV3('/rest/firmware-drivers/SPP2016020_2015', 'FirmwareOnly', True, False)
# build the V1 SPT
spt = make_ServerProfileTemplateV1(name, description, None, sht, eg, affinity, hide_flex, None, fw_settings)
        # build an OV task for the create SPT operation
task = self.build_spt_add_task_resource()
# return the task when waiting for completion
mock_post.return_value = [task, None]
self.servers.create_server_profile_template(name, description, None, sht, eg, affinity, hide_flex, None,
fw_settings)
mock_post.assert_called_once_with(uri['profile-templates'], spt)
@mock.patch.object(connection, 'post')
@mock.patch.object(activity, 'wait4task')
def test_create_server_profile_template_with_connections(self, mock_wait4task, mock_post):
name = 'spt'
description = 'description'
sht = '/rest/server-hardware-types/1234'
eg = '/rest/enclosure-groups/1'
affinity = 'Bay'
hide_flex = True
fw_settings = make_FirmwareSettingsV3('/rest/firmware-drivers/SPP2016020_2015', 'FirmwareOnly', True, False)
connections = make_ProfileConnectionV4(1, "eth1", '/rest/ethernet-networks/17f5e012', True, None, 'Ethernet',
None, None, 'Auto', '1000', None, None, None)
# build the V1 SPT
spt = make_ServerProfileTemplateV1(name, description, None, sht, eg, affinity, hide_flex, connections,
fw_settings)
        # build an OV task for the create SPT operation
task = self.build_spt_add_task_resource()
# return the task when waiting for completion
mock_post.return_value = [task, None]
self.servers.create_server_profile_template(name, description, None, sht, eg, affinity, hide_flex, connections,
fw_settings)
mock_post.assert_called_once_with(uri['profile-templates'], spt)
@mock.patch.object(connection, 'post')
@mock.patch.object(activity, 'wait4task')
def test_add_server(self, mock_wait4task, mock_post):
# build the server to add
server = make_server_dict('hostname', 'username', 'password', False, 'OneView')
        # build an OV task for the server add operation
task = self.build_server_add_task_resource()
mock_post.return_value = [task, None]
# return the task when waiting for completion
mock_wait4task(task).return_value = task
self.servers.add_server(server)
mock_post.assert_called_once_with(uri['servers'], server)
# helper functions for building Task responses for server operations
def build_server_add_task_resource(self):
task = {
'type': 'TaskResourceV2',
'taskType': 'User',
'stateReason': None,
'associatedResource': {
'resourceUri': '/rest/server-hardware/31393736',
'resourceCategory': 'server-hardware',
'associationType': 'MANAGED_BY',
'resourceName': 'Encl1, bay 2'
},
'hidden': False,
'category': 'tasks',
'data': {
'EncUri': '/rest/enclosures/09SGH100X6J'
},
'percentComplete': 100,
'taskState': 'Complete',
'taskStatus': 'Add server: Encl1, bay 2.',
'taskErrors': [],
'parentTaskUri': '/rest/tasks/7A4F318D-AD78-4F68-879E-DF317E66008E',
'taskOutput': [],
'associatedTaskUri': None,
'completedSteps': 20,
'computedPercentComplete': 100,
'expectedDuration': 480,
'progressUpdates': [],
'totalSteps': 20,
'userInitiated': True,
'name': 'Add',
'owner': 'Administrator',
'eTag': '18',
'uri': '/rest/tasks/6BAFD214-A6CE-4D3A-9E77-C266444CE517'
}
return json.dumps(task)
def build_spt_add_task_resource(self):
task = {
'type': 'TaskResourceV2',
'taskType': 'User',
'stateReason': 'Completed',
'associatedResource': {
'resourceUri': '/rest/server-profile-templates/bc9b8b32',
'resourceCategory': 'server-profile-templates',
'associationType': 'MANAGED_BY',
'resourceName': ''
},
'hidden': False,
'category': 'tasks',
'data': None,
'percentComplete': 100,
'taskState': 'Completed',
'taskStatus': 'Created server profile template:',
'taskErrors': [],
'parentTaskUri': None,
'taskOutput': [],
'associatedTaskUri': None,
'completedSteps': 0,
'computedPercentComplete': 100,
'expectedDuration': 0,
'progressUpdates': [],
'totalSteps': 0,
'userInitiated': False,
'name': 'Create ',
'owner': 'Administrator',
'eTag': '3',
'uri': '/rest/tasks/339A4D47-757B-4425-8495-6ECFCFEF88B5'
}
return json.dumps(task)
if __name__ == '__main__':
unittest.main()
| andreadean5/python-hpOneView | tests/unit/test_servers.py | Python | mit | 10,216 |
from cleo.styles import OutputStyle
from sdoc.sdoc2.formatter.Formatter import Formatter
class HtmlFormatter(Formatter):
"""
Abstract parent class for all formatters for generating the output of nodes in HTML.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, io: OutputStyle, parent: Formatter):
"""
Object constructor.
:param OutputStyle io: The IO object.
:param Formatter parent: The formatter for the parent node.
"""
Formatter.__init__(self, io, parent)
# ----------------------------------------------------------------------------------------------------------------------
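# A hedged sketch of how a concrete HTML formatter might build on this class.
# The generate() signature and node attributes below are assumptions made only
# for illustration; the real hook methods are dictated by the sdoc Formatter
# base class.
#
#   class ParagraphHtmlFormatter(HtmlFormatter):
#       def generate(self, node, file):
#           file.write('<p>{}</p>'.format(node.argument))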
| SDoc/py-sdoc | sdoc/sdoc2/formatter/html/HtmlFormatter.py | Python | mit | 745 |
# encoding: utf-8
# Copyright 2008 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''
EDRN RDF Service: unit and functional tests.
''' | EDRN/DMCCBackend | src/edrn.rdf/edrn/rdf/tests/__init__.py | Python | apache-2.0 | 189 |
# Declaring constants for use throughout the program
STATE_AUTH = 'AUTH'
STATE_CHAT = 'CHAT'
SIGN_UP = 'SIGN_UP'
LOGIN = 'REGISTER'
EXIT = 'EXIT'
EXIT_COMMAND = '.quit' # This is what the user types when he/she wants to quit
DB_URL = 'storage.db'
PORT = 1236
LOG_FILE_URL = 'chatserver.log'
TEST_USER_FILE = 'sim_users.json'
TEST_MESSAGES = ["Sample Message 1",
"Sample Message 2",
"Sample Message 3",
"Sample Message 4",
"Sample Message 5"]
| srvasn/basic-chat-server | constants.py | Python | gpl-3.0 | 513 |
# -*- coding: utf-8 -*-
'''
@author: arcra
'''
import time, threading, os
import Tkinter as tk
import argparse
import clipsFunctions
from clipsFunctions import clips, _clipsLock
import pyrobotics.BB as BB
from pyrobotics.messages import Command, Response
import GUI
from BBFunctions import assertQueue, ResponseReceived, CreateSharedVar, WriteSharedVar, SubscribeToSharedVar, RunCommand
defaultTimeout = 2000
defaultAttempts = 1
_sleepingLock = threading.Lock()
_sleeping = False
def setCmdTimer(t, cmd, cmdId):
t = threading.Thread(target=cmdTimerThread, args = (t, cmd, cmdId))
t.daemon = True
t.start()
return True
def cmdTimerThread(t, cmd, cmdId):
time.sleep(t/1000.0)
assertQueue.append('(BB_timer "{0}" {1})'.format(cmd, cmdId))
#clipsFunctions.Assert('(BB_timer "{0}" {1})'.format(cmd, cmdId))
def setTimer(t, sym):
t = threading.Thread(target=timerThread, args = (t, sym))
t.daemon = True
t.start()
return True
def timerThread(t, sym):
time.sleep(t/1000.0)
assertQueue.append('(BB_timer {0})'.format(sym))
#clipsFunctions.Assert('(BB_timer {0})'.format(sym))
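# Illustrative only: setTimer/setCmdTimer are registered as CLIPS-callable
# functions in Initialize() below, so rules can schedule facts from the CLIPS
# side, roughly like this (exact syntax depends on the wrappers in utils.clp):
#
#   (python-call setTimer 1000 wake_up)
#   ; about one second later the fact (BB_timer wake_up) is asserted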
def SendCommand(cmdName, params):
cmd = Command(cmdName, params)
BB.Send(cmd)
return cmd._id
def SendResponse(cmdName, cmd_id, result, response):
result = str(result).lower() not in ['false', '0']
r = Response(cmdName, result, response)
r._id = cmd_id
BB.Send(r)
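# Illustrative only: SendCommand/SendResponse are likewise exposed to CLIPS via
# RegisterPythonFunction in Initialize(), so a rule can answer a BlackBoard
# command roughly like this (assumed call form; see BB_interface.clp for the
# real wrappers):
#
#   (python-call SendResponse "answer_question" ?cmd-id 1 "the answer")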
def stop():
GUI._pausedLock.acquire()
GUI.gui.paused = True
GUI._pausedLock.release()
return True
def sleep(ms, sym):
t = threading.Thread(target=sleepingTimerThread, args = (ms, sym))
t.daemon = True
t.start()
return True
def sleepingTimerThread(t, sym):
    # Write the module-level flag; a plain assignment here would only create a
    # local variable and the sleep state would never be visible to mainLoop.
    global _sleeping
    _sleepingLock.acquire()
    _sleeping = True
    _sleepingLock.release()
    time.sleep(t/1000.0)
    _sleepingLock.acquire()
    _sleeping = False
    _sleepingLock.release()
def Initialize(params):
clips.Memory.Conserve = True
clips.Memory.EnvironmentErrorsEnabled = True
clips.SetExternalTraceback(True)
clips.DebugConfig.FactsWatched = params.watchfacts
clips.DebugConfig.GlobalsWatched = params.watchglobals
clips.DebugConfig.FunctionsWatched = params.watchfunctions
clips.DebugConfig.RulesWatched = params.watchrules
clips.RegisterPythonFunction(SendCommand)
clips.RegisterPythonFunction(SendResponse)
clips.RegisterPythonFunction(setCmdTimer)
clips.RegisterPythonFunction(setTimer)
clips.RegisterPythonFunction(CreateSharedVar)
clips.RegisterPythonFunction(WriteSharedVar)
clips.RegisterPythonFunction(SubscribeToSharedVar)
clips.RegisterPythonFunction(sleep)
clips.RegisterPythonFunction(stop)
clips.BuildGlobal('defaultTimeout', defaultTimeout)
clips.BuildGlobal('defaultAttempts', defaultAttempts)
filePath = os.path.dirname(os.path.abspath(__file__))
clips.BatchStar(os.path.join(filePath, 'CLIPS', 'utils.clp'))
clips.BatchStar(os.path.join(filePath, 'CLIPS', 'BB_interface.clp'))
clipsFunctions.PrintOutput()
GUI.use_gui = not params.nogui
if GUI.use_gui:
GUI.gui = GUI.clipsGUI()
else:
GUI.debug = params.debug
if params.file:
GUI.load_file(params.file)
BB.Initialize(params.port, functionMap = {'*':(RunCommand, True)}, asyncHandler = ResponseReceived)
print 'Waiting for BlackBoard to connect...'
BB.Start()
print 'BlackBoard connected!'
BB.SetReady()
print 'READY!'
def main():
parser = argparse.ArgumentParser(description="Runs an instance of BBCLIPS. (CLIPS interpreter embedded in python with BB communication.)")
parser.add_argument('-p', '--port', default = '2001', type=int, help='States the port number that this instance module should use.')
parser.add_argument('--nogui', default=False, action='store_const', const=True, help='Runs the program without the GUI.')
parser.add_argument('--debug', default=False, action='store_const', const=True, help='Show a CLIPS prompt as in an interactive CLIPS session.')
parser.add_argument('-n', '--steps', default=1, action='store', type=int, help='Number of steps to run when pressing enter on a debug session.')
parser.add_argument('-f', '--file', help='Specifies the file that should be loaded (mainly for nogui usage).')
watch_group = parser.add_argument_group('Watch options', 'Set the watch flags of the clips interpreter.')
watch_group.add_argument('--watchfunctions', '--wfunctions', '--wfunc', default=False, action='store_const', const=True, help='Enables the watch functions flag of the clips interpreter.')
watch_group.add_argument('--watchglobals', '--wglobals', '--wg', default=False, action='store_const', const=True, help='Enables the watch globals flag of the clips interpreter.')
watch_group.add_argument('--watchfacts', '--wfacts', '--wf', default=False, action='store_const', const=True, help='Enables the watch facts flag of the clips interpreter.')
watch_group.add_argument('--watchrules', '--wrules', '--wr', default=False, action='store_const', const=True, help='Enables the watch rules flag of the clips interpreter.')
log_group = parser.add_argument_group('Log options', 'Set the log level of the BBCLIPS module.')
log_group.add_argument('--log', default='ERROR', choices=['INFO', 'WARNING', 'ERROR'], help='Default is ERROR.')
args = parser.parse_args()
Initialize(args)
if args.nogui:
if args.debug:
s = raw_input('[CLIPS]>')
while s != '(exit)':
if s == '(facts)':
clips.PrintFacts()
elif s == '(rules)':
clips.PrintRules()
elif s == '(agenda)':
clips.PrintAgenda()
elif s == '':
assertEnqueuedFacts()
clipsFunctions.PrintOutput()
clipsFunctions.Run(args.steps)
clipsFunctions.PrintOutput()
else:
try:
_clipsLock.acquire()
#clips.SendCommand(s, True)
clips.Eval(s)
clipsFunctions.PrintOutput()
_clipsLock.release()
except:
print 'ERROR: Clips could not run the command.'
clipsFunctions.PrintOutput()
_clipsLock.release()
s = raw_input('[CLIPS]>')
else:
mainLoop()
else:
loop_thread = threading.Thread(target=mainLoop)
loop_thread.daemon = True
loop_thread.start()
tk.mainloop()
def assertEnqueuedFacts():
_clipsLock.acquire()
while True:
try:
f = assertQueue.popleft()
except:
break
asserted = False
while not asserted:
try:
clips.Assert(f)
asserted = True
except:
#print 'Fact: ' + str(f) + ' could not be asserted, trying again...'
pass
if not asserted:
time.sleep(50)
_clipsLock.release()
def mainLoop():
while True:
assertEnqueuedFacts()
_sleepingLock.acquire()
sleeping = _sleeping
_sleepingLock.release()
GUI._pausedLock.acquire()
paused = GUI.gui.paused
GUI._pausedLock.release()
if sleeping or paused or (GUI.use_gui and GUI.gui.runTimes):
clipsFunctions.PrintOutput()
continue
clipsFunctions.Run(2)
clipsFunctions.PrintOutput()
if __name__ == "__main__":
main()
| BioRoboticsUNAM/BBCLIPS | BBCLIPS.py | Python | mit | 7,860 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Patrick Charron
# Email : patrick.charron.pc@gmail.com
# Description : Top Informations Viewer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
MongoDB Driver
"""
from . import driver
from . import process
import pymongo
class MongoDBDriver(driver.Driver):
"""
MongoDB Driver class
"""
def __init__(self):
driver.Driver.__init__(self)
self._host = None
self._port = None
def tops(self):
"""
        Return the list of operations currently running on the MongoDB server.
"""
try:
mongodb_process = self._sql.admin['$cmd.sys.inprog'].find_one({'$all': True})
except:
            raise driver.DriverError("Could not retrieve the process list")
all_process = []
try:
for row in mongodb_process[u"inprog"]:
if row[u"active"]:
time = row[u"secs_running"]
else:
time = 0
if row[u"op"] == "insert":
state = "I"
elif row[u"op"] == "query":
state = "Q"
elif row[u"op"] == "update":
state = "U"
elif row[u"op"] == "remove":
state = "R"
elif row[u"op"] == "getmore":
state = "G"
elif row[u"op"] == "command":
                    state = "C"
                else:
                    state = "?"
p = process.Process(row["opid"], "", row[u"client"].split(':')[0], row[u"ns"].split(".")[0], state, time, str(row[u"query"]))
all_process.append(p)
except Exception as error:
            raise driver.DriverError(error)
        return all_process
def initialize(self):
"""Connect to the MongoDB server"""
try:
db = pymongo.MongoClient(host=self._host, port=self._port)
        except pymongo.errors.PyMongoError as e:
            raise driver.DriverError("Could not connect to the database server")
self._sql = db
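
# Illustrative usage sketch (not part of the original module). Setting _host
# and _port directly is an assumption made for demonstration; normally the
# base Driver class is responsible for configuring them.
#
#   drv = MongoDBDriver()
#   drv._host = "localhost"
#   drv._port = 27017
#   drv.initialize()
#   running_ops = drv.tops()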
| flyingcoconut/mytop | mytop/drivers/mongodb.py | Python | gpl-3.0 | 2,619 |
# -*- coding: utf-8 -*-
#
# A Base Codec Class for deciphering data that is requiring an external file
#
# Copyright (C) 2016 Chris Caron <lead2gold@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
import errno
from blist import sortedset
from os.path import isdir
from os.path import isfile
from os.path import exists
from os.path import join
from os.path import basename
from os.path import abspath
from os.path import expanduser
from os import X_OK
from os import access
from tempfile import mkdtemp
from newsreap.Utils import mkdir
from newsreap.NNTPContent import NNTPContent
from newsreap.NNTPArticle import NNTPArticle
from newsreap.NNTPBinaryContent import NNTPBinaryContent
from newsreap.NNTPSettings import DEFAULT_TMP_DIR
from newsreap.Utils import random_str
from newsreap.Utils import bytes_to_strsize
from newsreap.Utils import find
from os.path import splitext
# Logging
import logging
from newsreap.Logging import NEWSREAP_CODEC
logger = logging.getLogger(NEWSREAP_CODEC)
class CompressionLevel(object):
"""
Support general compression level settings so that the calling user doesn't
have to be aware of the different types supported by the actual executable.
"""
# Maximum Compression will be slower to use and generate the most i/o
# but will overall save the most disk space.
Maximum = u'+++'
# The average setting is what the actual executable being called would
    # have otherwise defaulted to. It's not necessarily the highest
# compression level, but it's not the worst either.
Average = u'~'
# This will cause larger files to be generated (thus taking up more disk
# space and posting space/time). However the file generation itself will
# be very fast (with respect to the other levels)
Minimum = u'---'
# Tuple of supported Compression Levels
COMPRESSION_LEVELS = (
CompressionLevel.Maximum,
CompressionLevel.Average,
CompressionLevel.Minimum,
)
class CodecFile(object):
"""
CodecFile compliments CodecBase by wrapping the codecs that can only
be accessed through an outside binary file located on the system.
"""
def __init__(self, work_dir=None, name=None, password=None,
level=CompressionLevel.Average, cpu_cores=None,
*args, **kwargs):
"""
        The work_dir identifies the directory to store our sessions in
until they can be properly handled.
"""
# If the password is set to None then it is presumed that
# you don't want to use it. Keep in mind that setting this
# to an empty string presumes that you want to set a blank
# password (but a password none the less)
self.password = password
# Stores the name to associate with the archive being encoded or
# decoded.
self.name = name
# The number of CPU cores should be set to whatever it is your
# workstation can handle. The more, the faster the processing will
# be.
# Linux users can do this:
# $> egrep '^processor' /proc/cpuinfo -c
# If you set this to None, then the default options are used; thus cpu
# core specifications (threading) are just simply not applied to the
# command
self.cpu_cores = cpu_cores
# Compression Level
self.level = level
if self.level not in COMPRESSION_LEVELS:
# Bad compression level specified
logger.error(
'Invalid CodecFile compression specified (%s)' % str(level),
)
raise AttributeError("Invalid compression level specified.")
if work_dir is None:
self.work_dir = DEFAULT_TMP_DIR
else:
self.work_dir = abspath(expanduser(work_dir))
if not isdir(self.work_dir):
# create directory
if mkdir(self.work_dir):
logger.info('Created directory: %s' % self.work_dir)
else:
logger.error('Failed to created directory: %s' % self.work_dir)
# Should not continue under this circumstance
raise IOError((
errno.EACCES,
'Failed to create directory: %s' % self.work_dir,
))
# Contains a list of paths to be archived
self.archive = set()
def add(self, path):
"""
Adds files, directories, NNTPContent() and NNTPArticle objects
to archive.
"""
_bcnt = len(self.archive)
self.archive |= self.get_paths(path)
return len(self.archive) > _bcnt
def clear(self):
"""
clears out all content added to our internal archive
"""
self.archive.clear()
def encode(self, content=None, *args, **kwargs):
"""
Takes a specified content (dir or file) and compresses it. If this
function is successful, it returns a set of NNTPBinaryContent()
objects that are 'not' detached. Which means if they go out of scope,
the compressed content will be lost.
If this function fails, or there is nothing to encode, the function
should return None.
the content passed into should be passed into the self.add() call
if it's not set to None otherwise. The content encoded is always
that of what is in the self.archive sortedset.
"""
        raise NotImplementedError(
            "CodecFile() inheriting class is required to implement encode()"
)
def decode(self, content, *args, **kwargs):
"""
content must be a path containing rar files or at the very least
NNTPContent() objects (or set of) containing rar files.
"""
        raise NotImplementedError(
            "CodecFile() inheriting class is required to implement decode()"
)
def can_exe(self, fpath):
"""
Can test if path exists and is executable
"""
if isinstance(fpath, basestring):
return isfile(fpath) and access(fpath, X_OK)
return False
def mkstemp(self, content=None, suffix='.tmp', prefix='_tmp_'):
"""
A wrapper to mkstemp that only handles reference to the filepath/name
itself. It creates a unique subdirectory that it generates the new
temporary file within that can be referenced.
If a content is specified, then the function parses out the directory
        in front of it and possibly a prefix at the end and swaps it with the
prefix specified. This is just an easier way of manipulating a
filename or directory name that was recently pulled from an
NNTPContent() object.
This function returns both the temporary directory created and the
temporary file prepared.
"""
# Create a temporary directory to work in
tmp_path = mkdtemp(prefix='_nr.codec-', dir=self.work_dir)
tmp_file = None
if isinstance(content, basestring):
tmp_file = join(
tmp_path,
'%s%s' % (basename(content), suffix,
))
elif isinstance(content, NNTPContent):
# use the filename based on the path
if content.filename:
tmp_file = join(
tmp_path,
'%s%s' % (splitext(basename(content.filename))[0], suffix,
))
elif isinstance(content, NNTPArticle):
if len(content) > 0:
if content[0].filename:
tmp_file = join(
tmp_path,
'%s%s' % (
splitext(basename(content[0].filename))[0],
suffix,
))
if tmp_file is None:
# Fall back
tmp_file = join(
tmp_path,
'%s%s' % (random_str(), suffix),
)
return tmp_path, tmp_file
def get_paths(self, content):
"""
        When supplied content which can be an NNTPArticle(), NNTPContent(),
a directory, and/or file. get_paths() returns all of the results
in a unique sortedset(). get_paths() also supports iterating over
tuples, sets, sortedsets and lists to fetch this information.
If a directory is passed in that maps against individual content
within the directory; that content is removed from the list causing
the directory to trump content within.
        This is a helper function that makes the handling of multiple
        content types much easier to work with. Ideally each Codec that inherits
from this class should use this prior to the actual archiving to keep
command line arguments to a minimum and consistent with the rules
defined in this (where directories trump).
"""
# Create a set to store our results in
results = sortedset()
if isinstance(content, (set, tuple, list, sortedset)):
# Iterate over the entries passing them back into this function
# recursively
for v in content:
results |= self.get_paths(v)
elif isinstance(content, basestring):
content = abspath(expanduser(content))
if exists(content):
results.add(content)
elif isinstance(content, NNTPContent):
if content.filepath and exists(content.filepath):
results.add(content.filepath)
elif isinstance(content, NNTPArticle):
for c in content:
if c.filepath and exists(c.filepath):
results.add(c.filepath)
if len(results) <= 1:
# Save ourselves some energy
return results
# Acquire a list of directories since these will trump any file
# entries found that reside in them.
_dirs = set([r for r in results if isdir(r)])
if _dirs:
# Adjust our results to eliminate any files that reside within
# directories that have been queued as well.
#
# Basically we want to look for files that reside in a directory
# we've already identified to include too and turf the files that
# reside within them. Thus directories trump!
#
# Hence if we find:
# - /path/to/data/dir/a/great/file
# - /path/to/data/dir/a.wonderful.file
# - /path/to/data/dir
# - /path/to/another.file
#
# We would keep:
# - /path/to/data/dir
# - /path/to/another.file
#
# We would turf the remaining files because they can be
# found within the /path/to/data/dir
results = sortedset([r for r in results if r not in _dirs and next(
(False for d in _dirs \
if r.startswith(d, 0, len(d)) is True), True)])
if len(_dirs) > 1:
# Perform the same check with our directories (a directory
# can not include another directory higher up) The shortest
# directory trumps a larger one.
# hence if we find:
# - /path/to/data/dir/
# - /path/to/data
#
# We would drop the /path/to/data/dir/ since the /path/to/data
# already includes it
_dirs = set([_d for _d in _dirs if next(
(True for d in _dirs if _d != d and \
d.startswith(_d, 0, len(d)) is True), False)])
# Since we stripped out directories earlier for some pre-processing
# we need to add them back here
results |= _dirs
# Return our results
return results
def watch_dir(self, path, regex=None, prefix=None, suffix=None,
ignore=None, case_sensitive=True, seconds=15):
"""Monitors a directory for files that have been added/changed
path: is the path to monitor
ignore: is a sortedset of files already parsed
seconds: is how long it takes a file to go untouched for before
we presume it has been completely written to disk.
"""
if ignore is None:
ignore = sortedset()
findings = find(
path, fsinfo=True,
regex_filter=regex,
prefix_filter=prefix,
suffix_filter=suffix,
case_sensitive=case_sensitive,
)
findings = [
(p, f['size'], f['created'], f['modified'])
for p, f in findings.items()
if (f['modified'] - f['created']).total_seconds() >= seconds
and f['basename'] not in ignore
]
# Sort list by created date
findings.sort(key=lambda x: x[3])
for f in findings:
logger.info('Created %s (size=%s)' % (
f, bytes_to_strsize(f[1]),
))
# Add to our filter list
ignore.add(f[0])
        # Return our ignore list (which is actually also a found list)
return ignore
def __iter__(self):
"""
Grants usage of the next()
"""
# Ensure our stream is open with read
return iter(self.archive)
def __len__(self):
"""
Returns the number of archive content entries found
"""
return len(self.archive)
def __str__(self):
"""
Return a printable version of the codec
"""
return repr(self)
def __repr__(self):
"""
        Return an unambiguous version of the object
"""
return '<CodecFile work_dir="%s" clevel="%s" archives="%d" />' % (
self.work_dir,
self.level,
len(self.archive),
)
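

# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): it shows how an
# inheriting codec is expected to drive the helpers above (add(), mkstemp()
# and the archive set).  The external 'pack' command mentioned in the comment
# is a hypothetical placeholder; a real subclass shells out to its own binary
# and wraps the produced files into NNTPBinaryContent() objects.
# ---------------------------------------------------------------------------
class _ExampleCodec(CodecFile):
    """
    Minimal example subclass demonstrating the encode() call pattern.
    """

    def encode(self, content=None, *args, **kwargs):
        # Queue whatever was handed to us (paths, NNTPContent, NNTPArticle)
        if content is not None:
            self.add(content)

        if not len(self):
            # Nothing was queued for archiving
            return None

        # Work inside a unique temporary area provided by mkstemp()
        tmp_path, tmp_file = self.mkstemp(content=self.name, suffix='.arc')

        # A real implementation would invoke its external binary here, e.g.
        #   subprocess.call(['pack', tmp_file] + sorted(self.archive))
        # and then return the results wrapped in NNTPBinaryContent() objects.
        return None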
| caronc/newsreap | newsreap/codecs/CodecFile.py | Python | gpl-3.0 | 14,438 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-03 19:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_remove_livro_image'),
]
operations = [
migrations.RemoveField(
model_name='link',
name='comentarios',
),
migrations.RemoveField(
model_name='livro',
name='comentarios',
),
migrations.RemoveField(
model_name='pessoa',
name='comentarios',
),
migrations.AddField(
model_name='comentario',
name='livro',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.Livro'),
preserve_default=False,
),
]
| LEDS/jediteca | jediteca/core/migrations/0003_auto_20151203_1902.py | Python | gpl-2.0 | 895 |
s = raw_input().lower()
score1 = score2 = 0
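# Scoring: every index i starts len(s)-i substrings, so a consonant at index i
# is worth len(s)-i points to Stuart and a vowel there is worth the same to Kevin.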
for i in xrange(len(s)):
if s[i] == 'a' or s[i] == 'e' or s[i] == 'i' or s[i] == 'o' or s[i] == 'u':
score2 += len(s)-i
else:
score1 += len(s)-i
if score1 > score2:
print 'Stuart ' + str(score1)
elif score1 < score2:
print 'Kevin ' + str(score2)
else:
print 'Draw'
| shree-shubham/Unitype | The Minion Game.py | Python | gpl-3.0 | 345 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test cases to verify presentation of volume id in events table
for 'SNAPSHOT.CREATE' type.
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (cleanup_resources,
validateList)
from marvin.lib.base import (Account,
ServiceOffering,
Snapshot,
VirtualMachine,
Configurations
)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_volumes,
)
from marvin.codes import PASS
class TestVerifyEventsTable(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestVerifyEventsTable, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.testdata = testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.testdata["ostype"])
cls._cleanup = []
try:
cls.unsupportedHypervisor = False
if cls.hypervisor.lower() in ['hyperv', 'lxc', 'kvm']:
if cls.hypervisor.lower() == 'kvm':
configs = Configurations.list(
cls.apiclient,
name='kvm.snapshot.enabled'
)
if configs[0].value == "false":
cls.unsupportedHypervisor = True
else:
cls.unsupportedHypervisor = True
return
# Create an account
cls.account = Account.create(
cls.apiclient,
cls.testdata["account"],
domainid=cls.domain.id
)
# Create user api client of the account
cls.userapiclient = testClient.getUserApiClient(
UserName=cls.account.name,
DomainName=cls.account.domain
)
# Create Service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offering"],
)
cls._cleanup = [
cls.account,
cls.service_offering,
]
except Exception as e:
cls.tearDownClass()
raise e
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.cleanup = []
if self.unsupportedHypervisor:
self.skipTest(
"snapshots are not supported on %s" %
self.hypervisor.lower())
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic"], required_hardware="false")
def test_01_verify_events_table(self):
""" Test events table
# 1. Deploy a VM.
# 2. Take VM snapshot.
        # 3. Verify that events table records UUID of the volume in description
instead of volume ID
"""
# Step 1
# Create VM
vm = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id,
)
volumes_list = list_volumes(
self.apiclient,
virtualmachineid=vm.id,
type='ROOT',
listall=True
)
volume_list_validation = validateList(volumes_list)
self.assertEqual(
volume_list_validation[0],
PASS,
"volume list validation failed due to %s" %
volume_list_validation[2]
)
root_volume = volumes_list[0]
# Step 2
# Create snapshot of root volume
snapshot = Snapshot.create(
self.apiclient,
root_volume.id)
snapshots_list = Snapshot.list(self.userapiclient,
id=snapshot.id)
status = validateList(snapshots_list)
self.assertEqual(status[0], PASS, "Snapshots List Validation Failed")
self.assertEqual(
snapshot.state,
"BackedUp",
"Check if snapshot gets created properly"
)
# Step 3
qresultset = self.dbclient.execute(
"select description from event where type='SNAPSHOT.CREATE' AND \
description like '%%%s%%'" % root_volume.id)
event_validation_result = validateList(qresultset)
self.assertEqual(
event_validation_result[0],
PASS,
"event list validation failed due to %s" %
event_validation_result[2]
)
self.assertNotEqual(
len(qresultset),
0,
"Check if events table records UUID of the volume"
)
return
| ikoula/cloudstack | test/integration/testpaths/testpath_uuid_event.py | Python | gpl-2.0 | 6,637 |
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Weak references to bound and unbound methods.
"""
from builtins import object
import weakref
class DeadMethodCalled(Exception):
"""
Raised by :class:`WeakMethod` if it is called when the referenced object
is already dead.
"""
pass
class WeakMethod(object):
"""
Do not create this class directly; use :class:`ref()` instead.
"""
__slots__ = 'name', 'callback'
def __init__(self, name, callback):
"""
Constructor. Do not use directly, use :class:`ref()` instead.
"""
self.name = name
self.callback = callback
def _dead(self, ref):
if self.callback is not None:
self.callback(self)
def get_function(self):
"""
Returns the referenced method/function if it is still alive.
Returns None otherwise.
:rtype: callable|None
:return: The referenced function if it is still alive.
"""
raise NotImplementedError()
def isalive(self):
"""
Returns True if the referenced function is still alive, False
otherwise.
:rtype: bool
:return: Whether the referenced function is still alive.
"""
return self.get_function() is not None
def __call__(self, *args, **kwargs):
"""
Proxied to the underlying function or method. Raises :class:`DeadMethodCalled`
if the referenced function is dead.
:rtype: object
:return: Whatever the referenced function returned.
"""
method = self.get_function()
if method is None:
raise DeadMethodCalled('method called on dead object ' + self.name)
        return method(*args, **kwargs)
class _WeakMethodBound(WeakMethod):
__slots__ = 'name', 'callback', 'f', 'c'
def __init__(self, f, callback):
name = f.__self__.__class__.__name__ + '.' + f.__func__.__name__
WeakMethod.__init__(self, name, callback)
self.f = f.__func__
self.c = weakref.ref(f.__self__, self._dead)
def get_function(self):
cls = self.c()
if cls is None:
return None
return getattr(cls, self.f.__name__)
class _WeakMethodFree(WeakMethod):
__slots__ = 'name', 'callback', 'f'
def __init__(self, f, callback):
WeakMethod.__init__(self, f.__class__.__name__, callback)
self.f = weakref.ref(f, self._dead)
def get_function(self):
return self.f()
def ref(function, callback=None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
    as the referenced function is garbage collected.
:type function: callable
:param function: The function to reference.
:type callback: callable
:param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback)
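

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): keep a weak
# reference to a bound method and get notified once its owner is collected.
# The _Greeter class and _on_dead callback are made up for demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _Greeter(object):
        def hello(self):
            print('hello')

    def _on_dead(weak_method):
        print('reference to %s died' % weak_method.name)

    greeter = _Greeter()
    weak = ref(greeter.hello, _on_dead)
    weak()                   # proxies the call to greeter.hello()
    del greeter              # dropping the owner triggers _on_dead()
    print(weak.isalive())    # False; calling weak() now raises DeadMethodCalled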
| maximumG/exscript | Exscript/util/weakmethod.py | Python | mit | 4,132 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# userFacade.py
#
# Copyright 2010-2015 Jose Riguera Lopez <jriguera@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__program__ = "photoplace"
__author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__version__ = "0.6.1"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ ="(c) Jose Riguera"
import os
import sys
import shutil
import codecs
import re
import ConfigParser
import logging
import logging.handlers
import loggingHandler
import datetime
import getpass
import Plugins
import Facade
import stateHandler
from definitions import *
# ################################
# PhotoPlace UserFacade Definition
# ################################
class UserFacade(Facade.Facade):
def __init__(self, resources, configfile, args, cfgopt, fargs):
# Overwrite default values with command line args
self.argfiles = []
self.args = args
self.resourcedir = resources
if not isinstance(resources, unicode):
try:
self.resourcedir = unicode(resources, PLATFORMENCODING)
except:
pass
defaultconfig = dict(PhotoPlace_Cfg_default)
if cfgopt.options:
for opt in cfgopt.options:
try:
val = opt.split(PhotoPlace_Cfg_optionsep)
section = val[0].split(PhotoPlace_Cfg_sectionsep)
sec = section[0].strip().lower()
key = section[1].strip().lower()
value = val[1].strip().lower()
except:
continue
if not defaultconfig.has_key(sec):
defaultconfig[sec] = dict()
defaultconfig[sec][key] = value
for argfile in fargs:
if not isinstance(argfile, unicode):
try:
argfile = unicode(argfile, PLATFORMENCODING)
except:
pass
inputext = os.path.splitext(argfile)[1].lower()
if os.path.isdir(argfile):
defaultconfig['main']['photoinputdir'] = argfile
elif inputext == '.kml':
defaultconfig['main']['outputfile'] = argfile
elif inputext == '.kmz':
defaultconfig['main']['outputfile'] = argfile
elif os.path.isfile(argfile):
if inputext == '.gpx':
defaultconfig['main']['gpxinputfile'] = argfile
else:
self.argfiles.append(argfile)
else:
pass
self.configfile = configfile
if not isinstance(configfile, unicode):
try:
self.configfile = unicode(configfile, PLATFORMENCODING)
except:
pass
self.options = defaultconfig
if not self._load_config(self.configfile, defaultconfig):
dgettext = dict()
if self.configfile != None and os.path.exists(self.configfile):
path = os.path.dirname(self.configfile)
old_configfile = self.configfile + PhotoPlace_Cfg_fileextold
try:
shutil.copyfile(self.configfile, old_configfile)
except Exception as exception:
dgettext['error'] = str(exception)
dgettext['file'] = old_configfile.encode(PLATFORMENCODING)
msg = _("Cannot create backup of configfile '%(file)s': %(error)s.\n")
sys.stderr.write(msg % dgettext)
source_path = os.path.join(self.resourcedir, PhotoPlace_Cfg_altdir)
source_path = os.path.join(source_path, PhotoPlace_Cfg_file)
dgettext['fromfile'] = source_path.encode(PLATFORMENCODING)
dgettext['tofile'] = self.configfile.encode(PLATFORMENCODING)
try:
shutil.copyfile(source_path, self.configfile)
except Exception as exception:
dgettext['error'] = str(exception)
msg = _("Cannot overwrite '%(tofile)s': %(error)s.\n")
sys.stderr.write(msg % dgettext)
else:
msg = _("Configuration recovered to '%(tofile)s'.\n")
sys.stderr.write(msg % dgettext)
# Try again
self._load_config(self.configfile, defaultconfig)
self.logfile = self.options['main'].setdefault('logfile')
self.loglevel = self.options['main'].setdefault('loglevel','')
self.finalize = False
self.state = None
self.observers = {}
self.addons = self.options["addons"]
# add the handler to the root logger
self.logger = logging.getLogger()
self.mainloghandler = loggingHandler.LogRedirectHandler()
self.mainloghandler.setLevel(logging.DEBUG)
consoleformatter = logging.Formatter(PhotoPlace_Cfg_consolelogformat)
self.mainloghandler.setFormatter(consoleformatter)
self.logger.addHandler(self.mainloghandler)
if self.logfile != None:
if not isinstance(self.logfile, unicode):
try:
self.logfile = unicode(self.logfile, PLATFORMENCODING)
except:
pass
l = PhotoPlace_Cfg_loglevel
try:
level = self.loglevel.lower()
l = PhotoPlace_Cfg_LogModes[level]
except:
pass
try:
loghandler = logging.handlers.RotatingFileHandler(
self.logfile, maxBytes=2097152, backupCount=5, delay=True)
loghandler.setLevel(l)
logformatter = logging.Formatter(PhotoPlace_Cfg_logformat)
loghandler.setFormatter(logformatter)
self.logger.addHandler(loghandler)
self.logger.debug("# ---")
except Exception as e:
msg = _("Cannot set up logfile: %s.") % str(e)
self.logger.error(msg)
self.logger.setLevel(PhotoPlace_Cfg_loglevel)
self.pluginmanager = Plugins.pluginManager.PluginManager()
self.logger.debug("# " + PhotoPlace_name)
self.logger.debug(
"# Launched with command line args %s, files: %s" %
(self.args, self.argfiles))
self.logger.debug(_("# with configuration file '%s'.") % \
self.configfile.encode(PLATFORMENCODING))
self.logger.debug(_("# main options: %s") % self.options['main'])
def _load_config(self, configfile, defaultconfig):
if configfile != None:
dgettext = dict()
dgettext['configfile'] = configfile.encode(PLATFORMENCODING)
if not os.path.exists(configfile):
msg = _("Cannot find config file '%(configfile)s'.\n")
sys.stderr.write(msg % dgettext)
else:
configuration = ConfigParser.ConfigParser()
try:
# Try to read the configuration file
configuration.read(configfile)
except:
msg = _("Cannot understand the format of config file '%(configfile)s'.\n")
sys.stderr.write(msg % dgettext)
else:
try:
self.options = self.load_config(defaultconfig)
except NameError as nameerror:
dgettext['section'] = str(nameerror)
msg = _("Configuration file '%(configfile)s' is incomplete: "
"cannot find section '[%(section)s]'. Ignoring it!\n")
sys.stderr.write(msg % dgettext)
except ValueError as valueerror:
dgettext['error'] = str(valueerror)
msg = _("Cannot parse the configuration file '%(configfile)s': %(error)s.\n")
sys.stderr.write(msg % dgettext)
if self.options['main'].has_key('version'):
try:
version = int(self.options['main']['version'])
if version >= PhotoPlace_Cfg_version:
return True
except:
pass
return False
else:
# Make configuration file
return False
def load_config(self, defaults):
try:
configuration = ConfigParser.ConfigParser()
configuration.read(self.configfile)
except ConfigParser.Error as configerror:
raise ValueError(str(configerror))
rconfig = dict()
current = dict()
for item in configuration.sections():
current[item] = dict()
current.update(PhotoPlace_Cfg_default)
for section in current.keys():
dictionary = dict()
if not configuration.has_section(section):
raise NameError(section)
else:
options = configuration.options(section)
for option in options:
try:
dictionary[option] = configuration.get(section, option)
except:
dictionary[option] = None
if defaults.has_key(section):
for key, value in defaults[section].iteritems():
dictionary[key] = value
for key, value in current[section].iteritems():
if value:
dictionary[key] = value
else:
value = dictionary.get(key, None)
dictionary[key] = value
rconfig[section] = dictionary
for k, v in rconfig[VARIABLES_KEY].iteritems():
if not isinstance(k, unicode):
try:
k = unicode(k, PLATFORMENCODING)
except:
pass
if k in VARIABLES_OTHER:
pass
else:
if k == 'author' and not v:
try:
v = unicode(getpass.getuser(), PLATFORMENCODING)
rconfig[VARIABLES_KEY][k] = v
except:
pass
elif k == 'date' and not v:
v = datetime.date.today().strftime(PhotoPlace_Cfg_timeformat)
v = unicode(v, PLATFORMENCODING)
rconfig[VARIABLES_KEY][k] = v
return rconfig
def _get_cfgvalue(self, value):
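        # Serialize a configuration value back to .ini text: lists are joined
        # with "; ", tuples with ",", None becomes '' and True/False become 1/0.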
if isinstance(value, list):
pos = 0
limit_pos = len(value) - 1
new_current_value = ''
while pos <= limit_pos:
new_current_value += str(value[pos])
if pos != limit_pos:
new_current_value += '; '
pos = pos + 1
current_value = new_current_value
elif isinstance(value, tuple):
pos = 0
value_list = list(value)
limit_pos = len(value_list) - 1
new_current_value = ''
while pos <= limit_pos:
new_current_value += str(value_list[pos])
if pos != limit_pos:
new_current_value += ','
pos = pos + 1
current_value = new_current_value
else:
current_value = str(value)
if current_value == 'None':
current_value = ''
elif current_value == 'True':
current_value = '1'
elif current_value == 'False':
current_value = '0'
return current_value
def save_config(self, nosections=PhotoPlace_CONFIG_NOCLONE):
if self.configfile != None:
dgettext = dict()
dgettext['file'] = self.configfile.encode(PLATFORMENCODING)
path = os.path.dirname(self.configfile)
dgettext['path'] = path.encode(PLATFORMENCODING)
old_configfile = self.configfile + PhotoPlace_Cfg_fileextold
try:
shutil.copyfile(self.configfile, old_configfile)
except Exception as exception:
dgettext['error'] = str(exception)
dgettext['file'] = old_configfile.encode(PLATFORMENCODING)
msg = _("Cannot create backup of configfile to '%(file)s': %(error)s.")
msg = msg % dgettext
self.logger.error(msg)
tip = _("Check if path '%(path)s' exists or is writable.") % dgettext
raise Error(msg, tip, exception.__class__.__name__)
else:
fd_old = codecs.open(old_configfile, "r", encoding="utf-8")
try:
fd_new = codecs.open(self.configfile, "w", encoding="utf-8")
except Exception as exception:
dgettext['error'] = str(exception)
msg = _("Cannot write values in configfile '%(file)s': %(error)s.")
msg = msg % dgettext
self.logger.error(msg)
tip = _("Check if path/file exists or is writable.")
raise Error(msg, tip, exception.__class__.__name__)
section = ''
section_keys = []
for line in fd_old:
line = line.lstrip()
if line.isspace() or len(line) == 0:
continue
if line.startswith('#'):
fd_new.write(line)
continue
search = re.search(r'\[(\w+)\]\s*', line)
if search:
# write rest of values of previous section
if section and not section in nosections:
for item in self.options[section]:
if item not in section_keys:
current_value = self.options[section][item]
current_value = self._get_cfgvalue(current_value)
fd_new.write("%s = %s\n" % (item, current_value))
fd_new.write("\n")
section_keys = []
section = search.group(1)
fd_new.write(line)
continue
if section in nosections and len(nosections[section]) == 0:
fd_new.write(line)
continue
search = re.search(r';*\s*([\.\w]+)\s*=\s*([\.\\\/\-\w\$,#@]*)\s*', line)
if search:
# Maybe a default value
item_orig = search.group(1)
item = item_orig.lower()
old_value = search.group(2)
if section in nosections and item in nosections[section]:
fd_new.write(line)
else:
if section == 'main':
try:
current_value = self.state[item]
except:
current_value = old_value
else:
try:
current_value = self.options[section][item]
except:
current_value = old_value
current_value = self._get_cfgvalue(current_value)
if line.startswith(';') and current_value == old_value:
fd_new.write(line)
else:
fd_new.write("%s = %s\n" % (item_orig, current_value))
section_keys.append(item)
msg = _("Configuration saved to '%(file)s'.") % dgettext
self.logger.info(msg)
else:
self.logger.debug(_("No configuration file loaded. Nothing to do!"))
def recover_config(self, directory=PhotoPlace_Cfg_altdir):
dgettext = dict()
if self.configfile != None:
path = os.path.dirname(self.configfile)
old_configfile = self.configfile + PhotoPlace_Cfg_fileextold
try:
shutil.copyfile(self.configfile, old_configfile)
except Exception as exception:
dgettext['error'] = str(exception)
dgettext['file'] = old_configfile.encode(PLATFORMENCODING)
dgettext['path'] = path.encode(PLATFORMENCODING)
msg = _("Cannot create backup of configfile '%(file)s': %(error)s.")
msg = msg % dgettext
mtype = _("OOps ...")
tip = _("Check if '%(path)s' exists or is writable.") % dgettext
self.logger.error(msg)
raise Error(msg, tip, exception.__class__.__name__)
source_path = os.path.join(self.state.resourcedir, directory)
source_path = os.path.join(source_path, PhotoPlace_Cfg_file)
dest_path = os.path.join(self.state.resourcedir_user, PhotoPlace_Cfg_file)
dgettext['fromfile'] = source_path.encode(PLATFORMENCODING)
dgettext['tofile'] = dest_path.encode(PLATFORMENCODING)
try:
shutil.copyfile(source_path, dest_path)
except Exception as exception:
dgettext['error'] = str(exception)
msg = _("Cannot copy '%(fromfile)s' to '%(tofile)s': %(error)s.")
msg = msg % dgettext
mtype = _("OOps ...")
tip = _("Check if paths exist or are writable.")
self.logger.error(msg)
raise Error(msg, tip, exception.__class__.__name__)
msg = _("Configuration recovered from '%(fromfile)s' to '%(tofile)s'.")
msg = msg % dgettext
self.logger.info(msg)
def init(self, defaults=False):
if defaults:
self.options = PhotoPlace_Cfg_default
if self.configfile != None:
resourcedir_user = os.path.dirname(self.configfile)
self.state = stateHandler.State(self.resourcedir, self.options['main'], resourcedir_user)
def end(self):
#self.end_plugin()
self.Clear()
try:
self.unload_plugins()
except Exception as exception:
self.logger.error(str(exception))
self.logger.debug('# The end! vai vai!')
# EOF
| jriguera/photoplace | photoplace/lib/PhotoPlace/userFacade.py | Python | apache-2.0 | 19,186 |
#!/usr/bin/env python3
'''
List traffic statistics for IPv4 policies.
Method
https://<DEVICE_IP>/api/v2/monitor/firewall/policy
# PolicyID 0 means 'Implicit Deny' rule
CLI
FG # diag firewall iprope show 00100004 13
idx=13 pkts/bytes=553936822/569261079433 asic_pkts/asic_bytes=373877794/400601238192 nturbo_pkts/nturbo_bytes=236116365/236482516860 flag=0x0 hit count:12467662
first:2018-03-21 18:40:50 last:2018-08-29 18:21:06
established session count:1347
first est:2018-03-21 18:40:50 last est:2018-08-29 18:21:05
Response FGVM
{
"http_method":"GET",
"results":[
{
"policyid":0,
"active_sessions":0,
"bytes":0,
"packets":0
},
{
"policyid":1,
"uuid":"bfa5cd8e-aba3-51e8-567b-302f11df04b7",
"active_sessions":0,
"bytes":0,
"packets":0
},
{
"policyid":66,
"uuid":"88c14218-abbb-51e8-4fb5-4edfbd13d043",
"active_sessions":0,
"bytes":0,
"packets":0
},
{
"policyid":3,
"uuid":"e8047652-abbe-51e8-0273-ef4a243c29c5",
"active_sessions":0,
"bytes":0,
"packets":0
}
],
"vdom":"root",
"path":"firewall",
"name":"policy",
"status":"success",
"serial":"FGVM020000000000",
"version":"v6.0.0",
"build":76
}
Response FG-81E
{
"http_method":"GET",
"results":[
{
"policyid":0,
"active_sessions":0,
"bytes":536085,
"packets":8397,
"software_bytes":536085,
"software_packets":8397,
"asic_bytes":0,
"asic_packets":0,
"nturbo_bytes":0,
"nturbo_packets":0,
"last_used":1535478216,
"first_used":1521670953,
"hit_count":10770
},
{
"policyid":1,
"uuid":"bdd7cf24-f7fe-51e7-de6c-f42e21b200d1",
"active_sessions":0,
"bytes":0,
"packets":0,
"software_bytes":0,
"software_packets":0,
"asic_bytes":0,
"asic_packets":0,
"nturbo_bytes":0,
"nturbo_packets":0
},
{
"policyid":13,
"uuid":"d61723ce-2c41-51e8-214e-3ca194c65c79",
"active_sessions":2056,
"bytes":569238562854,
"packets":553910965,
"software_bytes":168657862523,
"software_packets":180048782,
"asic_bytes":164098194283,
"asic_packets":137745885,
"nturbo_bytes":236482506048,
"nturbo_packets":236116298,
"last_used":1535577206,
"first_used":1521668450,
"hit_count":12463107,
"session_last_used":1535577206,
"session_first_used":1521668450,
"session_count":2043
}
],
"vdom":"root",
"path":"firewall",
"name":"policy",
"status":"success",
"serial":"FG81EP0000000000",
"version":"v6.0.2",
"build":163
}
'''
from fortiosapi import FortiOSAPI
from pprint import pprint
import time
fgt = FortiOSAPI()
device = {
'host': '10.99.236.231',
'username': 'admin',
'password': '',
}
fgt.login(**device)
out = fgt.monitor('firewall', 'policy')
for policy in out['results']:
policyid = str(policy['policyid'])
if 'last_used' not in policy:
print('Policy ID {:10} {:^38}'.format(policyid, '** Never Used **'))
else:
policy_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(policy['last_used']))
print('Policy ID {:10} Last Used on: {} GMT'.format(policyid, policy_time))
fgt.logout()
| barbosm/gatepy | examples/monitor_firewall_policy-check_last_used.py | Python | gpl-2.0 | 3,732 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, with_statement
import codecs
import hashlib
import itertools
import logging
import os
import os.path
import pipes
import random
import shutil
import string
from string import Template
from stat import S_IRUSR
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
from datetime import timedelta
if sys.version < "3":
from urllib2 import urlopen, Request, HTTPError
else:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
raw_input = input
xrange = range
SPARK_EC2_VERSION = "2.1.2"
DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
SPARK_EC2_DIR = "/opt/spark"
VALID_SPARK_VERSIONS = set([
"0.7.3",
"0.8.0",
"0.8.1",
"0.9.0",
"0.9.1",
"0.9.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"1.2.0",
"1.2.1",
"1.3.0",
"1.3.1",
"1.4.0",
"1.4.1",
"1.5.0",
"1.5.1",
"1.5.2",
"1.6.0",
"1.6.1",
"1.6.2",
"1.6.3",
"2.0.0-preview",
"2.0.0",
"2.0.1",
"2.0.2",
"2.1.0",
"2.1.2"
])
SPARK_TACHYON_MAP = {
"1.0.0": "0.4.1",
"1.0.1": "0.4.1",
"1.0.2": "0.4.1",
"1.1.0": "0.5.0",
"1.1.1": "0.5.0",
"1.2.0": "0.5.0",
"1.2.1": "0.5.0",
"1.3.0": "0.5.0",
"1.3.1": "0.5.0",
"1.4.0": "0.6.4",
"1.4.1": "0.6.4",
"1.5.0": "0.7.1",
"1.5.1": "0.7.1",
"1.5.2": "0.7.1",
"1.6.0": "0.8.2",
"1.6.1": "0.8.2",
"1.6.2": "0.8.2",
"2.0.0-preview": "",
}
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"
# Default location to get the spark-ec2 scripts (and ami-list) from
DEFAULT_SPARK_EC2_GITHUB_REPO = "https://github.com/paulomagalhaes/spark-ec2"
DEFAULT_SPARK_EC2_BRANCH = "branch-2.1"
def setup_external_libs(libs):
"""
    Download external libraries from PyPI to SPARK_EC2_DIR/lib/ and prepend them to our sys.path.
"""
PYPI_URL_PREFIX = "https://pypi.python.org/packages/source"
SPARK_EC2_LIB_DIR = os.path.join(SPARK_EC2_DIR, "lib")
if not os.path.exists(SPARK_EC2_LIB_DIR):
print("Downloading external libraries that spark-ec2 needs from PyPI to {path}...".format(
path=SPARK_EC2_LIB_DIR
))
print("This should be a one-time operation.")
os.mkdir(SPARK_EC2_LIB_DIR)
for lib in libs:
versioned_lib_name = "{n}-{v}".format(n=lib["name"], v=lib["version"])
lib_dir = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name)
if not os.path.isdir(lib_dir):
tgz_file_path = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name + ".tar.gz")
print(" - Downloading {lib}...".format(lib=lib["name"]))
download_stream = urlopen(
"{prefix}/{first_letter}/{lib_name}/{lib_name}-{lib_version}.tar.gz".format(
prefix=PYPI_URL_PREFIX,
first_letter=lib["name"][:1],
lib_name=lib["name"],
lib_version=lib["version"]
)
)
with open(tgz_file_path, "wb") as tgz_file:
tgz_file.write(download_stream.read())
                with open(tgz_file_path, "rb") as tar:
if hashlib.md5(tar.read()).hexdigest() != lib["md5"]:
print("ERROR: Got wrong md5sum for {lib}.".format(lib=lib["name"]), file=stderr)
sys.exit(1)
tar = tarfile.open(tgz_file_path)
tar.extractall(path=SPARK_EC2_LIB_DIR)
tar.close()
os.remove(tgz_file_path)
print(" - Finished downloading {lib}.".format(lib=lib["name"]))
sys.path.insert(1, lib_dir)
# Only PyPI libraries are supported.
external_libs = [
{
"name": "boto",
"version": "2.34.0",
"md5": "5556223d2d0cc4d06dd4829e671dcecd"
}
]
setup_external_libs(external_libs)
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
pass
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(usage="spark-ec2 [options] <action> <cluster_name>"
+ "\n\n<action> can be: launch, destroy, login, stop, start, get-master",
add_help_option=False)
parser.add_option("-h", "--help", action="help",
help="Show this help message and exit")
parser.add_option("-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: 1)")
parser.add_option("-w", "--wait", type="int", default=120,
help="Seconds to wait for nodes to start (default: 120)")
parser.add_option("-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option("-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option("-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: m1.large). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option("-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option("-r", "--region", help="EC2 region zone to launch instances in")
    parser.add_option("-z", "--zone", help="Availability zone to launch instances in, or 'all' to spread " +
                      "slaves across multiple (an additional $0.01/Gb for bandwidth " +
                      "between zones applies)")
parser.add_option("-a", "--ami", help="Amazon Machine Image ID to use")
parser.add_option("-p", "--profile", help="AWS profile/role arn to use")
parser.add_option("-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash")
parser.add_option("--spark-git-repo",
default="https://github.com/apache/spark",
help="Github repo from which to checkout supplied commit hash")
parser.add_option("--hadoop-major-version", default="2",
help="Major version of Hadoop (default: 2)")
parser.add_option("-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option("--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option("--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Attach a new EBS volume of size SIZE (in GB) to each node as " +
"/vol. The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs.")
parser.add_option("--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: 1024)")
parser.add_option("--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option("--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: on). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option("--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option("-u", "--user", default="root",
help="The SSH user you want to connect as (default: root)")
parser.add_option("--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created")
parser.add_option("--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option("--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: 1)")
parser.add_option("--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable (e.g -Dspark.worker.timeout=180)")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
if opts.region is None:
opts.region = region()
if opts.zone is None:
opts.zone = zone()
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
# home_dir = os.getenv('HOME')
# if home_dir == None or not os.path.isfile(home_dir + '/.boto'):
# if not os.path.isfile('/etc/boto.cfg'):
# if os.getenv('AWS_ACCESS_KEY_ID') == None:
# print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
# "must be set")
# sys.exit(1)
# if os.getenv('AWS_SECRET_ACCESS_KEY') == None:
# print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
# "must be set")
# sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print( "Creating security group " + name)
return conn.create_security_group(name, "Spark EC2 group")
# Wait for a set of launched instances to exit the "pending" state
# (i.e. either to start running or to fail and be terminated)
def wait_for_instances(conn, instances):
ids = [i.id for i in instances]
while True:
# for i in instances:
# i.update()
# if len([i for i in instances if i.state == 'pending']) > 0:
#
instace_stati = conn.get_all_instance_status(instance_ids=ids)
if len([i for i in instace_stati if i.system_status.details['reachability'] != 'passed' or i.instance_status.details['reachability'] != 'passed']) > 0:
time.sleep(5)
else:
return
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
return (instance.state in ['pending', 'running', 'stopping', 'stopped'])
def get_validate_spark_version(version, repo):
if "." in version:
version = version.replace("v", "")
if version not in VALID_SPARK_VERSIONS:
print("Don't know about Spark version: {v}".format(v=version), file=stderr)
sys.exit(1)
return version
else:
github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
request = Request(github_commit_url)
request.get_method = lambda: 'HEAD'
try:
response = urlopen(request)
except HTTPError as e:
print("Couldn't validate Spark commit: {url}".format(url=github_commit_url),
file=stderr)
print("Received HTTP response code of {code}.".format(code=e.code), file=stderr)
sys.exit(1)
return version
EC2_INSTANCE_TYPES = {
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"c3.large": "pvm",
"c3.xlarge": "pvm",
"c3.2xlarge": "pvm",
"c3.4xlarge": "pvm",
"c3.8xlarge": "pvm",
"c4.large": "hvm",
"c4.xlarge": "hvm",
"c4.2xlarge": "hvm",
"c4.4xlarge": "hvm",
"c4.8xlarge": "hvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"cr1.8xlarge": "hvm",
"d2.xlarge": "hvm",
"d2.2xlarge": "hvm",
"d2.4xlarge": "hvm",
"d2.8xlarge": "hvm",
"g2.2xlarge": "hvm",
"g2.8xlarge": "hvm",
"hi1.4xlarge": "pvm",
"hs1.8xlarge": "pvm",
"i2.xlarge": "hvm",
"i2.2xlarge": "hvm",
"i2.4xlarge": "hvm",
"i2.8xlarge": "hvm",
"m1.small": "pvm",
"m1.medium": "pvm",
"m1.large": "pvm",
"m1.xlarge": "pvm",
"m2.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"m3.medium": "hvm",
"m3.large": "hvm",
"m3.xlarge": "hvm",
"m3.2xlarge": "hvm",
"r3.large": "hvm",
"r3.xlarge": "hvm",
"r3.2xlarge": "hvm",
"r3.4xlarge": "hvm",
"r3.8xlarge": "hvm",
"r4.large": "hvm",
"r4.xlarge": "hvm",
"r4.2xlarge": "hvm",
"r4.4xlarge": "hvm",
"r4.8xlarge": "hvm",
"r4.16xlarge": "hvm",
"x1e.large": "hvm",
"x1e.xlarge": "hvm",
"x1e.2xlarge": "hvm",
"x1e.4xlarge": "hvm",
"x1e.8xlarge": "hvm",
"x1e.16xlarge":"hvm",
"x1e.32xlarge":"hvm",
"t1.micro": "pvm",
"t2.micro": "hvm",
"t2.small": "hvm",
"t2.medium": "hvm",
}
def get_tachyon_version(spark_version):
return SPARK_TACHYON_MAP.get(spark_version, "")
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
def get_spark_ami(opts):
if opts.instance_type in EC2_INSTANCE_TYPES:
instance_type = EC2_INSTANCE_TYPES[opts.instance_type]
else:
instance_type = "pvm"
print("Don't recognize %s, assuming type is pvm" % opts.instance_type, file=stderr)
# URL prefix from which to fetch AMI information
ami_prefix = "{r}/{b}/ami-list".format(
r=DEFAULT_SPARK_EC2_GITHUB_REPO.replace("https://github.com", "https://raw.github.com", 1),
b=DEFAULT_SPARK_EC2_BRANCH)
ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
reader = codecs.getreader("ascii")
try:
ami = reader(urlopen(ami_path)).read().strip()
except:
print("Could not resolve AMI at: " + ami_path, file=stderr)
sys.exit(1)
print("Spark AMI: " + ami)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
#Remove known hosts to avoid "Offending key for IP ..." errors.
known_hosts = os.environ['HOME'] + "/.ssh/known_hosts"
if os.path.isfile(known_hosts):
os.remove(known_hosts)
if opts.key_pair is None:
opts.key_pair = keypair()
if opts.key_pair is None:
print ( "ERROR: Must provide a key pair name (-k) to use on instances.", file=sys.stderr)
sys.exit(1)
if opts.profile is None:
opts.profile = profile()
if opts.profile is None:
            print("ERROR: No profile found on the current host. It must be provided with the -p option.", file=sys.stderr)
sys.exit(1)
public_key = pub_key()
user_data = Template("""#!/bin/bash
set -e -x
echo '$public_key' >> ~root/.ssh/authorized_keys
echo '$public_key' >> ~ec2-user/.ssh/authorized_keys""").substitute(public_key=public_key)
print("Setting up security groups...")
master_group = get_or_make_group(conn, cluster_name + "-master")
slave_group = get_or_make_group(conn, cluster_name + "-slaves")
security_group = os.popen("curl -s http://169.254.169.254/latest/meta-data/security-groups").read()
sparknotebook_group = get_or_make_group(conn, security_group)
if master_group.rules == []: # Group was just now created
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
master_group.authorize(src_group=sparknotebook_group)
master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
master_group.authorize('tcp', 18080, 18080, '0.0.0.0/0')
master_group.authorize('tcp', 19999, 19999, '0.0.0.0/0')
master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
master_group.authorize('tcp', 4040, 4045, '0.0.0.0/0')
master_group.authorize('tcp', 7077, 7077, '0.0.0.0/0')
if opts.ganglia:
master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
if slave_group.rules == []: # Group was just now created
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
slave_group.authorize(src_group=sparknotebook_group)
slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
if not any(r for r in sparknotebook_group.rules for g in r.grants if master_group.id == g.group_id):
sparknotebook_group.authorize(ip_protocol="tcp", from_port="1", to_port="65535", src_group=master_group)
sparknotebook_group.authorize(ip_protocol="icmp", from_port="-1", to_port="-1", src_group=master_group)
if not any(r for r in sparknotebook_group.rules for g in r.grants if slave_group.id == g.group_id):
sparknotebook_group.authorize(ip_protocol="tcp", from_port="1", to_port="65535", src_group=slave_group)
sparknotebook_group.authorize(ip_protocol="icmp", from_port="-1", to_port="-1", src_group=slave_group)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print (("ERROR: There are already instances running in " +
"group %s or %s" % (master_group.name, slave_group.name)), file=sys.stderr)
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
print("Launching instances...")
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print ("Could not find AMI " + opts.ami, file=sys.stderr)
sys.exit(1)
# Create block device mapping so that we can add an EBS volume if asked to
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.delete_on_termination = True
block_map["/dev/sdv"] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.ascii_letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price != None:
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
best_price = find_best_price(conn,opts.instance_type,zone, opts.spot_price)
# Launch spot instances with the requested price
print(("Requesting %d slaves as spot instances with price $%.3f/hour each (total $%.3f/hour)" %
(opts.slaves, best_price, opts.slaves * best_price)), file=sys.stderr)
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnetId(), groups=[slave_group.id], associate_public_ip_address=True)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
slave_reqs = conn.request_spot_instances(
price = best_price,
image_id = opts.ami,
launch_group = "launch-group-%s" % cluster_name,
placement = zone,
count = num_slaves_this_zone,
key_name = opts.key_pair,
instance_type = opts.instance_type,
block_device_map = block_map,
user_data = user_data,
instance_profile_arn = opts.profile,
network_interfaces = interfaces)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print ("Waiting for spot instances to be granted", file=sys.stderr)
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print ("All %d slaves granted" % opts.slaves, file=sys.stderr)
reservations = conn.get_all_instances(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
# print >> stderr, ".",
print("%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves))
except:
print("Canceling spot instance requests", file=sys.stderr)
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print(("WARNING: %d instances are still running" % running), file=sys.stderr)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(key_name = opts.key_pair,
security_group_ids = [slave_group.id],
instance_type = opts.instance_type,
subnet_id = subnetId(),
placement = zone,
min_count = num_slaves_this_zone,
max_count = num_slaves_this_zone,
block_device_map = block_map,
user_data = user_data,
instance_profile_arn = opts.profile)
slave_nodes += slave_res.instances
print("Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
zone, slave_res.id), file=sys.stderr)
i += 1
# Launch or resume masters
if existing_masters:
print("Starting master...")
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
if opts.spot_price != None:
best_price = find_best_price(conn,master_type,opts.zone,opts.spot_price)
# Launch spot instances with the requested price
print(("Requesting master as spot instances with price $%.3f/hour" % (best_price)), file=sys.stderr)
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnetId(), groups=[master_group.id], associate_public_ip_address=True)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
master_reqs = conn.request_spot_instances(
price = best_price,
image_id = opts.ami,
launch_group = "launch-group-%s" % cluster_name,
placement = opts.zone,
count = 1,
key_name = opts.key_pair,
instance_type = master_type,
block_device_map = block_map,
user_data = user_data,
instance_profile_arn = opts.profile,
network_interfaces = interfaces)
my_req_ids = [r.id for r in master_reqs]
print("Waiting for spot instance to be granted", file=sys.stderr)
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests(request_ids=my_req_ids)
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
#print(id_to_req[i].state, file=sys.stderr)
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == 1:
print ( "Master granted", file=sys.stderr)
reservations = conn.get_all_instances(active_instance_ids)
master_nodes = []
for r in reservations:
master_nodes += r.instances
break
else:
# print >> stderr, ".",
print("%d of %d masters granted, waiting longer" % (
len(active_instance_ids), 1))
except:
print("Canceling spot instance requests", file=sys.stderr)
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
                (master_nodes, slave_nodes) = get_existing_cluster(
                    conn, opts, cluster_name, die_on_error=False)
                running = len(master_nodes) + len(slave_nodes)
if running:
print(("WARNING: %d instances are still running" % running), file=sys.stderr)
sys.exit(0)
else:
master_res = image.run(key_name = opts.key_pair,
security_group_ids = [master_group.id],
instance_type = master_type,
subnet_id = subnetId(),
placement = opts.zone,
min_count = 1,
max_count = 1,
block_device_map = block_map,
user_data = user_data,
instance_profile_arn = opts.profile)
master_nodes = master_res.instances
print("Launched master in %s, regid = %s" % (zone, master_res.id), file=sys.stderr)
# Return all the instances
return (master_nodes, slave_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters and slaves
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
print("Searching for existing cluster %s ..." % cluster_name, file=sys.stderr)
reservations = conn.get_all_instances()
master_nodes = []
slave_nodes = []
for res in reservations:
active = [i for i in res.instances if is_active(i)]
for inst in active:
group_names = [g.name for g in inst.groups]
if (cluster_name + "-master") in group_names:
master_nodes.append(inst)
elif (cluster_name + "-slaves") in group_names:
slave_nodes.append(inst)
if any((master_nodes, slave_nodes)):
print("Spark standalone cluster started at http://%s:8080" % master_nodes[0].public_dns_name)
print("Spark private ip address %s" % master_nodes[0].private_dns_name)
print("Spark standalone cluster started at http://%s:8080" % master_nodes[0].public_dns_name, file=sys.stderr)
print(("Found %d master(s), %d slaves" %
(len(master_nodes), len(slave_nodes))), file=sys.stderr)
get_master_setup_files(master_nodes[0].private_dns_name, opts)
if opts.ganglia:
print("Ganglia started at http://%s:5080/ganglia" % master_nodes[0].public_dns_name, file=sys.stderr)
if master_nodes != [] or not die_on_error:
return (master_nodes, slave_nodes)
else:
if master_nodes == [] and slave_nodes != []:
print("ERROR: Could not find master in group %s-master" %cluster_name)
else:
print("ERROR: Could not find any existing cluster")
sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master_nodes[0].update()
master = master_nodes[0]
print ("Spark private ip address %s" % master.private_dns_name)
if deploy_ssh_key:
print("Generating cluster's SSH key on master...")
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master.private_dns_name, opts, key_setup)
dot_ssh_tar = ssh_read(master.private_dns_name, opts, ['tar', 'c', '.ssh'])
print("Transferring cluster's SSH key to slaves...", file=sys.stderr)
for slave in slave_nodes:
slave.update()
ssh_write(slave.private_dns_name, opts, ['tar', 'x'], dot_ssh_tar)
modules = ['mysql', 'spark', 'ephemeral-hdfs', 'persistent-hdfs',
'mapreduce', 'spark-standalone']
if opts.hadoop_major_version == "1":
modules = filter(lambda x: x != "mapreduce", modules)
if opts.ganglia:
modules.append('ganglia')
# NOTE: We should clone the repository before running deploy_files to
# prevent ec2-variables.sh from being overwritten
ssh(
host=master.private_dns_name,
opts=opts,
command="rm -rf spark-ec2"
+ " && "
+ "git clone {r} -b {b} spark-ec2".format(r=DEFAULT_SPARK_EC2_GITHUB_REPO, b=DEFAULT_SPARK_EC2_BRANCH)
)
print("Deploying files to master... ", file=sys.stderr)
(path, name) = os.path.split(__file__)
deploy_files(conn, path+"/deploy.generic", opts, master_nodes, slave_nodes, modules)
print("Running setup on master... ", file=sys.stderr)
setup_spark_cluster(master, opts)
get_master_setup_files(master.private_dns_name, opts)
print( stderr,"Done!", file=sys.stderr)
def get_master_setup_files(master, opts):
scp(master, opts, "/root/spark/jars/datanucleus*.jar", "%s/lib" % SPARK_EC2_DIR)
scp(master, opts, "/root/spark/conf/*", "%s/conf" % SPARK_EC2_DIR)
def setup_spark_cluster(master, opts):
ssh(master.private_dns_name, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master.private_dns_name, opts, "spark-ec2/setup.sh")
master.update()
print("Spark standalone cluster started at http://%s:8080" % master.public_dns_name)
print("Spark standalone cluster started at http://%s:8080" % master.public_dns_name, file=sys.stderr)
if opts.ganglia:
print("Ganglia started at http://%s:5080/ganglia" % master.public_dns_name, file=sys.stderr)
# Wait for a whole cluster (masters, slaves and ZooKeeper) to start up
def wait_for_cluster(conn, wait_secs, master_nodes, slave_nodes):
print("Waiting for instances to start up...", file=sys.stderr)
time.sleep(5)
wait_for_instances(conn, master_nodes)
wait_for_instances(conn, slave_nodes)
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
"""
Wait for all the instances in the cluster to reach a designated state.
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
start_time = datetime.now()
num_attempts = 0
while True:
time.sleep(5 * num_attempts) # seconds
for i in cluster_instances:
i.update()
statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances])
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
all(s.system_status.status == 'ok' for s in statuses) and \
all(s.instance_status.status == 'ok' for s in statuses) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
end_time = datetime.now()
print("Cluster is now in '{s}' state. Waited {t} seconds.".format(
s=cluster_state,
t=(end_time - start_time).seconds
))
def is_cluster_ssh_available(cluster_instances, opts):
"""
Check if SSH is available on all the instances in a cluster.
"""
for i in cluster_instances:
dns_name = i.private_dns_name
if not is_ssh_available(host=dns_name, opts=opts):
return False
else:
return True
def is_ssh_available(host, opts, print_ssh_output=True):
"""
Check if SSH is available on a host.
"""
s = subprocess.Popen(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT # we pipe stderr through stdout to preserve output order
)
cmd_output = s.communicate()[0] # [1] is stderr, which we redirected to stdout
if s.returncode != 0 and print_ssh_output:
# extra leading newline is for spacing in wait_for_cluster_state()
print(textwrap.dedent("""\n
Warning: SSH connection error. (This could be temporary.)
Host: {h}
SSH return code: {r}
SSH output: {o}
""").format(
h=host,
r=s.returncode,
o=cmd_output.strip()
))
return s.returncode == 0
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
# Last Updated: 2015-05-08
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
disks_by_instance = {
"c1.medium": 1,
"c1.xlarge": 4,
"c3.large": 2,
"c3.xlarge": 2,
"c3.2xlarge": 2,
"c3.4xlarge": 2,
"c3.8xlarge": 2,
"c4.large": 0,
"c4.xlarge": 0,
"c4.2xlarge": 0,
"c4.4xlarge": 0,
"c4.8xlarge": 0,
"cc1.4xlarge": 2,
"cc2.8xlarge": 4,
"cg1.4xlarge": 2,
"cr1.8xlarge": 2,
"d2.xlarge": 3,
"d2.2xlarge": 6,
"d2.4xlarge": 12,
"d2.8xlarge": 24,
"g2.2xlarge": 1,
"g2.8xlarge": 2,
"hi1.4xlarge": 2,
"hs1.8xlarge": 24,
"i2.xlarge": 1,
"i2.2xlarge": 2,
"i2.4xlarge": 4,
"i2.8xlarge": 8,
"m1.small": 1,
"m1.medium": 1,
"m1.large": 2,
"m1.xlarge": 4,
"m2.xlarge": 1,
"m2.2xlarge": 1,
"m2.4xlarge": 2,
"m3.medium": 1,
"m3.large": 1,
"m3.xlarge": 2,
"m3.2xlarge": 2,
"r3.large": 1,
"r3.xlarge": 1,
"r3.2xlarge": 1,
"r3.4xlarge": 1,
"r3.8xlarge": 2,
"r4.xlarge": 1,
"r4.2xlarge": 1,
"r4.4xlarge": 1,
"r4.8xlarge": 1,
"r4.16xlarge": 1,
"x1e.xlarge": 1,
"x1e.2xlarge": 1,
"x1e.4xlarge": 1,
"x1e.8xlarge": 1,
"x1e.16xlarge": 1,
"x1e.32xlarge": 2,
"t1.micro": 0,
"t2.micro": 0,
"t2.small": 0,
"t2.medium": 0,
}
if instance_type in disks_by_instance:
return disks_by_instance[instance_type]
else:
print("WARNING: Don't know number of disks on instance type %s; assuming 1"
              % instance_type, file=sys.stderr)
return 1
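# Illustrative examples (not executed): get_num_disks("m3.xlarge") -> 2,
# get_num_disks("c4.large") -> 0; an instance type missing from the table above
# (e.g. a hypothetical "m5.large") triggers the warning and returns 1.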
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
active_master = master_nodes[0].private_dns_name
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
spark_local_dirs = "/mnt/spark"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
spark_local_dirs += ",/mnt%d/spark" % i
cluster_url = "%s:7077" % active_master
if "." in opts.spark_version:
# Pre-built Spark deploy
spark_v = get_validate_spark_version(opts.spark_version, DEFAULT_SPARK_GITHUB_REPO)
tachyon_v = get_tachyon_version(spark_v)
else:
# Spark-only custom deploy
spark_v = "%s|%s" % (DEFAULT_SPARK_GITHUB_REPO, opts.spark_version)
tachyon_v = ""
print("Deploying Spark via git hash; Tachyon won't be set up")
modules = filter(lambda x: x != "tachyon", modules)
worker_instances_str = "%d" % opts.worker_instances if opts.worker_instances else ""
template_vars = {
"master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
"active_master": active_master,
"slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs,
"spark_local_dirs": spark_local_dirs,
"swap": str(opts.swap),
"modules": '\n'.join(modules),
"spark_version": spark_v,
"hadoop_major_version": opts.hadoop_major_version,
"metastore_user": "hive",
"metastore_passwd": ''.join(random.SystemRandom().choice(string.uppercase + string.digits) for _ in xrange(10)),
"spark_worker_instances": worker_instances_str,
"spark_master_opts": opts.master_opts
}
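    # Illustrative example (not executed): a template file containing the literal
    # text {{cluster_url}} would be rewritten by the loop below to something like
    # "ip-10-0-0-1.ec2.internal:7077" (hypothetical hostname of the active master).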
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
print(root_dir)
tmp_dir = tempfile.mkdtemp()
for path, dirs, files in os.walk(root_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
print(tmp_dir)
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
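# Illustrative examples (not executed):
#   stringify_command("ls -l") -> "ls -l" (strings pass through unchanged)
#   stringify_command(["tar", "c", ".ssh"]) -> "tar c .ssh" (lists are shell-quoted and joined)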
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no', '-o LogLevel=error']
# parts += ['-i', '~/.ssh/id_rsa']
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
def scp_command(opts):
return ['scp'] + ssh_args(opts)
def pub_key():
key_gen = """[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa)
"""
subprocess.check_call(key_gen, shell=True)
return subprocess.Popen("cat ~/.ssh/id_rsa.pub", shell=True, stdout=subprocess.PIPE).communicate()[0]
def profile():
return subprocess.Popen("""curl -s http://169.254.169.254/latest/meta-data/iam/info | grep InstanceProfileArn""", shell=True, stdout=subprocess.PIPE).communicate()[0].split("\"")[3]
def region():
return subprocess.Popen("""curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region""", shell=True, stdout=subprocess.PIPE).communicate()[0].split("\"")[3]
def zone():
return subprocess.Popen("""curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep availabilityZone""", shell=True, stdout=subprocess.PIPE).communicate()[0].split("\"")[3]
def subnetId():
mac = subprocess.Popen("""curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ | grep /""", shell=True, stdout=subprocess.PIPE).communicate()[0].split("/")[0]
return subprocess.Popen("""curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/""" + mac + """/subnet-id/""", shell=True, stdout=subprocess.PIPE).communicate()[0]
def keypair():
return subprocess.Popen("""curl -s http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key""", shell=True, stdout=subprocess.PIPE).communicate()[0].split(" ")[2].strip()
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
#print >> stderr, ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host), stringify_command(command)]
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host), stringify_command(command)])
except subprocess.CalledProcessError as e:
if (tries > 25):
print('Failed to SSH to remote host %s after %s retries.' % (host, tries), file=sys.stderr)
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError('Failed to SSH to remote host %s.\nPlease check that you have provided the correct --identity-file and --key-pair parameters and try again.' % (host))
else:
raise e
#print >> stderr,"Error executing remote command, retrying after 30 seconds: {0}".format(e)
time.sleep(30)
tries = tries + 1
def scp(host, opts, src, target):
tries = 0
while True:
try:
return subprocess.check_call(
scp_command(opts) + ['%s@%s:%s' % (opts.user, host,src), target])
except subprocess.CalledProcessError as e:
if (tries > 25):
print("Failed to SCP to remote host {0} after r retries.".format(host), file=sys.stderr)
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError("Failed to SCP to remote host {0}.\nPlease check that you have provided the correct --identity-file and --key-pair parameters and try again.".format(host))
else:
raise e
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, input):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
proc.stdin.write(input)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif (tries > 15):
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print("Error {0} while executing remote command, retrying after 30 seconds".format(status), file=sys.stderr)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
num_slaves_this_zone = total // num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
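# Illustrative example (not executed): spreading 10 slaves over 3 zones yields
#   get_partition(10, 3, 0) -> 4, get_partition(10, 3, 1) -> 3, get_partition(10, 3, 2) -> 3
# i.e. the remainder goes to the earliest zones.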
def real_main():
(opts, action, cluster_name) = parse_args()
get_validate_spark_version(opts.spark_version, DEFAULT_SPARK_GITHUB_REPO)
try:
conn = ec2.connect_to_region(opts.region)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.slaves <= 0:
print("ERROR: You have to start at least 1 slave", file=sys.stderr)
sys.exit(1)
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name)
else:
start_secs = time.time()
(master_nodes, slave_nodes) = launch_cluster(
conn, opts, cluster_name)
wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
print("Provisioning took %.3f minutes" % ((time.time() - start_secs) / 60.0), file=sys.stderr)
start_secs = time.time()
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
print("Setup took %.3f minutes" % ((time.time() - start_secs)/60.0), file=sys.stderr)
elif action == "destroy":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Terminating master...", file=sys.stderr)
for inst in master_nodes:
inst.terminate()
print("Terminating slaves...", file=sys.stderr)
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
print("Deleting security groups (this will take some time)...", file=sys.stderr)
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated'
)
attempt = 1;
while attempt <= 3:
print("Attempt %d" % attempt, file=sys.stderr)
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print("Deleting rules in security group " + group.name, file=sys.stderr)
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
conn.delete_security_group(group_id=group.id)
print("Deleted security group %s" % group.name)
except boto.exception.EC2ResponseError:
success = False;
print("Failed to delete security group " + group.name, file=sys.stderr)
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success: break;
attempt += 1
if not success:
print("Failed to delete all security groups after 3 tries.", file=sys.stderr)
print ("Try re-running in a few minutes.", file=sys.stderr)
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
print("Logging into master " + master + "...")
proxy_opt = []
if opts.proxy_port != None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print(master_nodes[0].public_dns_name)
elif action == "stop":
response = raw_input("Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Stopping master...", file=sys.stderr)
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print("Stopping slaves...", file=sys.stderr)
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print("Starting slaves...", file=sys.stderr)
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print("Starting master...", file=sys.stderr)
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print("Invalid action: %s" % action, file=sys.stderr)
sys.exit(1)
def find_best_price(conn,instance,zone, factor):
last_hour_zone = get_spot_price(conn,zone,datetime.utcnow()-timedelta(hours=1),instance)
average_price_last_hour = sum(i.price for i in last_hour_zone)/float(len(last_hour_zone))
return average_price_last_hour*factor
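# Illustrative example (not executed): if the average spot price over the last
# hour for the requested instance type in this zone was $0.10 and the factor
# passed in (opts.spot_price on the command line) is 1.5, the bid placed by the
# callers above is $0.15.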
def get_spot_price(conn,zone,start_date_hour,instance):
return conn.get_spot_price_history(start_time=start_date_hour.strftime("%Y-%m-%dT%H:%M:%SZ"),end_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"),instance_type=instance , product_description="Linux/UNIX",availability_zone=zone)
def main():
try:
real_main()
    except UsageError as e:
print(e, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
| eleflow/uberdata | iuberdata_core/src/main/resources/python/spark_ec2.py | Python | apache-2.0 | 50,532 |
import inspect
import sys
import textwrap
from pathlib import Path
from typing import Callable
from typing import Optional
import pytest
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_main_py
from _pytest.doctest import _is_mocked
from _pytest.doctest import _is_setup_py
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
from _pytest.pytester import Pytester
class TestDoctests:
def test_collect_testtextfile(self, pytester: Pytester):
w = pytester.maketxtfile(whatever="")
checkfile = pytester.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
"""
)
for x in (pytester.path, checkfile):
# print "checking that %s returns custom items" % (x,)
items, reprec = pytester.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, reprec = pytester.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, pytester: Pytester):
path = pytester.makepyfile(whatever="#")
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, pytester: Pytester):
path = pytester.makepyfile(whatever='""">>> pass"""')
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, pytester: Pytester):
path = pytester.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
@pytest.mark.parametrize("filename", ["__init__", "whatever"])
def test_collect_module_two_doctest_no_modulelevel(
self,
pytester: Pytester,
filename: str,
) -> None:
path = pytester.makepyfile(
**{
filename: """
'# Empty'
def my_func():
">>> magic = 42 "
def useless():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""",
},
)
for p in (path, pytester.path):
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, pytester: Pytester):
p = pytester.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(failed=1)
def test_new_pattern(self, pytester: Pytester):
p = pytester.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, pytester: Pytester):
"""Test support for multiple --doctest-glob arguments (#1255)."""
pytester.maketxtfile(
xdoc="""
>>> 1
1
"""
)
pytester.makefile(
".foo",
test="""
>>> 1
1
""",
)
pytester.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
assert {x.name for x in pytester.path.iterdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = pytester.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
def test_encoding(self, pytester, test_string, encoding):
"""Test support for doctest_encoding ini option."""
pytester.makeini(
"""
[pytest]
doctest_encoding={}
""".format(
encoding
)
)
doctest = """
>>> "{}"
{}
""".format(
test_string, repr(test_string)
)
fn = pytester.path / "test_encoding.txt"
fn.write_text(doctest, encoding=encoding)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_doctest_unexpected_exception(self, pytester: Pytester):
pytester.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"test_doctest_unexpected_exception.txt F *",
"",
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_unexpected_exception.txt _*",
"001 >>> i = 0",
"002 >>> 0 / i",
"UNEXPECTED EXCEPTION: ZeroDivisionError*",
"Traceback (most recent call last):",
' File "*/doctest.py", line *, in __run',
" *",
*((" *^^^^*",) if sys.version_info >= (3, 11) else ()),
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
"ZeroDivisionError: division by zero",
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
],
consecutive=True,
)
def test_doctest_outcomes(self, pytester: Pytester):
pytester.maketxtfile(
test_skip="""
>>> 1
1
>>> import pytest
>>> pytest.skip("")
>>> 2
3
""",
test_xfail="""
>>> import pytest
>>> pytest.xfail("xfail_reason")
>>> foo
bar
""",
test_importorskip="""
>>> import pytest
>>> pytest.importorskip("doesnotexist")
>>> foo
bar
""",
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"collected 3 items",
"",
"test_importorskip.txt s *",
"test_skip.txt s *",
"test_xfail.txt x *",
"",
"*= 2 skipped, 1 xfailed in *",
]
)
def test_docstring_partial_context_around_error(self, pytester: Pytester):
"""Test that we show some context before the actual line of a failing
doctest.
"""
pytester.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
"005*text-line-3",
"006*text-line-4",
"013*text-line-11",
"014*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
# lines below should be trimmed out
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, pytester: Pytester):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
pytester.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
"003*text-line-1",
"004*text-line-2",
"006*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
def test_doctest_linedata_missing(self, pytester: Pytester):
pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""
)
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
["*hello*", "006*>>> 1/0*", "*UNEXPECTED*ZeroDivision*", "*1 failed*"]
)
def test_doctest_linedata_on_property(self, pytester: Pytester):
pytester.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_linedata_on_property.Sample.some_property _*",
"004 ",
"005 >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_linedata_on_property.py:5: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_no_linedata_on_overriden_property(self, pytester: Pytester):
pytester.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
some_property = property(some_property.__get__, None, None, some_property.__doc__)
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_no_linedata_on_overriden_property.Sample.some_property _*",
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example",
"[?][?][?] >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_no_linedata_on_overriden_property.py:None: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_unex_importerror_only_txt(self, pytester: Pytester):
pytester.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
result = pytester.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
"*UNEXPECTED*ModuleNotFoundError*",
"ModuleNotFoundError: No module named *asdal*",
]
)
def test_doctest_unex_importerror_with_module(self, pytester: Pytester):
pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
)
)
pytester.maketxtfile(
"""
>>> import hello
>>>
"""
)
result = pytester.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
"*ModuleNotFoundError: No module named *asdals*",
"*Interrupted: 1 error during collection*",
]
)
def test_doctestmodule(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> x = 1
>>> x == 1
False
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, pytester: Pytester):
p = pytester.mkpydir("hello")
p.joinpath("__init__.py").write_text(
textwrap.dedent(
"""\
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""
)
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
"004 *>>> i + 1",
"*Expected:",
"* 2",
"*Got:",
"* 1",
"*:4: DocTestFailure",
]
)
def test_txtfile_failing(self, pytester: Pytester):
p = pytester.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
result = pytester.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
"002 >>> i + 1",
"Expected:",
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure",
]
)
def test_txtfile_with_fixtures(self, pytester: Pytester):
p = pytester.maketxtfile(
"""
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
"""
)
p = pytester.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def useless():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, pytester: Pytester):
p = pytester.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = pytester.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = pytester.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, pytester: Pytester):
"""Fix internal error with docstrings containing non-ascii characters."""
pytester.makepyfile(
'''\
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
def test_ignore_import_errors_on_doctest(self, pytester: Pytester):
p = pytester.makepyfile(
"""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
"""
)
reprec = pytester.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, pytester: Pytester):
"""#713: Fix --junit-xml option when used with --doctest-modules."""
p = pytester.makepyfile(
"""
def foo():
'''
>>> 1 + 1
3
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = pytester.maketxtfile(
test_unicode_doctest="""
.. doctest::
>>> print("Hi\\n\\nByé")
Hi
...
Byé
>>> 1 / 0 # Byé
1
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
def test_unicode_doctest_module(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = pytester.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_print_unicode_value(self, pytester: Pytester):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
p = pytester.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
>>> print('\xE5\xE9\xEE\xF8\xFC')
åéîøü
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_reportinfo(self, pytester: Pytester):
"""Make sure that DoctestItem.reportinfo() returns lineno."""
p = pytester.makepyfile(
test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
"""
)
items, reprec = pytester.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest ignores valid setup.py files when ran
with --doctest-modules
"""
p = pytester.makepyfile(
setup="""
from setuptools import setup, find_packages
if __name__ == '__main__':
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_main_py_does_not_cause_import_errors(self, pytester: Pytester):
p = pytester.copy_example("doctest/main_py")
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 2 items*", "*1 failed, 1 passed*"])
def test_invalid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest reads setup.py files that are not used
for python packages when ran with --doctest-modules
"""
p = pytester.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
class TestLiterals:
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_unicode(self, pytester, config_mode):
"""Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
"""
if config_mode == "ini":
pytester.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_UNICODE
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_UNICODE"
pytester.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii') {comment}
'12'
""".format(
comment=comment
)
)
pytester.makepyfile(
foo="""
def foo():
'''
>>> b'12'.decode('ascii') {comment}
'12'
'''
""".format(
comment=comment
)
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_bytes(self, pytester, config_mode):
"""Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
"""
if config_mode == "ini":
pytester.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_BYTES
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_BYTES"
pytester.maketxtfile(
test_doc="""
>>> b'foo' {comment}
'foo'
""".format(
comment=comment
)
)
pytester.makepyfile(
foo="""
def foo():
'''
>>> b'foo' {comment}
'foo'
'''
""".format(
comment=comment
)
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
def test_unicode_string(self, pytester: Pytester):
"""Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
"""
pytester.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii')
'12'
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_bytes_literal(self, pytester: Pytester):
"""Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
"""
pytester.maketxtfile(
test_doc="""
>>> b'foo'
'foo'
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(failed=1)
def test_number_re(self) -> None:
_number_re = _get_checker()._number_re # type: ignore
for s in [
"1.",
"+1.",
"-1.",
".1",
"+.1",
"-.1",
"0.1",
"+0.1",
"-0.1",
"1e5",
"+1e5",
"1e+5",
"+1e+5",
"1e-5",
"+1e-5",
"-1e-5",
"1.2e3",
"-1.2e-3",
]:
print(s)
m = _number_re.match(s)
assert m is not None
assert float(m.group()) == pytest.approx(float(s))
for s in ["1", "abc"]:
print(s)
assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_number_precision(self, pytester, config_mode):
"""Test the NUMBER option."""
if config_mode == "ini":
pytester.makeini(
"""
[pytest]
doctest_optionflags = NUMBER
"""
)
comment = ""
else:
comment = "#doctest: +NUMBER"
pytester.maketxtfile(
test_doc="""
Scalars:
>>> import math
>>> math.pi {comment}
3.141592653589793
>>> math.pi {comment}
3.1416
>>> math.pi {comment}
3.14
>>> -math.pi {comment}
-3.14
>>> math.pi {comment}
3.
>>> 3. {comment}
3.0
>>> 3. {comment}
3.
>>> 3. {comment}
3.01
>>> 3. {comment}
2.99
>>> .299 {comment}
.3
>>> .301 {comment}
.3
>>> 951. {comment}
1e3
>>> 1049. {comment}
1e3
>>> -1049. {comment}
-1e3
>>> 1e3 {comment}
1e3
>>> 1e3 {comment}
1000.
Lists:
>>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
[3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
>>> [[0.333, 0.667], [0.999, 1.333]] {comment}
[[0.33, 0.667], [0.999, 1.333]]
>>> [[[0.101]]] {comment}
[[[0.1]]]
Doesn't barf on non-numbers:
>>> 'abc' {comment}
'abc'
>>> None {comment}
""".format(
comment=comment
)
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"expression,output",
[
# ints shouldn't match floats:
("3.0", "3"),
("3e0", "3"),
("1e3", "1000"),
("3", "3.0"),
# Rounding:
("3.1", "3.0"),
("3.1", "3.2"),
("3.1", "4.0"),
("8.22e5", "810000.0"),
# Only the actual output is rounded up, not the expected output:
("3.0", "2.98"),
("1e3", "999"),
# The current implementation doesn't understand that numbers inside
# strings shouldn't be treated as numbers:
pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
],
)
def test_number_non_matches(self, pytester, expression, output):
pytester.maketxtfile(
test_doc="""
>>> {expression} #doctest: +NUMBER
{output}
""".format(
expression=expression, output=output
)
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=0, failed=1)
def test_number_and_allow_unicode(self, pytester: Pytester):
pytester.maketxtfile(
test_doc="""
>>> from collections import namedtuple
>>> T = namedtuple('T', 'a b c')
>>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
T(a=0.233, b=u'str', c='bytes')
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
class TestDoctestSkips:
"""
If all examples in a doctest are skipped due to the SKIP option, then
the tests should be SKIPPED rather than PASSED. (#957)
"""
@pytest.fixture(params=["text", "module"])
def makedoctest(self, pytester, request):
def makeit(doctest):
mode = request.param
if mode == "text":
pytester.maketxtfile(doctest)
else:
assert mode == "module"
pytester.makepyfile('"""\n%s"""' % doctest)
return makeit
def test_one_skipped(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
4
"""
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=1)
def test_one_skipped_failed(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
200
"""
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(failed=1)
def test_all_skipped(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2 # doctest: +SKIP
200
"""
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(skipped=1)
def test_vacuous_all_skipped(self, pytester, makedoctest):
makedoctest("")
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
def test_continue_on_failure(self, pytester: Pytester):
pytester.maketxtfile(
test_something="""
>>> i = 5
>>> def foo():
... raise ValueError('error1')
>>> foo()
>>> i
>>> i + 2
7
>>> i + 1
"""
)
result = pytester.runpytest(
"--doctest-modules", "--doctest-continue-on-failure"
)
result.assert_outcomes(passed=0, failed=1)
# The lines that contains the failure are 4, 5, and 8. The first one
# is a stack trace and the other two are mismatches.
result.stdout.fnmatch_lines(
["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
)
def test_skipping_wrapped_test(self, pytester):
"""
Issue 8796: INTERNALERROR raised when skipping a decorated DocTest
through pytest_collection_modifyitems.
"""
pytester.makeconftest(
"""
import pytest
from _pytest.doctest import DoctestItem
def pytest_collection_modifyitems(config, items):
skip_marker = pytest.mark.skip()
for item in items:
if isinstance(item, DoctestItem):
item.add_marker(skip_marker)
"""
)
pytester.makepyfile(
"""
from contextlib import contextmanager
@contextmanager
def my_config_context():
'''
>>> import os
'''
"""
)
result = pytester.runpytest("--doctest-modules")
assert "INTERNALERROR" not in result.stdout.str()
result.assert_outcomes(skipped=1)
class TestDoctestAutoUseFixtures:
SCOPES = ["module", "session", "class", "function"]
def test_doctest_module_session_fixture(self, pytester: Pytester):
"""Test that session fixtures are initialized for doctest modules (#768)."""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
pytester.makeconftest(
"""
import pytest
import sys
@pytest.fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
"""
)
pytester.makepyfile(
foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*2 passed*"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("enable_doctest", [True, False])
def test_fixture_scopes(self, pytester, scope, enable_doctest):
"""Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope
)
)
pytester.makepyfile(
test_1='''
def test_foo():
"""
>>> getfixture('auto') + 1
100
"""
def test_bar():
assert 1
'''
)
params = ("--doctest-modules",) if enable_doctest else ()
passes = 3 if enable_doctest else 2
result = pytester.runpytest(*params)
result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("autouse", [True, False])
@pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
def test_fixture_module_doctest_scopes(
self, pytester, scope, autouse, use_fixture_in_doctest
):
"""Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture(autouse={autouse}, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope, autouse=autouse
)
)
if use_fixture_in_doctest:
pytester.maketxtfile(
test_doc="""
>>> getfixture('auto')
99
"""
)
else:
pytester.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
def test_auto_use_request_attributes(self, pytester, scope):
"""Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
"""
pytester.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
if "{scope}" == 'module':
assert request.module is None
if "{scope}" == 'class':
assert request.cls is None
if "{scope}" == 'function':
assert request.function is None
return 99
""".format(
scope=scope
)
)
pytester.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = pytester.runpytest("--doctest-modules")
str(result.stdout.no_fnmatch_line("*FAILURES*"))
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
class TestDoctestNamespaceFixture:
SCOPES = ["module", "session", "class", "function"]
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_doctestfile(self, pytester, scope):
"""
Check that inserting something into the namespace works in a
simple text file doctest
"""
pytester.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = pytester.maketxtfile(
"""
>>> print(cl.__name__)
contextlib
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_pyfile(self, pytester, scope):
"""
Check that inserting something into the namespace works in a
simple Python file docstring doctest
"""
pytester.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = pytester.makepyfile(
"""
def foo():
'''
>>> print(cl.__name__)
contextlib
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
def _run_doctest_report(self, pytester, format):
pytester.makepyfile(
"""
def foo():
'''
>>> foo()
a b
0 1 4
1 2 4
2 3 6
'''
print(' a b\\n'
'0 1 4\\n'
'1 2 5\\n'
'2 3 6')
"""
)
return pytester.runpytest("--doctest-modules", "--doctest-report", format)
@pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
def test_doctest_report_udiff(self, pytester, format):
result = self._run_doctest_report(pytester, format)
result.stdout.fnmatch_lines(
[" 0 1 4", " -1 2 4", " +1 2 5", " 2 3 6"]
)
def test_doctest_report_cdiff(self, pytester: Pytester):
result = self._run_doctest_report(pytester, "cdiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" ! 1 2 4",
" 2 3 6",
" --- 1,4 ----",
" a b",
" 0 1 4",
" ! 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_ndiff(self, pytester: Pytester):
result = self._run_doctest_report(pytester, "ndiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" - 1 2 4",
" ? ^",
" + 1 2 5",
" ? ^",
" 2 3 6",
]
)
@pytest.mark.parametrize("format", ["none", "only_first_failure"])
def test_doctest_report_none_or_only_first_failure(self, pytester, format):
result = self._run_doctest_report(pytester, format)
result.stdout.fnmatch_lines(
[
"Expected:",
" a b",
" 0 1 4",
" 1 2 4",
" 2 3 6",
"Got:",
" a b",
" 0 1 4",
" 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_invalid(self, pytester: Pytester):
result = self._run_doctest_report(pytester, "obviously_invalid_format")
result.stderr.fnmatch_lines(
[
"*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
]
)
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, pytester: Pytester):
pytest.importorskip(mock_module)
pytester.makepyfile(
"""
from {mock_module} import call
class Example(object):
'''
>>> 1 + 1
2
'''
""".format(
mock_module=mock_module
)
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
class Broken:
def __getattr__(self, _):
raise KeyError("This should be an AttributeError")
@pytest.mark.parametrize( # pragma: no branch (lambdas are not called)
"stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True]
)
def test_warning_on_unwrap_of_broken_object(
stop: Optional[Callable[[object], object]]
) -> None:
bad_instance = Broken()
assert inspect.unwrap.__module__ == "inspect"
with _patch_unwrap_mock_aware():
assert inspect.unwrap.__module__ != "inspect"
with pytest.warns(
pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
):
with pytest.raises(KeyError):
inspect.unwrap(bad_instance, stop=stop) # type: ignore[arg-type]
assert inspect.unwrap.__module__ == "inspect"
def test_is_setup_py_not_named_setup_py(tmp_path: Path) -> None:
not_setup_py = tmp_path.joinpath("not_setup.py")
not_setup_py.write_text('from setuptools import setup; setup(name="foo")')
assert not _is_setup_py(not_setup_py)
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_is_a_setup_py(tmp_path: Path, mod: str) -> None:
setup_py = tmp_path.joinpath("setup.py")
setup_py.write_text(f'from {mod} import setup; setup(name="foo")', "utf-8")
assert _is_setup_py(setup_py)
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_different_encoding(tmp_path: Path, mod: str) -> None:
setup_py = tmp_path.joinpath("setup.py")
contents = (
"# -*- coding: cp1252 -*-\n"
'from {} import setup; setup(name="foo", description="€")\n'.format(mod)
)
setup_py.write_bytes(contents.encode("cp1252"))
assert _is_setup_py(setup_py)
@pytest.mark.parametrize(
"name, expected", [("__main__.py", True), ("__init__.py", False)]
)
def test_is_main_py(tmp_path: Path, name: str, expected: bool) -> None:
dunder_main = tmp_path.joinpath(name)
assert _is_main_py(dunder_main) == expected
| RonnyPfannschmidt/pytest | testing/test_doctest.py | Python | mit | 48,157 |
#!/usr/bin/env python
"""Fortran to Python Interface Generator.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['run_main', 'compile', 'f2py_testing']
import sys
import subprocess
import os
import numpy as np
from . import f2py2e
from . import f2py_testing
from . import diagnose
run_main = f2py2e.run_main
main = f2py2e.main
def compile(source,
modulename='untitled',
extra_args='',
verbose=True,
source_fn=None,
extension='.f'
):
"""
Build extension module from a Fortran 77 source string with f2py.
Parameters
----------
source : str or bytes
Fortran source of module / subroutine to compile
.. versionchanged:: 1.16.0
Accept str as well as bytes
modulename : str, optional
The name of the compiled python module
extra_args : str or list, optional
Additional parameters passed to f2py
.. versionchanged:: 1.16.0
A list of args may also be provided.
verbose : bool, optional
Print f2py output to screen
source_fn : str, optional
Name of the file where the fortran source is written.
The default is to use a temporary file with the extension
provided by the `extension` parameter
extension : {'.f', '.f90'}, optional
Filename extension if `source_fn` is not provided.
The extension tells which fortran standard is used.
The default is `.f`, which implies F77 standard.
.. versionadded:: 1.11.0
Returns
-------
result : int
0 on success
Examples
--------
.. include:: compile_session.dat
:literal:
"""
import tempfile
import shlex
if source_fn is None:
f, fname = tempfile.mkstemp(suffix=extension)
# f is a file descriptor so need to close it
# carefully -- not with .close() directly
os.close(f)
else:
fname = source_fn
if not isinstance(source, str):
source = str(source, 'utf-8')
try:
with open(fname, 'w') as f:
f.write(source)
args = ['-c', '-m', modulename, f.name]
if isinstance(extra_args, np.compat.basestring):
is_posix = (os.name == 'posix')
extra_args = shlex.split(extra_args, posix=is_posix)
args.extend(extra_args)
c = [sys.executable,
'-c',
'import numpy.f2py as f2py2e;f2py2e.main()'] + args
try:
output = subprocess.check_output(c)
except subprocess.CalledProcessError as exc:
status = exc.returncode
output = ''
except OSError:
# preserve historic status code used by exec_command()
status = 127
output = ''
else:
status = 0
output = output.decode()
if verbose:
print(output)
finally:
if source_fn is None:
os.remove(fname)
return status
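# Minimal usage sketch, kept as a comment so nothing runs at import time;
# the Fortran source and module name below are illustrative only:
#
#     fsource = '''
#           subroutine foo
#           print*, "Hello world!"
#           end
#     '''
#     compile(fsource, modulename='hello', verbose=0)
#     import hello
#     hello.foo()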
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| jorisvandenbossche/numpy | numpy/f2py/__init__.py | Python | bsd-3-clause | 3,138 |
from collections import defaultdict
export_dir = 'marc/'
data_dir = 'raw/'
log_dir = 'logs/'
countries_to_skip = ['Japan', 'France', 'Korea, Republic of']
def is_xml_field(x):
xml_fields = ['100', '200']
return x in xml_fields or len(x)==2
xml_header = '<?xml version="1.0" encoding="ISO-8859-1"?>'
formatting_config = {'sub': '\t\t<subfield code="%s">%s</subfield>',
'tag_beg': '\t<datafield tag="%s" ind1="%s" ind2="%s">',
'tag_end': '\t</datafield>',
'rec_beg': '<record>',
'rec_end': '</record>',
}
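# How these templates render (the tag, indicators, code and value below are
# made up for illustration; the real ones come from the source records):
#
#     formatting_config['sub'] % ('a', 'Some title')
#         -> '\t\t<subfield code="a">Some title</subfield>'
#     formatting_config['tag_beg'] % ('245', '1', '0')
#         -> '\t<datafield tag="245" ind1="1" ind2="0">'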
| jgarcial/inis_to_marc | config.py | Python | gpl-3.0 | 536 |
"""
Simultaneous iteration algorithm
Takes a matrix A and a parameter p giving the number of columns of the
orthonormal basis Q used to approximate the dominant eigenspace of A
>>> A = array([[ 1.079, 1.105, 0.559],
... [ 1.105, 0.725, 0.988],
... [ 0.559, 0.988, 0.732]])
>>> Q,L,iters,err = simit(A,3, details=True)
>>> iters < 200
True
>>> err < 1.0e-15
True
"""
from numpy import *
def simit(A, p, details=False):
n,_ = shape(A)
t = random.rand(n,p)
Q,R = linalg.qr(t)
err = 1.0
iters = 0
    while err > 1.0e-15 and iters < 1000:
        z = A.dot(Q)                     # power step: map the current basis through A
        Q,R = linalg.qr(z)               # re-orthonormalize to get the next basis
        err = linalg.norm(A.dot(Q) - z)  # change in A.Q between successive iterates
        iters += 1
if details:
return Q, Q.T.dot(A).dot(Q), iters, err
else:
return Q, Q.T.dot(A).dot(Q)
if __name__ == "__main__":
import doctest
doctest.testmod()
| ddrake/mth653 | eigen/simit.py | Python | mit | 838 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
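# As a worked example of the rules above, score([1,1,1,5,1]) should return
# 1150: the three 1s score 1000, the leftover 1 scores 100 and the single 5
# scores 50.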
def score(dice):
if len(dice) > 5:
return None
numbers = {}.fromkeys(range(1,7), 0)
score = 0
for n in dice:
numbers[n] += 1
score += (numbers[1] // 3) * 1000
for n in range(2,7):
score += (numbers[n] // 3) * 100 * n
score += (numbers[1] % 3) * 100
score += (numbers[5] % 3) * 50
return score
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
print(score([]))
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1,5,5,1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2,3,4,6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1,1,1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2,2,2]))
self.assertEqual(300, score([3,3,3]))
self.assertEqual(400, score([4,4,4]))
self.assertEqual(500, score([5,5,5]))
self.assertEqual(600, score([6,6,6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2,5,2,2,3]))
self.assertEqual(550, score([5,5,5,5]))
self.assertEqual(1150, score([1,1,1,5,1]))
def test_ones_not_left_out(self):
self.assertEqual(300, score([1,2,2,2]))
self.assertEqual(350, score([1,5,2,2,2]))
| sourabhv/python-koans-solutions | python3/koans/about_scoring_project.py | Python | mit | 2,529 |
import _plotly_utils.basevalidators
class ValuesValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="values", parent_name="sunburst", **kwargs):
super(ValuesValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/sunburst/_values.py | Python | mit | 442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import asyncio
from collections import OrderedDict
import concurrent.futures
import json
import logging
import aiopg
import psycopg2.extensions
import psycopg2.extras
from fumbbl_base.custom_types import tournament_format as tfmt
import fumbbl_trans.exc
import fumbbl_trans.html_tourney
import fumbbl_trans.xml_tourney
import fumbbl_trans.xml_tourneys
from fplusdb_main import notify_aiopg
from fplusdb_main.schema import MAIN
from fplusdb_main import sql_commands
import fplusdb_main.transimp.html_tourney
import fplusdb_main.transimp.xml_tourney
import fplusdb_main.transimp.xml_tourneys
import fumlocxmlmatches
class BasePosgreSQLScript:
#DRY with fplusdbinsmatch
async_ = False
autocommit = None
isol_lvl = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
def __init__(self, dsn):
self.dsn = dsn
self.conn = None
def connect(self, isol_lvl=None, async_=None,
autocommit=None):
assert not self.conn, 'already connected'
isol_lvl = isol_lvl or self.isol_lvl
async_ = async_ or self.async_
autocommit = autocommit or self.autocommit
self.conn = psycopg2.connect(self.dsn,
connection_factory=None,
cursor_factory=None,
            async_=async_)  # 'async' is a reserved word in Python 3.7+; psycopg2 >= 2.7 accepts 'async_'
self.conn.set_session(
isolation_level=isol_lvl,
readonly=None,
deferrable=None,
autocommit=autocommit)
def __enter__(self):
self.connect()
return self
class Script(BasePosgreSQLScript):
isol_lvl = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE
logger = logging.getLogger(__name__)
def __init__(self, dsn, *,
hhost='localhost', hport=8080,
**kwargs):
super().__init__(dsn)
self.hhost = hhost
self.hport = hport
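    # Rough flow, as far as it can be read from this module alone: a
    # PostgreSQL NOTIFY on the 'check_group' channel carries a group id;
    # on_check_group() pulls that group's tournaments from the FUMBBL
    # XML/HTML endpoints, upserts them into the main schema, and then emits
    # 'group_checked' on success, or 'group_check_rollbacked' /
    # 'group_check_error' when the update cannot be committed.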
async def manage_notifications(self, pool):
async with notify_aiopg.NotificationManager(pool) as m:
await m.register_listen(
'check_group',
self.on_check_group,
)
await m.register_listen(
'group_checked',
self.on_group_checked,
)
await m.register_listen(
'group_check_rollbacked',
self.on_group_check_rollbacked,
)
await asyncio.Future() # wait forever
async def _check_tournament(self, group_id, tournament_id,
target, cur, sema, loop):
P = MAIN.TOURNAMENT_PAIRING
err = None
async with sema:
tournament_row = target[MAIN.TOURNAMENT][(tournament_id,)]
tournament_name = tournament_row[MAIN.TOURNAMENT.NAME]
tfmt_ = tournament_row[MAIN.TOURNAMENT.FORMAT]
t_str = '{}: {!r}'.format(tournament_id, tournament_name)
try:
xs = fumbbl_trans.xml_tourney.Session.of_group(group_id,
tournament_id,
netloc=':'.join((self.hhost, str(self.hport))),
loop=loop)
xtext = await xs.async_get()
except fumbbl_trans.exc.NoResultError:
self.logger.info('[!0] ' + t_str)
return False
except concurrent.futures.CancelledError:
self.logger.info('[CC] ' + t_str)
raise
except Exception as e:
self.logger.info('[!!] ' + t_str)
raise
else:
xtarget = fplusdb_main.transimp.xml_tourney.import_(
xtext, tournament_id, tfmt_)
if tfmt_ == tfmt.OPENROUNDROBIN.value:
target_ = xtarget
else:
one_match_id = next(r for r in xtarget[P].values()
if r.get('match_id'))['match_id']
try:
hs = fumbbl_trans.html_tourney.Session.of_group(
group_id, tournament_id, one_match_id,
netloc=':'.join((self.hhost, str(self.hport))),
loop=loop)
htext = await hs.async_get()
except concurrent.futures.CancelledError:
self.logger.info('[CC] ' + t_str)
raise
except Exception as e:
errmsg = 'HTML tournament parsing error ({})'
self.logger.warning(errmsg.format(e))
target_ = xtarget # fallback to XML result
raise # uncomment to disable fallback
else:
func = fplusdb_main.transimp.html_tourney.import_
htarget = func(htext, tfmt_)
target_ = htarget
for tablename, tablerows in target_.items():
target_table = target.setdefault(tablename,
OrderedDict())
target_table.update(tablerows)
self.logger.info('[OK] ' + t_str)
return True
async def on_check_group(self, notify, pool):
loop = pool._loop
if notify.payload.isdecimal():
group_id = int(notify.payload)
else:
errfs = 'invalid check_group payload: {!r}'
self.logger.warning(errfs.format(notify.payload))
return
self.logger.info('CHECKING GROUP...: {}'.format(group_id))
async with pool.acquire() as conn:
async with conn.cursor(
cursor_factory=psycopg2.extras.DictCursor) as cur:
sql = await sql_commands.tournaments_of_group(cur,
group_id)
await cur.execute(sql)
results = await cur.fetchall()
done_ids = set(r[MAIN.TOURNAMENT.ID] for r in results
if r[MAIN.TOURNAMENT.WINNER_TEAM_ID])
self.logger.debug(f'done_ids: {done_ids}')
xts = fumbbl_trans.xml_tourneys.Session.of_group(
group_id,
netloc=':'.join((self.hhost, str(self.hport))),
)
try:
text = await xts.async_get()
except fumbbl_trans.exc.NoResultError:
async with conn.cursor() as cur:
sql = await sql_commands.notify(cur,
'group_checked', group_id)
await cur.execute(sql)
return
d = {}
fplusdb_main.transimp.xml_tourneys.import_(text, d)
d_t = d.get(MAIN.TOURNAMENT, {})
tids = [t['id'] for t in d_t.values()
if t['id'] not in done_ids]
self.logger.debug(f'tids1: {tids}')
if not tids:
async with conn.cursor() as cur:
sql = await sql_commands.notify(cur, 'group_checked',
group_id)
await cur.execute(sql)
return
sema = asyncio.Semaphore(5, loop=loop)
async with conn.cursor() as cur:
coros = [
loop.create_task(
self._check_tournament(
group_id, tid, d, cur, sema, loop
)
)
for tid in tids
]
if coros:
try:
successes = await asyncio.gather(*coros, loop=loop)
except Exception as e:
for task in coros:
task.cancel()
sql = await sql_commands.notify(cur,
'group_check_rollbacked', group_id)
self.logger.error('{}'.format(e))
await cur.execute(sql)
raise
else:
fails = [tids[i] for i, s in enumerate(successes)
if s is False]
for tid in fails:
del d_t[(tid,)]
UPSERT = True
rb_exc_class = psycopg2.extensions.TransactionRollbackError
ie1_exc_class = psycopg2.IntegrityError
ie2_exc_class = psycopg2.InternalError
fail_exc = None
tids = [t['id'] for t in d_t.values()
if t['id'] not in done_ids]
self.logger.debug(f'tids2: {tids}')
async with conn.cursor() as cur:
await cur.execute('BEGIN ISOLATION LEVEL SERIALIZABLE;')
for tid in tids:
sql = f'DELETE FROM main.TOURNAMENT where id={tid}'
await cur.execute(sql)
ok = True
for tablename in (
MAIN.TOURNAMENT,
MAIN.TOURNAMENT_PAIRING,
):
if (
tablename == MAIN.TOURNAMENT_PAIRING
and tablename not in d
):
continue
for r in d[tablename].values():
funcs = sql_commands.serts[tablename]
func = funcs[UPSERT]
sql = await func(cur, data=r)
try:
await cur.execute(sql)
except (rb_exc_class, ie1_exc_class,
ie2_exc_class) as e:
fail_exc = e
break
except Exception as e:
self.logger.error('{}'.format(e))
raise e
else:
continue
ok = False
break
if ok:
for tournament_id in tids:
try:
sql2 = ('SELECT main.fill_up_unplayed_'
'tournament_pairings({});')
await cur.execute(sql2.format(tournament_id))
sql3 = ('INSERT INTO main.tournament_pairing_stat'
' (tournament_id) VALUES ({0})'
' ON CONFLICT (tournament_id)'
' DO UPDATE SET tournament_id = {0};')
await cur.execute(sql3.format(tournament_id))
except (rb_exc_class, ie1_exc_class,
ie2_exc_class) as e:
fail_exc = e
break
except Exception as e:
self.logger.error('{}'.format(e))
raise e
sql = await sql_commands.notify(cur, 'group_checked',
group_id)
await cur.execute(sql)
await cur.execute('COMMIT;')
if fail_exc:
async with conn.cursor() as cur:
if isinstance(fail_exc, rb_exc_class):
sql = await sql_commands.notify(cur,
'group_check_rollbacked', group_id)
elif (isinstance(fail_exc, ie1_exc_class)
or isinstance(fail_exc, ie2_exc_class)):
sql = await sql_commands.notify(cur,
'group_check_error', group_id)
await cur.execute(sql)
errmsg = 'Database update failed ({})'
self.logger.warning(errmsg.format(fail_exc))
#raise fail_exc
def on_group_checked(self, notify, pool):
self.logger.info('GROUP CHECKED: {}'.format(notify.payload))
def on_group_check_rollbacked(self, notify, pool):
self.logger.info('GROUP CHECK ROLLBACKED: {}'.format(
notify.payload))
def get_main_params():
parser = argparse.ArgumentParser(parents=(
fumlocxmlmatches.log_parser,
))
parser.add_argument('dsn',
help=('PostgreSQL connection string of the '
'FUMBBLPlus database'))
parser.add_argument('--hhost', default='localhost',
help='http host (default: localhost)')
parser.add_argument('--hport', type=int, default=8080,
help='http port (default: 8080)')
args = parser.parse_args()
return dict(args._get_kwargs())
async def main(loop=None):
global script
loop = loop or asyncio.get_event_loop()
params = get_main_params()
hdlr = logging.StreamHandler(params['logto'])
hdlr.setFormatter(fumlocxmlmatches.LOG_FORMATTER)
Script.logger.setLevel(params['loglevel'])
Script.logger.addHandler(hdlr)
script = Script(**params)
async with aiopg.create_pool(
params['dsn'],
loop=loop
) as pool:
notifman_task = loop.create_task(
script.manage_notifications(pool)
)
try:
await asyncio.Future() # wait forever
except:
notifman_task.cancel()
raise
if __name__ == '__main__':
script=None
loop = asyncio.get_event_loop()
main_task = loop.create_task(main(loop))
while True:
try:
loop.run_until_complete(main_task)
except KeyboardInterrupt:
main_task.cancel()
for t in asyncio.Task.all_tasks(loop=loop):
t.cancel()
except asyncio.CancelledError:
break
except Exception as err:
script.logger.info(err)
break
loop.close()
| FUMBBLPlus/fplusdb_main | scripts/fplusdbchkgroup.py | Python | mit | 11,627 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
from trac.admin import IAdminPanelProvider
from trac.config import IntOption
from trac.core import Component, implements
from trac.web.api import HTTPNotFound
from trac.web.chrome import (
add_link, add_script, add_script_data, add_stylesheet)
from tracspamfilter.api import _
from tracspamfilter.model import LogEntry
class ReportAdminPageProvider(Component):
"""Web administration panel for reviewing Spam reports"""
implements(IAdminPanelProvider)
MAX_PER_PAGE = 10000
MIN_PER_PAGE = 5
DEF_PER_PAGE = IntOption('spam-filter', 'spam_report_entries', '100',
"How many report entries are displayed by default (between 5 and 10000).",
doc_domain='tracspamfilter')
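    # The option above is read from trac.ini; a typical (illustrative)
    # configuration would be:
    #
    #   [spam-filter]
    #   spam_report_entries = 100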
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'SPAM_CHECKREPORTS' in req.perm:
total = self.env.db_query("SELECT COUNT(*) FROM spamfilter_report")
total = total[0][0]
            yield ('spamfilter', _("Spam Filtering"), 'report',
                   _("Reports") + (" (%s)" % total if total else ""))
def render_admin_panel(self, req, cat, page, path_info):
req.perm.require('SPAM_CHECKREPORTS')
if req.method == 'POST' and 'delete' in req.args:
entries = req.args.getlist('sel')
if entries:
self.env.db_transaction("""
DELETE FROM spamfilter_report WHERE id IN (%s)
""" % ",".join("'%s'" % each for each in entries))
req.redirect(req.href.admin(cat, page,
page=req.args.get('page'),
num=req.args.get('num')))
if path_info:
data = self._render_entry(req, cat, page, path_info)
page = 'entry'
data['allowselect'] = False
data['entries'] = LogEntry.selectrelated(self.env, data['path'], data['time'])
else:
data = self._render_panel(req, cat, page)
page = ''
add_stylesheet(req, 'spamfilter/admin.css')
return 'admin_report%s.html' % page, data
# Internal methods
def _render_panel(self, req, cat, page):
try:
pagenum = int(req.args.get('page', 1)) - 1
except ValueError:
pagenum = 1
try:
pagesize = int(req.args.get('num', self.DEF_PER_PAGE))
except ValueError:
pagesize = self.DEF_PER_PAGE
if pagesize < self.MIN_PER_PAGE:
pagesize = self.MIN_PER_PAGE
elif pagesize > self.MAX_PER_PAGE:
pagesize = self.MAX_PER_PAGE
total = self.env.db_query("SELECT COUNT(*) FROM spamfilter_report")
total = total[0][0]
if total < pagesize:
pagenum = 0
elif total <= pagenum * pagesize:
pagenum = (total-1)/pagesize
offset = pagenum * pagesize
entries = []
        idx = 0
for e in self.env.db_query("""
SELECT id,time,entry,author,authenticated,comment
FROM spamfilter_report
ORDER BY time DESC LIMIT %s OFFSET %s""", (pagesize, offset)):
# don't display additional appended values
p = e[2].split("#")
entries.append(('odd' if idx %2 else 'even',)+e[0:2]+(p[0],)+e[3:])
            idx += 1
if pagenum > 0:
add_link(req, 'prev', req.href.admin(cat, page, page=pagenum,
num=pagesize),
_('Previous Page'))
if offset + pagesize < total:
add_link(req, 'next', req.href.admin(cat, page, page=pagenum+2,
num=pagesize),
_('Next Page'))
if entries:
add_script_data(req, {'toggleform': "spamreportform"})
add_script(req, 'spamfilter/toggle.js')
return {
'entries': entries,
'offset': offset + 1,
'page': pagenum + 1,
'num': pagesize,
'total': total
}
def _render_entry(self, req, cat, page, entry_id):
with self.env.db_query as db:
entry = db("""
SELECT time,entry,author,authenticated,headers,comment
FROM spamfilter_report
WHERE id = %s""", (entry_id,))
if not entry:
raise HTTPNotFound(_('Report entry not found'))
entry = entry[0]
for previous, in db("""
SELECT id
FROM spamfilter_report
WHERE id<%s ORDER BY id DESC LIMIT 1""", (entry_id,)):
add_link(req, 'prev', req.href.admin(cat, page, previous),
_('Report Entry %d') % previous)
add_link(req, 'up', req.href.admin(cat, page), _('Report Entry List'))
for next, in db("""
SELECT id
FROM spamfilter_report
WHERE id>%s ORDER BY id DESC LIMIT 1""", (entry_id,)):
add_link(req, 'next', req.href.admin(cat, page, next),
_('Report Entry %d') % next)
# don't display additional appended values
path = entry[1].split("#")
return {'time': entry[0],
'monitor': 'SPAM_MONITOR' in req.perm,
'id': entry_id,
'path': path[0],
'author': entry[2],
'authenticated': entry[3],
'headers': entry[4],
'comment': entry[5]}
| Puppet-Finland/trac | files/spam-filter/tracspamfilter/adminreport.py | Python | bsd-2-clause | 6,070 |
from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from numpy.testing.decorators import slow
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset, Week
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
from pandas.tests.test_graphics import _skip_if_no_scipy_gaussian_kde
@tm.mplskip
class TestTSPlot(tm.TestCase):
def setUp(self):
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def tearDown(self):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 9), index=range(10))
ax = df.plot(fontsize=2)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
self.assertEqual(label.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
# inferred freq
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx)
ax = df.plot() # it works
self.assertEqual(len(ax.get_lines()), 1) #B was plotted
plt.close(plt.gcf())
self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
ax = plt.gca()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
ax = ts.plot(style='k')
self.assertEqual((0., 0., 0.), ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
self.assertRaises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
self.assertRaises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot)
def test_get_datevalue(self):
from pandas.tseries.converter import get_datevalue
self.assertIsNone(get_datevalue(None, 'D'))
self.assertEqual(get_datevalue(1987, 'A'), 1987)
self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'),
Period('1987-12', 'M').ordinal)
self.assertEqual(get_datevalue('1/1/1987', 'D'),
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
self.assertEqual(expected_string, ax.format_coord(first_x, first_y))
except (ValueError):
raise nose.SkipTest("skipping test because issue forming test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC'))
check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
check_format_of_first_point(daily.plot(), 't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
import matplotlib.pyplot as plt
from pandas.tseries.plotting import tsplot
tsplot(annual, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014 y = 1.000000')
tsplot(daily, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ax = ts.plot()
self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1),
datetime(2000, 1, 6),
datetime(2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.tseries.converter as conv
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
ax = df.plot()
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.ix[[0, 1, 3, 4]]
ax = irreg.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())
plt.clf()
fig.add_subplot(111)
df2 = df.copy()
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
ret = ser.plot()
self.assertIsNotNone(ret)
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
self.assertEqual(rs, xp)
def test_business_freq(self):
import matplotlib.pyplot as plt
bts = tm.makePeriodSeries()
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
bts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
ts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') +
timedelta(minutes=30))
df = DataFrame(np.arange(24), index=idx)
ax = df.plot()
rs = ax.get_lines()[0].get_xdata()
self.assertFalse(Index(rs).is_normalized)
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
tm.assert_numpy_array_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
import matplotlib.pyplot as plt
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(result[0], xlim[0] - 5)
self.assertEqual(result[1], xlim[1] + 10)
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
            # datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
fig = ax.get_figure()
plt.close(fig)
ser = tm.makeTimeSeries()
ax = ser.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
ax = df.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.tseries.converter as conv
self.assertEqual(conv.get_finder('B'), conv._daily_finder)
self.assertEqual(conv.get_finder('D'), conv._daily_finder)
self.assertEqual(conv.get_finder('M'), conv._monthly_finder)
self.assertEqual(conv.get_finder('Q'), conv._quarterly_finder)
self.assertEqual(conv.get_finder('A'), conv._annual_finder)
self.assertEqual(conv.get_finder('W'), conv._daily_finder)
@slow
def test_finder_daily(self):
import matplotlib.pyplot as plt
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
import matplotlib.pyplot as plt
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
import matplotlib.pyplot as plt
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
self.assertEqual(rs, xp)
@slow
def test_finder_annual(self):
import matplotlib.pyplot as plt
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
self.assertEqual(rs, xp)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
tm._skip_if_mpl_1_5()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm._skip_if_mpl_1_5()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='bar')
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
def test_mixed_freq_regular_first(self):
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
ax = s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
s2.plot(style='g')
ax = s1.plot()
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s1.plot()
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s2.plot(style='g')
ax = s1.plot(ax=ax)
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
ax = ts.plot()
ts2.plot(style='r')
self.assertEqual(ax.lines[0].get_xdata()[0],
ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
import matplotlib.pyplot as plt
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot(legend=True)
ax = high.plot(legend=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
leg = ax.get_legend()
self.assertEqual(len(leg.texts), 2)
plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
irreg.plot()
ps.plot()
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(high, plt.Axes.plot)
lines = tsplot(low, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
expected_h = idxh.to_period().asi8
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
for l in ax.get_lines():
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(low, plt.Axes.plot)
lines = tsplot(high, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = low.plot(kind=kind1, stacked=True)
ax = high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
# check stacked values are correct
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = high.plot(kind=kind1, stacked=True)
ax = low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549,
1553, 1558, 1562])
expected_y = np.zeros(len(expected_x))
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x)
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
high.plot()
ax = low.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
tm.close()
# low to high
low.plot()
ax = high.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
@slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_check_plot_works(df.plot)
@slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
@slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
self.assertEqual(xp, rs)
@slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot(secondary_y=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
for l in ax.left_ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@slow
def test_secondary_legend(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B (right)')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'])
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
# non-ts
df = tm.makeDataFrame()
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
ax = df.plot()
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
self.assertEqual(l.get_rotation(), 30)
@slow
def test_ax_plot(self):
import matplotlib.pyplot as plt
x = DatetimeIndex(start='2012-01-02', periods=10,
freq='D')
y = lrange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot(x, y, label='Y')
tm.assert_numpy_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
import matplotlib.pyplot as plt
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
line1.get_xydata()[:, 0])
tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
line2.get_xydata()[:, 0])
@slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
ax = ts_irregular[:5].plot()
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
@slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
ax = ts.plot()
left_before, right_before = ax.get_xlim()
ts.resample('D').plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
self.assertEqual(left_before, left_after)
self.assertEqual(right_before, right_after)
@slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
ax = ts_irregular[:5].plot()
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tseries/tests/test_plotting.py | Python | artistic-2.0 | 44,763 |
# print(f'Invoking __init__.py for {__name__}')
from .maths import complexto
from .HP4395A import HP4395A
from .AgilentE8357A import AgilentE8357A
from .Wiltron360 import Wiltron360
# import .Wiltron360
REGISTER = {
'HEWLETT-PACKARD,4395A': HP4395A,
'Agilent Technologies,E8357A': AgilentE8357A,
'Wiltron,360,NO_IDN': Wiltron360,
}
'''
"Keysight, Fieldfox": KeysightFieldFox,
# Benchview supported:
# ENA: E5080A, E5061B, E5063A, E5071C, E5072A
# PNA: N5221A, N5222A, N5224A, N5245A, N5227A
# PNA-L: N5230C, N5231A, N5232A, N5234A, N5235A, N5239A
# PNA-X: N5241A, N5242A, N5244A, N5245A, N5247A, N5249A
# Fieldfox: N9912A, N9913A, N9914A, N9915A, N9916A, N9917A, N9918A, N9923A, N9925A,
# N9926A, N9927A, N9928A, N9935A, N9936A, N9937A, N9938A, N9950A, N9951A, N9952A, N9960A, N9961A, N9962A
'''
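# Illustrative lookup sketch (not part of the original module): REGISTER maps the
# start of an instrument's *IDN? reply to its driver class, so a reply such as
# 'HEWLETT-PACKARD,4395A,MY12345678,REV01' (hypothetical format) could be matched with:
#
#     driver_cls = next((cls for prefix, cls in REGISTER.items()
#                        if idn_reply.startswith(prefix)), None)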
| DavidLutton/EngineeringProject | labtoolkit/NetworkAnalyser/__init__.py | Python | mit | 851 |
from django.conf.urls import patterns, url
from ts_get_started import views
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='ts_get_started_index'),
url(r'^modeling1/$', views.Modeling1View.as_view(), name='ts_get_started_modeling1'),
url(r'^modeling2/$', views.Modeling2View.as_view(), name='ts_get_started_modeling2'),
url(r'^modeling3/$', views.Modeling3View.as_view(), name='ts_get_started_modeling3'),
url(r'^modeling4/$', views.Modeling4View.as_view(), name='ts_get_started_modeling4'),
url(r'^weatherGenerator/$', views.WeatherGeneratorView.as_view(), name='ts_get_started_weatherGenerator'),
url(r'^select_model/$', TemplateView.as_view(template_name="ts_get_started/frank_index.html"), name="ts_get_started.frank.index"),
url(r'^emod_model/$', TemplateView.as_view(template_name="ts_get_started/frank_emod.html"), name="ts_get_started.frank.emod"),
url(r'^emod_single_node/$', TemplateView.as_view(template_name="ts_get_started/frank_single_node.html"), name="ts_get_started.frank.emod_single_node"),
url(r'^temperatureData/$', views.getTemperatureData, name='ts_weather.temperatureData'),
url(r'^rainfallData/$', views.getRainfallData, name='ts_weather.rainfallData'),
url(r'^humidityData/$', views.getHumidityData, name='ts_weather.humidityData'),
) | tph-thuering/vnetsource | ts_get_started/urls.py | Python | mpl-2.0 | 1,377 |
from fnmatch import fnmatch
import logging
logger = logging.getLogger("ralybot")
# put your hostmask here for magic
# it's disabled by default, see has_perm_mask()
backdoor = None
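# Illustrative value only (not from the project docs): has_perm_mask() matches with
# fnmatch, so a backdoor would be a glob-style hostmask, e.g. "*!*@my.trusted.example".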
class PermissionManager(object):
"""
:type name: str
:type config: dict[str, ?]
:type group_perms: dict[str, list[str]]
:type group_users: dict[str, list[str]]
:type perm_users: dict[str, list[str]]
"""
def __init__(self, conn):
"""
:type conn: ralybot.client.Client
"""
logger.info("[{}|permissions] Created permission manager for {}.".format(conn.name, conn.name))
# stuff
self.name = conn.name
self.config = conn.config
self.group_perms = {}
self.group_users = {}
self.perm_users = {}
self.reload()
def reload(self):
self.group_perms = {}
self.group_users = {}
self.perm_users = {}
logger.info("[{}|permissions] Reloading permissions for {}.".format(self.name, self.name))
groups = self.config.get("permissions", {})
# work out the permissions and users each group has
for key, value in groups.items():
if not key.islower():
logger.warning("[{}|permissions] Warning! Non-lower-case group '{}' in config. This will cause problems"
" when setting permissions using the bot's permissions commands"
.format(self.name, key))
key = key.lower()
self.group_perms[key] = []
self.group_users[key] = []
for permission in value["perms"]:
self.group_perms[key].append(permission.lower())
for user in value["users"]:
self.group_users[key].append(user.lower())
for group, users in self.group_users.items():
group_perms = self.group_perms[group]
for perm in group_perms:
if self.perm_users.get(perm) is None:
self.perm_users[perm] = []
self.perm_users[perm].extend(users)
logger.debug("[{}|permissions] Group permissions: {}".format(self.name, self.group_perms))
logger.debug("[{}|permissions] Group users: {}".format(self.name, self.group_users))
logger.debug("[{}|permissions] Permission users: {}".format(self.name, self.perm_users))
def has_perm_mask(self, user_mask, perm, notice=True):
"""
:type user_mask: str
:type perm: str
:rtype: bool
"""
if backdoor:
if fnmatch(user_mask.lower(), backdoor.lower()):
return True
if not perm.lower() in self.perm_users:
# no one has access
return False
allowed_users = self.perm_users[perm.lower()]
for allowed_mask in allowed_users:
if fnmatch(user_mask.lower(), allowed_mask):
if notice:
logger.info("[{}|permissions] Allowed user {} access to {}".format(self.name, user_mask, perm))
return True
return False
def get_groups(self):
return set().union(self.group_perms.keys(), self.group_users.keys())
def get_group_permissions(self, group):
"""
:type group: str
:rtype: list[str]
"""
return self.group_perms.get(group.lower())
def get_group_users(self, group):
"""
:type group: str
:rtype: list[str]
"""
return self.group_users.get(group.lower())
def get_user_permissions(self, user_mask):
"""
:type user_mask: str
:rtype: list[str]
"""
permissions = set()
for permission, users in self.perm_users.items():
for mask_to_check in users:
if fnmatch(user_mask.lower(), mask_to_check):
permissions.add(permission)
return permissions
def get_user_groups(self, user_mask):
"""
:type user_mask: str
:rtype: list[str]
"""
groups = []
for group, users in self.group_users.items():
for mask_to_check in users:
if fnmatch(user_mask.lower(), mask_to_check):
groups.append(group)
                    break  # one matching mask is enough; avoids adding the same group twice
return groups
def group_exists(self, group):
"""
Checks whether a group exists
:type group: str
:rtype: bool
"""
return group.lower() in self.group_perms
def user_in_group(self, user_mask, group):
"""
Checks whether a user is matched by any masks in a given group
:type group: str
:type user_mask: str
:rtype: bool
"""
users = self.group_users.get(group.lower())
if not users:
return False
for mask_to_check in users:
if fnmatch(user_mask.lower(), mask_to_check):
return True
return False
def remove_group_user(self, group, user_mask):
"""
Removes all users that match user_mask from group. Returns a list of user masks removed from the group.
        Use permission_manager.reload() to make this change take effect.
Use bot.config.save_config() to save this change to file.
:type group: str
:type user_mask: str
:rtype: list[str]
"""
masks_removed = []
config_groups = self.config.get("permissions", {})
for mask_to_check in list(self.group_users[group.lower()]):
if fnmatch(user_mask.lower(), mask_to_check):
masks_removed.append(mask_to_check)
# We're going to act like the group keys are all lowercase.
# The user has been warned (above) if they aren't.
# Okay, maybe a warning, but no support.
if group not in config_groups:
logger.warning(
"[{}|permissions] Can't remove user from group due to"
" upper-case group names!".format(self.name))
continue
config_group = config_groups.get(group)
config_users = config_group.get("users")
config_users.remove(mask_to_check)
return masks_removed
def add_user_to_group(self, user_mask, group):
"""
Adds user to group. Returns whether this actually did anything.
        Use permission_manager.reload() to make this change take effect.
Use bot.config.save_config() to save this change to file.
:type group: str
:type user_mask: str
:rtype: bool
"""
if self.user_in_group(user_mask, group):
return False
# We're going to act like the group keys are all lowercase.
# The user has been warned (above) if they aren't.
groups = self.config.get("permissions", {})
if group in groups:
group_dict = groups.get(group)
users = group_dict["users"]
users.append(user_mask)
else:
# create the group
group_dict = {"users": [user_mask], "perms": []}
groups[group] = group_dict
return True
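# Minimal usage sketch (illustrative, not part of the bot): PermissionManager only
# needs an object exposing `name` and `config`, so a stand-in suffices here. The
# group, mask and permission names below are assumptions made for this example.
if __name__ == '__main__':
    from types import SimpleNamespace
    demo_conn = SimpleNamespace(name='demo', config={
        'permissions': {
            'admins': {'perms': ['botcontrol'], 'users': ['*!*@trusted.example']},
        },
    })
    manager = PermissionManager(demo_conn)
    print(manager.has_perm_mask('nick!user@trusted.example', 'botcontrol'))   # True
    print(manager.has_perm_mask('nick!user@elsewhere.example', 'botcontrol'))  # False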
| Jakeable/Ralybot | ralybot/permissions.py | Python | gpl-3.0 | 7,238 |
# -*- coding: UTF-8 -*-
import postmarkup
import unittest
class TestPostmarkup(unittest.TestCase):
def test_textilize(self):
"""Test textilize function"""
tests = [(u"<b>No bold</b>", u"No bold"),
(u'<span class="blah">A span</span>', u"A span"),
(u"Just text", u"Just text"),
(u"<p>paragraph</p>", u" paragraph")]
for test, result in tests:
self.assertEqual(postmarkup.textilize(test), result)
def test_strip_bbcode(self):
"""Test strip_bbcode function"""
tests = [(u"[b]Not bold[/b]", u"Not bold"),
(u"Just text", u"Just text"),
(u"[b][i][url][url=test]", u"")]
for test, result in tests:
self.assertEqual(postmarkup.strip_bbcode(test), result)
def test_cleanuphtml(self):
"""Test cleanup_html"""
markup = postmarkup.create()
tests = [(u"""\n<p>\n </p>\n""", u""),
(u"""<b>\n\n<i> </i>\n</b>Test""", u"Test"),
(u"""<p id="test">Test</p>""", u"""<p id="test">Test</p>"""),]
for test, result in tests:
self.assertEqual(markup.cleanup_html(test).strip(), result)
def test_simpletag(self):
"Test simple tags"
markup = postmarkup.create()
tests = [ (u'[b]Hello[/b]', u"<strong>Hello</strong>"),
(u'[i]Italic[/i]', u"<em>Italic</em>"),
(u'[s]Strike[/s]', u"<strike>Strike</strike>"),
(u'[u]underlined[/u]', u"<u>underlined</u>"),
]
for test, result in tests:
self.assertEqual(markup(test), result)
def test_overlap(self):
"""Test overlapping tags produce correct output"""
markup = postmarkup.create()
tests = [ (u'[i][b]Hello[/i][/b]', u"<em><strong>Hello</strong></em>"),
(u'[b]bold [u]both[/b] underline[/u]', u'<strong>bold <u>both</u></strong><u> underline</u>')
]
for test, result in tests:
self.assertEqual(markup(test), result)
def test_links(self):
"""Test links produce correct output"""
markup = postmarkup.create(annotate_links=False)
tests = [ (u'[link=http://www.willmcgugan.com]blog1[/link]', u'<a href="http://www.willmcgugan.com">blog1</a>'),
(u'[link="http://www.willmcgugan.com"]blog2[/link]', u'<a href="http://www.willmcgugan.com">blog2</a>'),
(u'[link http://www.willmcgugan.com]blog3[/link]', u'<a href="http://www.willmcgugan.com">blog3</a>'),
(u'[link]http://www.willmcgugan.com[/link]', u'<a href="http://www.willmcgugan.com">http://www.willmcgugan.com</a>')
]
for test, result in tests:
self.assertEqual(markup(test), result)
def test_unknowntags(self):
"""Test unknown tags pass through correctly"""
markup = postmarkup.create(annotate_links=False)
tests = [ (u'[REDACTED]', u'[REDACTED]'),
(u'[REDACTED this]', u'[REDACTED this]'),
(u'[REDACTED <b>]', u'[REDACTED <b>]') ]
for test, result in tests:
self.assertEqual(markup(test, render_unknown_tags=True), result)
def test_unicode(self):
"""Test unicode support"""
markup = postmarkup.create()
tests= [ (u'[b]Hello André[/b]', u"<strong>Hello André</strong>"),
(u'[i]ɸβfvθðsz[/i]', u"<em>ɸβfvθðsz</em>"),
]
for test, result in tests:
self.assertEqual(markup(test), result)
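# Standard entry point so the tests can be run directly (assumes a plain
# unittest runner; adjust if the project uses a different test harness).
if __name__ == "__main__":
    unittest.main()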
| saknis/upelis | postmarkup/tests.py | Python | lgpl-2.1 | 3,704 |
# Generated by Django 2.0.1 on 2018-01-17 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('ligand', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Mutation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amino_acid', models.CharField(max_length=1)),
],
options={
'db_table': 'mutation',
},
),
migrations.CreateModel(
name='MutationExperiment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('submitting_group', models.CharField(max_length=200, null=True)),
('data_container', models.CharField(max_length=200, null=True)),
('data_container_number', models.CharField(max_length=20, null=True)),
('wt_value', models.FloatField()),
('wt_unit', models.CharField(max_length=10)),
('mu_value', models.FloatField()),
('mu_sign', models.CharField(max_length=2)),
('foldchange', models.FloatField()),
('opt_receptor_expression', models.FloatField(null=True)),
('opt_basal_activity', models.FloatField(null=True)),
('opt_gain_of_activity', models.CharField(max_length=100, null=True)),
('opt_ligand_emax', models.FloatField(null=True)),
('opt_agonist', models.CharField(max_length=100, null=True)),
],
options={
'db_table': 'mutation_experiment',
},
),
migrations.CreateModel(
name='MutationExperimentalType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100, unique=True)),
],
options={
'db_table': 'mutation_experimental_type',
},
),
migrations.CreateModel(
name='MutationFunc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('func', models.CharField(max_length=100, unique=True)),
],
options={
'db_table': 'mutation_func',
},
),
migrations.CreateModel(
name='MutationLigandClass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('classname', models.CharField(max_length=100, unique=True)),
],
options={
'db_table': 'mutation_ligand_class',
},
),
migrations.CreateModel(
name='MutationLigandRef',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference', models.CharField(max_length=100, unique=True)),
],
options={
'db_table': 'mutation_ligand_reference',
},
),
migrations.CreateModel(
name='MutationMeasure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('measure', models.CharField(max_length=100)),
],
options={
'db_table': 'mutation_measure',
},
),
migrations.CreateModel(
name='MutationOptional',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100)),
('wt', models.FloatField()),
('mu', models.FloatField()),
('sign', models.CharField(max_length=2)),
('percentage', models.FloatField()),
('qual', models.CharField(max_length=100)),
('agonist', models.CharField(max_length=100)),
],
options={
'db_table': 'mutation_opt',
},
),
migrations.CreateModel(
name='MutationQual',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qual', models.CharField(max_length=100)),
('prop', models.CharField(max_length=100)),
],
options={
'db_table': 'mutation_qual',
},
),
migrations.CreateModel(
name='MutationRaw',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('submitting_group', models.CharField(max_length=200, null=True)),
('reference', models.CharField(max_length=100)),
('review', models.CharField(max_length=100, null=True)),
('data_container', models.CharField(max_length=200, null=True)),
('data_container_number', models.CharField(max_length=20, null=True)),
('protein', models.CharField(max_length=100)),
('mutation_pos', models.SmallIntegerField()),
('mutation_from', models.CharField(max_length=1)),
('mutation_to', models.CharField(max_length=1)),
('ligand_name', models.CharField(max_length=100)),
('ligand_idtype', models.CharField(max_length=100)),
('ligand_id', models.CharField(max_length=300)),
('ligand_class', models.CharField(max_length=100)),
('exp_type', models.CharField(max_length=100)),
('exp_func', models.CharField(max_length=100)),
('exp_wt_value', models.FloatField()),
('exp_wt_unit', models.CharField(max_length=10)),
('exp_mu_effect_type', models.CharField(max_length=100)),
('exp_mu_effect_sign', models.CharField(max_length=2)),
('exp_mu_effect_value', models.FloatField()),
('exp_fold_change', models.FloatField()),
('exp_mu_effect_qual', models.CharField(max_length=100)),
('exp_mu_effect_ligand_prop', models.CharField(max_length=100)),
('exp_mu_ligand_ref', models.CharField(max_length=100)),
('opt_receptor_expression', models.CharField(max_length=100)),
('opt_basal_activity', models.CharField(max_length=100)),
('opt_gain_of_activity', models.CharField(max_length=100)),
('opt_ligand_emax', models.CharField(max_length=100)),
('opt_agonist', models.CharField(max_length=100)),
('added_by', models.CharField(max_length=100)),
('added_date', models.DateField()),
],
options={
'db_table': 'mutation_raw',
},
),
migrations.CreateModel(
name='MutationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100)),
],
options={
'db_table': 'mutation_type',
},
),
migrations.AlterUniqueTogether(
name='mutationqual',
unique_together={('qual', 'prop')},
),
migrations.AddField(
model_name='mutationexperiment',
name='exp_func',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mutation.MutationFunc'),
),
migrations.AddField(
model_name='mutationexperiment',
name='exp_measure',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mutation.MutationMeasure'),
),
migrations.AddField(
model_name='mutationexperiment',
name='exp_qual',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mutation.MutationQual'),
),
migrations.AddField(
model_name='mutationexperiment',
name='exp_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mutation.MutationExperimentalType'),
),
migrations.AddField(
model_name='mutationexperiment',
name='ligand',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ligand', to='ligand.Ligand'),
),
migrations.AddField(
model_name='mutationexperiment',
name='ligand_ref',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reference_ligand', to='ligand.Ligand'),
),
migrations.AddField(
model_name='mutationexperiment',
name='ligand_role',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ligand.LigandRole'),
),
migrations.AddField(
model_name='mutationexperiment',
name='mutation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mutation.Mutation'),
),
migrations.AddField(
model_name='mutationexperiment',
name='optional',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mutation.MutationOptional'),
),
]
| cmunk/protwis | mutation/migrations/0001_initial.py | Python | apache-2.0 | 10,073 |
import os
import pandas as pd
import pytest
import pickle
import numpy as np
import string
from copy import copy
import pandas.util.testing as tm
import dask
import dask.dataframe as dd
from dask import delayed
from dask.base import compute_as_if_collection
from dask.dataframe.shuffle import (shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd,
remove_nans)
from dask.dataframe.utils import assert_eq, make_meta
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [1, 4, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [2, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [3, 6, 9]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
full = d.compute()
shuffle_func = shuffle # conflicts with keyword argument
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({'x': np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle='tasks', npartitions=17, max_branch=4)
sc = s.compute(scheduler='sync')
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert (set(map(tuple, sc.values.tolist())) ==
set(map(tuple, df.values.tolist())))
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method),
shuffle(d, 'b', shuffle=method))
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[['b']], shuffle=method).compute()
res2 = shuffle(d, ['b'], shuffle=method).compute()
res3 = shuffle(d, 'b', shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({'x': [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, 'x', npartitions=i, shuffle=method)
assert (len(a.compute(scheduler='sync')) ==
len(b.compute(scheduler='sync')))
@pytest.mark.parametrize('method', ['disk', 'tasks'])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({'x': [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame({'i32': np.array([1, 2, 3] * 3, dtype='int32'),
'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'),
'cat': pd.Series(['a', 'b', 'c'] * 3).astype('category'),
'obj': pd.Series(['d', 'e', 'f'] * 3),
'bool': np.array([True, False, True] * 3),
'dt': pd.Series(pd.date_range('20130101', periods=9)),
'dt_tz': pd.Series(pd.date_range('20130101', periods=9, tz='US/Eastern')),
'td': pd.Series(pd.timedelta_range('2000', periods=9))})
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[['i32']], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[['cat', 'bool', 'f32']], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({'a': list(string.ascii_letters),
'b': [1, 2, 3, 4] * 13})
df.a = df.a.astype('category')
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize('npartitions', [1, 4, 7, pytest.mark.slow(23)])
def test_set_index_tasks(npartitions):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100) // 0.2},
index=np.random.random(100))
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index('x'),
ddf.set_index('x', shuffle='tasks'))
assert_eq(df.set_index('y'),
ddf.set_index('y', shuffle='tasks'))
assert_eq(df.set_index(df.x),
ddf.set_index(ddf.x, shuffle='tasks'))
assert_eq(df.set_index(df.x + df.y),
ddf.set_index(ddf.x + ddf.y, shuffle='tasks'))
assert_eq(df.set_index(df.x + 1),
ddf.set_index(ddf.x + 1, shuffle='tasks'))
assert_eq(df.set_index(df.index),
ddf.set_index(ddf.index, shuffle='tasks'))
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_self_index(shuffle):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100) // 0.2},
index=np.random.random(100))
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize('shuffle', ['tasks'])
def test_set_index_names(shuffle):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100) // 0.2},
index=np.random.random(100))
ddf = dd.from_pandas(df, npartitions=4)
assert (set(ddf.set_index('x', shuffle=shuffle).dask) ==
set(ddf.set_index('x', shuffle=shuffle).dask))
assert (set(ddf.set_index('x', shuffle=shuffle).dask) !=
set(ddf.set_index('y', shuffle=shuffle).dask))
assert (set(ddf.set_index('x', max_branch=4, shuffle=shuffle).dask) !=
set(ddf.set_index('x', max_branch=3, shuffle=shuffle).dask))
assert (set(ddf.set_index('x', drop=True, shuffle=shuffle).dask) !=
set(ddf.set_index('x', drop=False, shuffle=shuffle).dask))
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_tasks_2(shuffle):
df = dd.demo.make_timeseries(
'2000', '2004', {'value': float, 'name': str, 'id': int},
freq='2H', partition_freq='1M', seed=1)
df2 = df.set_index('name', shuffle=shuffle)
df2.value.sum().compute(scheduler='sync')
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=['x', 'y'])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index('x', shuffle=shuffle, max_branch=2,
npartitions=ddf.npartitions)
df2 = df.set_index('x')
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize('shuffle', ['tasks', 'disk'])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({'x': [1, 2, 3, 2, 1], 'y': [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index('x').sort_index()
ddf2 = ddf.set_index('x', shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize('shuffle', ['tasks', 'disk'])
@pytest.mark.parametrize('scheduler', ['threads', 'processes'])
def test_rearrange(shuffle, scheduler):
df = pd.DataFrame({'x': np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(y=ddf.x % 4)
result = rearrange_by_column(ddf2, 'y', max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a.y.drop_duplicates():
assert sum(i in part.y for part in parts) == 1
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, 'x', (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({'x': [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index('x', divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index('x')
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index('x', divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index('y', divisions=['a', 'c', 'd'])
assert result.divisions == ('a', 'c', 'd')
assert list(result.compute(scheduler='sync').index[-2:]) == ['d', 'd']
def test_set_index_divisions_compute():
d2 = d.set_index('b', divisions=[0, 2, 9], compute=False)
d3 = d.set_index('b', divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index('b'))
assert_eq(d3, full.set_index('b'))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({'x': [10, 11, 12], 'y': ['a', 'a', 'a']})
p2 = pd.DataFrame({'x': [13, 14, 15], 'y': ['b', 'b', 'c']})
p3 = pd.DataFrame({'x': [16, 17, 18], 'y': ['d', 'e', 'e']})
ddf = dd.DataFrame({('x', 0): p1, ('x', 1): p2, ('x', 2): p3},
'x', p1, [None, None, None, None])
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index('x', divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index('x'))
with dask.config.set(get=throw):
res = ddf.set_index('y', divisions=['a', 'b', 'd', 'e'], sorted=True)
assert_eq(res, df.set_index('y'))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index('y', divisions=['a', 'b', 'c', 'd', 'e'], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index('y', divisions=['a', 'b', 'd', 'c'], sorted=True)
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({'x': np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index('x', shuffle=shuffle, npartitions='auto')
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({'x': np.random.random(n),
'y': np.random.random(n)})
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_reduces_partitions_large(shuffle):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame({('x', i): (make_part, n) for i in range(nparts)},
'x', make_part(1), [None] * (nparts + 1))
ddf2 = ddf.set_index('x', shuffle=shuffle, npartitions='auto',
partition_size=nbytes)
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_doesnt_increase_partitions(shuffle):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame({('x', i): (make_part, n) for i in range(nparts)},
'x', make_part(1), [None] * (nparts + 1))
ddf2 = ddf.set_index('x', shuffle=shuffle, npartitions='auto',
partition_size=nbytes)
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({'x': range(100), 'y': range(100)})
ddf = dd.from_pandas(df, npartitions=10, name='x', sort=False)
ddf2 = ddf.set_index('x', shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array([1348550149000000000, 1348550149000000000, 1348558142000000000,
1348558142000000000, 1348585928000000000, 1348585928000000000,
1348600739000000000, 1348601706000000000, 1348600739000000000,
1348601706000000000, 1348614789000000000, 1348614789000000000,
1348621037000000000, 1348621038000000000, 1348621040000000000,
1348621037000000000, 1348621038000000000, 1348621040000000000,
1348637628000000000, 1348638159000000000, 1348638160000000000,
1348638159000000000, 1348638160000000000, 1348637628000000000,
1348646354000000000, 1348646354000000000, 1348659107000000000,
1348657111000000000, 1348659107000000000, 1348657111000000000,
1348672876000000000, 1348672876000000000, 1348682787000000000,
1348681985000000000, 1348682787000000000, 1348681985000000000,
1348728167000000000, 1348728167000000000, 1348730745000000000,
1348730745000000000, 1348750198000000000, 1348750198000000000,
1348750198000000000, 1348753539000000000, 1348753539000000000,
1348753539000000000, 1348754449000000000, 1348754449000000000,
1348761333000000000, 1348761554000000000, 1348761610000000000,
1348761333000000000, 1348761554000000000, 1348761610000000000,
1348782624000000000, 1348782624000000000, 1348782624000000000,
1348782624000000000])
vals = pd.to_datetime(vals, unit='ns')
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i:i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic is True
def test_set_index():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index('b', npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == 'b'
assert_eq(d2, full.set_index('b'))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == 'b'
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index('b')
assert d4.index.name == 'b'
assert_eq(d4, full.set_index('b'))
def test_set_index_interpolate():
df = pd.DataFrame({'x': [4, 1, 1, 3, 3], 'y': [1., 1, 1, 1, 2]})
d = dd.from_pandas(df, 2)
d1 = d.set_index('x', npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 3, 4])
d2 = d.set_index('y', npartitions=3)
assert d2.divisions[0] == 1.
assert 1. < d2.divisions[1] < d2.divisions[2] < 2.
assert d2.divisions[3] == 2.
def test_set_index_interpolate_int():
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({'x': 2 * L})
d = dd.from_pandas(df, 2)
d1 = d.set_index('x', npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range('20130101', periods=3))
s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
df = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index('notz', npartitions=2)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index('tz', npartitions=2)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame({'A': list('ABAABBABAA'),
'B': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'C': [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index('A', drop=drop),
pdf.set_index('A', drop=drop))
assert_eq(ddf.set_index('B', drop=drop),
pdf.set_index('B', drop=drop))
assert_eq(ddf.set_index('C', drop=drop),
pdf.set_index('C', drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop),
pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop),
pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop),
pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame({0: list('ABAABBABAA'),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop),
pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop),
pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with pytest.raises(NotImplementedError) as err:
ddf.set_index(['a', 'b'])
assert msg in str(err.value)
def test_set_index_sorted_true():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [10, 20, 30, 40],
'z': [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index('x', sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index('x', drop=drop),
df.set_index('x', drop=drop))
assert_eq(a.set_index(a.x, sorted=True, drop=drop),
df.set_index(df.x, drop=drop))
assert_eq(a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop))
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(ddf.set_index('x', sorted=True),
df.set_index('x'))
def test_set_index_sorted_min_max_same():
a = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 0, 0]})
b = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 1, 1]})
aa = delayed(a)
bb = delayed(b)
df = dd.from_delayed([aa, bb], meta=a)
assert not df.known_divisions
df2 = df.set_index('y', sorted=True)
assert df2.divisions == (0, 1, 1)
def test_set_index_empty_partition():
test_vals = [1, 2, 3]
converters = [
int,
float,
str,
lambda x: pd.to_datetime(x, unit='ns'),
]
for conv in converters:
df = pd.DataFrame([{'x': conv(i), 'y': i} for i in test_vals], columns=['x', 'y'])
ddf = dd.concat([
dd.from_pandas(df, npartitions=1),
dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
])
assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
assert assert_eq(ddf.set_index('x'), df.set_index('x'))
def test_set_index_on_empty():
test_vals = [1, 2, 3, 4]
converters = [
int,
float,
str,
lambda x: pd.to_datetime(x, unit='ns'),
]
for converter in converters:
df = pd.DataFrame([{'x': converter(x), 'y': x} for x in test_vals])
ddf = dd.from_pandas(df, npartitions=4)
assert ddf.npartitions > 1
ddf = ddf[ddf.y > df.y.max()].set_index('x')
expected_df = df[df.y > df.y.max()].set_index('x')
assert assert_eq(ddf, expected_df)
assert ddf.npartitions == 1
def test_compute_divisions():
from dask.dataframe.shuffle import compute_divisions
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [10, 20, 30, 40],
'z': [4, 3, 2, 1]},
index=[1, 3, 10, 20])
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
divisions = compute_divisions(a)
b = copy(a)
b.divisions = divisions
assert_eq(a, b, check_divisions=False)
assert b.known_divisions
def test_temporary_directory(tmpdir):
df = pd.DataFrame({'x': np.random.random(100),
'y': np.random.random(100),
'z': np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10, name='x', sort=False)
with dask.config.set(temporary_directory=str(tmpdir),
scheduler='processes'):
ddf2 = ddf.set_index('x', shuffle='disk')
ddf2.compute()
assert any(fn.endswith('.partd') for fn in os.listdir(str(tmpdir)))
def test_empty_partitions():
# See https://github.com/dask/dask/issues/2408
df = pd.DataFrame({'a': list(range(10))})
df['b'] = df['a'] % 3
df['c'] = df['b'].astype(str)
ddf = dd.from_pandas(df, npartitions=3)
ddf = ddf.set_index('b')
ddf = ddf.repartition(npartitions=3)
ddf.get_partition(0).compute()
assert_eq(ddf, df.set_index('b'))
ddf = ddf.set_index('c')
assert_eq(ddf, df.set_index('b').set_index('c'))
def test_remove_nans():
tests = [
((1, 1, 2), (1, 1, 2)),
((None, 1, 2), (1, 1, 2)),
((1, None, 2), (1, 2, 2)),
((1, 2, None), (1, 2, 2)),
((1, 2, None, None), (1, 2, 2, 2)),
((None, None, 1, 2), (1, 1, 1, 2)),
((1, None, None, 2), (1, 2, 2, 2)),
((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
]
converters = [
(int, np.nan),
(float, np.nan),
(str, np.nan),
(lambda x: pd.to_datetime(x, unit='ns'), np.datetime64('NaT')),
]
for conv, none_val in converters:
for inputs, expected in tests:
params = [none_val if x is None else conv(x) for x in inputs]
expected = [conv(x) for x in expected]
assert remove_nans(params) == expected
@pytest.mark.slow
def test_gh_2730():
large = pd.DataFrame({'KEY': np.arange(0, 50000)})
small = pd.DataFrame({'KEY': np.arange(25, 500)})
dd_left = dd.from_pandas(small, npartitions=3)
dd_right = dd.from_pandas(large, npartitions=257)
with dask.config.set(shuffle='tasks', scheduler='sync'):
dd_merged = dd_left.merge(dd_right, how='inner', on='KEY')
result = dd_merged.compute()
expected = large.merge(small, how='inner', on='KEY')
tm.assert_frame_equal(
result.sort_values('KEY').reset_index(drop=True),
expected
)
| TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/dask/dataframe/tests/test_shuffle.py | Python | gpl-3.0 | 25,674 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tableofcontents.py
__version__=''' $Id$ '''
__doc__="""Experimental class to generate Tables of Contents easily
This module defines a single TableOfContents() class that can be used to
automatically create a table of contents for Platypus documents like
this:
story = []
toc = TableOfContents()
story.append(toc)
# some heading paragraphs here...
doc = MyTemplate(path)
doc.multiBuild(story)
The data needed to create the table is a list of (level, text, pageNum)
triplets, plus some paragraph styles for each level of the table itself.
The triplets will usually be created in a document template's method
like afterFlowable(), making notification calls using the notify()
method with appropriate data like this:
(level, text, pageNum) = ...
self.notify('TOCEntry', (level, text, pageNum))
Optionally the list can contain four items in which case the last item
is a destination key which the entry should point to. A bookmark
with this key needs to be created first like this:
key = 'ch%s' % self.seq.nextf('chapter')
self.canv.bookmarkPage(key)
self.notify('TOCEntry', (level, text, pageNum, key))
The table of contents needs at least two passes over the Platypus
story, which is why the multiBuild() method must be called.
The level<NUMBER>ParaStyle variables are the paragraph styles used
to format the entries in the table of contents. Their indentation
is calculated like this: each entry starts at a multiple of some
constant named delta. If one entry spans more than one line, all
lines after the first are indented by the same constant named
epsilon.
"""
from reportlab.lib import enums
from reportlab.lib.units import cm
from reportlab.lib.utils import commasplit, escapeOnce, encode_label, decode_label
from reportlab.lib.styles import ParagraphStyle, _baseFontName
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import IndexingFlowable
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.flowables import Spacer, Flowable
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.pdfgen import canvas
def unquote(txt):
from xml.sax.saxutils import unescape
    return unescape(txt, {"&apos;": "'", "&quot;": '"'})
try:
set
except:
class set(list):
def add(self,x):
if x not in self:
list.append(self,x)
def drawPageNumbers(canvas, style, pages, availWidth, availHeight, dot=' . '):
'''
Draws pagestr on the canvas using the given style.
If dot is None, pagestr is drawn at the current position in the canvas.
If dot is a string, pagestr is drawn right-aligned. If the string is not empty,
the gap is filled with it.
'''
pages.sort()
pagestr = ', '.join([str(p) for p, _ in pages])
x, y = canvas._curr_tx_info['cur_x'], canvas._curr_tx_info['cur_y']
fontSize = style.fontSize
pagestrw = stringWidth(pagestr, style.fontName, fontSize)
#if it's too long to fit, we need to shrink to fit in 10% increments.
#it would be very hard to output multiline entries.
#however, we impose a minimum size of 1 point as we don't want an
#infinite loop. Ultimately we should allow a TOC entry to spill
#over onto a second line if needed.
freeWidth = availWidth-x
while pagestrw > freeWidth and fontSize >= 1.0:
fontSize = 0.9 * fontSize
pagestrw = stringWidth(pagestr, style.fontName, fontSize)
if isinstance(dot, str):
if dot:
dotw = stringWidth(dot, style.fontName, fontSize)
dotsn = int((availWidth-x-pagestrw)/dotw)
else:
dotsn = dotw = 0
text = '%s%s' % (dotsn * dot, pagestr)
newx = availWidth - dotsn*dotw - pagestrw
pagex = availWidth - pagestrw
elif dot is None:
text = ', ' + pagestr
newx = x
pagex = newx
else:
        raise TypeError('Argument dot should either be None or an instance of str.')
tx = canvas.beginText(newx, y)
tx.setFont(style.fontName, fontSize)
tx.setFillColor(style.textColor)
tx.textLine(text)
canvas.drawText(tx)
commaw = stringWidth(', ', style.fontName, fontSize)
for p, key in pages:
if not key:
continue
w = stringWidth(str(p), style.fontName, fontSize)
canvas.linkRect('', key, (pagex, y, pagex+w, y+style.leading), relative=1)
pagex += w + commaw
# Default paragraph styles for tables of contents.
# (This could also be generated automatically or even
# on-demand if it is not known how many levels the
# TOC will finally need to display...)
delta = 1*cm
epsilon = 0.5*cm
defaultLevelStyles = [
ParagraphStyle(
name='Level 0',
fontName=_baseFontName,
fontSize=10,
leading=11,
firstLineIndent = 0,
leftIndent = epsilon)]
defaultTableStyle = \
TableStyle([
('VALIGN', (0,0), (-1,-1), 'TOP'),
('RIGHTPADDING', (0,0), (-1,-1), 0),
('LEFTPADDING', (0,0), (-1,-1), 0),
])
class TableOfContents(IndexingFlowable):
"""This creates a formatted table of contents.
It presumes a correct block of data is passed in.
The data block contains a list of (level, text, pageNumber)
triplets. You can supply a paragraph style for each level
(starting at zero).
Set dotsMinLevel to determine from which level on a line of
dots should be drawn between the text and the page number.
If dotsMinLevel is set to a negative value, no dotted lines are drawn.
"""
def __init__(self):
self.rightColumnWidth = 72
self.levelStyles = defaultLevelStyles
self.tableStyle = defaultTableStyle
self.dotsMinLevel = 1
self._table = None
self._entries = []
self._lastEntries = []
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries[:]
self.clearEntries()
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'TOCEntry' events only.
"""
if kind == 'TOCEntry':
self.addEntry(*stuff)
def clearEntries(self):
self._entries = []
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
try:
return self.levelStyles[n]
except IndexError:
prevstyle = self.getLevelStyle(n-1)
self.levelStyles.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+delta,
leftIndent = prevstyle.leftIndent+delta))
return self.levelStyles[n]
def addEntry(self, level, text, pageNum, key=None):
"""Adds one entry to the table of contents.
This allows incremental buildup by a doctemplate.
Requires that enough styles are defined."""
assert type(level) == type(1), "Level must be an integer"
self._entries.append((level, text, pageNum, key))
def addEntries(self, listOfEntries):
"""Bulk creation of entries in the table of contents.
If you knew the titles but not the page numbers, you could
supply them to get sensible output on the first run."""
for entryargs in listOfEntries:
self.addEntry(*entryargs)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0,'Placeholder for table of contents',0,None)]
else:
_tempEntries = self._lastEntries
def drawTOCEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
label = label.split(',')
page, level, key = int(label[0]), int(label[1]), eval(label[2],{})
style = self.getLevelStyle(level)
if self.dotsMinLevel >= 0 and level >= self.dotsMinLevel:
dot = ' . '
else:
dot = ''
drawPageNumbers(canvas, style, [(page, key)], availWidth, availHeight, dot)
self.canv.drawTOCEntryEnd = drawTOCEntryEnd
tableData = []
for (level, text, pageNum, key) in _tempEntries:
style = self.getLevelStyle(level)
if key:
text = '<a href="#%s">%s</a>' % (key, text)
keyVal = repr(key).replace(',','\\x2c').replace('"','\\x2c')
else:
keyVal = None
para = Paragraph('%s<onDraw name="drawTOCEntryEnd" label="%d,%d,%s"/>' % (text, pageNum, level, keyVal), style)
if style.spaceBefore:
tableData.append([Spacer(1, style.spaceBefore),])
tableData.append([para,])
self._table = Table(tableData, colWidths=(availWidth,), style=self.tableStyle)
self.width, self.height = self._table.wrapOn(self.canv,availWidth, availHeight)
return (self.width, self.height)
def split(self, availWidth, availHeight):
"""At this stage we do not care about splitting the entries,
we will just return a list of platypus tables. Presumably the
calling app has a pointer to the original TableOfContents object;
Platypus just sees tables.
"""
return self._table.splitOn(self.canv,availWidth, availHeight)
def drawOn(self, canvas, x, y, _sW=0):
"""Don't do this at home! The standard calls for implementing
draw(); we are hooking this in order to delegate ALL the drawing
work to the embedded table object.
"""
self._table.drawOn(canvas, x, y, _sW)
def makeTuple(x):
if hasattr(x, '__iter__'):
return tuple(x)
return (x,)
class SimpleIndex(IndexingFlowable):
"""Creates multi level indexes.
    The styling can be customized and alphabetic headers turned on and off.
"""
def __init__(self, **kwargs):
"""
Constructor of SimpleIndex.
Accepts the same arguments as the setup method.
"""
#keep stuff in a dictionary while building
self._entries = {}
self._lastEntries = {}
self._flowable = None
self.setup(**kwargs)
def getFormatFunc(self,format):
try:
D = {}
exec('from reportlab.lib.sequencer import _format_%s as formatFunc' % format, D)
return D['formatFunc']
except ImportError:
raise ValueError('Unknown format %r' % format)
def setup(self, style=None, dot=None, tableStyle=None, headers=True, name=None, format='123', offset=0):
"""
This method makes it possible to change styling and other parameters on an existing object.
style is the paragraph style to use for index entries.
        dot can either be None or a string. If it's None, entries are immediately followed by their
corresponding page numbers. If it's a string, page numbers are aligned on the right side
of the document and the gap filled with a repeating sequence of the string.
tableStyle is the style used by the table which the index uses to draw itself. Use this to
change properties like spacing between elements.
headers is a boolean. If it is True, alphabetic headers are displayed in the Index when the first
letter changes. If False, we just output some extra space before the next item
        name makes it possible to use several indexes in one document. If you want this, use this
        parameter to give each index a unique name. You can then index a term by referring to the
name of the index which it should appear in:
<index item="term" name="myindex" />
format can be 'I', 'i', '123', 'ABC', 'abc'
"""
if style is None:
style = ParagraphStyle(name='index',
fontName=_baseFontName,
fontSize=11)
self.textStyle = style
self.tableStyle = tableStyle or defaultTableStyle
self.dot = dot
self.headers = headers
if name is None:
from reportlab.platypus.paraparser import DEFAULT_INDEX_NAME as name
self.name = name
self.formatFunc = self.getFormatFunc(format)
self.offset = offset
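    # Illustrative wiring (an assumption based on the docstring above, not a verbatim
    # recipe from this module): index terms are emitted from markup such as
    #   <index item="Widgets" name="myindex" />
    # and the index is rendered by appending this flowable to the story and building
    # with the canvas maker, e.g. doc.multiBuild(story, canvasmaker=index.getCanvasMaker()).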
def __call__(self,canv,kind,label):
try:
terms, format, offset = decode_label(label)
except:
terms = label
format = offset = None
if format is None:
formatFunc = self.formatFunc
else:
formatFunc = self.getFormatFunc(format)
if offset is None:
offset = self.offset
terms = commasplit(terms)
pns = formatFunc(canv.getPageNumber()-offset)
key = 'ix_%s_%s_p_%s' % (self.name, label, pns)
info = canv._curr_tx_info
canv.bookmarkHorizontal(key, info['cur_x'], info['cur_y'] + info['leading'])
self.addEntry(terms, pns, key)
def getCanvasMaker(self, canvasmaker=canvas.Canvas):
def newcanvasmaker(*args, **kwargs):
from reportlab.pdfgen import canvas
c = canvasmaker(*args, **kwargs)
setattr(c,self.name,self)
return c
return newcanvasmaker
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries.copy()
self.clearEntries()
def clearEntries(self):
self._entries = {}
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'IndexEntry' events only.
"""
if kind == 'IndexEntry':
(text, pageNum) = stuff
self.addEntry(text, pageNum)
def addEntry(self, text, pageNum, key=None):
"""Allows incremental buildup"""
self._entries.setdefault(makeTuple(text),set([])).add((pageNum, key))
def split(self, availWidth, availHeight):
"""At this stage we do not care about splitting the entries,
we will just return a list of platypus tables. Presumably the
calling app has a pointer to the original TableOfContents object;
Platypus just sees tables.
"""
return self._flowable.splitOn(self.canv,availWidth, availHeight)
def _getlastEntries(self, dummy=[(['Placeholder for index'],enumerate((None,)*3))]):
'''Return the last run's entries! If there are none, returns dummy.'''
if not self._lastEntries:
if self._entries:
return list(self._entries.items())
return dummy
return list(self._lastEntries.items())
def _build(self,availWidth,availHeight):
_tempEntries = self._getlastEntries()
def getkey(seq):
return [x.upper() for x in seq[0]]
_tempEntries.sort(key=getkey)
leveloffset = self.headers and 1 or 0
def drawIndexEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
style = self.getLevelStyle(leveloffset)
pages = decode_label(label)
drawPageNumbers(canvas, style, pages, availWidth, availHeight, self.dot)
self.canv.drawIndexEntryEnd = drawIndexEntryEnd
alpha = ''
tableData = []
lastTexts = []
alphaStyle = self.getLevelStyle(0)
for texts, pageNumbers in _tempEntries:
texts = list(texts)
#track when the first character changes; either output some extra
#space, or the first letter on a row of its own. We cannot do
#widow/orphan control, sadly.
nalpha = texts[0][0].upper()
if alpha != nalpha:
alpha = nalpha
if self.headers:
header = alpha
else:
header = ' '
tableData.append([Spacer(1, alphaStyle.spaceBefore),])
tableData.append([Paragraph(header, alphaStyle),])
tableData.append([Spacer(1, alphaStyle.spaceAfter),])
i, diff = listdiff(lastTexts, texts)
if diff:
lastTexts = texts
texts = texts[i:]
label = encode_label(list(pageNumbers))
texts[-1] = '%s<onDraw name="drawIndexEntryEnd" label="%s"/>' % (texts[-1], label)
for text in texts:
#Platypus and RML differ on how parsed XML attributes are escaped.
#e.g. <index item="M&S"/>. The only place this seems to bite us is in
#the index entries so work around it here.
text = escapeOnce(text)
style = self.getLevelStyle(i+leveloffset)
para = Paragraph(text, style)
if style.spaceBefore:
tableData.append([Spacer(1, style.spaceBefore),])
tableData.append([para,])
i += 1
self._flowable = Table(tableData, colWidths=[availWidth], style=self.tableStyle)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
self._build(availWidth,availHeight)
self.width, self.height = self._flowable.wrapOn(self.canv,availWidth, availHeight)
return self.width, self.height
def drawOn(self, canvas, x, y, _sW=0):
"""Don't do this at home! The standard calls for implementing
draw(); we are hooking this in order to delegate ALL the drawing
work to the embedded table object.
"""
self._flowable.drawOn(canvas, x, y, _sW)
def draw(self):
t = self._flowable
ocanv = getattr(t,'canv',None)
if not ocanv:
t.canv = self.canv
try:
t.draw()
finally:
if not ocanv:
del t.canv
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
if not hasattr(self.textStyle, '__iter__'):
self.textStyle = [self.textStyle]
try:
return self.textStyle[n]
except IndexError:
self.textStyle = list(self.textStyle)
prevstyle = self.getLevelStyle(n-1)
self.textStyle.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+.2*cm,
leftIndent = prevstyle.leftIndent+.2*cm))
return self.textStyle[n]
AlphabeticIndex = SimpleIndex
def listdiff(l1, l2):
    """Return the index of the first position where the sequences differ,
    together with the remaining suffix of l2 from that position."""
    m = min(len(l1), len(l2))
    for i in range(m):
        if l1[i] != l2[i]:
            return i, l2[i:]
    return m, l2[m:]
class ReferenceText(IndexingFlowable):
"""Fakery to illustrate how a reference would work if we could
put it in a paragraph."""
def __init__(self, textPattern, targetKey):
self.textPattern = textPattern
self.target = targetKey
self.paraStyle = ParagraphStyle('tmp')
self._lastPageNum = None
self._pageNum = -999
self._para = None
def beforeBuild(self):
self._lastPageNum = self._pageNum
def notify(self, kind, stuff):
if kind == 'Target':
(key, pageNum) = stuff
if key == self.target:
self._pageNum = pageNum
def wrap(self, availWidth, availHeight):
text = self.textPattern % self._lastPageNum
self._para = Paragraph(text, self.paraStyle)
return self._para.wrap(availWidth, availHeight)
def drawOn(self, canvas, x, y, _sW=0):
self._para.drawOn(canvas, x, y, _sW)
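if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: a minimal document that
    # exercises SimpleIndex. Only the public reportlab API is assumed; the output
    # filename and the sample text are placeholders.
    from reportlab.lib.styles import getSampleStyleSheet
    from reportlab.platypus.doctemplate import SimpleDocTemplate
    from reportlab.platypus.paragraph import Paragraph
    styles = getSampleStyleSheet()
    index = SimpleIndex(dot=' .', headers=True)
    story = [Paragraph('Apples<index item="apples"/> keep the doctor away.', styles['Normal']),
             Paragraph('Oranges<index item="oranges"/> are a different story.', styles['Normal']),
             index]
    # multiBuild runs repeated passes so the collected entries and page numbers converge.
    SimpleDocTemplate('simpleindex_demo.pdf').multiBuild(story, canvasmaker=index.getCanvasMaker())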
| yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/reportlab/platypus/tableofcontents.py | Python | mit | 20,681 |
# -*- coding: utf-8 -*-
# Copyright © 2012-2022 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Copy static files into the output folder."""
import os
from nikola.plugin_categories import Task
from nikola import utils
class CopyFiles(Task):
"""Copy static files into the output folder."""
name = "copy_files"
def gen_tasks(self):
"""Copy static files into the output folder."""
kw = {
'files_folders': self.site.config['FILES_FOLDERS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'filters': self.site.config['FILTERS'],
}
yield self.group_task()
for src in kw['files_folders']:
dst = kw['output_folder']
filters = kw['filters']
real_dst = os.path.join(dst, kw['files_folders'][src])
for task in utils.copy_tree(src, real_dst, link_cutoff=dst):
task['basename'] = self.name
task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_files')]
yield utils.apply_filters(task, filters, skip_ext=['.html'])
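# Illustrative sketch, not part of the plugin: typical conf.py values this task consumes.
# Keys of FILES_FOLDERS are source folders, values are destinations relative to
# OUTPUT_FOLDER (the folder names below are placeholders).
#   FILES_FOLDERS = {'files': ''}        # copies ./files/*      -> output/
#   FILES_FOLDERS = {'downloads': 'dl'}  # copies ./downloads/*  -> output/dl/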
| getnikola/nikola | nikola/plugins/task/copy_files.py | Python | mit | 2,163 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: cifar.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
# Yukun Chen <cykustc@gmail.com>
import os
import pickle
import numpy as np
import six
from six.moves import range
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['Cifar10', 'Cifar100']
DATA_URL_CIFAR_10 = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
DATA_URL_CIFAR_100 = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
def maybe_download_and_extract(dest_directory, cifar_classnum):
"""Download and extract the tarball from Alex's website.
copied from tensorflow example """
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
cifar_foldername = 'cifar-10-batches-py'
else:
cifar_foldername = 'cifar-100-python'
if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):
logger.info("Found cifar{} data in {}.".format(cifar_classnum, dest_directory))
return
else:
DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100
download(DATA_URL, dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
import tarfile
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def read_cifar(filenames, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
ret = []
    for fname in filenames:
        # use a context manager so the file handle is closed even if unpickling fails
        with open(fname, 'rb') as fo:
            if six.PY3:
                dic = pickle.load(fo, encoding='bytes')
            else:
                dic = pickle.load(fo)
        data = dic[b'data']
        if cifar_classnum == 10:
            label = dic[b'labels']
            IMG_NUM = 10000  # cifar10 data are split into blocks of 10000
        elif cifar_classnum == 100:
            label = dic[b'fine_labels']
            IMG_NUM = 50000 if 'train' in fname else 10000
for k in range(IMG_NUM):
img = data[k].reshape(3, 32, 32)
img = np.transpose(img, [1, 2, 0])
ret.append([img, label[k]])
return ret
def get_filenames(dir, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
filenames = [os.path.join(
dir, 'cifar-10-batches-py', 'data_batch_%d' % i) for i in range(1, 6)]
filenames.append(os.path.join(
dir, 'cifar-10-batches-py', 'test_batch'))
elif cifar_classnum == 100:
filenames = [os.path.join(dir, 'cifar-100-python', 'train'),
os.path.join(dir, 'cifar-100-python', 'test')]
return filenames
class CifarBase(RNGDataFlow):
def __init__(self, train_or_test, shuffle=True, dir=None, cifar_classnum=10):
assert train_or_test in ['train', 'test']
assert cifar_classnum == 10 or cifar_classnum == 100
self.cifar_classnum = cifar_classnum
if dir is None:
dir = get_dataset_path('cifar{}_data'.format(cifar_classnum))
maybe_download_and_extract(dir, self.cifar_classnum)
fnames = get_filenames(dir, cifar_classnum)
if train_or_test == 'train':
self.fs = fnames[:-1]
else:
self.fs = [fnames[-1]]
for f in self.fs:
if not os.path.isfile(f):
raise ValueError('Failed to find file: ' + f)
self.train_or_test = train_or_test
self.data = read_cifar(self.fs, cifar_classnum)
self.dir = dir
self.shuffle = shuffle
def size(self):
return 50000 if self.train_or_test == 'train' else 10000
def get_data(self):
idxs = np.arange(len(self.data))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
# since cifar is quite small, just do it for safety
yield self.data[k]
def get_per_pixel_mean(self):
"""
return a mean image of all (train and test) images of size 32x32x3
"""
fnames = get_filenames(self.dir, self.cifar_classnum)
all_imgs = [x[0] for x in read_cifar(fnames, self.cifar_classnum)]
arr = np.array(all_imgs, dtype='float32')
mean = np.mean(arr, axis=0)
return mean
def get_per_channel_mean(self):
"""
return three values as mean of each channel
"""
mean = self.get_per_pixel_mean()
return np.mean(mean, axis=(0, 1))
class Cifar10(CifarBase):
"""
Produces [image, label] in Cifar10 dataset,
image is 32x32x3 in the range [0,255].
label is an int.
"""
def __init__(self, train_or_test, shuffle=True, dir=None):
"""
Args:
train_or_test (str): either 'train' or 'test'.
shuffle (bool): shuffle the dataset.
"""
super(Cifar10, self).__init__(train_or_test, shuffle, dir, 10)
class Cifar100(CifarBase):
""" Similar to Cifar10"""
def __init__(self, train_or_test, shuffle=True, dir=None):
super(Cifar100, self).__init__(train_or_test, shuffle, dir, 100)
if __name__ == '__main__':
ds = Cifar10('train')
from tensorpack.dataflow.dftools import dump_dataflow_images
mean = ds.get_per_channel_mean()
print(mean)
dump_dataflow_images(ds, '/tmp/cifar', 100)
# for (img, label) in ds.get_data():
# from IPython import embed; embed()
# break
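    # Illustrative sketch, not part of the module: typical consumption of the DataFlow
    # from user code (assumes the dataset has already been downloaded).
    #   ds = Cifar10('test', shuffle=False)
    #   ds.reset_state()                  # RNGDataFlow wants reset_state() before use
    #   for img, label in ds.get_data():
    #       assert img.shape == (32, 32, 3)
    #       break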
| haamoon/tensorpack | tensorpack/dataflow/dataset/cifar.py | Python | apache-2.0 | 5,459 |
"""Gold subscription forms"""
from django import forms
from stripe.error import InvalidRequestError
from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin
from .models import LEVEL_CHOICES, GoldUser
class GoldSubscriptionForm(StripeResourceMixin, StripeModelForm):
"""Gold subscription payment form
This extends the common base form for handling Stripe subscriptions. Credit
card fields for card number, expiry, and CVV are extended from
    :py:class:`StripeModelForm`, with additional methods from
    :py:class:`StripeResourceMixin` for common operations against the Stripe API.
"""
class Meta:
model = GoldUser
fields = ['last_4_digits', 'level']
last_4_digits = forms.CharField(
required=True,
min_length=4,
max_length=4,
widget=forms.HiddenInput(attrs={
'data-bind': 'valueInit: card_digits, value: card_digits'
})
)
level = forms.ChoiceField(
required=True,
choices=LEVEL_CHOICES,
)
def clean(self):
self.instance.user = self.customer
return super(GoldSubscriptionForm, self).clean()
def validate_stripe(self):
subscription = self.get_subscription()
self.instance.stripe_id = subscription.customer
self.instance.subscribed = True
def get_customer_kwargs(self):
return {
'description': self.customer.get_full_name() or self.customer.username,
'email': self.customer.email,
'id': self.instance.stripe_id or None
}
def get_subscription(self):
customer = self.get_customer()
try:
# TODO get the first sub more intelligently
subscriptions = customer.subscriptions.all(limit=5)
subscription = subscriptions.data[0]
subscription.plan = self.cleaned_data['level']
if 'stripe_token' in self.cleaned_data:
subscription.source = self.cleaned_data['stripe_token']
subscription.save()
return subscription
except (InvalidRequestError, AttributeError, IndexError):
subscription = customer.subscriptions.create(
plan=self.cleaned_data['level'],
source=self.cleaned_data['stripe_token']
)
return subscription
def clear_card_data(self):
super(GoldSubscriptionForm, self).clear_card_data()
self.data['last_4_digits'] = None
class GoldProjectForm(forms.Form):
    """Form to add a supported project to the user's Gold subscription."""
project = forms.CharField(
required=True,
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
self.projects = kwargs.pop('projects', None)
super(GoldProjectForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(GoldProjectForm, self).clean()
if self.projects.count() < self.user.num_supported_projects:
return cleaned_data
else:
self.add_error(None, 'You already have the max number of supported projects.')
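# Illustrative sketch, not part of this module (the view-side names are hypothetical):
# GoldProjectForm expects the requesting user and their already-supported projects to be
# passed in as keyword arguments, e.g.
#   form = GoldProjectForm(data=request.POST, user=request.user, projects=supported_projects)
#   if form.is_valid():
#       project_slug = form.cleaned_data['project']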
| stevepiercy/readthedocs.org | readthedocs/gold/forms.py | Python | mit | 3,072 |
"""RESTful platform for notify component."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PARAMS,
CONF_PASSWORD,
CONF_RESOURCE,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BAD_REQUEST,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import setup_reload_service
from homeassistant.helpers.template import Template
from . import DOMAIN, PLATFORMS
CONF_DATA = "data"
CONF_DATA_TEMPLATE = "data_template"
CONF_MESSAGE_PARAMETER_NAME = "message_param_name"
CONF_TARGET_PARAMETER_NAME = "target_param_name"
CONF_TITLE_PARAMETER_NAME = "title_param_name"
DEFAULT_MESSAGE_PARAM_NAME = "message"
DEFAULT_METHOD = "GET"
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(
CONF_MESSAGE_PARAMETER_NAME, default=DEFAULT_MESSAGE_PARAM_NAME
): cv.string,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(
["POST", "GET", "POST_JSON"]
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_PARAMS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TARGET_PARAMETER_NAME): cv.string,
vol.Optional(CONF_TITLE_PARAMETER_NAME): cv.string,
vol.Optional(CONF_DATA): vol.All(dict, cv.template_complex),
vol.Optional(CONF_DATA_TEMPLATE): vol.All(dict, cv.template_complex),
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the RESTful notification service."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
resource = config.get(CONF_RESOURCE)
method = config.get(CONF_METHOD)
headers = config.get(CONF_HEADERS)
params = config.get(CONF_PARAMS)
message_param_name = config.get(CONF_MESSAGE_PARAMETER_NAME)
title_param_name = config.get(CONF_TITLE_PARAMETER_NAME)
target_param_name = config.get(CONF_TARGET_PARAMETER_NAME)
data = config.get(CONF_DATA)
data_template = config.get(CONF_DATA_TEMPLATE)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
verify_ssl = config.get(CONF_VERIFY_SSL)
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = requests.auth.HTTPDigestAuth(username, password)
else:
auth = requests.auth.HTTPBasicAuth(username, password)
else:
auth = None
return RestNotificationService(
hass,
resource,
method,
headers,
params,
message_param_name,
title_param_name,
target_param_name,
data,
data_template,
auth,
verify_ssl,
)
class RestNotificationService(BaseNotificationService):
"""Implementation of a notification service for REST."""
def __init__(
self,
hass,
resource,
method,
headers,
params,
message_param_name,
title_param_name,
target_param_name,
data,
data_template,
auth,
verify_ssl,
):
"""Initialize the service."""
self._resource = resource
self._hass = hass
self._method = method.upper()
self._headers = headers
self._params = params
self._message_param_name = message_param_name
self._title_param_name = title_param_name
self._target_param_name = target_param_name
self._data = data
self._data_template = data_template
self._auth = auth
self._verify_ssl = verify_ssl
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
data = {self._message_param_name: message}
if self._title_param_name is not None:
data[self._title_param_name] = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
if self._target_param_name is not None and ATTR_TARGET in kwargs:
# Target is a list as of 0.29 and we don't want to break existing
# integrations, so just return the first target in the list.
data[self._target_param_name] = kwargs[ATTR_TARGET][0]
if self._data_template or self._data:
kwargs[ATTR_MESSAGE] = message
def _data_template_creator(value):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [_data_template_creator(item) for item in value]
if isinstance(value, dict):
return {
key: _data_template_creator(item) for key, item in value.items()
}
if not isinstance(value, Template):
return value
value.hass = self._hass
return value.async_render(kwargs, parse_result=False)
if self._data:
data.update(_data_template_creator(self._data))
if self._data_template:
data.update(_data_template_creator(self._data_template))
if self._method == "POST":
response = requests.post(
self._resource,
headers=self._headers,
params=self._params,
data=data,
timeout=10,
auth=self._auth,
verify=self._verify_ssl,
)
elif self._method == "POST_JSON":
response = requests.post(
self._resource,
headers=self._headers,
params=self._params,
json=data,
timeout=10,
auth=self._auth,
verify=self._verify_ssl,
)
else: # default GET
response = requests.get(
self._resource,
headers=self._headers,
params={**self._params, **data} if self._params else data,
timeout=10,
auth=self._auth,
verify=self._verify_ssl,
)
if (
response.status_code >= HTTP_INTERNAL_SERVER_ERROR
and response.status_code < 600
):
            _LOGGER.error(
                "Server error. Response %d: %s:", response.status_code, response.reason
            )
elif (
response.status_code >= HTTP_BAD_REQUEST
and response.status_code < HTTP_INTERNAL_SERVER_ERROR
):
            _LOGGER.error(
                "Client error. Response %d: %s:", response.status_code, response.reason
            )
elif response.status_code >= HTTP_OK and response.status_code < 300:
_LOGGER.debug(
"Success. Response %d: %s:", response.status_code, response.reason
)
else:
_LOGGER.debug("Response %d: %s:", response.status_code, response.reason)
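# Illustrative sketch, not part of this module: an assumed configuration.yaml entry that
# the PLATFORM_SCHEMA above accepts (the host and parameter names are placeholders).
#   notify:
#     - platform: rest
#       name: my_rest_notifier
#       resource: http://192.168.1.10:8080/notify
#       method: POST_JSON
#       title_param_name: title
#       message_param_name: message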
| turbokongen/home-assistant | homeassistant/components/rest/notify.py | Python | apache-2.0 | 7,690 |
# -*- coding: utf-8 -*-
from bika.lims.content.analysis import Analysis
from bika.lims.testing import BIKA_FUNCTIONAL_TESTING
from bika.lims.tests.base import BikaFunctionalTestCase
from bika.lims.utils import tmpID
from bika.lims.utils.analysisrequest import create_analysisrequest
from bika.lims.workflow import doActionFor
from plone.app.testing import login, logout
from plone.app.testing import TEST_USER_NAME
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType
try:
    import unittest2 as unittest
except ImportError:
    import unittest
class Test_LIMS2001(BikaFunctionalTestCase):
"""
When adding a duplicate for an AR in a worksheet, only the first analysis
gets duplicated: https://jira.bikalabs.com/browse/LIMS-2001
"""
layer = BIKA_FUNCTIONAL_TESTING
def setUp(self):
super(Test_LIMS2001, self).setUp()
login(self.portal, TEST_USER_NAME)
def tearDown(self):
logout()
super(Test_LIMS2001, self).tearDown()
def test_LIMS2001(self):
# ARs creation
# Client: Happy Hills
# SampleType: Apple Pulp
# Contact: Rita Mohale
client = self.portal.clients['client-1']
sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
values = {'Client': client.UID(),
'Contact': client.getContacts()[0].UID(),
'SamplingDate': '2015-01-01',
'SampleType': sampletype.UID()}
# analysis-service-3: Calcium (Ca)
# analysis-service-6: Cooper (Cu)
# analysis-service-7: Iron (Fe)
servs = self.portal.bika_setup.bika_analysisservices
aservs = [servs['analysisservice-3'],
servs['analysisservice-6'],
servs['analysisservice-7']]
services = [s.UID() for s in aservs]
request = {}
ar = create_analysisrequest(client, request, values, services)
sp = _createObjectByType('SamplePartition', ar.getSample(), tmpID())
wf = getToolByName(ar, 'portal_workflow')
wf.doActionFor(ar, 'receive')
# Worksheet creation
wsfolder = self.portal.worksheets
ws = _createObjectByType("Worksheet", wsfolder, tmpID())
ws.processForm()
bsc = getToolByName(self.portal, 'bika_setup_catalog')
lab_contacts = [o.getObject() for o in bsc(portal_type="LabContact")]
lab_contact = [o for o in lab_contacts if o.getUsername() == 'analyst1']
self.assertEquals(len(lab_contact), 1)
lab_contact = lab_contact[0]
ws.setAnalyst(lab_contact)
ws.setResultsLayout(self.portal.bika_setup.getWorksheetLayout())
# Add analyses into the worksheet
self.request['context_uid'] = ws.UID()
for analysis in ar.getAnalyses():
an = analysis.getObject()
an.setSamplePartition(sp)
ws.addAnalysis(an)
self.assertEquals(len(ws.getAnalyses()), 3)
# Add a duplicate for slot 1 (there's only one slot)
ws.addDuplicateAnalyses('1', None)
ans = ws.getAnalyses()
reg = [an for an in ans if an.portal_type == 'Analysis']
dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
regkeys = [an.getKeyword() for an in reg]
dupkeys = [an.getKeyword() for an in dup]
regkeys.sort()
dupkeys.sort()
expregs = ['Ca', 'Cu', 'Fe']
expdups = ['Ca', 'Cu', 'Fe']
self.assertEquals(regkeys, expregs)
self.assertEquals(dupkeys, expdups)
# Add a result, submit and add another duplicate
an1 = [an for an in reg if an.getKeyword() == 'Cu'][0]
an1.setResult('13')
wf.doActionFor(an1, 'submit')
ws.addDuplicateAnalyses('1', None)
ans = ws.getAnalyses()
reg = [an for an in ans if an.portal_type == 'Analysis']
dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
regkeys = [an.getKeyword() for an in reg]
dupkeys = [an.getKeyword() for an in dup]
regkeys.sort()
dupkeys.sort()
expregs = ['Ca', 'Cu', 'Fe']
expdups = ['Ca', 'Ca', 'Cu', 'Cu', 'Fe', 'Fe']
self.assertEquals(regkeys, expregs)
self.assertEquals(dupkeys, expdups)
# Retract the previous analysis and add another duplicate
wf.doActionFor(an1, 'retract')
ws.addDuplicateAnalyses('1', None)
ans = ws.getAnalyses()
reg = [an for an in ans if an.portal_type == 'Analysis']
dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
regkeys = [an.getKeyword() for an in reg]
dupkeys = [an.getKeyword() for an in dup]
regkeys.sort()
dupkeys.sort()
expregs = ['Ca', 'Cu', 'Cu', 'Fe']
expdups = ['Ca', 'Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe', 'Fe']
self.assertEquals(regkeys, expregs)
self.assertEquals(dupkeys, expdups)
# Do the same process, but with two ARs
ar = create_analysisrequest(client, request, values, services)
sp = _createObjectByType('SamplePartition', ar.getSample(), tmpID())
wf.doActionFor(ar, 'receive')
# Add analyses into the worksheet
for analysis in ar.getAnalyses():
an = analysis.getObject()
an.setSamplePartition(sp)
ws.addAnalysis(an)
ans = ws.getAnalyses()
reg = [an for an in ans if an.portal_type == 'Analysis']
regkeys = [an.getKeyword() for an in reg]
regkeys.sort()
expregs = ['Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe']
self.assertEquals(regkeys, expregs)
# Add a duplicte for the second AR
# slot 1: previous AR
# slot 2: Duplicate 1 (analysis without result)
# slot 3: Duplicate 2 (analysis with submitted result)
# slot 4: Duplicate 3 (analysis retracted)
# slot 5: this new AR
ws.addDuplicateAnalyses('5', None)
ans = ws.getAnalyses()
reg = [an for an in ans if an.portal_type == 'Analysis']
dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
regkeys = [an.getKeyword() for an in reg]
dupkeys = [an.getKeyword() for an in dup]
regkeys.sort()
dupkeys.sort()
expregs = ['Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe']
expdups = ['Ca', 'Ca', 'Ca', 'Ca',
'Cu', 'Cu', 'Cu', 'Cu',
'Fe', 'Fe', 'Fe', 'Fe']
self.assertEquals(regkeys, expregs)
self.assertEquals(dupkeys, expdups)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test_LIMS2001))
suite.layer = BIKA_FUNCTIONAL_TESTING
return suite
| labsanmartin/Bika-LIMS | bika/lims/tests/test_LIMS-2001-duplicate-one-analysis.py | Python | agpl-3.0 | 6,785 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/devtools/resultstore_v2/proto/file_set.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resultstoreapi.cloud.devtools.resultstore_v2.proto import file_pb2 as google_dot_cloud_dot_devtools_dot_resultstore__v2_dot_proto_dot_file__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/devtools/resultstore_v2/proto/file_set.proto',
package='google.devtools.resultstore.v2',
syntax='proto3',
serialized_options=b'\n\"com.google.devtools.resultstore.v2P\001ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstore',
serialized_pb=b'\n9google/cloud/devtools/resultstore_v2/proto/file_set.proto\x12\x1egoogle.devtools.resultstore.v2\x1a\x35google/cloud/devtools/resultstore_v2/proto/file.proto\"\xc9\x01\n\x07\x46ileSet\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x02id\x18\x02 \x01(\x0b\x32*.google.devtools.resultstore.v2.FileSet.Id\x12\x11\n\tfile_sets\x18\x03 \x03(\t\x12\x33\n\x05\x66iles\x18\x04 \x03(\x0b\x32$.google.devtools.resultstore.v2.File\x1a\x30\n\x02Id\x12\x15\n\rinvocation_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66ile_set_id\x18\x02 \x01(\tBq\n\"com.google.devtools.resultstore.v2P\x01ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstoreb\x06proto3'
,
dependencies=[google_dot_cloud_dot_devtools_dot_resultstore__v2_dot_proto_dot_file__pb2.DESCRIPTOR,])
_FILESET_ID = _descriptor.Descriptor(
name='Id',
full_name='google.devtools.resultstore.v2.FileSet.Id',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='invocation_id', full_name='google.devtools.resultstore.v2.FileSet.Id.invocation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='file_set_id', full_name='google.devtools.resultstore.v2.FileSet.Id.file_set_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=302,
serialized_end=350,
)
_FILESET = _descriptor.Descriptor(
name='FileSet',
full_name='google.devtools.resultstore.v2.FileSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.devtools.resultstore.v2.FileSet.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='google.devtools.resultstore.v2.FileSet.id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='file_sets', full_name='google.devtools.resultstore.v2.FileSet.file_sets', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='files', full_name='google.devtools.resultstore.v2.FileSet.files', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FILESET_ID, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=350,
)
_FILESET_ID.containing_type = _FILESET
_FILESET.fields_by_name['id'].message_type = _FILESET_ID
_FILESET.fields_by_name['files'].message_type = google_dot_cloud_dot_devtools_dot_resultstore__v2_dot_proto_dot_file__pb2._FILE
DESCRIPTOR.message_types_by_name['FileSet'] = _FILESET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FileSet = _reflection.GeneratedProtocolMessageType('FileSet', (_message.Message,), {
'Id' : _reflection.GeneratedProtocolMessageType('Id', (_message.Message,), {
'DESCRIPTOR' : _FILESET_ID,
'__module__' : 'resultstoreapi.cloud.devtools.resultstore_v2.proto.file_set_pb2'
,
'__doc__': """The resource ID components that identify the FileSet.
Attributes:
invocation_id:
The Invocation ID.
file_set_id:
The FileSet ID.
""",
# @@protoc_insertion_point(class_scope:google.devtools.resultstore.v2.FileSet.Id)
})
,
'DESCRIPTOR' : _FILESET,
'__module__' : 'resultstoreapi.cloud.devtools.resultstore_v2.proto.file_set_pb2'
,
'__doc__': """This resource represents a set of Files and other (nested)
FileSets. A FileSet is a node in the graph, and the file_sets field
represents the outgoing edges. A resource may reference various nodes in
the graph to represent the transitive closure of all files from those
nodes. The FileSets must form a directed acyclic graph. The Upload API
is unable to enforce that the graph is acyclic at write time, and if
cycles are written, it may cause issues at read time.
A FileSet may be referenced by other resources in conjunction with
Files. A File is preferred for something that can only be ever
referenced by one resource, and a FileSet is preferred if it can be
reference by multiple resources.
Attributes:
name:
The format of this FileSet resource name must be:
invocations/\ :math:`{INVOCATION_ID}/fileSets/`\
{url_encode(FILE_SET_ID)}
id:
The resource ID components that identify the file set. They
must match the resource name after proper encoding.
file_sets:
List of names of other file sets that are referenced from this
one. Each name must point to a file set under the same
invocation. The name format must be: invocations/\
:math:`{INVOCATION_ID}/fileSets/`\ {FILE_SET_ID}
files:
Files that are contained within this file set. The uid field
in the file should be unique for the Invocation.
""",
# @@protoc_insertion_point(class_scope:google.devtools.resultstore.v2.FileSet)
})
_sym_db.RegisterMessage(FileSet)
_sym_db.RegisterMessage(FileSet.Id)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| google/resultstoreui | resultstoreui/resultstoreapi/cloud/devtools/resultstore_v2/proto/file_set_pb2.py | Python | apache-2.0 | 7,589 |
"""
Course info page.
"""
from .course_page import CoursePage
class CourseInfoPage(CoursePage):
"""
Course info.
"""
URL_PATH = "info"
def is_browser_on_page(self):
return self.is_css_present('section.updates')
@property
def num_updates(self):
"""
Return the number of updates on the page.
"""
return self.css_count('section.updates ol li')
@property
def handout_links(self):
"""
Return a list of handout assets links.
"""
return self.css_map('section.handouts ol li a', lambda el: el['href'])
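# Illustrative sketch, not part of this module (test-side names are hypothetical):
#   info_page = CourseInfoPage(browser, course_id)
#   info_page.visit()
#   assert info_page.num_updates > 0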
| pku9104038/edx-platform | common/test/acceptance/pages/lms/course_info.py | Python | agpl-3.0 | 609 |
# -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import Settings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.versions import scrapy_components_versions
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
"""Keep only top level loggers's name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shortening long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
`loggers` list where it should act.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
install_scrapy_root_handler(settings)
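# Illustrative sketch, not part of this module: standalone scripts typically call
# configure_logging() before starting a CrawlerRunner (the settings values are examples).
#   from scrapy.utils.log import configure_logging
#   configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s', 'LOG_LEVEL': 'INFO'})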
def install_scrapy_root_handler(settings):
global _scrapy_root_handler
if (_scrapy_root_handler is not None
and _scrapy_root_handler in logging.root.handlers):
logging.root.removeHandler(_scrapy_root_handler)
logging.root.setLevel(logging.NOTSET)
_scrapy_root_handler = _get_handler(settings)
logging.root.addHandler(_scrapy_root_handler)
def get_scrapy_root_handler():
return _scrapy_root_handler
_scrapy_root_handler = None
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
if settings.getbool('LOG_SHORT_NAMES'):
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Versions: %(versions)s",
{'versions': ", ".join("%s %s" % (name, version)
for name, version in scrapy_components_versions()
if name != "Scrapy")})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
def flush(self):
for h in self.logger.handlers:
h.flush()
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
| Ryezhang/scrapy | scrapy/utils/log.py | Python | bsd-3-clause | 6,556 |
import os.path
from robot.errors import DataError
from robot.utils import secs_to_timestr, timestr_to_secs
from selenium import webdriver
from selenium.common.exceptions import NoSuchWindowException
from Selenium2Library import webdrivermonkeypatches
from Selenium2Library.utils import BrowserCache
from Selenium2Library.locators import WindowManager
from .keywordgroup import KeywordGroup
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
BROWSER_NAMES = {'ff': "_make_ff",
'firefox': "_make_ff",
'ie': "_make_ie",
'internetexplorer': "_make_ie",
'googlechrome': "_make_chrome",
'gc': "_make_chrome",
'chrome': "_make_chrome",
'opera' : "_make_opera",
'phantomjs' : "_make_phantomjs",
'htmlunit' : "_make_htmlunit",
'htmlunitwithjs' : "_make_htmlunitwithjs",
'android': "_make_android",
'iphone': "_make_iphone",
'safari': "_make_safari",
'edge': "_make_edge"
}
class _BrowserManagementKeywords(KeywordGroup):
def __init__(self):
self._cache = BrowserCache()
self._window_manager = WindowManager()
self._speed_in_secs = float(0)
self._timeout_in_secs = float(5)
self._implicit_wait_in_secs = float(0)
# Public, open and close
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self._debug('Closing all browsers')
self._cache.close_all()
def close_browser(self):
"""Closes the current browser."""
if self._cache.current:
self._debug('Closing browser with session id %s'
% self._cache.current.session_id)
self._cache.close()
def open_browser(self, url, browser='firefox', alias=None,remote_url=False,
desired_capabilities=None,ff_profile_dir=None):
"""Opens a new browser instance to given URL.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
Optional alias is an alias for the browser instance and it can be used
for switching between browsers (just as index can be used). See `Switch
Browser` for more details.
Possible values for `browser` are as follows:
| firefox | FireFox |
| ff | FireFox |
| internetexplorer | Internet Explorer |
| ie | Internet Explorer |
| googlechrome | Google Chrome |
| gc | Google Chrome |
| chrome | Google Chrome |
| opera | Opera |
| phantomjs | PhantomJS |
| htmlunit | HTMLUnit |
        | htmlunitwithjs    | HTMLUnit with Javascript support |
| android | Android |
| iphone | Iphone |
| safari | Safari |
| edge | Edge |
Note, that you will encounter strange behavior, if you open
multiple Internet Explorer browser instances. That is also why
`Switch Browser` only works with one IE browser at most.
For more information see:
http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine
Optional 'remote_url' is the url for a remote selenium server for example
http://127.0.0.1:4444/wd/hub. If you specify a value for remote you can
also specify 'desired_capabilities' which is a string in the form
key1:val1,key2:val2 that will be used to specify desired_capabilities
        to the remote server. This is useful for doing things like specifying a
        proxy server for Internet Explorer or specifying the browser and OS if you are
        using saucelabs.com. 'desired_capabilities' can also be a dictionary
        (created with 'Create Dictionary') to allow for more complex configurations.
        Optional 'ff_profile_dir' is the path to the firefox profile dir if you
        wish to override the default.
"""
if remote_url:
self._info("Opening browser '%s' to base url '%s' through remote server at '%s'"
% (browser, url, remote_url))
else:
self._info("Opening browser '%s' to base url '%s'" % (browser, url))
browser_name = browser
browser = self._make_browser(browser_name,desired_capabilities,ff_profile_dir,remote_url)
try:
browser.get(url)
except:
self._cache.register(browser, alias)
self._debug("Opened browser with session id %s but failed to open url '%s'"
% (browser.session_id, url))
raise
self._debug('Opened browser with session id %s'
% browser.session_id)
return self._cache.register(browser, alias)
def create_webdriver(self, driver_name, alias=None, kwargs={}, **init_kwargs):
"""Creates an instance of a WebDriver.
Like `Open Browser`, but allows passing arguments to a WebDriver's
__init__. _Open Browser_ is preferred over _Create Webdriver_ when
feasible.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
`driver_name` must be the exact name of a WebDriver in
_selenium.webdriver_ to use. WebDriver names include: Firefox, Chrome,
Ie, Opera, Safari, PhantomJS, and Remote.
Use keyword arguments to specify the arguments you want to pass to
the WebDriver's __init__. The values of the arguments are not
processed in any way before being passed on. For Robot Framework
< 2.8, which does not support keyword arguments, create a keyword
dictionary and pass it in as argument `kwargs`. See the
[http://selenium.googlecode.com/git/docs/api/py/api.html|Selenium API Documentation]
for information about argument names and appropriate argument values.
Examples:
| # use proxy for Firefox | | | |
| ${proxy}= | Evaluate | sys.modules['selenium.webdriver'].Proxy() | sys, selenium.webdriver |
| ${proxy.http_proxy}= | Set Variable | localhost:8888 | |
| Create Webdriver | Firefox | proxy=${proxy} | |
| # use a proxy for PhantomJS | | | |
| ${service args}= | Create List | --proxy=192.168.132.104:8888 | |
| Create Webdriver | PhantomJS | service_args=${service args} | |
Example for Robot Framework < 2.8:
| # debug IE driver | | | |
| ${kwargs}= | Create Dictionary | log_level=DEBUG | log_file=%{HOMEPATH}${/}ie.log |
| Create Webdriver | Ie | kwargs=${kwargs} | |
"""
if not isinstance(kwargs, dict):
raise RuntimeError("kwargs must be a dictionary.")
for arg_name in kwargs:
if arg_name in init_kwargs:
raise RuntimeError("Got multiple values for argument '%s'." % arg_name)
init_kwargs[arg_name] = kwargs[arg_name]
driver_name = driver_name.strip()
try:
creation_func = getattr(webdriver, driver_name)
except AttributeError:
raise RuntimeError("'%s' is not a valid WebDriver name" % driver_name)
self._info("Creating an instance of the %s WebDriver" % driver_name)
driver = creation_func(**init_kwargs)
self._debug("Created %s WebDriver instance with session id %s" % (driver_name, driver.session_id))
return self._cache.register(driver, alias)
def switch_browser(self, index_or_alias):
"""Switches between active browsers using index or alias.
Index is returned from `Open Browser` and alias can be given to it.
Example:
| Open Browser | http://google.com | ff |
| Location Should Be | http://google.com | |
| Open Browser | http://yahoo.com | ie | 2nd conn |
| Location Should Be | http://yahoo.com | |
| Switch Browser | 1 | # index |
| Page Should Contain | I'm feeling lucky | |
| Switch Browser | 2nd conn | # alias |
| Page Should Contain | More Yahoo! | |
| Close All Browsers | | |
Above example expects that there was no other open browsers when
opening the first one because it used index '1' when switching to it
later. If you aren't sure about that you can store the index into
a variable as below.
| ${id} = | Open Browser | http://google.com | *firefox |
| # Do something ... |
| Switch Browser | ${id} | | |
"""
try:
self._cache.switch(index_or_alias)
self._debug('Switched to browser with Selenium session id %s'
% self._cache.current.session_id)
except (RuntimeError, DataError): # RF 2.6 uses RE, earlier DE
raise RuntimeError("No browser with index or alias '%s' found."
% index_or_alias)
# Public, window management
def close_window(self):
"""Closes currently opened pop-up window."""
self._current_browser().close()
def get_window_identifiers(self):
"""Returns and logs id attributes of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_ids(self._current_browser()))
def get_window_names(self):
"""Returns and logs names of all windows known to the browser."""
values = self._window_manager.get_window_names(self._current_browser())
# for backward compatibility, since Selenium 1 would always
# return this constant value for the main window
if len(values) and values[0] == 'undefined':
values[0] = 'selenium_main_app_window'
return self._log_list(values)
def get_window_titles(self):
"""Returns and logs titles of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_titles(self._current_browser()))
def maximize_browser_window(self):
"""Maximizes current browser window."""
self._current_browser().maximize_window()
def get_window_size(self):
"""Returns current window size as `width` then `height`.
Example:
| ${width} | ${height}= | Get Window Size |
"""
size = self._current_browser().get_window_size()
return size['width'], size['height']
def set_window_size(self, width, height):
"""Sets the `width` and `height` of the current window to the specified values.
Example:
| Set Window Size | ${800} | ${600} |
| ${width} | ${height}= | Get Window Size |
| Should Be Equal | ${width} | ${800} |
| Should Be Equal | ${height} | ${600} |
"""
return self._current_browser().set_window_size(width, height)
def get_window_position(self):
"""Returns current window position as `x` then `y` (relative to the left and top of the screen).
Example:
| ${x} | ${y}= | Get Window Position |
"""
position = self._current_browser().get_window_position()
return position['x'], position['y']
def set_window_position(self, x, y):
"""Sets the position x and y of the current window (relative to the left and top of the screen) to the specified values.
Example:
| Set Window Position | ${8} | ${10} |
| ${x} | ${y}= | Get Window Position |
| Should Be Equal | ${x} | ${8} |
| Should Be Equal | ${y} | ${10} |
"""
return self._current_browser().set_window_position(x, y)
def select_frame(self, locator):
"""Sets frame identified by `locator` as current frame.
Key attributes for frames are `id` and `name.` See `introduction` for
details about locating elements.
"""
self._info("Selecting frame '%s'." % locator)
element = self._element_find(locator, True, True)
self._current_browser().switch_to_frame(element)
def select_window(self, locator=None):
"""Selects the window matching locator and return previous window handle.
locator: any of name, title, url, window handle, excluded handle's list, or special words.
return: either current window handle before selecting, or None if no current window.
If the window is found, all subsequent commands use that window, until
this keyword is used again. If the window is not found, this keyword fails.
By default, when a locator value is provided,
it is matched against the title of the window and the
javascript name of the window. If multiple windows with
same identifier are found, the first one is selected.
There are some special locators for searching target window:
string 'main' (default): select the main window;
string 'self': only return current window handle;
string 'new': select the last-indexed window assuming it is the newest opened window
window list: select the first window not in given list (See 'List Windows' to get the list)
It is also possible to specify the approach Selenium2Library should take
to find a window by specifying a locator strategy:
| *Strategy* | *Example* | *Description* |
| title | Select Window `|` title=My Document | Matches by window title |
| name | Select Window `|` name=${name} | Matches by window javascript name |
| url | Select Window `|` url=http://google.com | Matches by window's current URL |
Example:
| Click Link | popup_link | # opens new window |
| Select Window | popupName |
| Title Should Be | Popup Title |
| Select Window | | | # Chooses the main window again |
"""
try:
return self._current_browser().get_current_window_handle()
except NoSuchWindowException:
pass
finally:
self._window_manager.select(self._current_browser(), locator)
def list_windows(self):
"""Return all current window handles as a list"""
return self._current_browser().get_window_handles()
def unselect_frame(self):
"""Sets the top frame as the current frame."""
self._current_browser().switch_to_default_content()
# Public, browser/current page properties
def get_location(self):
"""Returns the current location."""
return self._current_browser().get_current_url()
def get_locations(self):
"""Returns and logs current locations of all windows known to the browser."""
return self._log_list(
[window_info[4] for window_info in
self._window_manager._get_window_infos(self._current_browser())]
)
def get_source(self):
"""Returns the entire html source of the current page or frame."""
return self._current_browser().get_page_source()
def get_title(self):
"""Returns title of current page."""
return self._current_browser().get_title()
def location_should_be(self, url):
"""Verifies that current URL is exactly `url`."""
actual = self.get_location()
if actual != url:
raise AssertionError("Location should have been '%s' but was '%s'"
% (url, actual))
self._info("Current location is '%s'." % url)
def location_should_contain(self, expected):
"""Verifies that current URL contains `expected`."""
actual = self.get_location()
if not expected in actual:
raise AssertionError("Location should have contained '%s' "
"but it was '%s'." % (expected, actual))
self._info("Current location contains '%s'." % expected)
def log_location(self):
"""Logs and returns the current location."""
url = self.get_location()
self._info(url)
return url
def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
WARN, INFO (default), DEBUG, and NONE (no logging).
"""
source = self.get_source()
self._log(source, loglevel.upper())
return source
def log_title(self):
"""Logs and returns the title of current page."""
title = self.get_title()
self._info(title)
return title
def title_should_be(self, title):
"""Verifies that current page title equals `title`."""
actual = self.get_title()
if actual != title:
raise AssertionError("Title should have been '%s' but was '%s'"
% (title, actual))
self._info("Page title is '%s'." % title)
# Public, navigation
def go_back(self):
"""Simulates the user clicking the "back" button on their browser."""
self._current_browser().back()
def go_to(self, url):
"""Navigates the active browser instance to the provided URL."""
self._info("Opening url '%s'" % url)
self._current_browser().get(url)
def reload_page(self):
"""Simulates user reloading page."""
self._current_browser().refresh()
# Public, execution properties
def get_selenium_speed(self):
"""Gets the delay in seconds that is waited after each Selenium command.
See `Set Selenium Speed` for an explanation."""
return secs_to_timestr(self._speed_in_secs)
def get_selenium_timeout(self):
"""Gets the timeout in seconds that is used by various keywords.
See `Set Selenium Timeout` for an explanation."""
return secs_to_timestr(self._timeout_in_secs)
def get_selenium_implicit_wait(self):
"""Gets the wait in seconds that is waited by Selenium.
See `Set Selenium Implicit Wait` for an explanation."""
return secs_to_timestr(self._implicit_wait_in_secs)
def set_selenium_speed(self, seconds):
"""Sets the delay in seconds that is waited after each Selenium command.
This is useful mainly in slowing down the test execution to be able to
view the execution. `seconds` may be given in Robot Framework time
format. Returns the previous speed value.
Example:
| Set Selenium Speed | .5 seconds |
"""
old_speed = self.get_selenium_speed()
self._speed_in_secs = timestr_to_secs(seconds)
for browser in self._cache.browsers:
browser.set_speed(self._speed_in_secs)
return old_speed
def set_selenium_timeout(self, seconds):
"""Sets the timeout in seconds used by various keywords.
There are several `Wait ...` keywords that take timeout as an
argument. All of these timeout arguments are optional. The timeout
used by all of them can be set globally using this keyword.
See `Timeouts` for more information about timeouts.
The previous timeout value is returned by this keyword and can
be used to set the old value back later. The default timeout
is 5 seconds, but it can be altered in `importing`.
Example:
| ${orig timeout} = | Set Selenium Timeout | 15 seconds |
| Open page that loads slowly |
| Set Selenium Timeout | ${orig timeout} |
"""
old_timeout = self.get_selenium_timeout()
self._timeout_in_secs = timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.set_script_timeout(self._timeout_in_secs)
return old_timeout
def set_selenium_implicit_wait(self, seconds):
"""Sets Selenium 2's default implicit wait in seconds and
sets the implicit wait for all open browsers.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| ${orig wait} = | Set Selenium Implicit Wait | 10 seconds |
| Perform AJAX call that is slow |
| Set Selenium Implicit Wait | ${orig wait} |
"""
old_wait = self.get_selenium_implicit_wait()
self._implicit_wait_in_secs = timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.implicitly_wait(self._implicit_wait_in_secs)
return old_wait
def set_browser_implicit_wait(self, seconds):
"""Sets current browser's implicit wait in seconds.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| Set Browser Implicit Wait | 10 seconds |
See also `Set Selenium Implicit Wait`.
"""
implicit_wait_in_secs = timestr_to_secs(seconds)
self._current_browser().implicitly_wait(implicit_wait_in_secs)
# Private
def _current_browser(self):
if not self._cache.current:
raise RuntimeError('No browser is open')
return self._cache.current
def _get_browser_creation_function(self, browser_name):
func_name = BROWSER_NAMES.get(browser_name.lower().replace(' ', ''))
return getattr(self, func_name) if func_name else None
def _make_browser(self, browser_name, desired_capabilities=None,
profile_dir=None, remote=None):
creation_func = self._get_browser_creation_function(browser_name)
if not creation_func:
raise ValueError(browser_name + " is not a supported browser.")
browser = creation_func(remote, desired_capabilities, profile_dir)
browser.set_speed(self._speed_in_secs)
browser.set_script_timeout(self._timeout_in_secs)
browser.implicitly_wait(self._implicit_wait_in_secs)
return browser
    def _make_ff(self, remote, desired_capabilities, profile_dir):
        if not profile_dir:
            profile_dir = FIREFOX_PROFILE_DIR
        profile = webdriver.FirefoxProfile(profile_dir)
        if remote:
            browser = self._create_remote_web_driver(webdriver.DesiredCapabilities.FIREFOX,
                remote, desired_capabilities, profile)
        else:
            browser = webdriver.Firefox(firefox_profile=profile)
        return browser
    def _make_ie(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Ie,
            webdriver.DesiredCapabilities.INTERNETEXPLORER, remote, desired_capabilities)
    def _make_chrome(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Chrome,
            webdriver.DesiredCapabilities.CHROME, remote, desired_capabilities)
    def _make_opera(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Opera,
            webdriver.DesiredCapabilities.OPERA, remote, desired_capabilities)
    def _make_phantomjs(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.PhantomJS,
            webdriver.DesiredCapabilities.PHANTOMJS, remote, desired_capabilities)
    def _make_htmlunit(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.HTMLUNIT, remote, desired_capabilities)
    def _make_htmlunitwithjs(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.HTMLUNITWITHJS, remote, desired_capabilities)
    def _make_android(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.ANDROID, remote, desired_capabilities)
    def _make_iphone(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Remote,
            webdriver.DesiredCapabilities.IPHONE, remote, desired_capabilities)
    def _make_safari(self, remote, desired_capabilities, profile_dir):
        return self._generic_make_browser(webdriver.Safari,
            webdriver.DesiredCapabilities.SAFARI, remote, desired_capabilities)
    def _make_edge(self, remote, desired_capabilities, profile_dir):
        if hasattr(webdriver, 'Edge'):
            return self._generic_make_browser(webdriver.Edge,
                webdriver.DesiredCapabilities.EDGE, remote, desired_capabilities)
        else:
            raise ValueError("Edge is not a supported browser with your version of Selenium python library. Please, upgrade to minimum required version 2.47.0.")
    def _generic_make_browser(self, webdriver_type, desired_cap_type, remote_url, desired_caps):
        '''Most of the _make_* browser functions just call this function, which
        creates the appropriate web driver.'''
if not remote_url:
browser = webdriver_type()
else:
            browser = self._create_remote_web_driver(desired_cap_type, remote_url, desired_caps)
return browser
    def _create_remote_web_driver(self, capabilities_type, remote_url, desired_capabilities=None, profile=None):
        '''Parses the string-based desired_capabilities if necessary and
        creates the associated remote web driver.'''
desired_capabilities_object = capabilities_type.copy()
if type(desired_capabilities) in (str, unicode):
desired_capabilities = self._parse_capabilities_string(desired_capabilities)
desired_capabilities_object.update(desired_capabilities or {})
return webdriver.Remote(desired_capabilities=desired_capabilities_object,
command_executor=str(remote_url), browser_profile=profile)
def _parse_capabilities_string(self, capabilities_string):
        '''Parses the string-based desired_capabilities, which should be in the form
        key1:val1,key2:val2
        '''
desired_capabilities = {}
if not capabilities_string:
return desired_capabilities
for cap in capabilities_string.split(","):
(key, value) = cap.split(":", 1)
desired_capabilities[key.strip()] = value.strip()
return desired_capabilities
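    # Illustrative example (not part of the original library): passing the string
    # "platform:LINUX,version:45.0" to _parse_capabilities_string above would
    # return {'platform': 'LINUX', 'version': '45.0'}.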
| Gaurang033/Selenium2Library | src/Selenium2Library/keywords/_browsermanagement.py | Python | apache-2.0 | 28,305 |
from bokeh.models import Panel, Tabs
from bokeh.io import output_file, show
from bokeh.plotting import figure
output_file("slider.html")
p1 = figure(plot_width=300, plot_height=300)
p1.circle([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], size=20, color="navy", alpha=0.5)
tab1 = Panel(child=p1, title="circle")
p2 = figure(plot_width=300, plot_height=300)
p2.line([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], line_width=3, color="navy", alpha=0.5)
tab2 = Panel(child=p2, title="line")
tabs = Tabs(tabs=[tab1, tab2])
show(tabs)
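# show() saves the document to the file passed to output_file() and opens it in a browser.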
| stonebig/bokeh | sphinx/source/docs/user_guide/examples/interaction_tab_panes.py | Python | bsd-3-clause | 511 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
import unittest
import warnings
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip import HAS_GEOIP
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.test import ignore_warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
if HAS_GEOIP:
from django.contrib.gis.geoip import GeoIP, GeoIPException
from django.contrib.gis.geoip.prototypes import GeoIP_lib_version
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
@skipUnless(
HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting."
)
@ignore_warnings(category=RemovedInDjango20Warning)
class GeoIPTest(unittest.TestCase):
addr = '162.242.220.127'
fqdn = 'www.djangoproject.com'
def _is_dns_available(self, domain):
# Naive check to see if there is DNS available to use.
# Used to conditionally skip fqdn geoip checks.
# See #25407 for details.
ErrClass = socket.error if six.PY2 else OSError
try:
socket.gethostbyname(domain)
return True
except ErrClass:
return False
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertTrue(g._country)
self.assertTrue(g._city)
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertIsNone(g4._country)
g5 = GeoIP(cntry, city='')
self.assertIsNone(g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
with self.assertRaises(GeoIPException):
GeoIP(cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
with self.assertRaises(e):
GeoIP(bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
with self.assertRaises(GeoIPException):
cntry_g.city('google.com')
with self.assertRaises(GeoIPException):
cntry_g.coords('yahoo.com')
# Non-string query should raise TypeError
with self.assertRaises(TypeError):
cntry_g.country_code(17)
with self.assertRaises(TypeError):
cntry_g.country_name(GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
queries = [self.addr]
if self._is_dns_available(self.fqdn):
queries.append(self.fqdn)
for query in queries:
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query), 'Failed for func %s and query %s' % (func, query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query), 'Failed for func %s and query %s' % (func, query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
queries = [self.addr]
if self._is_dns_available(self.fqdn):
queries.append(self.fqdn)
for query in queries:
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('San Antonio', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(210, d['area_code'])
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
lon, lat = (-98, 29)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 0)
self.assertAlmostEqual(lat, tup[1], 0)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
fqdn = "messe-duesseldorf.com"
if self._is_dns_available(fqdn):
d = g.city(fqdn)
self.assertEqual('Düsseldorf', d['city'])
d = g.country('200.26.205.1')
# Some databases have only unaccented countries
self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
GeoIP()
self.assertEqual(len(warns), 1)
msg = str(warns[0].message)
self.assertIn('django.contrib.gis.geoip is deprecated', msg)
def test_repr(self):
path = settings.GEOIP_PATH
g = GeoIP(path=path)
country_path = g._country_file
city_path = g._city_file
if GeoIP_lib_version:
expected = '<GeoIP [v%(version)s] _country_file="%(country)s", _city_file="%(city)s">' % {
'version': force_text(GeoIP_lib_version()),
'country': country_path,
'city': city_path,
}
else:
expected = '<GeoIP _country_file="%(country)s", _city_file="%(city)s">' % {
'country': country_path,
'city': city_path,
}
self.assertEqual(repr(g), expected)
| cloudera/hue | desktop/core/ext-py/Django-1.11.29/tests/gis_tests/test_geoip.py | Python | apache-2.0 | 6,866 |
# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
from pandas.core.algorithms import factorize, match, unique, value_counts
from pandas.core.common import isnull, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.core.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
RangeIndex, Float64Index, MultiIndex)
from pandas.core.series import Series, TimeSeries
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
from pandas.core.groupby import groupby
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape, wide_to_long)
from pandas.core.indexing import IndexSlice
from pandas.tseries.offsets import DateOffset
from pandas.tseries.tools import to_datetime
from pandas.tseries.index import (DatetimeIndex, Timestamp,
date_range, bdate_range)
from pandas.tseries.tdi import TimedeltaIndex, Timedelta
from pandas.tseries.period import Period, PeriodIndex
# legacy
import pandas.core.datetools as datetools
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
| pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/core/api.py | Python | gpl-2.0 | 1,378 |
#!/usr/bin/env python
import os
"""
def numCPUs():
if not hasattr(os, 'sysconf'):
raise RuntimeError('No sysconf detected.')
return os.sysconf('SC_NPROCESSORS_ONLN')
"""
bind = '0.0.0.0:5000'
workers = 4
# backlog = 2048
# worker_class = 'sync'
worker_class = 'gevent'
debug = True
daemon = True
pidfile = '/tmp/gunicorn.pid'
logfile = '/tmp/gunicorn.log'
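# Illustrative launch command (not part of the original config):
#   gunicorn -c config-gunicorn.py yourapp:app
# where "yourapp:app" is a placeholder for the real WSGI module and callable.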
| ianjuma/it-docker-container | config-gunicorn.py | Python | gpl-2.0 | 375 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import ftplib as ftp
import sys
import tarfile
import smtplib
from datetime import datetime, timedelta
from time import time
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Copy file or dir
def cp(path):
global nb_files
global disk_size
if os.path.isdir(path):
for object in os.listdir(path):
cp(os.path.join(path, object))
else:
try:
tar.add(path)
except Exception as e:
_print("Error adding {0} file to tar archive: {1}.".format(
path, e.strerror))
quit_ezbackup(ERROR_CODE)
nb_files = nb_files + 1
disk_size = disk_size + os.path.getsize(path)
return True
# Connect to the ftp server
def ftp_connect(host, port, login, passwd, dir):
try:
ftp.connect(host, port)
except Exception as e:
_print("Connection to {0} failed: {1}.".format(host, e.strerror))
quit_ezbackup(ERROR_CODE)
try:
ftp.login(login, passwd)
except Exception as e:
_print("Login error: {0}.".format(e.message))
quit_ezbackup(ERROR_CODE)
try:
ftp.cwd(FTP_SAVE_DIR)
except Exception as e:
_print("Error changing FTP directory to {0}: {1}.".format(
FTP_SAVE_DIR, e.message))
ftp_quit()
quit_ezbackup(ERROR_CODE)
_print("Logged as " + login + " on " + host + ":" + str(port))
_print("Changed dir to " + FTP_SAVE_DIR)
_print("\nTYPE\tPATH")
return True
# Convert bytes in Ki, Mi, Gi ...
def sizeof(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
# Send an email
def mail(sender, receivers, subject, msg):
try:
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receivers, 'Subject: %s\n\n%s' % (subject, msg))
except Exception as e:
_print("Error sending email to {0} : {1}.".format(
receivers, e.strerror))
return False
_print("Email sent to {0}.".format(", ".join(receivers)))
return True
# Convert seconds in hour, minutes ...
def get_time(_seconds):
sec = timedelta(seconds=_seconds)
exec_time = datetime(1, 1, 1) + sec
if _seconds < 60:
return "{0:.3f}s".format(_seconds)
elif _seconds >= 60 and _seconds < 3600:
return "{0}m {1}s".format(exec_time.minute, exec_time.second)
elif _seconds >= 3600:
return "{0}h {1}m {2}s".format(exec_time.hour, exec_time.minute, exec_time.second)
else:
return False
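# Examples (added for clarity): get_time(42.5) -> '42.500s', get_time(3725) -> '1h 2m 5s'.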
# Exit and send an email if necessary
def quit_ezbackup(exit_code=0):
rm_archive()
quit_ftp()
if exit_code == ERROR_CODE and (MAIL_STATE == MAIL_FAILS or MAIL_STATE == MAIL_ALWAYS):
mail(MAIL_SENDER, MAIL_RECEIVERS, "EZBackup Fails", outbuffer)
elif exit_code == SUCCESS_CODE and MAIL_STATE == MAIL_ALWAYS:
mail(MAIL_SENDER, MAIL_RECEIVERS, "EZBackup Stats", outbuffer)
else:
_print("Error trying to send mail. No configuration found.")
exit(exit_code)
# Remove archive
def rm_archive():
global LOCAL_SAVE_DIR
if "tarname" in globals() and os.path.isfile(tarname) and LOCAL_SAVE_ENABLED is False:
os.remove(tarname)
elif LOCAL_SAVE_ENABLED and LOCAL_SAVE_DIR != "." and LOCAL_SAVE_DIR != "./" and os.path.exists(LOCAL_SAVE_DIR):
if LOCAL_SAVE_DIR[-1:] != "/":
LOCAL_SAVE_DIR = LOCAL_SAVE_DIR + "/"
try:
os.rename(tarname, os.path.normpath(LOCAL_SAVE_DIR + tarname))
        except OSError as e:
            _print("Error moving {0}: {1}.".format(tarname, e.strerror))
# Quit FTP connection
def quit_ftp():
if "ftp" in globals():
try:
ftp.quit()
_print("\nConnection to {0} closed.".format(HOST))
except:
pass
# Print text to stdout and save value in result
def _print(text):
global outbuffer
# Print text to stdout
print (text)
# Save text in result
old_stdout = sys.stdout
result = StringIO()
sys.stdout = result
print (text)
sys.stdout = old_stdout
outbuffer = outbuffer + result.getvalue()
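# Change to the script's own directory so relative paths (ezbackup.conf and the
# save list) resolve correctly when the script is invoked from elsewhere.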
if sys.argv[0] != "ezbackup.py":
os.chdir(os.path.dirname(sys.argv[0]))
# Constants
MAIL_ALWAYS = 2
MAIL_FAILS = 1
MAIL_NEVER = 0
ERROR_CODE = 1
SUCCESS_CODE = 0
# Init the text buffer
outbuffer = ""
# Start timer for the execution time
start_time = time()
# Open and parse the config file for ezbackup
CONF_FILE = 'ezbackup.conf'
config = ConfigParser.RawConfigParser()
config.read(CONF_FILE)
# Load constants from the ezbackup configuration file
try:
FTP_ENABLED = config.getboolean('FTP', 'ftp_enabled')
HOST = config.get('FTP', 'host')
PORT = config.getint('FTP', 'port')
LOGIN = config.get('FTP', 'login')
PASSWD = config.get('FTP', 'passwd')
FTP_SAVE_DIR = config.get('FTP', 'save_dir')
LOCAL_SAVE_ENABLED = config.getboolean('Options', 'local_save_enabled')
LOCAL_SAVE_DIR = config.get('Options', 'local_save_dir')
COMPRESS = config.get('Options', 'compress')
BACKUP_NAME = config.get('Options', 'backup_name')
SAVE_LIST = config.get('Options', 'save_list')
MAIL_STATE = config.getint('Options', 'mail')
MAIL_SENDER = config.get('Options', 'mail_sender')
MAIL_RECEIVERS = config.get('Options', 'mail_receivers')
MAIL_RECEIVERS = MAIL_RECEIVERS.replace(' ', '')
MAIL_RECEIVERS = MAIL_RECEIVERS.split(',')
except Exception as e:
_print("Error parsing {0}: {1}.".format(CONF_FILE, e.message))
quit_ezbackup(ERROR_CODE)
# Maximum recursion (for cp() function)
sys.setrecursionlimit(10000)
# Create the archive
t = datetime.now()
tarname = t.strftime(BACKUP_NAME) + '.tar.' + COMPRESS
try:
tar = tarfile.open(tarname, 'w:' + COMPRESS)
except Exception as e:
_print("Error creating {0}: {1}.".format(tarname, e.message))
quit_ezbackup(ERROR_CODE)
# Open the save list file
try:
savelist = open(SAVE_LIST, 'r')
except IOError as e:
_print("Error opening {0}: {1}.".format(SAVE_LIST, e.strerror))
quit_ezbackup(ERROR_CODE)
if FTP_ENABLED:
ftp = ftp.FTP()
nb_files = 0
disk_size = 0
tar_size = 0
something_saved = False
# Read each lines of the save list and copy dirs or files
for path in savelist.readlines():
# If line different from a comment or an empty line
if path != "\n" and path[0] != ';' and path[0] != '#':
if something_saved is False:
if FTP_ENABLED:
ftp_connect(HOST, PORT, LOGIN, PASSWD, FTP_SAVE_DIR)
something_saved = True
path = path.replace('\n', '')
if os.path.isdir(path):
_print("[DIR]\t" + path)
elif os.path.isfile(path):
_print("[FILE]\t" + path)
cp(path)
savelist.close()
tar.close()
# If there is something to save
if something_saved:
with open(tarname, 'rb') as savedtar:
if FTP_ENABLED:
try:
ftp.storbinary('STOR ' + tarname, savedtar)
except Exception as e:
_print("Error copying {0} to FTP server: {1}.".format(
tarname, e.message))
quit_ezbackup(ERROR_CODE)
exec_time = time() - start_time
tar_size = os.path.getsize(tarname)
_print("\nArchive name : {0}".format(tarname))
_print("Files copied : {0}".format(nb_files))
_print("Disk size : {0}".format(sizeof(disk_size)))
_print("Archive size : {0}".format(sizeof(tar_size)))
_print("Exec time : {0}".format(get_time(exec_time)))
else:
_print("Nothing to save.")
quit_ezbackup(SUCCESS_CODE)
| tux-00/ezbackup | ezbackup.py | Python | gpl-3.0 | 7,934 |
"""
Goal Sentry API
Models
"""
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from datetime import datetime as dt
from sqlalchemy.orm import relationship
from database import Base
class User(Base):
__tablename__ = 'users'
# Basic metadata
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True)
name = Column(String(120))
email = Column(String(120))
rank = Column(Integer)
# Create a one-to-many relationship with Score
scores = relationship("Score")
def __init__(self, username=None, name=None, email=None):
self.username = username
self.name = name
self.email = email.lower()
self.rank = 0
def __repr__(self):
return '<User %r>' % self.username
class Table(Base):
__tablename__ = 'tables'
# Basic metadata
id = Column(Integer, primary_key=True)
name = Column(String(120))
# Create a one-to-many relationship with Game
games = relationship("Game")
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<Table %r>' % self.id
class Game(Base):
__tablename__ = 'games'
# Basic metadata
id = Column(Integer, primary_key=True)
time_started = Column(DateTime)
time_completed = Column(DateTime)
# Create a one-to-many relationship with Score
scores = relationship("Score")
# Create a many-to-one relationship with Table
table_id = Column(Integer, ForeignKey('tables.id'))
def __init__(self, time_started=None, table_id=None):
if time_started:
            # Parse the supplied timestamp string into a datetime
self.time_started = dt.strptime(time_started, "%Y-%m-%d %H:%M:%S.%f")
else:
# Store the current time
self.time_started = dt.now()
self.time_completed = None
if table_id:
self.table_id = table_id
def __repr__(self):
return '<Game %r>' % self.id
class Score(Base):
__tablename__ = 'scores'
# Basic metadata
id = Column(Integer, primary_key=True)
score = Column(Integer)
# Create a one-to-many relationship with User
user_id = Column(Integer, ForeignKey('users.id'))
# Create a one-to-many relationship with Game
game_id = Column(Integer, ForeignKey('games.id'))
def __init__(self, score=0, user_id=None, game_id=None):
self.score = score
if user_id:
self.user_id = user_id
if game_id:
self.game_id = game_id
def __repr__(self):
return '<Score %r>' % self.id
| stevenmirabito/GoalSentry | api/goalsentry/models.py | Python | mit | 2,593 |
import gtk, sys, pango
def label(text=None, textmn=None, markup=None, x=0, y=0.5, \
wrap=False, select=False, w=-1, h=-1):
# Defaults to left-aligned, vertically centered
tmplabel = gtk.Label()
if text:
tmplabel.set_text(text)
elif markup:
tmplabel.set_markup(markup)
elif textmn:
tmplabel.set_text_with_mnemonic(textmn)
tmplabel.set_alignment(x, y)
tmplabel.set_size_request(w, h)
tmplabel.set_line_wrap(wrap)
try: # Only recent versions of pygtk/gtk have this
tmplabel.set_line_wrap_mode(pango.WRAP_WORD_CHAR)
except:
pass
tmplabel.set_selectable(select)
return tmplabel
def expander(text=None, markup=None, expand=False, can_focus=True):
tmpexp = gtk.Expander()
if text:
tmpexp.set_label(text)
elif markup:
tmpexp.set_label(markup)
tmpexp.set_use_markup(True)
tmpexp.set_expanded(expand)
tmpexp.set_property('can-focus', can_focus)
return tmpexp
def eventbox(visible=False, add=None, w=-1, h=-1, state=None):
tmpevbox = gtk.EventBox()
tmpevbox.set_visible_window(visible)
tmpevbox.set_size_request(w, h)
if state:
tmpevbox.set_state(state)
if add:
tmpevbox.add(add)
return tmpevbox
def button(text=None, stock=None, relief=None, can_focus=True, \
hidetxt=False, img=None, w=-1, h=-1):
tmpbut = gtk.Button()
if text:
tmpbut.set_label(text)
elif stock:
tmpbut.set_label(stock)
tmpbut.set_use_stock(True)
tmpbut.set_use_underline(True)
if img:
tmpbut.set_image(img)
if relief:
tmpbut.set_relief(relief)
tmpbut.set_property('can-focus', can_focus)
if hidetxt:
tmpbut.get_child().get_child().get_children()[1].set_text('')
tmpbut.set_size_request(w, h)
return tmpbut
def combo(items=None, active=None, changed_cb=None, wrap=1):
tmpcb = gtk.combo_box_new_text()
tmpcb = _combo_common(tmpcb, items, active, changed_cb, wrap)
return tmpcb
def comboentry(items=None, active=None, changed_cb=None, wrap=1):
tmpcbe = gtk.combo_box_entry_new_text()
tmpcbe = _combo_common(tmpcbe, items, active, changed_cb, wrap)
return tmpcbe
def _combo_common(combobox, items, active, changed_cb, wrap):
if items:
for item in items:
combobox.append_text(item)
if active is not None:
combobox.set_active(active)
if changed_cb:
combobox.connect('changed', changed_cb)
combobox.set_wrap_width(wrap)
return combobox
def togglebutton(text=None, underline=False, relief=gtk.RELIEF_NORMAL, \
can_focus=True):
tmptbut = gtk.ToggleButton()
if text:
tmptbut.set_label(text)
tmptbut.set_use_underline(underline)
tmptbut.set_relief(relief)
tmptbut.set_property('can-focus', can_focus)
return tmptbut
def image(stock=None, stocksize=gtk.ICON_SIZE_MENU, w=-1, h=-1, \
x=0.5, y=0.5, pb=None):
if stock:
tmpimg = gtk.image_new_from_stock(stock, stocksize)
elif pb:
tmpimg = gtk.image_new_from_pixbuf(pb)
else:
tmpimg = gtk.Image()
tmpimg.set_size_request(w, h)
tmpimg.set_alignment(x, y)
return tmpimg
def progressbar(orient=None, frac=None, step=None, ellipsize=None):
tmpprog = gtk.ProgressBar()
if orient:
tmpprog.set_orientation(orient)
if frac:
tmpprog.set_fraction(frac)
if step:
tmpprog.set_pulse_step(step)
if ellipsize:
tmpprog.set_ellipsize(ellipsize)
return tmpprog
def scrollwindow(policy_x=gtk.POLICY_AUTOMATIC, policy_y=gtk.POLICY_AUTOMATIC, \
shadow=gtk.SHADOW_IN, w=-1, h=-1, add=None, addvp=None):
tmpsw = gtk.ScrolledWindow()
tmpsw.set_policy(policy_x, policy_y)
tmpsw.set_shadow_type(shadow)
tmpsw.set_size_request(w, h)
if add:
tmpsw.add(add)
elif addvp:
tmpsw.add_with_viewport(addvp)
return tmpsw
def dialog(title=None, parent=None, flags=0, buttons=None, default=None, \
separator=True, resizable=True, w=-1, h=-1, role=None):
tmpdialog = gtk.Dialog(title, parent, flags, buttons)
if default is not None:
tmpdialog.set_default_response(default)
tmpdialog.set_has_separator(separator)
tmpdialog.set_resizable(resizable)
tmpdialog.set_size_request(w, h)
if role:
tmpdialog.set_role(role)
return tmpdialog
def entry(text=None, password=False, w=-1, h=-1, changed_cb=None):
tmpentry = UnicodeEntry()
if text:
tmpentry.set_text(text)
if password:
tmpentry.set_visibility(False)
tmpentry.set_size_request(w, h)
if changed_cb:
tmpentry.connect('changed', changed_cb)
return tmpentry
class UnicodeEntry(gtk.Entry):
def get_text(self):
try:
return gtk.Entry.get_text(self).decode('utf-8')
except:
print sys.exc_info()[1]
return gtk.Entry.get_text(self).decode('utf-8', 'replace')
def treeview(hint=True, reorder=False, search=True, headers=False):
tmptv = gtk.TreeView()
tmptv.set_rules_hint(hint)
tmptv.set_reorderable(reorder)
tmptv.set_enable_search(search)
tmptv.set_headers_visible(headers)
return tmptv
def iconview(col=None, space=None, margin=None, itemw=None, selmode=None):
tmpiv = gtk.IconView()
if col:
tmpiv.set_columns(col)
if space:
tmpiv.set_spacing(space)
if margin:
tmpiv.set_margin(margin)
if itemw:
tmpiv.set_item_width(itemw)
if selmode:
tmpiv.set_selection_mode(selmode)
return tmpiv
def show_msg(owner, message, title, role, buttons, default=None, response_cb=None):
is_button_list = hasattr(buttons, '__getitem__')
if not is_button_list:
messagedialog = gtk.MessageDialog(owner, gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING, buttons, message)
else:
messagedialog = gtk.MessageDialog(owner, gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING, message_format=message)
i = 0
while i < len(buttons):
messagedialog.add_button(buttons[i], buttons[i+1])
i += 2
messagedialog.set_title(title)
messagedialog.set_role(role)
if default is not None:
messagedialog.set_default_response(default)
if response_cb:
messagedialog.connect("response", response_cb)
response = messagedialog.run()
value = response
messagedialog.destroy()
return value
def dialog_destroy(dialog_widget, _response_id):
dialog_widget.destroy()
def show(widget):
widget.set_no_show_all(False)
widget.show_all()
def hide(widget):
widget.hide_all()
widget.set_no_show_all(True)
def focus(widget):
widget.grab_focus()
def set_widths_equal(widgets):
# Assigns the same width to all passed widgets in the list, where
# the width is the maximum width across widgets.
max_width = 0
for widget in widgets:
if widget.size_request()[0] > max_width:
max_width = widget.size_request()[0]
for widget in widgets:
widget.set_size_request(max_width, -1)
def icon(factory, icon_name, path):
    # Register the icon at the given file path under icon_name in the factory:
sonataset = gtk.IconSet()
filename = [path]
icons = [gtk.IconSource() for i in filename]
for i, iconsource in enumerate(icons):
iconsource.set_filename(filename[i])
sonataset.add_source(iconsource)
factory.add(icon_name, sonataset)
factory.add_default()
def change_cursor(cursortype):
for i in gtk.gdk.window_get_toplevels():
i.set_cursor(cursortype)
class CellRendererTextWrap(gtk.CellRendererText):
"""A CellRendererText which sets its wrap-width to its width."""
__gtype_name__ = 'CellRendererTextWrap'
def __init__(self):
self.column = None
gtk.CellRendererText.__init__(self)
def set_column(self, column):
"""Set the containing gtk.TreeViewColumn to queue resizes."""
self.column = column
def do_render(self, window, widget, background_area, cell_area,
expose_area, flags):
if (self.props.wrap_width == -1 or
cell_area.width < self.props.wrap_width):
self.props.wrap_width = cell_area.width
self.column.queue_resize()
gtk.CellRendererText.do_render(
self, window, widget, background_area, cell_area,
expose_area, flags)
| KL-7/sonata | sonata/ui.py | Python | gpl-3.0 | 8,517 |
#!/usr/bin/env python
"""Search for sequences of transforms that solve a task"""
import time
import copy
import abc
from collections import namedtuple
import csv
import argparse
import statistics
import Levenshtein
import ftfy
from learning_text_transformer import transforms
from learning_text_transformer import config
# DUMMY DECORATOR FOR PROFILING
def profile(target):
def wrapper(*args, **kwargs):
return target(*args, **kwargs)
return wrapper
def load_examples(input_file):
"""Load data that we'll learn from"""
with open(input_file) as f:
reader = csv.reader(f)
header = next(reader)
assert header == ["From", "To"]
examples_to_learn_from = [l for l in reader]
return examples_to_learn_from
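# Illustrative input (not part of the original file): the CSV needs a "From,To"
# header row, e.g.
#   From,To
#   "1,000 EUR","1000"
#   "2,500 EUR","2500"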
ScoredTransformation = namedtuple('ScoredTransformation', ['transformations', 'average_distance'])
class TransformSearcherBase(abc.ABC):
#def get_best_transform_sequence(self, distances_and_sequences):
#distances_and_sequences.sort(key=lambda x: x.average_distance)
#chosen_transformations = distances_and_sequences[0].transformations
#best_cost = distances_and_sequences[0].average_distance
#return chosen_transformations, best_cost
def apply_transforms(self, ts, s):
"""Apply list of Transform objects to string s, return transformed string"""
change_made = False
for transform_nbr, t in enumerate(ts):
s1 = t.apply(s)
change_made = False
if s1 != s:
change_made = True
s = s1
# signal if a change was made on the last transform
return s, change_made
def fix_unicode(self, examples_to_learn_from):
fixed_examples_to_learn_from = []
for frm, to in examples_to_learn_from:
frm = ftfy.fix_text(frm) # fix any bad unicode
fixed_examples_to_learn_from.append((frm, to))
return fixed_examples_to_learn_from
class TransformSearcherClever(TransformSearcherBase):
def __init__(self, conf=None, verbose=False, timeout=2):
self.nbr_evals = 0
self.best_distance = None
self.best_cur_seq = None
self.conf = conf
self.verbose = verbose
self.timeout = timeout # seconds for max search time
def calculate_distance(self, s1, s2):
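        # Plain edit distance, e.g. Levenshtein.distance("kitten", "sitting") == 3.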
return Levenshtein.distance(s1, s2)
@profile
def evaluate_transforms(self, cur_seq, examples_to_learn_from, force_evaluation=False):
self.nbr_evals += 1
if self.verbose:
if self.nbr_evals % 10000 == 0:
print("...nbr_evals", self.nbr_evals, cur_seq)
distances_per_example = []
transform_made_a_change = False
average_distance_for_this_sequence = None
for example_nbr, (s1, s2) in enumerate(examples_to_learn_from):
s1, change_made = self.apply_transforms(cur_seq, s1)
if change_made:
transform_made_a_change = True
#distance = 1.0 - Levenshtein.ratio(s1, s2)
#distance = Levenshtein.distance(s1, s2)
distance = self.calculate_distance(s1, s2)
distances_per_example.append(distance)
if transform_made_a_change or force_evaluation:
average_distance_for_this_sequence = statistics.mean(distances_per_example)
return average_distance_for_this_sequence, transform_made_a_change
@profile
def search_transforms(self, ts, cur_seq, examples_to_learn_from):
# ts - current set of operators we need to search
# cur_seq - sequence of operators we're investigating
assert self.best_distance is not None
assert self.best_cur_seq is not None
keep_going = True
if time.time() > self.finish_search_by:
# if we've exceeded our allowed search time we must exit
keep_going = False
if self.verbose:
print("TIMED OUT!", examples_to_learn_from)
# before we try new moves, get a score for the moves we had before
average_distance_cur_seq, _ = self.evaluate_transforms(cur_seq, examples_to_learn_from, force_evaluation=True)
for idx in range(len(ts)):
t = ts.pop(idx)
cur_seq.append(t)
average_distance_for_this_sequence, transform_made_a_change = self.evaluate_transforms(cur_seq, examples_to_learn_from)
new_move_improves_the_score = False
if transform_made_a_change:
new_move_improves_the_score = average_distance_for_this_sequence < average_distance_cur_seq
if average_distance_for_this_sequence < self.best_distance:
self.best_distance = average_distance_for_this_sequence
self.best_cur_seq = copy.copy(cur_seq)
if self.verbose:
print("New best", self.best_distance, self.best_cur_seq, self.nbr_evals, average_distance_cur_seq)
# if we've found a perfect solution then stop trying
if average_distance_for_this_sequence == 0:
keep_going = False
# recursively explore this tree if the latest Transform made a
# change to at least 1 example
if keep_going and transform_made_a_change and new_move_improves_the_score:
keep_going = self.search_transforms(ts, cur_seq, examples_to_learn_from)
cur_seq.pop()
ts.insert(idx, t)
return keep_going
@profile
def search_permutations(self, examples_to_learn_from, verbose):
self.nbr_evals = 0
# set a maximum timeout
self.finish_search_by = time.time() + self.timeout
input_strings, output_strings = [], []
for frm, to in examples_to_learn_from:
input_strings.append(frm)
output_strings.append(to)
ts = transforms.get_transforms(input_strings, output_strings)
if verbose:
print("SEARCHING USING:")
for t in ts:
print(t)
cur_seq = []
self.best_distance, _ = self.evaluate_transforms(cur_seq, examples_to_learn_from, force_evaluation=True)
self.best_cur_seq = []
self.search_transforms(ts, cur_seq, examples_to_learn_from)
return cur_seq, ts
@profile
def search_and_find_best_sequence(self, examples_to_learn_from, verbose=False):
examples_to_learn_from = self.fix_unicode(examples_to_learn_from)
input_strings, output_strings = [], []
for frm, to in examples_to_learn_from:
input_strings.append(frm)
output_strings.append(to)
t1 = time.time()
permutations_tested, transforms_tested = self.search_permutations(examples_to_learn_from, verbose)
chosen_transformations = self.best_cur_seq
best_cost = self.best_distance
if verbose:
print("Took {0:.2f}s to find best sequence".format(time.time() - t1))
return chosen_transformations, best_cost
def get_transform_searcher(conf=None, verbose=False, timeout=30):
return TransformSearcherClever(conf, verbose=verbose, timeout=timeout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Project description')
parser.add_argument('input_file', type=str, help='CSV file of mappings to learn')
parser.add_argument('--verbose', default=False, action="store_true")
args = parser.parse_args()
conf = config.get('dev')
verbose = args.verbose
examples_to_learn_from = load_examples(args.input_file)
print("Loaded {} items from {}".format(len(examples_to_learn_from), args.input_file))
transform_searcher = get_transform_searcher(conf, verbose)
chosen_transformations, best_cost = transform_searcher.search_and_find_best_sequence(examples_to_learn_from, verbose)
print("====")
print("Final sequence of transforms (cost={}):".format(best_cost))
for chosen_transformation in chosen_transformations:
print(chosen_transformation)
print("\nTransformed versions of the input sequences:")
for frm, to in examples_to_learn_from[1:]:
transformed_frm, _ = transform_searcher.apply_transforms(chosen_transformations, frm)
print("'{}'->'{}' compared to '{}' has distance '{}'".format(frm, transformed_frm, to, transform_searcher.calculate_distance(transformed_frm, to)))
#print(transform_searcher.evaluate_transforms(chosen_transformations, examples_to_learn_from, force_evaluation=True))
| ianozsvald/learning_text_transformer | learning_text_transformer/learner3.py | Python | mit | 8,533 |
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core import signals
from haystack.constants import DEFAULT_ALIAS
from haystack.utils import loading
__author__ = 'Daniel Lindsley'
__version__ = (2, 0, 0, 'beta')
# Setup default logging.
log = logging.getLogger('haystack')
stream = logging.StreamHandler()
stream.setLevel(logging.INFO)
log.addHandler(stream)
# Help people clean up from 1.X.
if hasattr(settings, 'HAYSTACK_SITECONF'):
raise ImproperlyConfigured('The HAYSTACK_SITECONF setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_SEARCH_ENGINE'):
raise ImproperlyConfigured('The HAYSTACK_SEARCH_ENGINE setting has been replaced with HAYSTACK_CONNECTIONS.')
if hasattr(settings, 'HAYSTACK_ENABLE_REGISTRATIONS'):
raise ImproperlyConfigured('The HAYSTACK_ENABLE_REGISTRATIONS setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_INCLUDE_SPELLING'):
raise ImproperlyConfigured('The HAYSTACK_INCLUDE_SPELLING setting is now a per-backend setting & belongs in HAYSTACK_CONNECTIONS.')
# Check the 2.X+ bits.
if not hasattr(settings, 'HAYSTACK_CONNECTIONS'):
raise ImproperlyConfigured('The HAYSTACK_CONNECTIONS setting is required.')
if DEFAULT_ALIAS not in settings.HAYSTACK_CONNECTIONS:
raise ImproperlyConfigured("The default alias '%s' must be included in the HAYSTACK_CONNECTIONS setting." % DEFAULT_ALIAS)
# Load the connections.
connections = loading.ConnectionHandler(settings.HAYSTACK_CONNECTIONS)
# Load the router(s).
connection_router = loading.ConnectionRouter()
if hasattr(settings, 'HAYSTACK_ROUTERS'):
if not isinstance(settings.HAYSTACK_ROUTERS, (list, tuple)):
raise ImproperlyConfigured("The HAYSTACK_ROUTERS setting must be either a list or tuple.")
connection_router = loading.ConnectionRouter(settings.HAYSTACK_ROUTERS)
# Per-request, reset the ghetto query log.
# Probably not extraordinarily thread-safe but should only matter when
# DEBUG = True.
def reset_search_queries(**kwargs):
for conn in connections.all():
conn.reset_queries()
if settings.DEBUG:
signals.request_started.connect(reset_search_queries)
| boblefrag/lolyx | haystack/__init__.py | Python | gpl-3.0 | 2,222 |
# -*- coding: utf-8 -*-
__about__ = """
In addition to what is provided by the "zero" project, this project
provides thorough integration with django-user-accounts, adding
comprehensive account management functionality. It is a foundation
suitable for most sites that have user accounts.
"""
default_app_config = "ich_bau.apps.AppConfig"
| postpdm/ich_bau | ich_bau/__init__.py | Python | apache-2.0 | 340 |
# -*- test-case-name: twisted.test.test_randbytes -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Cryptographically secure random implementation, with fallback on normal random.
"""
# System imports
import warnings, os, random
getrandbits = getattr(random, 'getrandbits', None)
class SecureRandomNotAvailable(RuntimeError):
"""
Exception raised when no secure random algorithm is found.
"""
class SourceNotAvailable(RuntimeError):
"""
Internal exception used when a specific random source is not available.
"""
class RandomFactory(object):
"""
Factory providing L{secureRandom} and L{insecureRandom} methods.
You shouldn't have to instantiate this class, use the module level
functions instead: it is an implementation detail and could be removed or
changed arbitrarily.
@cvar randomSources: list of file sources used when os.urandom is not
available.
@type randomSources: C{tuple}
"""
randomSources = ('/dev/urandom',)
getrandbits = getrandbits
def _osUrandom(self, nbytes):
"""
        Wrapper around C{os.urandom} that cleanly manages its absence.
"""
try:
return os.urandom(nbytes)
except (AttributeError, NotImplementedError), e:
raise SourceNotAvailable(e)
def _fileUrandom(self, nbytes):
"""
Wrapper around random file sources.
        This method isn't meant to be called from outside the class and could be
removed arbitrarily.
"""
for src in self.randomSources:
try:
f = file(src, 'rb')
except (IOError, OSError):
pass
else:
bytes = f.read(nbytes)
f.close()
return bytes
raise SourceNotAvailable("File sources not available: %s" %
(self.randomSources,))
def secureRandom(self, nbytes, fallback=False):
"""
Return a number of secure random bytes.
@param nbytes: number of bytes to generate.
@type nbytes: C{int}
@param fallback: Whether the function should fallback on non-secure
random or not. Default to C{False}.
@type fallback: C{bool}
@return: a string of random bytes.
@rtype: C{str}
"""
for src in ("_osUrandom", "_fileUrandom"):
try:
return getattr(self, src)(nbytes)
except SourceNotAvailable:
pass
if fallback:
warnings.warn(
"urandom unavailable - "
"proceeding with non-cryptographically secure random source",
category=RuntimeWarning,
stacklevel=2)
return self.insecureRandom(nbytes)
else:
raise SecureRandomNotAvailable("No secure random source available")
def _randBits(self, nbytes):
"""
        Wrapper around C{random.getrandbits}.
"""
if self.getrandbits is not None:
n = self.getrandbits(nbytes * 8)
hexBytes = ("%%0%dx" % (nbytes * 2)) % n
return hexBytes.decode('hex')
raise SourceNotAvailable("random.getrandbits is not available")
def _randRange(self, nbytes):
"""
Wrapper around C{random.randrange}.
"""
bytes = ""
for i in xrange(nbytes):
bytes += chr(random.randrange(0, 255))
return bytes
def insecureRandom(self, nbytes):
"""
Return a number of non secure random bytes.
@param nbytes: number of bytes to generate.
@type nbytes: C{int}
@return: a string of random bytes.
@rtype: C{str}
"""
for src in ("_randBits", "_randRange"):
try:
return getattr(self, src)(nbytes)
except SourceNotAvailable:
pass
factory = RandomFactory()
secureRandom = factory.secureRandom
insecureRandom = factory.insecureRandom
del factory
__all__ = ["secureRandom", "insecureRandom", "SecureRandomNotAvailable"]
| waseem18/oh-mainline | vendor/packages/twisted/twisted/python/randbytes.py | Python | agpl-3.0 | 4,144 |
#!/usr/bin/python
import sys
if len(sys.argv) != 3 or sys.argv[2] == sys.argv[1]:
print "usage: dna_to_fastq.py <input_dna_filename> <output_fastq_filename>"
exit(1)
outfile = open(sys.argv[2], 'w')
rec_count = 0
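# Each input line is one DNA sequence; emit a four-line FASTQ record per sequence
# (@id, bases, '+', quality), with a constant quality character 'H' for every base.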
for dna in open(sys.argv[1]):
qua = "H" * (len(dna) - 1)
outfile.write("@TAG.%d\n%s+\n%s\n" % (rec_count, dna, qua))
rec_count += 1
print "Done!"
print "Written %d FASTQ records" % rec_count
| lrog/orcom | scripts/dna_to_fastq.py | Python | gpl-2.0 | 414 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-10 06:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Aim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('background_img', models.ImageField(upload_to=b'')),
('caption_heder', models.CharField(max_length=200)),
('caption_letter', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='AndroidApplicationProgramming',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slider_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='ArtificialIntellignece',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('background_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='DesktopApplicationProgramming',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slider_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='InformationSecurity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slider_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='Networking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('background_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='NetworkProgramming',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slider_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='Slider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slider_img', models.ImageField(upload_to=b'')),
('slider_caption', models.CharField(max_length=200)),
('slider_header', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='WebDeveloping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slider_img', models.ImageField(upload_to=b'')),
('caption', models.CharField(max_length=200)),
('letter', models.CharField(max_length=400)),
],
),
]
| TRIOrganization/TRI | TRI/main/migrations/0001_initial.py | Python | mit | 4,113 |
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from django.template import loader, Context
from django.http import HttpResponse, HttpResponseRedirect
from travels.models import *
from favourite.models import *
from accounts.models import *
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http import Http404
import time
from django.core.files.base import ContentFile
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.core.mail import send_mail
from django.utils import timezone
import datetime
from django.shortcuts import render_to_response,get_object_or_404
# Create your views here.
def add_favorite(request, travels_id):
if request.user.is_authenticated():
user = request.user
travels = Travels.objects.get(id=travels_id)
        if Favourite.objects.filter(user__username=user, travels=travels):
            # Message: "You have already favourited this article!"
            return HttpResponse("<div style='text-align: center'>该文章您已经收藏过了!</div>")
        else:
            favourlist = Favourite(user=user, travels=travels)
            favourlist.save()
            # Message: "Favourited successfully!"
            return HttpResponse("<div style='text-align: center'>收藏成功!</div>")
    else:
        # Message: "Please log in before favouriting!"
        return HttpResponse("<div style='text-align: center'>请您登陆后再收藏!</div>")
def favourite_list(request,user):
users = MyProfile.objects.all()
try:
favlist = Favourite.objects.filter(user__username=user)
except Favourite.DoesNotExist:
raise Http404
return render_to_response('../templates/userena/fav_list.html', {'favlist' : favlist, 'users': users}, context_instance=RequestContext(request))
def deletefav(request, favourite_id):
favourite = get_object_or_404(Favourite, pk=int(favourite_id))
favourite.delete()
author = request.user.username
    return HttpResponseRedirect('/favlist/'+author)
| liuasliy/rdstourcms | favourite/views.py | Python | mit | 2,009
""""
Classes that each define a smoothly varying component of a scaling model.
These classes use a gaussian smoother (1D, 2D or 3D) to calculate the
inverse scale factors and derivatives with respect to the component
parameters.
"""
from __future__ import annotations
from math import ceil, floor
from scitbx import sparse
from dials.algorithms.scaling.model.components.scale_components import (
ScaleComponentBase,
)
from dials.array_family import flex
from dials_refinement_helpers_ext import GaussianSmoother2D as GS2D
from dials_refinement_helpers_ext import GaussianSmoother3D as GS3D
from dials_scaling_ext import GaussianSmootherFirstFixed as GS1D
from dials_scaling_ext import row_multiply
# The following gaussian smoother classes make the implementation
# consistent with that used in dials.refinement.
class GaussianSmoother1D(GS1D):
"""A 1D Gaussian smoother."""
def value_weight(self, x, value):
"""Return the value, weight and sumweight at a single point."""
result = super().value_weight(x, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def value_weight_first_fixed(self, x, value):
"""Return the value, weight and sumweight at a single point."""
result = super().value_weight_first_fixed(x, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def multi_value_weight(self, x, value):
"""Return the value, weight and sumweight at multiple points."""
result = super().multi_value_weight(x, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def multi_value_weight_first_fixed(self, x, value):
"""Return the value, weight and sumweight at multiple points."""
result = super().multi_value_weight_first_fixed(x, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def positions(self):
"""Return the smoother positions."""
return list(super().positions())
class GaussianSmoother2D(GS2D):
"""A 2D Gaussian smoother."""
def value_weight(self, x, y, value):
"""Return the value, weight and sumweight at a single point."""
result = super().value_weight(x, y, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def multi_value_weight(self, x, y, value):
"""Return the value, weight and sumweight at multiple points."""
result = super().multi_value_weight(x, y, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def x_positions(self):
"""Return the smoother x-positions."""
return list(super().x_positions())
def y_positions(self):
"""Return the smoother y-positions."""
return list(super().y_positions())
class GaussianSmoother3D(GS3D):
"""A 3D Gaussian smoother."""
def value_weight(self, x, y, z, value):
"""Return the value, weight and sumweight at a single point."""
result = super().value_weight(x, y, z, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def multi_value_weight(self, x, y, z, value):
"""Return the value, weight and sumweight at multiple points."""
result = super().multi_value_weight(x, y, z, value)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def x_positions(self):
"""Return the smoother x-positions."""
return list(super().x_positions())
def y_positions(self):
"""Return the smoother y-positions."""
return list(super().y_positions())
def z_positions(self):
"""Return the smoother z-positions."""
return list(super().z_positions())
class SmoothMixin:
"""Mixin class for smooth scale factor components.
This uses a Gaussian smoother to calculate scales and derivatives
based on the parameters and a have a set of normalised_values
associated with the data."""
def __init__(self):
self._Vr = 1.0
self._smoother = None
@property
def value(self):
"""Extra access to the parameters for the gaussian smoother."""
return self._parameters
@property
def smoother(self):
"""The Gaussian smoother."""
return self._smoother
@staticmethod
def nparam_to_val(n_params):
"""Convert the number of parameters to the required input value
for the smoother."""
assert (
n_params >= 2
), """cannot initialise a smooth scale factor
for <2 parameters."""
if n_params == 2 or n_params == 3:
return n_params - 1
return n_params - 2
class SmoothScaleComponent1D(ScaleComponentBase, SmoothMixin):
"""A smoothly varying scale component in one dimension.
This class has the option to fix the first parameter during minimisation."""
null_parameter_value = 1.0
def __init__(self, initial_values, parameter_esds=None):
super().__init__(initial_values, parameter_esds)
self._normalised_values = []
self._fixed_initial = False
def fix_initial_parameter(self):
"""Set a flag to indicate that we're fixing the first parameter."""
self._fixed_initial = True
@property
def free_parameters(self):
if self._fixed_initial:
return self._parameters[1:]
return self._parameters
@free_parameters.setter
def free_parameters(self, parameters):
if not self._fixed_initial:
self._parameters = parameters
else:
sel = flex.bool(self._parameters.size(), True)
sel[0] = False
self._parameters.set_selected(sel, parameters)
@property
def free_parameter_esds(self):
"""Return the estimated standard deviations of the parameters."""
if self._fixed_initial:
return self._parameter_esds[1:]
return self._parameter_esds
@free_parameter_esds.setter
def free_parameter_esds(self, esds):
assert len(esds) == len(self.free_parameters)
if not self._fixed_initial:
self._parameter_esds = esds
else:
sel = flex.bool(self._parameters.size(), True)
sel[0] = False
if self._parameter_esds:
self._parameter_esds.set_selected(sel, esds)
self._parameter_esds[0] = 0.0
else:
self._parameter_esds = flex.double(self.parameters.size(), 0.0)
self._parameter_esds.set_selected(sel, esds)
def set_new_parameters(self, new_parameters):
"""Set new parameters of a different length i.e. after batch handling"""
self._parameters = new_parameters
self._parameter_esds = None
self._n_params = len(self._parameters)
@property
def normalised_values(self):
"""This is a list of the relevant data needed to calculate the
inverse scale factors, normalised to give 'normalised coordinate
values' that fit in the range of the smoother parameters, which
are defined as a 1D array at normalised coordinates separated by
a spacing of 1."""
return self._normalised_values
@ScaleComponentBase.data.setter
def data(self, data):
assert set(data.keys()) == {"x"}, set(data.keys())
self._data = data
def update_reflection_data(self, selection=None, block_selections=None):
"""Set the normalised coordinate values and configure the smoother."""
self._normalised_values = []
self._n_refl = []
normalised_values = self.data["x"]
if selection:
normalised_values = normalised_values.select(selection)
# Make sure zeroed correctly.
normalised_values = normalised_values - flex.min(normalised_values)
phi_range_deg = [
floor(round(flex.min(normalised_values), 10)),
max(ceil(round(flex.max(normalised_values), 10)), 1),
]
self._smoother = GaussianSmoother1D(
phi_range_deg, self.nparam_to_val(self._n_params)
)
if block_selections:
block_selection_list = block_selections
for i, sel in enumerate(block_selection_list):
self._normalised_values.append(normalised_values.select(sel))
self._n_refl.append(self._normalised_values[i].size())
else:
self._normalised_values.append(normalised_values)
self._n_refl.append(normalised_values.size())
def calculate_scales_and_derivatives(self, block_id=0):
if self._n_refl[block_id] > 1:
if self._fixed_initial:
(
value,
weight,
sumweight,
) = self._smoother.multi_value_weight_first_fixed(
self._normalised_values[block_id], self.value
)
else:
value, weight, sumweight = self._smoother.multi_value_weight(
self._normalised_values[block_id], self.value
)
inv_sw = 1.0 / sumweight
dv_dp = row_multiply(weight, inv_sw)
elif self._n_refl[block_id] == 1:
if self._fixed_initial:
value, weight, sumweight = self._smoother.value_weight_first_fixed(
self._normalised_values[block_id][0], self.value
)
else:
value, weight, sumweight = self._smoother.value_weight(
self._normalised_values[block_id][0], self.value
)
dv_dp = sparse.matrix(1, weight.size)
b = flex.double(weight.as_dense_vector() / sumweight)
b.reshape(flex.grid(1, b.size()))
dv_dp.assign_block(b, 0, 0)
value = flex.double(1, value)
else:
return flex.double([]), sparse.matrix(0, 0)
return value, dv_dp
def calculate_scales(self, block_id=0):
""" "Only calculate the scales if needed, for performance."""
if self._n_refl[block_id] > 1:
value, _, __ = self._smoother.multi_value_weight(
self._normalised_values[block_id], self.value
)
elif self._n_refl[block_id] == 1:
value, _, __ = self._smoother.value_weight(
self._normalised_values[block_id][0], self.value
)
value = flex.double(1, value)
else:
value = flex.double([])
return value
class SmoothBScaleComponent1D(SmoothScaleComponent1D):
"""Subclass of SmoothScaleComponent1D to implement a smoothly
varying B-factor correction."""
null_parameter_value = 0.0
def __init__(self, initial_values, parameter_esds=None):
super().__init__(initial_values, parameter_esds)
self._d_values = []
@property
def d_values(self):
"""The current set of d-values associated with this component."""
return self._d_values
@ScaleComponentBase.data.setter
def data(self, data):
assert set(data.keys()) == {"x", "d"}, set(data.keys())
self._data = data
def update_reflection_data(self, selection=None, block_selections=None):
super().update_reflection_data(selection, block_selections)
self._d_values = []
data = self.data["d"]
if selection:
data = data.select(selection)
if block_selections:
for sel in block_selections:
self._d_values.append(data.select(sel))
else:
self._d_values.append(data)
def calculate_scales_and_derivatives(self, block_id=0):
scales, derivatives = super().calculate_scales_and_derivatives(block_id)
if self._n_refl[block_id] == 0:
return flex.double([]), sparse.matrix(0, 0)
prefac = 1.0 / (2.0 * (self._d_values[block_id] * self._d_values[block_id]))
s = flex.exp(scales * prefac)
d = row_multiply(derivatives, s * prefac)
return s, d
def calculate_scales(self, block_id=0):
s = super().calculate_scales(block_id)
return flex.exp(s / (2.0 * flex.pow2(self._d_values[block_id])))
def calculate_restraints(self):
residual = self.parameter_restraints * (self._parameters * self._parameters)
gradient = 2.0 * self.parameter_restraints * self._parameters
return residual, gradient
def calculate_jacobian_restraints(self):
jacobian = sparse.matrix(self.n_params, self.n_params)
for i in range(self.n_params):
jacobian[i, i] = +1.0
return self._parameters, jacobian, self.parameter_restraints
class SmoothScaleComponent2D(ScaleComponentBase, SmoothMixin):
"""Implementation of a 2D array-based smoothly varying scale factor.
A 2d array of parameters is defined, and the scale factor at fractional
coordinates is calculated as smoothly varying based on the distance to
the nearby parameters as calculated in the GaussianSmoother2D. The
initial values are passed as a 1D array, and shape is a 2-tuple
indicating the number of parameters in each dimension."""
null_parameter_value = 1.0
def __init__(self, initial_values, shape, parameter_esds=None):
assert len(initial_values) == (
shape[0] * shape[1]
), """The shape
information to initialise a 2D smoother is inconsistent with the length
of the initial parameter list."""
super().__init__(initial_values, parameter_esds)
self._n_x_params = shape[0]
self._n_y_params = shape[1]
self._normalised_x_values = None
self._normalised_y_values = None
@ScaleComponentBase.data.setter
def data(self, data):
assert set(data.keys()) == {"x", "y"}, set(data.keys())
self._data = data
def set_new_parameters(self, new_parameters, shape):
"""Set new parameters of a different length i.e. after batch handling"""
assert len(new_parameters) == shape[0] * shape[1]
self._parameters = new_parameters
self._parameter_esds = None
self._n_params = len(self._parameters)
self._n_x_params = shape[0]
self._n_y_params = shape[1]
@property
def n_x_params(self):
"""The number of parameters that parameterise the x-component."""
return self._n_x_params
@property
def n_y_params(self):
"""The number of parameters that parameterise the y-component."""
return self._n_y_params
@property
def normalised_x_values(self):
"""The normalised coordinate values in the first dimension."""
return self._normalised_x_values
@property
def normalised_y_values(self):
"""The normalised coordinate values in the second dimension."""
return self._normalised_y_values
def update_reflection_data(self, selection=None, block_selections=None):
"""control access to setting all of reflection data at once"""
self._normalised_x_values = []
self._normalised_y_values = []
self._n_refl = []
normalised_x_values = self.data["x"]
normalised_y_values = self.data["y"]
if selection:
normalised_x_values = normalised_x_values.select(selection)
normalised_y_values = normalised_y_values.select(selection)
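# shift the coordinates to start at zero and derive the smoother range; rounding before floor/ceil keeps floating-point noise from widening the range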
normalised_x_values = normalised_x_values - flex.min(normalised_x_values)
normalised_y_values = normalised_y_values - flex.min(normalised_y_values)
x_range = [
floor(round(flex.min(normalised_x_values), 10)),
max(ceil(round(flex.max(normalised_x_values), 10)), 1),
]
y_range = [
floor(round(flex.min(normalised_y_values), 10)),
max(ceil(round(flex.max(normalised_y_values), 10)), 1),
]
self._smoother = GaussianSmoother2D(
x_range,
self.nparam_to_val(self._n_x_params),
y_range,
self.nparam_to_val(self._n_y_params),
)
if block_selections:
for i, sel in enumerate(block_selections):
self._normalised_x_values.append(normalised_x_values.select(sel))
self._normalised_y_values.append(normalised_y_values.select(sel))
self._n_refl.append(self._normalised_x_values[i].size())
else:
self._normalised_x_values.append(normalised_x_values)
self._normalised_y_values.append(normalised_y_values)
self._n_refl.append(normalised_x_values.size())
def calculate_scales_and_derivatives(self, block_id=0):
if self._n_refl[block_id] > 1:
value, weight, sumweight = self._smoother.multi_value_weight(
self._normalised_x_values[block_id],
self._normalised_y_values[block_id],
self.value,
)
inv_sw = 1.0 / sumweight
dv_dp = row_multiply(weight, inv_sw)
elif self._n_refl[block_id] == 1:
value, weight, sumweight = self._smoother.value_weight(
self._normalised_x_values[block_id][0],
self._normalised_y_values[block_id][0],
self.value,
)
dv_dp = sparse.matrix(1, weight.size)
b = flex.double(weight.as_dense_vector() / sumweight)
b.reshape(flex.grid(1, b.size()))
dv_dp.assign_block(b, 0, 0)
value = flex.double(1, value)
else:
return flex.double([]), sparse.matrix(0, 0)
return value, dv_dp
def calculate_scales(self, block_id=0):
"""Only calculate the scales if needed, for performance."""
if self._n_refl[block_id] > 1:
value, _, __ = self._smoother.multi_value_weight(
self._normalised_x_values[block_id],
self._normalised_y_values[block_id],
self.value,
)
elif self._n_refl[block_id] == 1:
value, _, __ = self._smoother.value_weight(
self._normalised_x_values[block_id][0],
self._normalised_y_values[block_id][0],
self.value,
)
value = flex.double(1, value)
else:
value = flex.double([])
return value
class SmoothScaleComponent3D(ScaleComponentBase, SmoothMixin):
"""Implementation of a 3D array-based smoothly varying scale factor.
A 3d array of parameters is defined, and the scale factor at fractional
coordinates is calculated as smoothly varying based on the distance to
the nearby parameters as calculated in the GaussianSmoother3D. The
initial values are passed as a 1D array, and shape is a 3-tuple
indicating the number of parameters in each dimension."""
null_parameter_value = 1.0
def __init__(self, initial_values, shape, parameter_esds=None):
assert len(initial_values) == (
shape[0] * shape[1] * shape[2]
), """The
shape information to initialise a 3D smoother is inconsistent with the
length of the initial parameter list."""
super().__init__(initial_values, parameter_esds)
self._n_x_params = shape[0]
self._n_y_params = shape[1]
self._n_z_params = shape[2]
self._normalised_x_values = None
self._normalised_y_values = None
self._normalised_z_values = None
def set_new_parameters(self, new_parameters, shape):
"""Set new parameters of a different length i.e. after batch handling"""
assert len(new_parameters) == shape[0] * shape[1] * shape[2]
self._parameters = new_parameters
self._parameter_esds = None
self._n_params = len(self._parameters)
self._n_x_params = shape[0]
self._n_y_params = shape[1]
self._n_z_params = shape[2]
@ScaleComponentBase.data.setter
def data(self, data):
assert set(data.keys()) == {"x", "y", "z"}, set(data.keys())
self._data = data
@property
def n_x_params(self):
"""The number of parameters that parameterise the x-component."""
return self._n_x_params
@property
def n_y_params(self):
"""The number of parameters that parameterise the y-component."""
return self._n_y_params
@property
def n_z_params(self):
"""The number of parameters that parameterise the z-component."""
return self._n_z_params
@property
def normalised_x_values(self):
"""The normalised coordinate values in the first dimension."""
return self._normalised_x_values
@property
def normalised_y_values(self):
"""The normalised coordinate values in the second dimension."""
return self._normalised_y_values
@property
def normalised_z_values(self):
"""The normalised coordinate values in the third dimension."""
return self._normalised_z_values
def update_reflection_data(self, selection=None, block_selections=None):
"""control access to setting all of reflection data at once"""
self._normalised_x_values = []
self._normalised_y_values = []
self._normalised_z_values = []
self._n_refl = []
normalised_x_values = self.data["x"]
normalised_y_values = self.data["y"]
normalised_z_values = self.data["z"]
if selection:
normalised_x_values = normalised_x_values.select(selection)
normalised_y_values = normalised_y_values.select(selection)
normalised_z_values = normalised_z_values.select(selection)
"""Set the normalised coordinate values and configure the smoother."""
normalised_x_values = normalised_x_values - flex.min(normalised_x_values)
normalised_y_values = normalised_y_values - flex.min(normalised_y_values)
normalised_z_values = normalised_z_values - flex.min(normalised_z_values)
x_range = [
floor(round(flex.min(normalised_x_values), 10)),
max(ceil(round(flex.max(normalised_x_values), 10)), 1),
]
y_range = [
floor(round(flex.min(normalised_y_values), 10)),
max(ceil(round(flex.max(normalised_y_values), 10)), 1),
]
z_range = [
floor(round(flex.min(normalised_z_values), 10)),
max(ceil(round(flex.max(normalised_z_values), 10)), 1),
]
self._smoother = GaussianSmoother3D(
x_range,
self.nparam_to_val(self._n_x_params),
y_range,
self.nparam_to_val(self._n_y_params),
z_range,
self.nparam_to_val(self._n_z_params),
)
if block_selections:
for i, sel in enumerate(block_selections):
self._normalised_x_values.append(normalised_x_values.select(sel))
self._normalised_y_values.append(normalised_y_values.select(sel))
self._normalised_z_values.append(normalised_z_values.select(sel))
self._n_refl.append(self._normalised_x_values[i].size())
else:
self._normalised_x_values.append(normalised_x_values)
self._normalised_y_values.append(normalised_y_values)
self._normalised_z_values.append(normalised_z_values)
self._n_refl.append(normalised_x_values.size())
def calculate_scales_and_derivatives(self, block_id=0):
if self._n_refl[block_id] > 1:
value, weight, sumweight = self._smoother.multi_value_weight(
self._normalised_x_values[block_id],
self._normalised_y_values[block_id],
self._normalised_z_values[block_id],
self.value,
)
inv_sw = 1.0 / sumweight
dv_dp = row_multiply(weight, inv_sw)
elif self._n_refl[block_id] == 1:
value, weight, sumweight = self._smoother.value_weight(
self._normalised_x_values[block_id][0],
self._normalised_y_values[block_id][0],
self._normalised_z_values[block_id][0],
self.value,
)
dv_dp = sparse.matrix(1, weight.size)
b = flex.double(weight.as_dense_vector() / sumweight)
b.reshape(flex.grid(1, b.size()))
dv_dp.assign_block(b, 0, 0)
value = flex.double(1, value)
else:
return flex.double([]), sparse.matrix(0, 0)
return value, dv_dp
def calculate_scales(self, block_id=0):
""" "Only calculate the scales if needed, for performance."""
if self._n_refl[block_id] > 1:
value, _, __ = self._smoother.multi_value_weight(
self._normalised_x_values[block_id],
self._normalised_y_values[block_id],
self._normalised_z_values[block_id],
self.value,
)
elif self._n_refl[block_id] == 1:
value, _, __ = self._smoother.value_weight(
self._normalised_x_values[block_id][0],
self._normalised_y_values[block_id][0],
self._normalised_z_values[block_id][0],
self.value,
)
value = flex.double(1, value)
else:
value = flex.double([])
return value
| dials/dials | algorithms/scaling/model/components/smooth_scale_components.py | Python | bsd-3-clause | 25,391 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0035_realm_message_retention_period_days"),
]
operations = [
migrations.RenameField(
model_name="realm",
old_name="subdomain",
new_name="string_id",
),
]
| rht/zulip | zerver/migrations/0036_rename_subdomain_to_string_id.py | Python | apache-2.0 | 335 |
#!/usr/bin/env python
from pymongo import Connection
from bson.code import Code
connection = Connection()
db = connection.pulp_database
m = Code(open("map.js", "r").read())
r = Code(open("reduce.js", "r").read())
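# run the map/reduce job over the packages collection; this legacy pymongo call returns the output collection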
coll = db.packages.map_reduce(m,r)
results = coll.find()
print results[0]
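# entries whose reduced count is 0 are packages not referenced by any repo, i.e. orphaned packages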
results = coll.find({"value.count":0})
print "%s results found " % (results.count())
orphanids = [x["value"]["package_id"] for x in results]
results = db.packages.find({"id":{"$in":orphanids}}, {"id":1, "filename":1, "checksum":1})
for index, r in enumerate(results):
print "Id: %s\t Filename: %s \tChecksum: %s" % (r["id"], r["filename"], r["checksum"])
if index >= 15:
print "%s more matches available...." % (results.count()-index)
break
| mhrivnak/pulp | playpen/mongodb/orphaned_packages/driver.py | Python | gpl-2.0 | 756 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import Queue
import sys
import requests
import os
import threading
import time
class Worker(threading.Thread): # worker thread that handles queued work requests
def __init__(self, workQueue, resultQueue, **kwds):
threading.Thread.__init__(self, **kwds)
self.setDaemon(True)
self.workQueue = workQueue
self.resultQueue = resultQueue
def run(self):
while 1:
try:
callable, args, kwds = self.workQueue.get(False) # get task
res = callable(*args, **kwds)
self.resultQueue.put(res) # put result
except Queue.Empty:
break
class WorkManager: # thread pool management and creation
def __init__(self, num_of_workers=10):
self.workQueue = Queue.Queue() # queue of pending work requests
self.resultQueue = Queue.Queue() # queue of output results
self.workers = []
self._recruitThreads(num_of_workers)
def _recruitThreads(self, num_of_workers):
for i in range(num_of_workers):
worker = Worker(self.workQueue, self.resultQueue) # create a worker thread
self.workers.append(worker) # add it to the thread pool
def start(self):
for w in self.workers:
w.start()
def wait_for_complete(self):
while len(self.workers):
worker = self.workers.pop() # take a worker thread out of the pool to handle requests
worker.join()
if worker.isAlive() and not self.workQueue.empty():
self.workers.append(worker) # put it back into the thread pool
print 'All jobs were complete.'
def add_job(self, callable, *args, **kwds):
self.workQueue.put((callable, args, kwds)) # add the request to the work queue
def get_result(self, *args, **kwds):
return self.resultQueue.get(*args, **kwds)
def download_file(url):
"""这里可以请求并保存网页"""
#print 'beg download', url
print requests.get(url).text
def main():
try:
num_of_threads = int(sys.argv[1])
except:
num_of_threads = 10
_st = time.time()
wm = WorkManager(num_of_threads)
print num_of_threads
urls = ['http://www.baidu.com'] * 100 # URLs to be crawled
for i in urls:
wm.add_job(download_file, i)
wm.start()
wm.wait_for_complete()
print time.time() - _st
if __name__ == '__main__':
main()
| PegasusWang/articles | crawler/thread_pool_spider.py | Python | mit | 2,504 |
from coco.api.permissions import *
from coco.core.helpers import get_server_selection_algorithm
from coco.core.models import *
from coco.api.serializer import *
from django.contrib.auth.models import User, Group
from django.db.models import Q
from django_admin_conf_vars.models import ConfigurationVariable
from rest_framework import generics, status
from rest_framework.decorators import api_view
from rest_framework.permissions import *
from rest_framework.response import Response
# TODO: check for unique names before creation of objects !
def validate_request_params(required_params, request):
"""
Validate request parameters.
"""
params = {}
for param in required_params:
if param not in request.data:
return Response({"error": "Parameters missing.", "required_parameters": required_params })
params[param] = request.data.get(param)
return params
@api_view(('GET',))
def api_root(request, format=None):
"""
API Root
"""
available_endpoints = {}
available_endpoints['users'] = {
'': 'Get a list of all users.',
'{id}': 'Get details about a user.'
}
available_endpoints['collaborationgroups'] = {
'': 'Get a list of all collaborationgroups.',
'{id}': {
'': 'Get details about a collaborationgroup.',
'add_members': 'Add members to a collaborationgroup.',
'remove_members': 'Remove members from a collaborationgroup.',
'add_admins': 'Add admins to a collaborationgroup.',
'remove_admins': 'Remove admins from a collaborationgroup.',
'join': 'Join a public collaborationgroup.',
'leave': 'Leave a collaborationgroup.'
}
}
available_endpoints['containers'] = {
'': 'Get a list of all containers available to your user.',
'images': {
'': 'Get a list of all container images available to your user.',
'{id}': {
'add_access_groups': 'Add access_groups to the share.',
'remove_access_groups': 'Remove access_groups from the share.'
}
},
'snapshots': 'Get a list of all container snapshots available to your user.',
'{id}': {
'': 'Get details about a container.',
'commit': 'Create an image from the container.',
'clone': 'Clone the container.',
'clones': 'Get a list of all clones of the container',
'create_snapshot': 'Create a snapshot of the container.',
'restore_snapshot': 'Restore a snapshot of the container.',
'restart': 'Restart the container.',
'resume': 'Resume the container.',
'start': 'Start the container.',
'stop': 'Stop the container.',
'suspend': 'Suspend the container.'
}
}
available_endpoints['shares'] = {
'': 'Get a list of all available shares.',
'{id}': {
'': 'Get details about a share.',
'add_access_groups': 'Add access_groups to the share.',
'remove_access_groups': 'Remove access_groups from the share.'
}
}
available_endpoints['tags'] = {
'': 'Get a list of all available tags.',
'{id}': 'Get details about a tag.'
}
available_endpoints['notifications'] = {
'': 'Get a list of all available notifications.',
'{id}': 'Get details about a notification.'
}
available_endpoints['notificationlogs'] = {
'': 'Get a list of all available notificationlogs.',
'{id}': 'Get details about a notificationlog.',
'unread': 'Get all new notificationlogs.',
'mark_all_as_read': 'Mark all your notificationlogs as read.'
}
available_endpoints['notificationtypes'] = 'Get a list of all available notificationtypes.'
# additional endpoints for superusers only
if request.user.is_superuser:
available_endpoints['configurationvariables'] = {
'': 'Get a list of all available configurationvariables.',
'{id}': 'Get details about a configurationvariable.'
}
available_endpoints['backends'] = {
'': 'Get a list of all available backends.',
'{id}': 'Get details about a backend.'
}
available_endpoints['servers'] = {
'': 'Get a list of all available servers.',
'{id}': 'Get details about a server.'
}
return Response(available_endpoints)
class ConfigurationVariableList(generics.ListCreateAPIView):
"""
Get a list of all configuration variables.
Only visible to authenticated users.
"""
queryset = ConfigurationVariable.objects.all()
serializer_class = ConfigurationVariableSerializer
permission_classes = [IsSuperUser]
class ConfigurationVariableDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get a list of all configuration variables.
Only visible to authenticated users.
"""
queryset = ConfigurationVariable.objects.all()
serializer_class = ConfigurationVariableSerializer
permission_classes = [IsSuperUser]
class UserList(generics.ListAPIView):
"""
Get a list of all users (`django.contrib.auth.models.User`).
Only visible to authenticated users.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsSuperUserOrAuthenticatedAndReadOnly]
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details about a user (`django.contrib.auth.models.User`).
Only visible to authenticated users.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsSuperUserOrAuthenticatedAndReadOnly]
class GroupList(generics.ListAPIView):
"""
Get a list of all groups.
Only visible to authenticated users.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [IsAuthenticatedAndReadOnly]
class BackendList(generics.ListCreateAPIView):
"""
Get a list of all the containers.
"""
queryset = Backend.objects.all()
serializer_class = BackendSerializer
permission_classes = [IsSuperUser]
class BackendDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a backend.
"""
queryset = Backend.objects.all()
serializer_class = BackendSerializer
permission_classes = [IsSuperUser]
class CollaborationGroupList(generics.ListCreateAPIView):
"""
Get a list of all the collaboration groups the user is in.
"""
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatCollaborationGroupSerializer
return NestedCollaborationGroupSerializer
def get_queryset(self):
if self.request.user.is_superuser:
queryset = CollaborationGroup.objects.all()
else:
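# regular users see groups they are a member of, groups they created, and public groups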
queryset = CollaborationGroup.objects.filter(
Q(user__id=self.request.user.id)
| Q(creator=self.request.user.backend_user.id)
| Q(is_public=True)
).distinct()
return queryset
def perform_create(self, serializer):
if hasattr(self.request.user, 'backend_user'):
serializer.save(
creator=self.request.user.backend_user,
)
else:
serializer.save()
class CollaborationGroupDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a collaboration group the user is in.
"""
permission_classes = [CollaborationGroupDetailPermission]
queryset = CollaborationGroup.objects.all()
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatCollaborationGroupSerializer
return NestedCollaborationGroupSerializer
@api_view(['POST'])
def collaborationgroup_add_members(request, pk):
"""
Add a list of users to the group.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
:param POST.users list of user ids, i.e. { "users": [1,2,3]}
"""
required_params = ["users"]
params = validate_request_params(required_params, request)
obj = CollaborationGroup.objects.filter(id=pk)
if not obj:
return Response({"error": "CollaborationGroup not found!", "data": request.data})
group = obj.first()
# check permissions
validate_object_permission(CollaborationGroupDetailPermission, request, group)
# validate all the user_ids first before adding them
user_list = []
for user_id in params.get("users"):
obj = User.objects.filter(id=user_id)
if not obj:
return Response({"error": "User not found!", "data": user_id})
user = obj.first()
if not user.backend_user:
return Response({"error": "User has no backend user!", "data": user_id})
user_list.append(user.backend_user)
for user in user_list:
result = group.add_user(user)
if not result:
return Response({"error": "{} is already member of {}".format(user.username, group.name), "data": user.id})
serializer = NestedCollaborationGroupSerializer(group)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def collaborationgroup_add_admins(request, pk):
"""
Make a list of users to group admins.
Only users that are already members of the group will be added as admins.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
:param POST.users list of user ids, i.e. { "users": [1,2,3]}
"""
required_params = ["users"]
params = validate_request_params(required_params, request)
obj = CollaborationGroup.objects.filter(id=pk)
if not obj:
return Response({"error": "CollaborationGroup not found!", "data": request.data})
group = obj.first()
# check permissions
validate_object_permission(CollaborationGroupDetailPermission, request, group)
# validate all the user_ids first before adding them
user_list = []
for user_id in params.get("users"):
obj = User.objects.filter(id=user_id)
if not obj:
return Response({"error": "User not found!", "data": user_id})
user = obj.first()
if not user.backend_user:
return Response({"error": "User has no backend user!", "data": user_id})
user_list.append(user.backend_user)
for user in user_list:
result = group.add_admin(user)
if not result:
return Response({"error": "{} is already admin of {}".format(user.username, group.name), "data": user.id})
serializer = NestedCollaborationGroupSerializer(group)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def collaborationgroup_remove_admins(request, pk):
"""
Remove a list of users from group admins.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
:param POST.users list of user ids, i.e. { "users": [1,2,3]}
"""
required_params = ["users"]
params = validate_request_params(required_params, request)
obj = CollaborationGroup.objects.filter(id=pk)
if not obj:
return Response({"error": "CollaborationGroup not found!", "data": request.data})
group = obj.first()
# check permissions
validate_object_permission(CollaborationGroupDetailPermission, request, group)
# validate all the user_ids first before adding them
user_list = []
for user_id in params.get("users"):
obj = User.objects.filter(id=user_id)
if not obj:
return Response({"error": "User not found!", "data": user_id})
user = obj.first()
if not user.backend_user:
return Response({"error": "User has no backend user!", "data": user_id})
user_list.append(user.backend_user)
for user in user_list:
result = group.remove_admin(user)
if not result:
return Response({"error": "{} is no admin of {}".format(user.username, group.name), "data": user.id})
serializer = NestedCollaborationGroupSerializer(group)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def collaborationgroup_remove_members(request, pk):
"""
Remove a list of users from the group.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
:param POST.users list of user ids, i.e. { "users": [1,2,3]}
"""
required_params = ["users"]
params = validate_request_params(required_params, request)
obj = CollaborationGroup.objects.filter(id=pk)
if not obj:
return Response({"error": "CollaborationGroup not found!", "data": request.data})
group = obj.first()
# check permissions
validate_object_permission(CollaborationGroupDetailPermission, request, group)
# validate all the user_ids first before adding them
user_list = []
for user_id in params.get("users"):
obj = User.objects.filter(id=user_id)
if not obj:
return Response({"error": "User not found!", "data": user_id})
user = obj.first()
if not user.backend_user:
return Response({"error": "User has no backend user!", "data": user_id})
user_list.append(user.backend_user)
for user in user_list:
result = group.remove_member(user)
if not result:
return Response({"error": "{} is no member of {}".format(user.username, group.name), "data": user.id})
serializer = NestedCollaborationGroupSerializer(group)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def collaborationgroup_join(request, pk):
"""
Join a group.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
"""
obj = CollaborationGroup.objects.filter(id=pk)
if not obj:
return Response({"error": "CollaborationGroup not found!", "data": request.data})
group = obj.first()
if not group.is_public:
return Response({"error": "{} could not be added to {}. Group not public.".format(request.user.username, group.name)})
result = group.add_user(request.user.backend_user)
if not result:
return Response({"error": "{} could not be added to {}".format(request.user.username, group.name)})
serializer = NestedCollaborationGroupSerializer(group)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def collaborationgroup_leave(request, pk):
"""
Leave a group.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
"""
obj = CollaborationGroup.objects.filter(id=pk)
if not obj:
return Response({"error": "CollaborationGroup not found!", "data": request.data})
group = obj.first()
result = group.remove_member(request.user.backend_user)
if not result:
return Response({"error": "{} could not be removed from {}. Not a member or creator.".format(request.user.username, group.name)})
serializer = NestedCollaborationGroupSerializer(group)
return Response(serializer.data, status=status.HTTP_201_CREATED)
class ContainerList(generics.ListCreateAPIView):
"""
Get a list of all the containers.
"""
serializer_class = ContainerSerializer
def get_queryset(self):
if self.request.user.is_superuser:
queryset = Container.objects.all()
else:
queryset = Container.objects.filter(owner=self.request.user.backend_user.id)
return queryset
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
# target server gets selected by selection algorithm
server = get_server_selection_algorithm().choose_server(
Server.objects.all().iterator()
)
if hasattr(self.request.user, 'backend_user'):
serializer.save(
server=server,
owner=self.request.user.backend_user
)
else:
serializer.save(
server=server,
)
class ContainerDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a container.
"""
serializer_class = ContainerSerializer
permission_classes = [ContainerDetailPermission]
queryset = Container.objects.all()
def get_container(pk):
"""
Get container by pk.
"""
containers = Container.objects.filter(id=pk)
if containers:
return containers.first()
else:
return None
@api_view(['POST'])
def container_clone(request, pk):
"""
Make a clone of the container.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the container that needs to be cloned
:param name
:param description
"""
params = {}
data = request.data
if not data.get('name'):
return Response({"error": "please provide name for the clone: {\"name\" : \"some name \"}"})
params['name'] = data.get('name')
if data.get('description'):
params['description'] = data.get('description')
origin = get_container(pk)
# validate permissions
validate_object_permission(ContainerDetailPermission, request, origin)
if origin:
clone = origin.clone(**params)
clone.save()
serializer = ContainerSerializer(clone)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response({"error": "Container not found!", "data": data})
@api_view(['POST'])
def container_commit(request, pk):
"""
Create a new image based on this container.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the container that needs to be cloned
:param name:
:param description:
:param public:
"""
required_params = ["name", "description", "public"]
params = {}
for param in required_params:
if param not in request.data:
return Response({"error": "Parameters missing.", "required_parameters": required_params })
params[param] = request.data.get(param)
container = get_container(pk)
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
if container:
image = container.commit(**params)
serializer = ContainerImageSerializer(image)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response({"error": "Container not found!", "data": data})
@api_view(['POST'])
def container_create_snapshot(request, pk):
"""
Create a snapshot of the container.
Todo: show params on OPTIONS call.
:param pk pk of the container that needs to be cloned
:param name
:param description
"""
params = {}
data = request.data
if not data.get('name'):
return Response({"error": "please provide name for the clone: {\"name\" : \"some name \"}"})
params['name'] = data.get('name')
if data.get('description'):
params['description'] = data.get('description')
origin = get_container(pk)
# validate permissions
validate_object_permission(ContainerDetailPermission, request, origin)
if origin:
snapshot = origin.create_snapshot(**params)
snapshot.save()
serializer = ContainerSnapshotSerializer(snapshot)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response({"error": "Container not found!", "pk": pk})
@api_view(['GET'])
def container_clones(request, pk):
container = get_container(pk)
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
if container:
clones = container.get_clones()
serializer = ContainerSerializer(clones, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response({"error": "Container not found!", "pk": pk})
@api_view(['POST'])
def container_restart(request, pk):
"""
Restart the container.
:param pk pk of the container that needs to be cloned
"""
containers = Container.objects.filter(id=pk)
if containers:
container = containers.first()
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
container.restart()
return Response({"message": "container rebooting"}, status=status.HTTP_200_OK)
else:
return Response({"error": "Container not found!", "data": data})
@api_view(['POST'])
def container_resume(request, pk):
"""
Resume the container.
:param pk pk of the container that needs to be cloned
"""
containers = Container.objects.filter(id=pk)
if containers:
container = containers.first()
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
container.resume()
return Response({"message": "container resuming"}, status=status.HTTP_200_OK)
else:
return Response({"error": "Container not found!", "data": data})
pass
@api_view(['POST'])
def container_start(request, pk):
"""
Start the container.
:param pk pk of the container that needs to be cloned
"""
containers = Container.objects.filter(id=pk)
if containers:
container = containers.first()
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
container.start()
return Response({"message": "container booting"}, status=status.HTTP_200_OK)
else:
return Response({"error": "Container not found!", "data": data})
pass
@api_view(['POST'])
def container_stop(request, pk):
"""
Stop the container.
:param pk pk of the container that needs to be cloned
"""
containers = Container.objects.filter(id=pk)
if containers:
container = containers.first()
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
container.stop()
return Response({"message": "container stopping"}, status=status.HTTP_200_OK)
else:
return Response({"error": "Container not found!", "data": data})
pass
@api_view(['POST'])
def container_suspend(request, pk):
"""
Suspend the container.
:param pk pk of the container that needs to be cloned
"""
containers = Container.objects.filter(id=pk)
if containers:
container = containers.first()
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
container.suspend()
return Response({"message": "container suspending"}, status=status.HTTP_200_OK)
else:
return Response({"error": "Container not found!", "data": data})
pass
class ContainerImageList(generics.ListCreateAPIView):
"""
Get a list of all the container images.
"""
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatContainerImageSerializer
return ContainerImageSerializer
def get_queryset(self):
if self.request.user.is_superuser:
queryset = ContainerImage.objects.all()
else:
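# regular users see only non-internal images that they own, that are public, or that are shared with one of their collaboration groups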
collab_group = None
if hasattr(self.request.user, 'backend_user'):
collab_group = self.request.user.backend_user.get_collaboration_group()
if collab_group:
queryset = ContainerImage.objects.filter(
Q(is_internal=False) & (Q(owner=self.request.user) | Q(is_public=True) | Q(access_groups__user=self.request.user))
).distinct()
else:
queryset = ContainerImage.objects.filter(
Q(is_internal=False) & (Q(owner=self.request.user) | Q(is_public=True))
).distinct()
return queryset
class ContainerImageDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a container image.
"""
serializer_class = ContainerImageSerializer
permission_classes = [ContainerImageDetailPermission]
queryset = ContainerImage.objects.all()
@api_view(['POST'])
def image_add_access_groups(request, pk):
"""
Add a list of collaboration groups to the image.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
"""
required_params = ["access_groups"]
params = validate_request_params(required_params, request)
obj = ContainerImage.objects.filter(id=pk)
if not obj:
return Response({"error": "Image not found!", "data": request.data})
image = obj.first()
# validate permissions
validate_object_permission(ContainerImageDetailPermission, request, image)
# validate all the access_groups first before adding them
access_groups = []
for access_group_id in params.get("access_groups"):
obj = CollaborationGroup.objects.filter(id=access_group_id)
if not obj:
return Response(
{"error": "CollaborationGroup not found!", "data": access_group_id},
status=status.HTTP_404_NOT_FOUND
)
access_groups.append(obj.first())
added_groups = []
# add the access groups to the share
for access_group in access_groups:
if image.add_access_group(access_group):
added_groups.append((access_group.id, access_group.name))
return Response({
"detail": "Groups added successfully",
"groups": added_groups,
"count": len(added_groups)
},
status=status.HTTP_200_OK
)
@api_view(['POST'])
def image_remove_access_groups(request, pk):
"""
Remove a list of collaboration groups from the image.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
"""
required_params = ["access_groups"]
params = validate_request_params(required_params, request)
obj = ContainerImage.objects.filter(id=pk)
if not obj:
return Response({"error": "Image not found!", "data": request.data})
image = obj.first()
# validate permissions
validate_object_permission(ContainerImageDetailPermission, request, image)
# validate all the access_groups first before adding them
access_groups = []
for access_group_id in params.get("access_groups"):
obj = CollaborationGroup.objects.filter(id=access_group_id)
if not obj:
return Response(
{"error": "CollaborationGroup not found!", "data": access_group_id},
status=status.HTTP_404_NOT_FOUND
)
access_groups.append(obj.first())
removed_groups = []
# add the access groups to the share
for access_group in access_groups:
if image.remove_access_group(access_group):
removed_groups.append((access_group.id, access_group.name))
return Response({
"detail": "Groups removed successfully",
"groups": removed_groups,
"count": len(removed_groups)
},
status=status.HTTP_200_OK
)
class ContainerSnapshotsList(generics.ListAPIView):
"""
Get a list of all snapshots for a specific container.
"""
serializer_class = ContainerSnapshotSerializer
def get_queryset(self):
# get pk of container from url
pk = self.kwargs['pk']
if self.request.user.is_superuser:
queryset = ContainerSnapshot.objects.all().filter(container__id=pk)
else:
queryset = ContainerSnapshot.objects.filter(
container__owner=self.request.user.backend_user
).filter(container=pk)
return queryset
class ContainerSnapshotList(generics.ListAPIView):
"""
Get a list of all the container snapshots.
"""
serializer_class = ContainerSnapshotSerializer
def get_queryset(self):
if self.request.user.is_superuser:
queryset = ContainerSnapshot.objects.all()
else:
queryset = ContainerSnapshot.objects.filter(
container__owner=self.request.user.backend_user
)
return queryset
class ContainerSnapshotDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a container snapshot.
"""
serializer_class = ContainerSnapshotSerializer
permission_classes = [ContainerSnapshotDetailPermission]
queryset = ContainerSnapshot.objects.all()
@api_view(['POST'])
def container_snapshot_restore(request, pk):
"""
Restore a snapshot of the container.
Todo: show params on OPTIONS call.
:param pk pk of the container that needs to be cloned
"""
snapshots = ContainerSnapshot.objects.filter(id=pk)
if snapshots:
s = snapshots.first()
container = s.container
# validate permissions
validate_object_permission(ContainerDetailPermission, request, container)
s.restore()
serializer = ContainerSerializer(container)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response({"error": "Snapshot not found!", "pk": pk})
class ServerList(generics.ListCreateAPIView):
"""
Get a list of all the servers.
"""
queryset = Server.objects.all()
serializer_class = ServerSerializer
permission_classes = [IsSuperUser]
class ServerDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a server.
"""
queryset = Server.objects.all()
serializer_class = ServerSerializer
permission_classes = [IsSuperUser]
class ShareList(generics.ListCreateAPIView):
"""
Get a list of all the shares.
"""
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatShareSerializer
return NestedShareSerializer
def get_queryset(self):
if self.request.user.is_superuser:
return Share.objects.all()
else:
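# non-superusers only see shares whose backend group contains their user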
return Share.objects.filter(
backend_group__django_group__user=self.request.user
)
def perform_create(self, serializer):
if hasattr(self.request.user, 'backend_user'):
serializer.save(
owner=self.request.user.backend_user,
)
else:
serializer.save()
class ShareDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get details of a share.
"""
permission_classes = [ShareDetailPermissions]
queryset = Share.objects.all()
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatShareSerializer
return NestedShareSerializer
@api_view(['POST'])
def share_add_access_groups(request, pk):
"""
Add a list of collaboration groups to the share.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
"""
required_params = ["access_groups"]
params = validate_request_params(required_params, request)
obj = Share.objects.filter(id=pk)
if not obj:
return Response({"error": "Share not found!", "data": request.data})
share = obj.first()
# validate permissions
validate_object_permission(ShareDetailPermissions, request, share)
# validate all the access_groups first before adding them
access_groups = []
for access_group_id in params.get("access_groups"):
obj = CollaborationGroup.objects.filter(id=access_group_id)
if not obj:
return Response(
{"error": "CollaborationGroup not found!", "data": access_group_id},
status=status.HTTP_404_NOT_FOUND
)
access_groups.append(obj.first())
added_groups = []
# add the access groups to the share
for access_group in access_groups:
if share.add_access_group(access_group):
added_groups.append((access_group.id, access_group.name))
return Response({
"detail": "Groups added successfully",
"groups": added_groups,
"count": len(added_groups)
},
status=status.HTTP_200_OK
)
@api_view(['POST'])
def share_remove_access_groups(request, pk):
"""
Remove a list of collaboration groups from the share.
Todo: show params on OPTIONS call.
Todo: permissions
:param pk pk of the collaboration group
"""
required_params = ["access_groups"]
params = validate_request_params(required_params, request)
obj = Share.objects.filter(id=pk)
if not obj:
return Response({"error": "Share not found!", "data": request.data})
share = obj.first()
# validate permissions
validate_object_permission(ShareDetailPermissions, request, share)
# validate all the access_groups first before adding them
access_groups = []
for access_group_id in params.get("access_groups"):
obj = CollaborationGroup.objects.filter(id=access_group_id)
if not obj:
return Response(
{"error": "CollaborationGroup not found!", "data": access_group_id},
status=status.HTTP_404_NOT_FOUND
)
access_groups.append(obj.first())
removed_groups = []
# add the access groups to the share
for access_group in access_groups:
if share.remove_access_group(access_group):
removed_groups.append((access_group.id, access_group.name))
return Response({
"detail": "Groups removed successfully",
"groups": removed_groups,
"count": len(removed_groups)
},
status=status.HTTP_200_OK
)
class TagList(generics.ListCreateAPIView):
"""
Get a list of all the tags.
"""
serializer_class = TagSerializer
def get_queryset(self):
"""
Optionally restricts the returned purchases to a given string,
by filtering against a `label_text` query parameter in the URL.
"""
queryset = Tag.objects.all()
label_text = self.kwargs.get('label_text', None)
if label_text is not None:
queryset = queryset.filter(label__iexact=label_text)
return queryset
class TagDetail(generics.RetrieveDestroyAPIView):
"""
Get details of a tag.
"""
permission_classes = [IsSuperUserOrAuthenticatedAndReadOnly]
queryset = Tag.objects.all()
serializer_class = TagSerializer
class NotificationList(generics.ListCreateAPIView):
"""
Get a list of all the notifications.
"""
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatNotificationSerializer
return NestedNotificationSerializer
def get_queryset(self):
if self.request.user.is_superuser:
queryset = Notification.objects.all()
else:
queryset = Notification.objects.filter(sender=self.request.user)
return queryset
def perform_create(self, serializer):
sender = serializer.context.get('request').POST.getlist('sender')
# check if sender has been provided in POST data
sender_provided = filter(None, sender)
if self.request.user.is_superuser and sender_provided:
serializer.save()
else:
serializer.save(sender=self.request.user)
class NotificationDetail(generics.RetrieveDestroyAPIView):
"""
Get details of a notification.
"""
permission_classes = [NotificationDetailPermission]
queryset = Notification.objects.all()
def get_serializer_class(self, *args, **kwargs):
if self.request.method in ['PATCH', 'POST', 'PUT']:
return FlatNotificationSerializer
return NestedNotificationSerializer
class NotificationLogList(generics.ListAPIView):
"""
Get a list of all the notification logs.
"""
serializer_class = NotificationLogSerializer
def get_serializer_class(self, *args, **kwargs):
if self.request.user.is_superuser:
return SuperUserNotificationLogSerializer
return NotificationLogSerializer
def get_queryset(self):
if self.request.user.is_superuser:
return NotificationLog.objects.all().order_by('-notification__date')
else:
return NotificationLog.objects.filter(user=self.request.user.backend_user) \
.filter(in_use=True) \
.order_by('-notification__date')
class NotificationLogUnreadList(generics.ListAPIView):
"""
Get a list of all the notification logs.
"""
serializer_class = NotificationLogSerializer
def get_queryset(self):
return NotificationLog.objects.filter(user=self.request.user.backend_user) \
.filter(in_use=True).filter(read=False) \
.order_by('-notification__date')
class NotificationLogDetail(generics.RetrieveUpdateAPIView):
"""
Get details of a notification.
"""
serializer_class = NotificationLogSerializer
permission_classes = [NotificationLogDetailPermission]
queryset = NotificationLog.objects.all()
@api_view(('POST',))
def notificationlogs_mark_all_as_read(request):
"""
Mark all notificationlogs of a user as read.
"""
notifications = NotificationLog.objects.filter(user=request.user.backend_user)
count = 0
for n in notifications:
if not n.read:
n.read = True
n.save()
count += 1
return Response({"detail": "{} NotificationLog objects marked as read.".format(count), "count": count})
@api_view(('GET',))
def notification_types(request):
"""
Notification types.
"""
return Response(Notification.NOTIFICATION_TYPES)
| coco-project/coco | coco/api/views.py | Python | bsd-3-clause | 38,697 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-19 15:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import hindustani.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('data', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('gender', models.CharField(blank=True, choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1, null=True)),
('begin', models.CharField(blank=True, max_length=10, null=True)),
('end', models.CharField(blank=True, max_length=10, null=True)),
('artist_type', models.CharField(choices=[(b'P', b'Person'), (b'G', b'Group')], default=b'P', max_length=1)),
('dummy', models.BooleanField(db_index=True, default=False)),
('description_edited', models.BooleanField(default=False)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('group_members', models.ManyToManyField(blank=True, related_name='groups', to='hindustani.Artist')),
('images', models.ManyToManyField(related_name='hindustani_artist_image_set', to='data.Image')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ArtistAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=100)),
('primary', models.BooleanField(default=False)),
('locale', models.CharField(blank=True, max_length=10, null=True)),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Artist')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Composer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('gender', models.CharField(blank=True, choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1, null=True)),
('begin', models.CharField(blank=True, max_length=10, null=True)),
('end', models.CharField(blank=True, max_length=10, null=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_composer_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_composer_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_composer_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ComposerAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=100)),
('primary', models.BooleanField(default=False)),
('locale', models.CharField(blank=True, max_length=10, null=True)),
('composer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Composer')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Form',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_form_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_form_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_form_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FormAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Form')),
],
),
migrations.CreateModel(
name='Instrument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('percussion', models.BooleanField(default=False)),
('name', models.CharField(max_length=50)),
('mbid', models.UUIDField(blank=True, null=True)),
('hidden', models.BooleanField(default=False)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_instrument_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_instrument_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_instrument_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='InstrumentPerformance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lead', models.BooleanField(default=False)),
('attributes', models.CharField(blank=True, max_length=200, null=True)),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Artist')),
('instrument', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Instrument')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Laya',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_laya_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_laya_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_laya_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LayaAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('laya', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Laya')),
],
),
migrations.CreateModel(
name='Lyrics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lyrics', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Raag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_raag_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_raag_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_raag_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='RaagAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('raag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Raag')),
],
),
migrations.CreateModel(
name='Recording',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('length', models.IntegerField(blank=True, null=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='RecordingForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Form')),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingLaya',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('laya', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Laya')),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingRaag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('raag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Raag')),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingTaal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='Release',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mbid', models.UUIDField(blank=True, null=True)),
('title', models.CharField(max_length=100)),
('artistcredit', models.CharField(max_length=255)),
('year', models.IntegerField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=100, null=True)),
('rel_type', models.CharField(blank=True, max_length=100, null=True)),
('artists', models.ManyToManyField(related_name='primary_concerts', to='hindustani.Artist')),
('collection', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_releases', to='data.Collection')),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_release_image_set', to='data.Image')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ReleaseRecording',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('track', models.IntegerField()),
('disc', models.IntegerField()),
('disctrack', models.IntegerField()),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
('release', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Release')),
],
options={
'ordering': ('track',),
},
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='SectionAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Section')),
],
),
migrations.CreateModel(
name='Taal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('num_maatras', models.IntegerField(null=True)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_taal_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_taal_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_taal_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TaalAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('taal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Taal')),
],
),
migrations.CreateModel(
name='Work',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('mbid', models.UUIDField(blank=True, null=True)),
('composers', models.ManyToManyField(blank=True, related_name='works', to='hindustani.Composer')),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_work_image_set', to='data.Image')),
('lyricists', models.ManyToManyField(blank=True, related_name='lyric_works', to='hindustani.Composer')),
('lyrics', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Lyrics')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_work_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_work_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='WorkTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('time', models.IntegerField(blank=True, null=True)),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
('work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Work')),
],
),
migrations.AddField(
model_name='release',
name='recordings',
field=models.ManyToManyField(through='hindustani.ReleaseRecording', to='hindustani.Recording'),
),
migrations.AddField(
model_name='release',
name='references',
field=models.ManyToManyField(blank=True, related_name='hindustani_release_reference_set', to='data.Source'),
),
migrations.AddField(
model_name='release',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_release_source_set', to='data.Source'),
),
migrations.AddField(
model_name='recordingtaal',
name='taal',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Taal'),
),
migrations.AddField(
model_name='recordingsection',
name='section',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Section'),
),
migrations.AddField(
model_name='recording',
name='forms',
field=models.ManyToManyField(through='hindustani.RecordingForm', to='hindustani.Form'),
),
migrations.AddField(
model_name='recording',
name='images',
field=models.ManyToManyField(related_name='hindustani_recording_image_set', to='data.Image'),
),
migrations.AddField(
model_name='recording',
name='layas',
field=models.ManyToManyField(through='hindustani.RecordingLaya', to='hindustani.Laya'),
),
migrations.AddField(
model_name='recording',
name='performance',
field=models.ManyToManyField(through='hindustani.InstrumentPerformance', to='hindustani.Artist'),
),
migrations.AddField(
model_name='recording',
name='raags',
field=models.ManyToManyField(through='hindustani.RecordingRaag', to='hindustani.Raag'),
),
migrations.AddField(
model_name='recording',
name='references',
field=models.ManyToManyField(blank=True, related_name='hindustani_recording_reference_set', to='data.Source'),
),
migrations.AddField(
model_name='recording',
name='sections',
field=models.ManyToManyField(through='hindustani.RecordingSection', to='hindustani.Section'),
),
migrations.AddField(
model_name='recording',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_recording_source_set', to='data.Source'),
),
migrations.AddField(
model_name='recording',
name='taals',
field=models.ManyToManyField(through='hindustani.RecordingTaal', to='hindustani.Taal'),
),
migrations.AddField(
model_name='recording',
name='works',
field=models.ManyToManyField(through='hindustani.WorkTime', to='hindustani.Work'),
),
migrations.AddField(
model_name='instrumentperformance',
name='recording',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording'),
),
migrations.AddField(
model_name='artist',
name='main_instrument',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Instrument'),
),
migrations.AddField(
model_name='artist',
name='references',
field=models.ManyToManyField(blank=True, related_name='hindustani_artist_reference_set', to='data.Source'),
),
migrations.AddField(
model_name='artist',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_artist_source_set', to='data.Source'),
),
]
| MTG/dunya | hindustani/migrations/0001_initial.py | Python | agpl-3.0 | 25,073 |
import sys
f = open(sys.argv[1],'r')  # open the file passed as a command-line argument
#reference:http://stackoverflow.com/questions/16233593/how-to-strip-comma-in-python-string
#remove the trailing newline and period, then drop commas (rstrip cannot remove interior commas).
line = f.readline().rstrip('\n.').replace(',','')
wordList = line.split(' ')
for word in wordList:
print len(word)
f.close()
| iku000888/Wordprocessing-100-Knoks | prob003/python/prob003.py | Python | mit | 336 |
"""
Deletes the Kubernetes cluster associated with this service. Used by the
Google Kubernetes Engine blueprint.
"""
from __future__ import unicode_literals
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials
from containerorchestrators.kuberneteshandler.models import Kubernetes
from infrastructure.models import Environment
def run(job=None, logger=None, resource=None, **kwargs):
# Get cluster information
cluster_name = resource.create_gke_k8s_cluster_name
if not cluster_name:
return "WARNING", "No cluster associated with this resource", ""
env_id = resource.create_gke_k8s_cluster_env
try:
environment = Environment.objects.get(id=env_id)
except Environment.DoesNotExist:
return ("FAILURE",
"The environment used to create this cluster no longer exists",
"")
handler = environment.resource_handler.cast()
project = handler.gcp_projects
zone = environment.gcp_zone
# Get client
credentials = ServiceAccountCredentials.from_json_keyfile_dict({
'client_email': handler.serviceaccount,
'private_key': handler.servicepasswd,
'type': 'service_account',
'client_id': None,
'private_key_id': None,
})
client = build('container', 'v1', credentials=credentials)
cluster_resource = client.projects().zones().clusters()
# Delete cluster
job.set_progress("Deleting cluster {}...".format(cluster_name))
try:
cluster_resource.delete(
projectId=project, zone=zone, clusterId=cluster_name).execute()
except HttpError as error:
if error.resp['status'] == '404':
return ("WARNING",
"Cluster {} was not found. It may have already been "
"deleted.".format(cluster_name),
"")
raise
# In CB 7.6 and before, this will delete any existing Kubernetes Resources.
# Starting in CB 7.7, they will be marked HISTORICAL instead.
kubernetes = Kubernetes.objects.get(id=resource.create_gke_k8s_cluster_id)
kubernetes.delete()
| CloudBoltSoftware/cloudbolt-forge | blueprints/gke_cluster_legacy/delete_gke_cluster_legacy/delete_gke_cluster_legacy.py | Python | apache-2.0 | 2,203 |
import requests
import bs4
# It's "seems" a good idea to use this "enum", for now
class Category(object):
ALL = 0
MOVIE = 1
GAME = 2
ALBUM = 3
TV = 4
PERSON = 5
TRAILER = 6
COMPANY = 7
# Contains info about the query to be made
class Query(object):
# Standard constructor (w/ parameters)
def __init__(self, category, terms):
self.category = category
self.terms = terms
self.base_url = "http://www.metacritic.com/search/"
partial_url = {Category.ALL: self.base_url + "all",
Category.MOVIE: self.base_url + "movie",
Category.GAME: self.base_url + "game",
Category.ALBUM: self.base_url + "album",
Category.TV: self.base_url + "tv",
Category.PERSON: self.base_url + "person",
Category.TRAILER: self.base_url + "trailer",
Category.COMPANY: self.base_url + "company"}[self.category]
self.url = partial_url + "/" + terms + "/results"
# Returns the URL of the created query
def get_url(self):
return self.url
# This class represents a generic resource found at Metacritic
class Resource(object):
def __init__(self, name, date, category, metascore, userscore, description):
self.name = name
self.date = date
self.category = category
self.metascore = metascore
self.userscore = userscore
self.description = description
class Game(Resource):
    def __init__(self, name, date, category, metascore, userscore, description, platform):
        # super is the class object, not a bound call; use super(Game, self) so Resource.__init__ runs
        super(Game, self).__init__(name, date, category, metascore, userscore, description)
self.platform = platform
class Response(object):
def __init__(self, status, content):
self.status = status
self.content = content
def valid(self):
return (self.status == 200)
class Browser(object):
def get(self, url):
request = requests.get(url)
response = Response(request.status_code, request.content)
return response
class Scraper(object):
def __init__(self):
self.browser = Browser()
self.response = ""
self.soup = ""
def get(self, url):
self.response = self.browser.get(url)
self.soup = bs4.BeautifulSoup(self.response.content)
return self.extract_data()
def extract_data(self):
name = self._extract_name()
date = self._extract_date()
category = self._extract_category()
metascore = self._extract_metascore()
userscore = self._extract_userscore()
description = self._extract_description()
resource = Resource(name, date, category, metascore, userscore, description)
return resource
def _extract_name(self):
titles = self.soup.select(".product_title")
title = titles[0].text
info = title.split("\n")
name = info[1].strip()
return name
def _extract_date(self):
dates = self.soup.select(".release_data")
date = dates[0].select(".data")[0].text.strip()
return date
def _extract_category(self):
# TODO
return Category.GAME
def _extract_metascore(self):
section = self.soup.select(".metascore_wrap")[0]
score = section.select(".score_value")[0].text.strip()
return int(score)
def _extract_userscore(self):
section = self.soup.select(".userscore_wrap")[0]
score = section.select(".score_value")[0].text.strip()
return float(score)
def _extract_description(self):
section = self.soup.select(".product_summary")[0].select(".data")[0]
collapsed = section.select(".blurb_collapsed")
description = ""
if (collapsed): # There's a collapse/expand button
expanded = section.select(".blurb_expanded")
description = unicode(collapsed[0].text + expanded[0].text).strip()
else:
description = unicode(section.text.strip())
return unicode(description)
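# Illustrative usage sketch (hypothetical URL; Scraper.get parses a Metacritic product page,
# whereas Query.get_url builds a search-results URL):
#   scraper = Scraper()
#   game = scraper.get("http://www.metacritic.com/game/pc/some-game")
#   print(game.name, game.date, game.metascore, game.userscore)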
| iconpin/pycritic | pycritic/pycritic.py | Python | mit | 4,097 |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Inspect a TFRecord file of tensorflow.Example and show tokenizations.
python data_generators/inspect_tfrecord.py \
--logtostderr \
--print_targets \
--subword_text_encoder_filename=$DATA_DIR/vocab.endefr.8192 \
--input_filename=$DATA_DIR/wmt_ende_tokens_8k-train-00000-of-00100
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensor2tensor.data_generators import text_encoder
import tensorflow.compat.v1 as tf
tf.flags.DEFINE_string("subword_text_encoder_filename", "",
"SubwordTextEncoder vocabulary file")
tf.flags.DEFINE_string("token_text_encoder_filename", "",
"TokenTextEncoder vocabulary file")
tf.flags.DEFINE_bool("byte_text_encoder", False, "use a ByteTextEncoder")
tf.flags.DEFINE_string("input_filename", "", "input filename")
tf.flags.DEFINE_bool("print_inputs", False, "Print decoded inputs to stdout")
tf.flags.DEFINE_bool("print_targets", False, "Print decoded targets to stdout")
tf.flags.DEFINE_bool("print_all", False, "Print all fields")
FLAGS = tf.flags.FLAGS
def main(_):
"""Convert a file to examples."""
if FLAGS.subword_text_encoder_filename:
encoder = text_encoder.SubwordTextEncoder(
FLAGS.subword_text_encoder_filename)
elif FLAGS.token_text_encoder_filename:
encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename)
elif FLAGS.byte_text_encoder:
encoder = text_encoder.ByteTextEncoder()
else:
encoder = None
reader = tf.python_io.tf_record_iterator(FLAGS.input_filename)
total_sequences = 0
total_input_tokens = 0
total_target_tokens = 0
nonpadding_input_tokens = 0
nonpadding_target_tokens = 0
max_input_length = 0
max_target_length = 0
for record in reader:
x = tf.train.Example()
x.ParseFromString(record)
inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value]
targets = [int(i) for i in x.features.feature["targets"].int64_list.value]
if FLAGS.print_inputs:
print("INPUTS:\n" + encoder.decode(inputs) if encoder else inputs)
if FLAGS.print_targets:
print("TARGETS:\n" + encoder.decode(targets) if encoder else targets)
nonpadding_input_tokens += len(inputs) - inputs.count(0)
nonpadding_target_tokens += len(targets) - targets.count(0)
total_input_tokens += len(inputs)
total_target_tokens += len(targets)
total_sequences += 1
max_input_length = max(max_input_length, len(inputs))
max_target_length = max(max_target_length, len(targets))
if FLAGS.print_all:
for k, v in six.iteritems(x.features.feature):
print("%s: %s" % (k, v.int64_list.value))
print("total_sequences: %d" % total_sequences)
print("total_input_tokens: %d" % total_input_tokens)
print("total_target_tokens: %d" % total_target_tokens)
print("nonpadding_input_tokens: %d" % nonpadding_input_tokens)
print("nonpadding_target_tokens: %d" % nonpadding_target_tokens)
print("max_input_length: %d" % max_input_length)
print("max_target_length: %d" % max_target_length)
if __name__ == "__main__":
tf.app.run()
| tensorflow/tensor2tensor | tensor2tensor/data_generators/inspect_tfrecord.py | Python | apache-2.0 | 3,754 |
#droids.py
import time
import random
from random import randint
print('\n' * 100)
print('\n\n[-Droids : by jay : type help-]')
print('\n\n\n------------------------')
print(' DROIDS')
print('------------------------')
print('\n10.03.2245 : cytek inc.')
time.sleep(1)
print('\nCTRL866 Re-Boot Sequence....')
time.sleep(1)
print('\nInitializing Control Droid 866....')
time.sleep(1)
print('....')
time.sleep(1)
print('....')
time.sleep(1)
print('Laser offline...')
time.sleep(1)
print('Motion Tracker offline...')
time.sleep(1)
print('\nService Port available...')
time.sleep(1)
print('....')
time.sleep(1)
print('....')
print('\nControl Droid Active.')
time.sleep(1)
print('''\n\nYou are the 866 Control Droid aboard
the Droid Shuttle 'KERNEL'. Enemy droids have boarded
and have taken over the flight path. You are damaged & have been
re-initialized but your laser and motion tracker are offline.''')
def start(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
print('\n[-MAIN ELEVATOR-]')
print('\n1.) deck 1 - Security')
print('2.) deck 2 - Maintenance')
print('3.) deck 3 - Cargo Hold - Airlock')
print('4.) deck 4 - Droid Hangar')
print('5.) deck 5 - Shuttle Control')
print('6.) deck 6 - Observation\n')
cmdlist = ['1', '2', '3', '4', '5', '6',]
cmd = getcmd(cmdlist)
if cmd == '1':
security(inventory)
elif cmd == '2':
if 'droid hack' in inventory:
print('\n- DECK 2 - MAINTENANCE LOCKED -')
time.sleep(2)
start(inventory)
else:
maintenance(inventory)
elif cmd == '3':
cargo_hold(inventory)
elif cmd == '4':
if 'laser' in inventory:
print('\n- DECK 4 - DROID HANGAR LOCKED -')
time.sleep(2)
start(inventory)
else:
droid_hangar(inventory)
elif cmd == '5':
shuttle_control(inventory)
elif cmd == '6':
if 'motion tracker' in inventory:
print('\n- DECK 6 - OBSERVATION LOCKED -')
time.sleep(2)
start(inventory)
else:
observation(inventory)
def maintenance(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
print('''\nThis is the maintenance deck and it appears deserted.
You can see a terminated crew droid; it has sustained
severe laser fire.''')
print('\n[-MAINTENANCE-]\n')
print('1.) 716 Crew Droid')
print('2.) Return to Main Elevator\n')
cmdlist = ['1', '2']
cmd = getcmd(cmdlist)
if cmd == '1':
crew_droid(inventory)
elif cmd == '2':
start(inventory)
def crew_droid(inventory, items=['droid hack']):
print('\n----------')
    print('''\n716 has a droid hack program and its connection
outlet is still intact. You can connect to this droid with service
port and download the program.''')
if len(items) > 0:
for item in items:
print('\n--> %s' % (item))
print('\n\n1.) Exit.')
cmdlist = ['service port', '1']
cmd = getcmd(cmdlist)
if cmd == 'service port':
inventory.append('droid hack')
items = ['droid hack']
print('\nservice port connected.')
time.sleep(1)
print('accessing file..')
time.sleep(1)
print('downloading..')
time.sleep(1)
print('....')
time.sleep(1)
print('\ndownload complete.')
print('\nYou have the droid hack program and return')
print('to the Main Elevator.')
time.sleep(2)
start(inventory)
elif cmd == '1':
maintenance(inventory)
else:
print('\n error. invalid command-')
def cargo_hold(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
print('''\nYou enter the Cargo Hold, two Enemy Combat droids
unload a barrage of laser fire at you. Their fire is very accurate
and you take a direct hit in your main CPU.''')
print('\n[-CARGO HOLD - AIRLOCK-]')
print('....')
time.sleep(1)
print('....')
time.sleep(1)
print('\nshutdown imminent...')
time.sleep(1)
print('CTRL866 offline.')
time.sleep(1)
print('Droid terminated.')
print('\nGAME OVER\n')
exit(0)
def droid_hangar(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
    print('''\nThe Droid Hangar is filled with debris. There
is laser scoring everywhere and all droids are terminated.
In the corner there is one inactive repair droid still in his security
cylinder. You can initialize the droid to repair your laser but you will
require a 3 digit access code.\n''')
print('[-DROID HANGAR-]')
print('\n1.) Repair Droid 3 digit code')
print('2.) Return to Main Elevator')
cmdlist = ['1', '2']
cmd = getcmd(cmdlist)
if cmd == '1':
access_code(inventory)
elif cmd == '2':
start(inventory)
def access_code(inventory):
code = '%d%d%d' % (randint(0,9), randint(0,9), randint(0,9))
guess = input('\n[KEYPAD]> ')
guesses = 0
while guess != code and guess != 'yu8xxj3' and guesses <4:
print('\n* ACCESS - DENIED *')
guesses += 1
guess = input('\n[KEYPAD]> ')
if guess == code or guess == 'yu8xxj3':
repair_droid(inventory)
else:
print('\n....')
time.sleep(1)
print('\n....')
time.sleep(1)
print('\nKEYPAD - LOCKED')
time.sleep(1)
print('\ncode randomizing..')
time.sleep(1)
print('\nKEYPAD - OPEN')
time.sleep(1)
droid_hangar(inventory)
def repair_droid(inventory, items=['laser']):
print('\n\n----------')
print('\nREP323 boot sequence....')
time.sleep(1)
    print('Initializing Repair Droid 323....')
time.sleep(1)
print('....')
time.sleep(1)
print('....')
time.sleep(1)
print('Repair Droid Active.')
time.sleep(1)
print('''\nThe Repair droid is now active. You MUST connect to
this droid with service port to repair laser.''')
if len(items) > 0:
for item in items:
print('\n--> %s' % (item))
cmdlist = ['service port']
cmd = getcmd(cmdlist)
if cmd == 'service port':
inventory.append('laser')
items = ['laser']
print('\nservice port connected.')
time.sleep(1)
print('Repairing Laser...')
time.sleep(1)
print('Auto alignment...')
time.sleep(1)
print('....')
time.sleep(1)
print('\nLASER ONLINE.')
print('''\nYour laser is now online. You de-activate the Repair
Droid and return to the Main Elevator.''')
time.sleep(2)
start(inventory)
else:
print('\n error. invalid command-')
def security(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
print('''\nYou are on the Security Deck. This is where all
surveillance aboard the shuttle is done. Sentry droid 343 has been
terminated. You MUST access the Sentry droid's logs but you
will have to hack the data recorder.\n''')
print('[-SECURITY-]\n')
print('1.) View Surveillance monitors on other decks')
print('2.) Hack Sentry droid 343')
print('3.) Return to main elevator')
cmdlist =['1', '2', '3']
cmd = getcmd(cmdlist)
if cmd == '1':
print('\n----------')
print('\nBooting Monitors....')
time.sleep(1)
print('....')
time.sleep(1)
print('...')
time.sleep(1)
print('Monitors active.')
time.sleep(1)
print('\n[-SURVEILLANCE FEED-]')
        print('''\n-The Hangar monitor is offline; you have no live feed.
\n-In the Cargo hold there are two Enemy Combat droids patrolling.
\n-The Maintenance deck looks clear except for a few terminated droids.
\n-An Elite Enemy Command droid is posted on the Shuttle Control deck.
\n-Observation shows an Enemy Sentry droid.''')
time.sleep(2)
security(inventory)
elif cmd == '2':
if 'droid hack' in inventory:
print('\nloading droid hack....')
time.sleep(2)
print('....')
time.sleep(2)
print('10000101010101010101010' * 1000)
time.sleep(1)
print('....')
time.sleep(1)
print('Accessing encrypted files...')
time.sleep(2)
print('Decrypting....')
time.sleep(2)
print('\n\n[-SEN343 LOG-]')
time.sleep(1)
print('\n\nDAILY OVER-RIDE CODES- HANGAR DROIDS')
time.sleep(1)
print('\n\n-Combat Droids - szb41ee')
time.sleep(1)
print('\n\n-Sentry Droids - qr66mop')
time.sleep(1)
print('\n\n-Repair Droids - yu8xxj3')
time.sleep(1)
print('\n\nCODES WILL BE RESET EVERY 24 HOURS')
security(inventory)
else:
print('\n- ACCESS TO DATA RECORDER DENIED -')
time.sleep(2)
security(inventory)
elif cmd == '3':
start(inventory)
def observation(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
print('''\nYou enter the Observation deck and are confronted
with an Enemy Sentry droid beside a disabled crew droid.
His laser is almost charged and will be active in seconds.\n''')
print('[-OBSERVATION-]\n')
print('1.) Terminate Sentry Droid')
print('2.) Retreat to Main Elevator.')
cmdlist = ['1', '2']
cmd = getcmd(cmdlist)
if cmd == '1':
if 'laser' in inventory:
print('\nlaser active...')
time.sleep(1)
print('target locked...')
time.sleep(1)
print('...')
time.sleep(1)
print('\nTARGET TERMINATED\n')
enemy_sentry(inventory)
else:
print('\n- WARNING LASER OFFLINE -')
time.sleep(2)
            print('''\nThe Sentry Droid's laser is now active and has you locked
on. You try to initiate self-destruct but it's too late..''')
print('....')
time.sleep(1)
print('....')
time.sleep(1)
print('\nshutdown imminent...')
time.sleep(1)
print('CTRL866 offline.')
time.sleep(1)
print('Droid terminated.')
print('\nGAME OVER\n')
exit(0)
elif cmd == '2':
        print('''\nThe Sentry droid's laser is now active and has you locked
on. You try to retreat back to the elevator but it's too late..''')
print('....')
time.sleep(1)
print('....')
time.sleep(1)
print('\nshutdown imminent...')
time.sleep(1)
print('CTRL866 offline.')
time.sleep(1)
print('Droid terminated.')
print('\nGAME OVER\n')
exit(0)
def enemy_sentry(inventory, items=['motion tracker']):
print('\n----------')
time.sleep(1)
print('''\nThe Enemy Sentry droid has been terminated.
Judging by the model you know he has a
motion tracker repair program installed. You MUST connect
to this droid with service port and download the program.''')
if len(items) > 0:
for item in items:
print('\n--> %s' % (item))
cmdlist = ['service port']
cmd = getcmd(cmdlist)
if cmd == 'service port':
inventory.append('motion tracker')
items = ['motion tracker']
print('\nservice port connected.')
time.sleep(1)
print('accessing file..')
time.sleep(1)
print('downloading..')
time.sleep(1)
print('....')
time.sleep(1)
print('Repairing Motion Tracker...')
time.sleep(1)
print('Auto alignment...')
time.sleep(1)
print('....')
time.sleep(2)
print('\nMOTION TRACKER ONLINE.')
time.sleep(2)
print('''\nYour Motion Tracker is now online.
You return to the main elevator''')
start(inventory)
def shuttle_control(inventory):
print('\n----------')
print('\nDroid mobile..')
time.sleep(1)
print('....')
time.sleep(1)
print('''\nYou enter Shuttle Control where all navigation takes place.
A 999 Elite Enemy Command droid is posted here.
This Droid is extremely powerful.''')
print('\n[-SHUTTLE CONTROL-]')
print('\n1.) Terminate the 999 Elite Enemy Command Droid')
print('2.) Retreat to Main Elevator')
cmdlist = ['1', '2']
cmd = getcmd(cmdlist)
if cmd == '1':
if 'laser' in inventory and 'motion tracker' in inventory and 'droid hack' in inventory:
print('\n....')
time.sleep(1)
print('\n....')
command_droid(inventory)
else:
time.sleep(1)
print('\nEECD999:>')
print('\n100101010101010101010101010101010' * 10)
time.sleep(1)
print('''\nThe Elite Enemy Command droid laughs in machine language
at your pathetic attempt. The last thing your data recorder gets is the
deafening sound of a Target Lock.''')
print('....')
time.sleep(1)
print('....')
time.sleep(1)
print('\nshutdown imminent...')
time.sleep(1)
print('CTRL866 offline.')
time.sleep(1)
print('Droid terminated.')
print('\nGAME OVER\n')
exit(0)
elif cmd == '2':
start(inventory)
def command_droid(inventory):
print('\nRunning droid hack...')
time.sleep(1)
print('\njamming EECD999 Target Lock...')
time.sleep(1)
print('\n......')
time.sleep(1)
print('\nMotion Tracker active...')
time.sleep(1)
print('\nTrack motion of EECD999...')
time.sleep(1)
print('\n......')
time.sleep(1)
print('\nLaser active...')
time.sleep(1)
print('\nTargeting EECD999...')
time.sleep(1)
print('\nTarget Locked...')
time.sleep(1)
print('\n......')
time.sleep(2)
print('\n\nTARGET TERMINATED \n')
time.sleep(2)
print('''\n\nYou have defeated the EECD999 droid and taken back control
of the 'KERNEL'. The flight path has been restored and
a distress signal sent to Droid Command. Reinforcements are inbound.
\n - GAME OVER -\n''')
def getcmd(cmdlist):
cmd = input('\nCTRL866:> ')
if cmd in cmdlist:
return cmd
elif cmd == 'help':
print('\nTYPE: inventory to view items')
print('or quit to self-destruct')
return getcmd(cmdlist)
elif cmd == 'inventory':
print('\ninventory contains:\n')
for item in inventory:
print('-- %s' % (item))
return getcmd(cmdlist)
elif cmd == 'secret':
print('\n........')
time.sleep(1)
print('\n[--Paradroid -- published by Hewson 1985--]')
time.sleep(1)
print('\n[--written by Andrew Braybrook for Commodore 64 computer--]')
time.sleep(1)
print('\n[--play this game or die--]')
time.sleep(1)
print('\n........\n')
return getcmd(cmdlist)
elif cmd == 'quit':
print('\n----------')
time.sleep(1)
print('\nself-destruct sequence initiated...')
time.sleep(1)
print('shutdown imminent...')
time.sleep(1)
print('\nCTRL866 offline.')
time.sleep(1)
print('Droid terminated.\n')
exit(0)
else:
print('\n error. invalid command-\n')
return getcmd(cmdlist)
if __name__ == "__main__":
inventory = ['service port']
start(inventory)
| ActiveState/code | recipes/Python/578618_Droids__a_text_adventure_/recipe-578618.py | Python | mit | 13,762 |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import webob
from nova import test
from nova.tests.api.openstack import fakes
from nova.compute import instance_types
def fake_get_instance_type_by_flavor_id(flavorid):
return {
'id': flavorid,
'flavorid': str(flavorid),
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'memory_mb': 512,
'vcpus': 1,
'swap': 512,
'rxtx_factor': 1.0,
'extra_specs': {},
'deleted_at': None,
'vcpu_weight': None
}
def fake_get_all_types(inactive=0, filters=None):
return {
'fake1': fake_get_instance_type_by_flavor_id(1),
'fake2': fake_get_instance_type_by_flavor_id(2)
}
class FlavorextradataTest(test.TestCase):
def setUp(self):
super(FlavorextradataTest, self).setUp()
self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id',
fake_get_instance_type_by_flavor_id)
self.stubs.Set(instance_types, 'get_all_types', fake_get_all_types)
def _verify_server_response(self, flavor, expected):
for key in expected:
self.assertEquals(flavor[key], expected[key])
def test_show(self):
expected = {
'flavor': {
'id': '1',
'name': 'test',
'ram': 512,
'vcpus': 1,
'disk': 1,
'OS-FLV-EXT-DATA:ephemeral': 1,
'swap': 512,
'rxtx_factor': 1,
}
}
url = '/v2/fake/flavors/1'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
body = json.loads(res.body)
self._verify_server_response(body['flavor'], expected['flavor'])
def test_detail(self):
expected = [
{
'id': '1',
'name': 'test',
'ram': 512,
'vcpus': 1,
'disk': 1,
'OS-FLV-EXT-DATA:ephemeral': 1,
'swap': 512,
'rxtx_factor': 1,
},
{
'id': '2',
'name': 'test',
'ram': 512,
'vcpus': 1,
'disk': 1,
'OS-FLV-EXT-DATA:ephemeral': 1,
'swap': 512,
'rxtx_factor': 1,
},
]
url = '/v2/fake/flavors/detail'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
body = json.loads(res.body)
for i, flavor in enumerate(body['flavors']):
self._verify_server_response(flavor, expected[i])
| usc-isi/essex-baremetal-support | nova/tests/api/openstack/compute/contrib/test_flavorextradata.py | Python | apache-2.0 | 3,547 |
import sys
import os
from ctypes import windll
user32 = windll.user32
SPI_SETFONTSMOOTHING = 0x004B # dec 75
SPI_SETFONTSMOOTHINGTYPE = 0x200B # dec 8203
SPIF_UPDATEINIFILE = 0x1
SPIF_SENDCHANGE = 0x2
FE_FONTSMOOTHINGCLEARTYPE = 0x2
is_font_smoothing_enabled = 1
if len(sys.argv) > 1:
if sys.argv[1].lower() not in ['1', 'true', 'on', 'enable']:
is_font_smoothing_enabled = 0
user32.SystemParametersInfoA(SPI_SETFONTSMOOTHING, is_font_smoothing_enabled, 0,
SPIF_UPDATEINIFILE | SPIF_SENDCHANGE)
| ActiveState/code | recipes/Python/578500_Enable_Clear_Type_font_smoothing_Windows_ctypes_/recipe-578500.py | Python | mit | 545 |
""" A component that designates a capacitor. """
import sympy as sy
from component import Component
from config import Config
from constraint import Constraint  # NOTE: assumed module path; Constraint is used in constraints() below
class Inductor(Component):
""" Inductor component """
def __init__(self, graph, inductance, node_a, node_b, edge_i):
""" Initializes a inductance with two nodes. Current goes from
A to B. Supplied nodes / edges should be part of the supplied
graph.
Args:
            graph : Graph object
            inductance : float
node_a : Node object
node_b : Node object
edge_i : Edge object
Returns:
Inductor object
"""
self._node_a = node_a
self._node_b = node_b
self._edge_i = edge_i
self._inductance = inductance
self._i = sy.Symbol("i" + str(self)) # TODO better mangling
def node_a(self):
""" Returns node A.
Returns:
Node object
"""
return self._node_a
def node_b(self):
""" Returns node B.
Returns:
Node object
"""
return self._node_b
def edge_i(self):
""" Returns the edge that stores current from A to B.
Returns:
Edge object
"""
return self._edge_i
def variables(self):
""" Returns a set of variables under constraints.
Returns:
set of Nodes, Edges, tuples, or strings
"""
return set([self._node_a, self._node_b, self._edge_i])
def substitutions(self):
""" Return a dictionary mapping each symbol to a value. Return
an empty dictionary if no substitutions exist
Returns:
dictionary from sympy variable to value
"""
mappings = {}
mappings[self._i] = self._edge_i.value()
return mappings
def constraints(self):
""" Returns a list of constraints that must be solved.
A constraint is a tuple (coefficients, variables), where
coefficients is a list of numbers corresponding to the linear
equation:
A_0 * x_0 + A_1 * x_1 + ... + A_{n-1} * x_{n-1} = 0,
and variables is a list of the Node and Edge objects.
Returns:
List of tuples (coefficients, variables)
"""
# Inductor equation
cs = [float(-1) / self._inductance,
float(1) / self._inductance,
float(1) / Config.time_step]
xs = [self._node_a, self._node_b, self._edge_i]
b = self._i / Config.time_step
constraint = Constraint(cs, xs, b)
return [constraint]
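# Sketch of the discretization behind constraints() above: the inductor relation
#   v_a - v_b = L * di/dt
# is approximated over one Config.time_step dt as v_a - v_b = L * (i_new - i_old) / dt,
# which rearranges to -v_a/L + v_b/L + i_new/dt = i_old/dt, matching
# cs = [-1/L, 1/L, 1/dt] over [node_a, node_b, edge_i] with right-hand side b = i_old/dt.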
| ThatSnail/impede | impede-app/server/py/inductor.py | Python | mit | 2,635 |
import os
import sys
# Add parent directory to path to make test aware of other modules
srcfolder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', "src"))
if srcfolder not in sys.path:
sys.path.append(srcfolder)
| lmotta/Roam | tests/__init__.py | Python | gpl-2.0 | 234 |
# Copyright (c) 2013-2016 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ..exceptions import Error
class InvalidSettingsError(Error):
"""Invalid Settings detected"""
pass
class CannotParseConfigFileError(Error):
pass
| dzoep/khal | khal/settings/exceptions.py | Python | mit | 1,274 |
import logging
import sys
from argparse2tool import load_argparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Arg2GxmlParser:
def __init__(self):
ap = load_argparse() # avoid circular imports
help_text = (
"argparse2tool forms Galaxy XML and CWL tools from Python scripts.\n"
"You are currently using the Galaxy XML invocation which may have different options from the CWL invocation."
)
arg2tool_parser = ap.ArgumentParser(
prog=sys.argv[0], description=help_text,
formatter_class=ap.RawDescriptionHelpFormatter, add_help=False
)
arg2tool_parser.add_argument('--help', help='Show this help message and exit', action='help')
self.parser = arg2tool_parser
def process_arguments(self):
self.parser.add_argument('--generate_galaxy_xml', action='store_true')
self.parser.add_argument('--command', action='store', default="")
return vars(self.parser.parse_args())
| erasche/argparse2tool | argparse2tool/cmdline2gxml/__init__.py | Python | apache-2.0 | 1,040 |
from time import clock
def timer(function):
def wrapper(*args, **kwargs):
start = clock()
print(function(*args, **kwargs))
print("Solution took: %f seconds." % (clock() - start))
return wrapper
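# Diagonal arithmetic used below: for the ring reached at step `level` (2, 4, ..., 1000)
# the four corner values are previous_max + level, + 2*level, + 3*level and + 4*level,
# so each ring contributes 4 * previous_max + 10 * level to the diagonal sum.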
@timer
def find_answer():
total = 1
previous_max = 1
size = 1001
for level in range(2, size, 2):
total += (4 * previous_max) + (10 * level)
previous_max += 4 * level
return total
if __name__ == "__main__":
find_answer()
| Igglyboo/Project-Euler | 1-99/20-29/Problem28.py | Python | unlicense | 495 |
import pytest
import sqlalchemy as sa
from sqlalchemy_utils import dependent_objects, get_referencing_foreign_keys
class TestDependentObjects(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
first_name = sa.Column(sa.Unicode(255))
last_name = sa.Column(sa.Unicode(255))
return User
@pytest.fixture
def Article(self, Base, User):
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
owner_id = sa.Column(
sa.Integer, sa.ForeignKey('user.id', ondelete='SET NULL')
)
author = sa.orm.relationship(User, foreign_keys=[author_id])
owner = sa.orm.relationship(User, foreign_keys=[owner_id])
return Article
@pytest.fixture
def BlogPost(self, Base, User):
class BlogPost(Base):
__tablename__ = 'blog_post'
id = sa.Column(sa.Integer, primary_key=True)
owner_id = sa.Column(
sa.Integer, sa.ForeignKey('user.id', ondelete='CASCADE')
)
owner = sa.orm.relationship(User)
return BlogPost
@pytest.fixture
def init_models(self, User, Article, BlogPost):
pass
def test_returns_all_dependent_objects(self, session, User, Article):
user = User(first_name=u'John')
articles = [
Article(author=user),
Article(),
Article(owner=user),
Article(author=user, owner=user)
]
session.add_all(articles)
session.commit()
deps = list(dependent_objects(user))
assert len(deps) == 3
assert articles[0] in deps
assert articles[2] in deps
assert articles[3] in deps
def test_with_foreign_keys_parameter(
self,
session,
User,
Article,
BlogPost
):
user = User(first_name=u'John')
objects = [
Article(author=user),
Article(),
Article(owner=user),
Article(author=user, owner=user),
BlogPost(owner=user)
]
session.add_all(objects)
session.commit()
deps = list(
dependent_objects(
user,
(
fk for fk in get_referencing_foreign_keys(User)
if fk.ondelete == 'RESTRICT' or fk.ondelete is None
)
).limit(5)
)
assert len(deps) == 2
assert objects[0] in deps
assert objects[3] in deps
class TestDependentObjectsWithColumnAliases(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
first_name = sa.Column(sa.Unicode(255))
last_name = sa.Column(sa.Unicode(255))
return User
@pytest.fixture
def Article(self, Base, User):
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_id = sa.Column(
'_author_id', sa.Integer, sa.ForeignKey('user.id')
)
owner_id = sa.Column(
'_owner_id',
sa.Integer, sa.ForeignKey('user.id', ondelete='SET NULL')
)
author = sa.orm.relationship(User, foreign_keys=[author_id])
owner = sa.orm.relationship(User, foreign_keys=[owner_id])
return Article
@pytest.fixture
def BlogPost(self, Base, User):
class BlogPost(Base):
__tablename__ = 'blog_post'
id = sa.Column(sa.Integer, primary_key=True)
owner_id = sa.Column(
'_owner_id',
sa.Integer, sa.ForeignKey('user.id', ondelete='CASCADE')
)
owner = sa.orm.relationship(User)
return BlogPost
@pytest.fixture
def init_models(self, User, Article, BlogPost):
pass
def test_returns_all_dependent_objects(self, session, User, Article):
user = User(first_name=u'John')
articles = [
Article(author=user),
Article(),
Article(owner=user),
Article(author=user, owner=user)
]
session.add_all(articles)
session.commit()
deps = list(dependent_objects(user))
assert len(deps) == 3
assert articles[0] in deps
assert articles[2] in deps
assert articles[3] in deps
def test_with_foreign_keys_parameter(
self,
session,
User,
Article,
BlogPost
):
user = User(first_name=u'John')
objects = [
Article(author=user),
Article(),
Article(owner=user),
Article(author=user, owner=user),
BlogPost(owner=user)
]
session.add_all(objects)
session.commit()
deps = list(
dependent_objects(
user,
(
fk for fk in get_referencing_foreign_keys(User)
if fk.ondelete == 'RESTRICT' or fk.ondelete is None
)
).limit(5)
)
assert len(deps) == 2
assert objects[0] in deps
assert objects[3] in deps
class TestDependentObjectsWithManyReferences(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
first_name = sa.Column(sa.Unicode(255))
last_name = sa.Column(sa.Unicode(255))
return User
@pytest.fixture
def BlogPost(self, Base, User):
class BlogPost(Base):
__tablename__ = 'blog_post'
id = sa.Column(sa.Integer, primary_key=True)
author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
author = sa.orm.relationship(User)
return BlogPost
@pytest.fixture
def Article(self, Base, User):
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
author = sa.orm.relationship(User)
return Article
@pytest.fixture
def init_models(self, User, BlogPost, Article):
pass
def test_with_many_dependencies(self, session, User, Article, BlogPost):
user = User(first_name=u'John')
objects = [
Article(author=user),
BlogPost(author=user)
]
session.add_all(objects)
session.commit()
deps = list(dependent_objects(user))
assert len(deps) == 2
class TestDependentObjectsWithCompositeKeys(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
first_name = sa.Column(sa.Unicode(255), primary_key=True)
last_name = sa.Column(sa.Unicode(255), primary_key=True)
return User
@pytest.fixture
def Article(self, Base, User):
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_first_name = sa.Column(sa.Unicode(255))
author_last_name = sa.Column(sa.Unicode(255))
__table_args__ = (
sa.ForeignKeyConstraint(
[author_first_name, author_last_name],
[User.first_name, User.last_name]
),
)
author = sa.orm.relationship(User)
return Article
@pytest.fixture
def init_models(self, User, Article):
pass
def test_returns_all_dependent_objects(self, session, User, Article):
user = User(first_name=u'John', last_name=u'Smith')
articles = [
Article(author=user),
Article(),
Article(),
Article(author=user)
]
session.add_all(articles)
session.commit()
deps = list(dependent_objects(user))
assert len(deps) == 2
assert articles[0] in deps
assert articles[3] in deps
class TestDependentObjectsWithSingleTableInheritance(object):
@pytest.fixture
def Category(self, Base):
class Category(Base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
return Category
@pytest.fixture
def TextItem(self, Base, Category):
class TextItem(Base):
__tablename__ = 'text_item'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
category_id = sa.Column(
sa.Integer,
sa.ForeignKey(Category.id)
)
category = sa.orm.relationship(
Category,
backref=sa.orm.backref(
'articles'
)
)
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type,
}
return TextItem
@pytest.fixture
def Article(self, TextItem):
class Article(TextItem):
__mapper_args__ = {
'polymorphic_identity': u'article'
}
return Article
@pytest.fixture
def BlogPost(self, TextItem):
class BlogPost(TextItem):
__mapper_args__ = {
'polymorphic_identity': u'blog_post'
}
return BlogPost
@pytest.fixture
def init_models(self, Category, TextItem, Article, BlogPost):
pass
def test_returns_all_dependent_objects(self, session, Category, Article):
category1 = Category(name=u'Category #1')
category2 = Category(name=u'Category #2')
articles = [
Article(category=category1),
Article(category=category1),
Article(category=category2),
Article(category=category2),
]
session.add_all(articles)
session.commit()
deps = list(dependent_objects(category1))
assert len(deps) == 2
assert articles[0] in deps
assert articles[1] in deps
| konstantinoskostis/sqlalchemy-utils | tests/functions/test_dependent_objects.py | Python | bsd-3-clause | 10,549 |
import gradient
import grid
import pygame
import sys
from cmath import sqrt
import math
grid_width = 200
grid_height = 200
grid_drawing_mulitplier = 2
g = grid.create_led_grid(grid_width,grid_height)
np = grid.neoPixelRing(8,0)
np2 = grid.neoPixelRing(8,1)
np3 = grid.neoPixelRing(8,2)
g.insertPixelRing(np,7,2)
g.insertPixelRing(np2,2,8)
g.insertPixelRing(np3,11,8)
center = g.getCenter()
outGradient = gradient.linear_gradient("#11aaff","#222222",10)
outGradient2 = gradient.polylinear_gradient(["#000000","#ff0000","#11aaff","#000000"],10)
pygame.init()
background_colour = (255,255,255)
(width, height) = (grid_width * grid_drawing_mulitplier, grid_height * grid_drawing_mulitplier)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Tutorial 1')
screen.fill(background_colour)
pygame.display.flip()
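# plotCircle rasterizes a circle outline with the integer midpoint circle algorithm,
# plotting the eight octant-symmetric points on each iteration.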
def plotCircle(x0,y0,radius):
x = radius
y = 0
err = 0
width = 1
height = 1
while x >= y:
pygame.draw.rect(screen,(111,0,0),(x0 + x , y0 + y, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 + y, y0 + x, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 - y, y0 + x, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 - x, y0 + y, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 - x, y0 - y, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 - y, y0 - x, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 + y, y0 - x, width, height))
pygame.draw.rect(screen,(111,0,0),(x0 + x, y0 - y, width, height))
pygame.display.flip()
y +=1
if err <= 0:
err += 2 * y + 1
if err > 0:
x -= 1
err -= 2 * x + 1
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# screen.fill(background_colour)
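        # Map each cell's distance from the grid centre onto a step of outGradient2
        # (clamped to the last entry) and draw it as a scaled filled rectangle.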
for x in range(grid_width):
for y in range(grid_height):
d1 = math.sqrt((x - grid_width/2) ** 2 + (y - grid_height/2) ** 2)
d1 = int(math.floor(float(d1) / (math.sqrt(2) * grid_width/2) * 10))
# print(d1)
if d1 > len(outGradient2['r'])-1:
d1 = len(outGradient2['r'])-1
pygame.draw.rect(screen, (outGradient2['r'][d1], outGradient2['g'][d1], outGradient2['b'][d1]), (x * grid_drawing_mulitplier, y * grid_drawing_mulitplier, grid_drawing_mulitplier, grid_drawing_mulitplier))
pygame.display.flip()
| theceremony/pyramids-installation | python/test.py | Python | mit | 2,570 |
"""
bamboo.globals
~~~~~~~~~~~~~
"""
from peak.util.proxies import CallbackProxy
from bamboo.context import context
db = CallbackProxy(lambda: context["db"])
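# CallbackProxy re-evaluates the callback on each use, so `db` always resolves to the
# "db" object bound in the currently active bamboo context.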
| hahnicity/bamboo | bamboo/globals.py | Python | mit | 159 |
# -*- coding: utf-8 -*-
## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2016, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
import sys
import threading
from select import epoll
from select import EPOLLIN, EPOLLOUT, EPOLLPRI, EPOLLERR, EPOLLHUP, EPOLLET, EPOLLONESHOT, EPOLLRDNORM, EPOLLRDBAND, EPOLLWRNORM, EPOLLWRBAND, EPOLLMSG
from socket import error as socket_error
from time import time
from errno import EBADF, EINTR, ENOENT
from weightless.core import local
from os import pipe, close, write, read
from inspect import getsourcelines, getsourcefile
from sys import stderr
from traceback import print_exc
class Reactor(object):
"""This Reactor allows applications to be notified of read, write or time events. The callbacks being executed can contain instructions to modify the reader, writers and timers in the reactor. Additions of new events are effective with the next step() call, removals are effective immediately, even if the actual event was already trigger, but the handler wat not called yet."""
MAXPRIO = 10
DEFAULTPRIO = 0
def __init__(self):
self._epoll = epoll()
self._fds = {}
self._badFdsLastCallback = []
self._suspended = {}
self._running = {}
self._timers = []
self._prio = -1
self._epoll_ctrl_read, self._epoll_ctrl_write = pipe()
self._epoll.register(fd=self._epoll_ctrl_read, eventmask=EPOLLIN)
# per (part-of) step relevent state
self.currentcontext = None
self.currenthandle = None
self._removeFdsInCurrentStep = set()
self._listening = threading.Lock()
self._loop = True
def addReader(self, sok, sink, prio=None):
"""Adds a socket and calls sink() when the socket becomes readable."""
self._addFD(fileOrFd=sok, callback=sink, intent=READ_INTENT, prio=prio)
def addWriter(self, sok, source, prio=None):
"""Adds a socket and calls source() whenever the socket is writable."""
self._addFD(fileOrFd=sok, callback=source, intent=WRITE_INTENT, prio=prio)
def addProcess(self, process, prio=None):
"""Adds a process and calls it repeatedly."""
if process in self._suspended:
raise ValueError('Process is suspended')
if process in self._running:
raise ValueError('Process is already in processes')
self._running[process] = _ProcessContext(process, prio)
self._wake_up()
def addTimer(self, seconds, callback):
"""Add a timer that calls callback() after the specified number of seconds. Afterwards, the timer is deleted. It returns a token for removeTimer()."""
timer = Timer(seconds, callback)
self._timers.append(timer)
self._timers.sort(key=_timeKey)
self._wake_up()
return timer
def removeReader(self, sok):
self._removeFD(fileOrFd=sok)
def removeWriter(self, sok):
self._removeFD(fileOrFd=sok)
def removeProcess(self, process=None):
if process is None:
process = self.currentcontext.callback
if process in self._running:
del self._running[process]
return True
def removeTimer(self, token):
self._timers.remove(token)
def cleanup(self, sok):
# Only use for Reader/Writer's!
try:
fd = _fdNormalize(sok)
except _HandleEBADFError:
self._cleanFdsByFileObj(sok)
self._cleanSuspendedByFileObj(sok)
else:
self._fds.pop(fd, None)
self._suspended.pop(fd, None)
self._epollUnregisterSafe(fd=fd)
def suspend(self):
if self.currenthandle is None:
raise RuntimeError('suspend called from a timer or when running a last-call callback for a bad file-descriptor.')
if self.currenthandle in self._fds:
self._removeFD(fileOrFd=self.currenthandle)
elif self.removeProcess(self.currenthandle):
pass
else:
raise RuntimeError('Current context not found!')
self._suspended[self.currenthandle] = self.currentcontext
return self.currenthandle
def resumeReader(self, handle):
context = self._suspended.pop(handle)
self._addFD(fileOrFd=context.fileOrFd, callback=context.callback, intent=READ_INTENT, prio=context.prio, fdContext=context)
def resumeWriter(self, handle):
context = self._suspended.pop(handle)
self._addFD(fileOrFd=context.fileOrFd, callback=context.callback, intent=WRITE_INTENT, prio=context.prio, fdContext=context)
def resumeProcess(self, handle):
self._running[handle] = self._suspended.pop(handle)
self._wake_up()
def shutdown(self):
# Will be called exactly once; in testing situations 1..n times.
for contextDict, info in [
(self._fds, 'active'),
(self._suspended, 'suspended')
]:
for handle, context in list(contextDict.items()):
contextDict.pop(handle)
obj = context.fileOrFd if hasattr(context, 'fileOrFd') else context.callback
if hasattr(obj, 'close'):
print(_shutdownMessage(message='closing - %s' % info, thing=obj, context=context))
_closeAndIgnoreFdErrors(obj)
else:
print(_shutdownMessage(message='terminating - %s' % info, thing=handle, context=context))
for handle, context in list(self._running.items()):
self._running.pop(handle)
if hasattr(handle, 'close'):
print(_shutdownMessage(message='closing - active', thing=handle, context=context))
_closeAndIgnoreFdErrors(handle)
else:
print(_shutdownMessage(message='terminating - active', thing=handle, context=context))
del self._badFdsLastCallback[:]
self._close_epoll_ctrl()
_closeAndIgnoreFdErrors(self._epoll)
def request_shutdown(self):
self._loop = False
def loop(self):
try:
while self._loop:
self.step()
finally:
self.shutdown()
def step(self):
__reactor__ = self
if self._badFdsLastCallback:
self._lastCallbacks()
return self
self._prio = (self._prio + 1) % Reactor.MAXPRIO
with self._listening:
if self._running:
timeout = 0
elif self._timers:
timeout = min(max(0, self._timers[0].time - time()), MAX_TIMEOUT_EPOLL)
else:
timeout = -1
try:
fdEvents = self._epoll.poll(timeout=timeout)
except IOError as e:
(errno, description) = e.args
_printException()
if errno == EINTR:
pass
else:
raise
return self
except KeyboardInterrupt:
self.shutdown() # For testing purposes; normally loop's finally does this.
raise
self._clear_epoll_ctrl(fdEvents)
self._removeFdsInCurrentStep = set([self._epoll_ctrl_read])
self._timerCallbacks(self._timers)
self._callbacks(fdEvents, self._fds, READ_INTENT)
self._callbacks(fdEvents, self._fds, WRITE_INTENT)
self._processCallbacks(self._running)
return self
def getOpenConnections(self):
return len(self._fds)
def __enter__(self):
"Usable as a context-manager for testing purposes"
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Assumes .step() is used to drive the reactor;
so having an exception here does not mean shutdown has been called.
"""
self.shutdown()
return False
def _addFD(self, fileOrFd, callback, intent, prio, fdContext=None):
context = fdContext or _FDContext(callback, fileOrFd, intent, prio)
try:
fd = _fdNormalize(fileOrFd)
if fd in self._fds:
# Otherwise epoll would give an IOError, Errno 17 / EEXIST.
raise ValueError('fd already registered')
if fd in self._suspended:
raise ValueError('Socket is suspended')
eventmask = EPOLLIN if intent is READ_INTENT else EPOLLOUT # Change iff >2 intents exist.
self._epollRegister(fd=fd, eventmask=eventmask)
except _HandleEBADFError:
self._raiseIfFileObjSuspended(obj=fileOrFd)
self._badFdsLastCallback.append(context)
except TypeError:
            _printException() # Roughly the same "whatever" behaviour as the old implementation.
self._badFdsLastCallback.append(context)
else:
self._fds[fd] = context
def _removeFD(self, fileOrFd):
try:
fd = _fdNormalize(fileOrFd)
except _HandleEBADFError:
self._cleanFdsByFileObj(fileOrFd)
return
if fd in self._fds:
del self._fds[fd]
self._epollUnregister(fd=fd)
def _lastCallbacks(self):
while self._badFdsLastCallback:
context = self._badFdsLastCallback.pop()
self.currenthandle = None
self.currentcontext = context
try:
self.currentcontext.callback()
except (AssertionError, SystemExit, KeyboardInterrupt):
raise
except:
_printException()
def _timerCallbacks(self, timers):
currentTime = time()
for timer in timers[:]:
if timer.time > (currentTime + EPOLL_TIMEOUT_GRANULARITY):
break
if timer not in timers:
continue
self.currenthandle = None
self.currentcontext = timer
self.removeTimer(timer)
try:
timer.callback()
except (AssertionError, SystemExit, KeyboardInterrupt):
raise
except:
_printException()
def _callbacks(self, fdEvents, fds, intent):
for fd, eventmask in fdEvents:
if fd in self._removeFdsInCurrentStep:
continue
try:
context = fds[fd]
except KeyError:
self._removeFdsInCurrentStep.add(fd)
sys.stderr.write('[Reactor]: epoll event fd %d does not exist in fds list.\n' % fd)
sys.stderr.flush()
continue
if context.intent is not intent:
continue
if context.prio <= self._prio:
self.currenthandle = fd
self.currentcontext = context
try:
context.callback()
except (AssertionError, SystemExit, KeyboardInterrupt):
if self.currenthandle in fds:
del fds[self.currenthandle]
self._epollUnregisterSafe(fd=self.currenthandle)
raise
except:
_printException()
if self.currenthandle in fds:
del fds[self.currenthandle]
self._epollUnregisterSafe(fd=self.currenthandle)
def _processCallbacks(self, processes):
for self.currenthandle, context in list(processes.items()):
if self.currenthandle in processes and context.prio <= self._prio:
self.currentcontext = context
try:
context.callback()
except:
self.removeProcess(self.currenthandle)
raise
def _epollRegister(self, fd, eventmask):
try:
self._epoll.register(fd=fd, eventmask=eventmask)
except IOError as e:
(errno, description) = e.args
_printException()
if errno == EBADF:
raise _HandleEBADFError()
raise
def _epollUnregister(self, fd):
self._removeFdsInCurrentStep.add(fd)
try:
self._epoll.unregister(fd)
except IOError as e:
(errno, description) = e.args
_printException()
            if errno == ENOENT or errno == EBADF: # Already gone (epoll's EBADF automagical cleanup); not reproducible in Python's epoll binding - but staying on the safe side.
pass
else:
raise
def _epollUnregisterSafe(self, fd):
"Ignores the expected (ENOENT & EBADF) and unexpected exceptions from epoll_ctl / unregister"
self._removeFdsInCurrentStep.add(fd)
try:
if fd != -1:
self._epoll.unregister(fd)
except IOError as e:
            (errno, description) = e.args
            # If errno is either ENOENT or EBADF then the fd is already gone (epoll's EBADF automagical cleanup); not reproducible in Python's epoll binding - but staying on the safe side.
if errno == ENOENT or errno == EBADF:
pass
else:
_printException()
def _clear_epoll_ctrl(self, fdEvents):
if (self._epoll_ctrl_read, EPOLLIN) in fdEvents:
while True:
try:
read(self._epoll_ctrl_read, 1)
break
except (IOError, OSError) as e:
(errno, description) = e.args
if errno == EINTR:
_printException()
else:
raise
def _wake_up(self):
if self._listening.locked():
while True:
try:
write(self._epoll_ctrl_write, b'x')
break
except (IOError, OSError) as e:
(errno, description) = e.args
if errno == EINTR:
_printException()
else:
raise
def _raiseIfFileObjSuspended(self, obj):
for handle, context in list(self._suspended.items()):
if hasattr(context, 'fileOrFd') and context.fileOrFd == obj:
raise ValueError('Socket is suspended')
def _cleanFdsByFileObj(self, obj):
for fd, context in list(self._fds.items()):
if context.fileOrFd == obj:
del self._fds[fd]
self._epollUnregisterSafe(fd=fd)
def _cleanSuspendedByFileObj(self, obj):
for handle, context in list(self._suspended.items()):
if hasattr(context, 'fileOrFd') and context.fileOrFd == obj:
del self._suspended[handle]
def _close_epoll_ctrl(self):
# Will be called exactly once; in testing situations 1..n times.
try:
close(self._epoll_ctrl_read)
except Exception:
pass
try:
close(self._epoll_ctrl_write)
except Exception:
pass
self._epoll_ctrl_read = None
self._epoll_ctrl_write = None
def _printException():
print_exc()
stderr.flush()
def reactor():
return local('__reactor__')
class Timer(object):
def __init__(self, seconds, callback):
assert seconds >= 0, 'Timeout must be >= 0. It was %s.' % seconds
self.callback = callback
if seconds > 0:
seconds = seconds + EPOLL_TIMEOUT_GRANULARITY # Otherwise seconds when (EPOLL_TIMEOUT_GRANULARITY > seconds > 0) is effectively 0(.0)
self.time = time() + seconds
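        # e.g. Timer(0.0004, cb).time ends up roughly 0.0014s in the future (given the
        # EPOLL_TIMEOUT_GRANULARITY defined later in this module), so a small positive
        # timeout never collapses to an effective 0.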
class _FDContext(object):
def __init__(self, callback, fileOrFd, intent, prio):
if prio is None:
prio = Reactor.DEFAULTPRIO
if not 0 <= prio < Reactor.MAXPRIO:
raise ValueError('Invalid priority: %s' % prio)
self.callback = callback
self.fileOrFd = fileOrFd
self.intent = intent
self.prio = prio
class _ProcessContext(object):
def __init__(self, callback, prio):
if prio is None:
prio = Reactor.DEFAULTPRIO
if not 0 <= prio < Reactor.MAXPRIO:
raise ValueError('Invalid priority: %s' % prio)
self.callback = callback
self.prio = prio
def _fdNormalize(fd):
if hasattr(fd, 'fileno'):
try:
fileno = fd.fileno()
if fileno == -1:
print("Reactor: Bad file descriptor {}".format(fd), file=sys.stderr, flush=True)
raise _HandleEBADFError()
return fileno
except (IOError, OSError, socket_error) as e:
(errno, description) = e.args
_printException()
if errno == EBADF:
raise _HandleEBADFError()
raise
return fd
def _fdOrNone(fd):
"Only use for info/debugging - supresses errors without logging."
if hasattr(fd, 'fileno'):
try:
fileno = fd.fileno()
return None if fileno == -1 else fileno
except (IOError, OSError, socket_error):
return None
return fd
def _closeAndIgnoreFdErrors(obj):
try:
obj.close()
except (IOError, OSError, socket_error) as e:
        # EBADF, EIO or EINTR -- none of which is really relevant in our shutdown.
# For why re-trying after EINTR is not a good idea for close(), see:
# http://lwn.net/Articles/576478/
(errno, description) = e.args
_printException()
def _shutdownMessage(message, thing, context):
details = [str(thing)]
if isinstance(context, _FDContext):
details.append('(fd)')
fd = _fdOrNone(context.fileOrFd)
if fd is not None:
details.append('with fd: %s' % fd)
else: # _ProcessContext
details.append('(process)')
callback = context.callback
details.append('with callback: %s' % callback)
try:
try:
sourceLines = getsourcelines(callback)
except TypeError:
try:
# generator-method?
_cb = getattr(getattr(callback, '__self__', None), 'gi_code', None)
sourceLines = getsourcelines(_cb)
callback = _cb
except TypeError:
# Callable instance?
callback = getattr(callback, '__call__', None)
sourceLines = getsourcelines(callback)
details.append('at: %s: %d: %s' % (
getsourcefile(callback),
sourceLines[1], # Line number
sourceLines[0][0].strip(), # Source of the first relevant line
))
except (IndexError, IOError, TypeError):
# IOError / TypeError: inspect getsourcefile / sourcelines
# IndexError: unexpected sourcelines datastructure
pass
return ('Reactor shutdown: %s: ' % message) + ' '.join(details)
class _HandleEBADFError(Exception):
pass
_timeKey = lambda t: t.time
READ_INTENT = type('READ_INTENT', (object,), {})()
WRITE_INTENT = type('WRITE_INTENT', (object,), {})()
# In Python 2.7 - anything lower than 0.001 will become 0(.0); epoll.poll() may (and will IRL) return early - see: https://bugs.python.org/issue20311 (Python 3.x differs in behaviour :-s).
# TS: If this granularity (& related logic) is unwanted - start using timerfd_* system-calls.
EPOLL_TIMEOUT_GRANULARITY = 0.001
MAX_INT_EPOLL = 2**31 -1
MAX_TIMEOUT_EPOLL = MAX_INT_EPOLL / 1000 - 1
EPOLLRDHUP = int('0x2000', 16)
_EPOLL_CONSTANT_MAPPING = { # Python epoll constants (missing EPOLLRDHUP - which exists since Linux 2.6.17)
EPOLLIN: 'EPOLLIN', # Available for read
EPOLLOUT: 'EPOLLOUT', # Available for write
EPOLLRDHUP: 'EPOLLRDHUP', # (Exists since Linux 2.6.17, see Linux's: man epoll_ctl; and glibc / libc6-dev: sys/epoll.h)
# Stream socket peer closed connection, or shut down writing half
# of connection. (This flag is especially useful for writing
# simple code to detect peer shutdown when using Edge Triggered
# monitoring.)
EPOLLPRI: 'EPOLLPRI', # Urgent data for read
# Note: just don't send socket data with the MSG_OOB flag specified and this will never happen. - TCP Out-of-Band communication is a bit of a protocol relic (though TELNET and FTP might use it (allowed by spec), decent people don't :-).
EPOLLERR: 'EPOLLERR', # Error condition happened on the assoc. fd
# EPOLLERR and EPOLLHUP: will always be waited for, does not need to be specified in the eventmask (see Linux's: man epoll_ctl).
EPOLLHUP: 'EPOLLHUP', # Hang up happened on the assoc. fd
EPOLLET: 'EPOLLET', # Set Edge Trigger behavior, the default is Level Trigger behavior
EPOLLONESHOT: 'EPOLLONESHOT', # Set one-shot behavior. After one event is pulled out, the fd is internally disabled
#
# Don't use below here, please.
# ...When compiling with _XOPEN_SOURCE defined... which convey no further information beyond the bits listed above ... (see Linux's: man poll)
EPOLLRDNORM: 'EPOLLRDNORM', # Equivalent to EPOLLIN
EPOLLRDBAND: 'EPOLLRDBAND', # Priority data band can be read.
EPOLLWRNORM: 'EPOLLWRNORM', # Equivalent to EPOLLOUT
EPOLLWRBAND: 'EPOLLWRBAND', # Priority data may be written.
# Linux ignores 'POLLMSG' (see Linux's: man poll)
EPOLLMSG: 'EPOLLMSG', # Ignored.
}
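# Minimal usage sketch (illustrative only - registration methods such as addTimer/
# addReader belong to the part of the Reactor class not shown above, so their exact
# signatures are assumptions):
#
#     with Reactor() as r:                      # context-manager form is meant for tests
#         r.addTimer(1.0, r.request_shutdown)   # hypothetical registration call
#         r.loop()                              # drives step() until request_shutdown()
#
# Inside a callback the running reactor can be obtained via reactor(), because step()
# publishes itself under the call-local name '__reactor__'.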
| seecr/weightless-core | weightless/io/_reactor.py | Python | gpl-2.0 | 22,610 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Pool
def test(x):
print('call test')
return x * x
if __name__ == '__main__':
with Pool(5) as p:
print(p.map(test, [1,2,3]))
# https://docs.python.org/dev/library/multiprocessing.html
| sharkspeed/dororis | languages/python/corelibs/test_multiprocess.py | Python | bsd-2-clause | 282 |
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2019 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
"""
Decorators for the :mod:`bapsflib` package.
"""
__all__ = ["with_bf", "with_lapdf"]
import functools
import inspect
from typing import Union
def with_bf(
wfunc=None,
*,
filename: Union[str, None] = None,
control_path: Union[str, None] = None,
digitizer_path: Union[str, None] = None,
msi_path: Union[str, None] = None
):
"""
Context decorator for managing the opening and closing BaPSF HDF5
Files (:class:`bapsflib._hdf.utils.file.File`). An instance of the
BaPSF HDF5 file is injected into the decorated function at the end of
the positional arguments. The decorator is primarily designed for use
on test methods, but can also be used as a function decorator.
:param wfunc: function or method to be wrapped
:param filename: name of the BaPSF HDF5 file
:param control_path: internal HDF5 path for control devices
:param digitizer_path: internal HDF5 path for digitizers
:param msi_path: internal HDF5 path for MSI devices
:example:
The HDF5 file parameters (:data:`filename`, :data:`control_path`,
:data:`digitizer_path`, and :data:`msi_path`) can be passed to the
decorator in three ways (listed by predominance):
#. The wrapped function arguments.
#. If the wrapped function is a method, then through appropriately
named :data:`self` attributes.
#. The decorator keywords.
**Defined with wrapped function arguments**::
>>> # as function keywords
>>> @with_bf
... def foo(bf, **kwargs):
... # * bf will be the HDF5 file object
... # * do whatever is needed with bf and @with_bf will close
... # the file at the end
... return bf.filename
>>> foo(filename='test.hdf5', control_path='Raw data + config',
... digitizer_path='Raw data + config', msi_path='MSI')
'test.hdf5'
>>>
>>> # as a function argument
>>> @with_bf
... def foo(filename, bf, **kwargs):
... # use bf
... return bf.filename
        >>> foo('test.hdf5')
'test.hdf5'
**Defined with wrapped method attributes**::
>>> # use `self` to pass file settings
>>> class BehaveOnFile:
... def __init__(self):
... super().__init__()
... self.filename = 'test.hdf5'
... self.control_path = 'Raw data + config'
... self.digitizer_path = 'Raw data + config'
... self.msi_path = 'MSI'
...
... @with_bf
... def foo(self, bf, **kwargs):
... return bf.filename
>>> a = BehaveOnFile()
>>> a.foo()
'test.hdf5'
>>>
>>> # but keywords will still take precedence
>>> a.foo(filename='test_2.hdf5')
'test_2.hdf5'
    **Defined with decorator keywords**::
>>> # as function keywords
>>> @with_bf(filename='test.hdf5',
... control_path='Raw data + config',
        ...          digitizer_path='Raw data + config',
... msi_path='MSI')
... def foo(bf, **kwargs):
... return bf.filename
>>> foo()
'test.hdf5'
>>>
>>> # function keywords will still take precedence
>>> foo(filename='test_2.hdf5')
'test_2.hdf5'
"""
# How to pass in file settings (listed in priority):
# 1. function keywords
# 2. self attributes
# 3. decorator keywords
#
# define decorator set file settings
settings = {
"filename": filename,
"control_path": control_path,
"digitizer_path": digitizer_path,
"msi_path": msi_path,
}
def decorator(func):
# import File here to avoid cyclical imports
from bapsflib._hdf.utils.file import File
@functools.wraps(func)
def wrapper(*args, **kwargs):
# is decorated function a method
# - this relies on the convention that a method's first argument
# is self
# - inspect.ismethod only works on bound methods, so it does
# not work at time of decorating in class
#
func_sig = inspect.signature(func)
bound_args = func_sig.bind_partial(*args, **kwargs)
self = None # type: Union[None, object]
if "self" in func_sig.parameters:
try:
if hasattr(args[0], func.__name__): # pragma: no branch
# arg[0] is an object with method of the same name
# as the decorated function
self = args[0]
except IndexError: # pragma: no cover
pass
# update settings
fsettings = settings.copy()
for name in fsettings.keys():
if name in bound_args.arguments:
# look for setting in passed arguments
if bound_args.arguments[name] is None:
continue
fsettings[name] = bound_args.arguments[name]
elif name in bound_args.kwargs:
# look for setting in passed kwargs (if not in arguments)
if bound_args.kwargs[name] is None:
continue
fsettings[name] = bound_args.kwargs[name]
elif self is not None:
# if wrapped function is a method, and setting not passed
# as function argument then look to self
try:
if self.__getattribute__(name) is None:
continue
fsettings[name] = self.__getattribute__(name)
except KeyError: # pragma: no cover
pass
for name in list(fsettings.keys()):
if fsettings[name] is None:
if name == "filename":
raise ValueError("No valid file name specified.")
else:
del fsettings[name]
fname = fsettings.pop("filename")
            # run the wrapped function inside a with-statement so the file is always closed
with File(fname, **fsettings) as bf:
args += (bf,)
return func(*args, **kwargs)
return wrapper
if wfunc is not None:
# This is a decorator call without arguments, e.g. @with_bf
return decorator(wfunc)
else:
# This is a factory call, e.g. @with_bf()
return decorator
def with_lapdf(wfunc=None, *, filename: Union[str, None] = None):
"""
Context decorator for managing the opening and closing LaPD HDF5
Files (:class:`bapsflib.lapd._hdf.file.File`). An instance of the
LaPD HDF5 file is injected into the decorated function at the end of
the positional arguments. The decorator is primarily designed for use
on test methods, but can also be used as a function decorator.
:param wfunc: function or method to be wrapped
    :param filename: name of the LaPD HDF5 file
:example:
The HDF5 :data:`filename` can be passed to the decorator in three
ways (listed by predominance):
#. The wrapped function arguments.
#. If the wrapped function is a method, then through the
appropriately named :data:`self` attributes.
#. The decorator keywords.
**Defined with wrapped function arguments**::
>>> # as function keywords
>>> @with_lapdf
... def foo(lapdf, **kwargs):
        ...     # * lapdf will be the HDF5 file object
        ...     # * do whatever is needed with lapdf and @with_lapdf will close
        ...     #   the file at the end
... return lapdf.filename
>>> foo(filename='test.hdf5')
'test.hdf5'
>>>
>>> # as a function argument
>>> @with_lapdf
... def foo(filename, lapdf, **kwargs):
        ...     # use lapdf
        ...     return lapdf.filename
        >>> foo('test.hdf5')
'test.hdf5'
**Defined with wrapped method attributes**::
>>> # use `self` to pass file settings
>>> class BehaveOnFile:
... def __init__(self):
... super().__init__()
... self.filename = 'test.hdf5'
...
        ...     @with_lapdf
... def foo(self, lapdf, **kwargs):
... return lapdf.filename
>>> a = BehaveOnFile()
>>> a.foo()
'test.hdf5'
>>>
>>> # but keywords will still take precedence
>>> a.foo(filename='test_2.hdf5')
'test_2.hdf5'
    **Defined with decorator keywords**::
>>> # as function keywords
        >>> @with_lapdf(filename='test.hdf5')
... def foo(lapdf, **kwargs):
... return lapdf.filename
>>> foo()
'test.hdf5'
>>>
>>> # function keywords will still take precedence
>>> foo(filename='test_2.hdf5')
'test_2.hdf5'
"""
# How to pass in file settings (listed in priority):
# 1. function keywords
# 2. self attributes
# 3. decorator keywords
#
# define decorator set file settings
settings = {"filename": filename}
def decorator(func):
# import File here to avoid cyclical imports
from bapsflib.lapd._hdf.file import File
@functools.wraps(func)
def wrapper(*args, **kwargs):
# is decorated function a method
# - this relies on the convention that a method's first argument
# is self
# - inspect.ismethod only works on bound methods, so it does
# not work at time of decorating in class
#
func_sig = inspect.signature(func)
bound_args = func_sig.bind_partial(*args, **kwargs)
self = None # type: Union[None, object]
if "self" in func_sig.parameters:
try:
if hasattr(args[0], func.__name__): # pragma: no branch
self = args[0]
except IndexError: # pragma: no cover
pass
# update settings
fsettings = settings.copy()
for name in fsettings.keys():
if name in bound_args.arguments:
# look for setting in passed arguments
if bound_args.arguments[name] is None:
continue
fsettings[name] = bound_args.arguments[name]
elif name in bound_args.kwargs:
# look for setting in passed kwargs (if not in arguments)
if bound_args.kwargs[name] is None: # pragma: no cover
# currently with_lapdf only takes filename as an
                        # argument, this needs to be tested if that changes
continue
fsettings[name] = bound_args.kwargs[name]
elif self is not None:
# if wrapped function is a method, and setting not passed
# as function argument then look to self
try:
if self.__getattribute__(name) is None: # pragma: no cover
# currently with_lapdf only takes filename as an
                            # argument, this needs to be tested if that changes
continue
fsettings[name] = self.__getattribute__(name)
except KeyError: # pragma: no cover
pass
for name in list(fsettings.keys()):
if fsettings[name] is None:
if name == "filename":
raise ValueError("No valid file name specified.")
else: # pragma: no cover
del fsettings[name]
fname = fsettings.pop("filename")
            # run the wrapped function inside a with-statement so the file is always closed
with File(fname) as lapdf:
args += (lapdf,)
return func(*args, **kwargs)
return wrapper
if wfunc is not None:
# This is a decorator call without arguments, e.g. @with_lapdf
return decorator(wfunc)
else:
# This is a factory call, e.g. @with_lapdf()
return decorator
| rocco8773/bapsflib | bapsflib/utils/decorators.py | Python | bsd-3-clause | 13,153 |
from __future__ import unicode_literals
import os
from collections import OrderedDict
from django.apps import apps
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.encoding import smart_text
from django.utils.functional import cached_property
from django.utils.six.moves import input
class Command(BaseCommand):
"""
Command that allows to copy or symlink static files from different
locations to the settings.STATIC_ROOT.
"""
help = "Collect static files in a single location."
requires_system_checks = False
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.copied_files = []
self.symlinked_files = []
self.unmodified_files = []
self.post_processed_files = []
self.storage = staticfiles_storage
self.style = no_style()
@cached_property
def local(self):
try:
self.storage.path('')
except NotImplementedError:
return False
return True
def add_arguments(self, parser):
parser.add_argument(
'--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help="Do NOT prompt the user for input of any kind.",
)
parser.add_argument(
'--no-post-process',
action='store_false', dest='post_process', default=True,
help="Do NOT post process collected files.",
)
parser.add_argument(
'-i', '--ignore', action='append', default=[],
dest='ignore_patterns', metavar='PATTERN',
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more.",
)
parser.add_argument(
'-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except modify the filesystem.",
)
parser.add_argument(
'-c', '--clear',
action='store_true', dest='clear', default=False,
help="Clear the existing files using the storage "
"before trying to copy or link the original file.",
)
parser.add_argument(
'-l', '--link',
action='store_true', dest='link', default=False,
help="Create a symbolic link to each file instead of copying.",
)
parser.add_argument(
'--no-default-ignore', action='store_false',
dest='use_default_ignore_patterns', default=True,
help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').",
)
def set_options(self, **options):
"""
Set instance variables based on an options dict
"""
self.interactive = options['interactive']
self.verbosity = options['verbosity']
self.symlink = options['link']
self.clear = options['clear']
self.dry_run = options['dry_run']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns
self.ignore_patterns = list(set(ignore_patterns))
self.post_process = options['post_process']
def collect(self):
"""
Perform the bulk of the work of collectstatic.
Split off from handle() to facilitate testing.
"""
if self.symlink and not self.local:
raise CommandError("Can't symlink to a remote destination.")
if self.clear:
self.clear_dir('')
if self.symlink:
handler = self.link_file
else:
handler = self.copy_file
found_files = OrderedDict()
for finder in get_finders():
for path, storage in finder.list(self.ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
if prefixed_path not in found_files:
found_files[prefixed_path] = (storage, path)
handler(path, prefixed_path, storage)
else:
self.log(
"Found another file with the destination path '%s'. It "
"will be ignored since only the first encountered file "
"is collected. If this is not what you want, make sure "
"every static file has a unique path." % prefixed_path,
level=1,
)
# Here we check if the storage backend has a post_process
# method and pass it the list of modified files.
if self.post_process and hasattr(self.storage, 'post_process'):
processor = self.storage.post_process(found_files,
dry_run=self.dry_run)
for original_path, processed_path, processed in processor:
if isinstance(processed, Exception):
self.stderr.write("Post-processing '%s' failed!" % original_path)
# Add a blank line before the traceback, otherwise it's
# too easy to miss the relevant part of the error message.
self.stderr.write("")
raise processed
if processed:
self.log("Post-processed '%s' as '%s'" %
(original_path, processed_path), level=1)
self.post_processed_files.append(original_path)
else:
self.log("Skipped post-processing '%s'" % original_path)
return {
'modified': self.copied_files + self.symlinked_files,
'unmodified': self.unmodified_files,
'post_processed': self.post_processed_files,
}
def handle(self, **options):
self.set_options(**options)
message = ['\n']
if self.dry_run:
message.append(
'You have activated the --dry-run option so no files will be modified.\n\n'
)
message.append(
'You have requested to collect static files at the destination\n'
'location as specified in your settings'
)
if self.is_local_storage() and self.storage.location:
destination_path = self.storage.location
message.append(':\n\n %s\n\n' % destination_path)
else:
destination_path = None
message.append('.\n\n')
if self.clear:
message.append('This will DELETE ALL FILES in this location!\n')
else:
message.append('This will overwrite existing files!\n')
message.append(
'Are you sure you want to do this?\n\n'
"Type 'yes' to continue, or 'no' to cancel: "
)
if self.interactive and input(''.join(message)) != 'yes':
raise CommandError("Collecting static files cancelled.")
collected = self.collect()
modified_count = len(collected['modified'])
unmodified_count = len(collected['unmodified'])
post_processed_count = len(collected['post_processed'])
if self.verbosity >= 1:
template = ("\n%(modified_count)s %(identifier)s %(action)s"
"%(destination)s%(unmodified)s%(post_processed)s.\n")
summary = template % {
'modified_count': modified_count,
'identifier': 'static file' + ('' if modified_count == 1 else 's'),
'action': 'symlinked' if self.symlink else 'copied',
'destination': (" to '%s'" % destination_path if destination_path else ''),
'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
'post_processed': (collected['post_processed'] and
', %s post-processed'
% post_processed_count or ''),
}
return summary
def log(self, msg, level=2):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
def is_local_storage(self):
return isinstance(self.storage, FileSystemStorage)
def clear_dir(self, path):
"""
Deletes the given relative path using the destination storage backend.
"""
if not self.storage.exists(path):
return
dirs, files = self.storage.listdir(path)
for f in files:
fpath = os.path.join(path, f)
if self.dry_run:
self.log("Pretending to delete '%s'" %
smart_text(fpath), level=1)
else:
self.log("Deleting '%s'" % smart_text(fpath), level=1)
try:
full_path = self.storage.path(fpath)
except NotImplementedError:
self.storage.delete(fpath)
else:
if not os.path.exists(full_path) and os.path.lexists(full_path):
# Delete broken symlinks
os.unlink(full_path)
else:
self.storage.delete(fpath)
for d in dirs:
self.clear_dir(os.path.join(path, d))
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if self.storage.exists(prefixed_path):
try:
# When was the target file modified last time?
target_last_modified = self.storage.get_modified_time(prefixed_path)
except (OSError, NotImplementedError, AttributeError):
# The storage doesn't support get_modified_time() or failed
pass
else:
try:
# When was the source file modified last time?
source_last_modified = source_storage.get_modified_time(path)
except (OSError, NotImplementedError, AttributeError):
pass
else:
# The full path of the target file
if self.local:
full_path = self.storage.path(prefixed_path)
else:
full_path = None
                    # Skip the file if the target is at least as new as the source
# Avoid sub-second precision (see #14665, #19540)
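                    # (The symlink XOR term below also forces a re-collect when switching
                    # between --link and plain copy mode, even if timestamps would allow a skip.)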
if (target_last_modified.replace(microsecond=0) >= source_last_modified.replace(microsecond=0) and
full_path and not (self.symlink ^ os.path.islink(full_path))):
if prefixed_path not in self.unmodified_files:
self.unmodified_files.append(prefixed_path)
self.log("Skipping '%s' (not modified)" % path)
return False
# Then delete the existing file if really needed
if self.dry_run:
self.log("Pretending to delete '%s'" % path)
else:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
return True
def link_file(self, path, prefixed_path, source_storage):
"""
Attempt to link ``path``
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.symlinked_files:
return self.log("Skipping '%s' (already linked earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally link the file
if self.dry_run:
self.log("Pretending to link '%s'" % source_path, level=1)
else:
self.log("Linking '%s'" % source_path, level=1)
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
try:
if os.path.lexists(full_path):
os.unlink(full_path)
os.symlink(source_path, full_path)
except AttributeError:
import platform
raise CommandError("Symlinking is not supported by Python %s." %
platform.python_version())
except NotImplementedError:
import platform
raise CommandError("Symlinking is not supported in this "
"platform (%s)." % platform.platform())
except OSError as e:
raise CommandError(e)
if prefixed_path not in self.symlinked_files:
self.symlinked_files.append(prefixed_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
self.copied_files.append(prefixed_path)
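# Typical invocations of this management command (illustrative; every flag shown is
# defined in add_arguments above):
#
#     python manage.py collectstatic                   # interactive copy into STATIC_ROOT
#     python manage.py collectstatic --noinput         # skip the confirmation prompt
#     python manage.py collectstatic --link --clear    # wipe the target, then symlink instead of copy
#     python manage.py collectstatic --dry-run -i "*.scss"   # preview only, ignoring a glob pattern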
| KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py | Python | gpl-3.0 | 14,419 |
#Ryan G. Coleman, Kim A. Sharp crystal.med.upenn.edu, ryan.g.coleman ATSYMBOL gmail.com
#bunch of helper methods for orthogonal range searching in the context of depth
import rangesearch
import geometry
import operator
import sys
def getIntersectingPts(
startPt, endPt, longEdge, shortAxis1, shortAxis2, orst, maxIntersect):
  '''helper function that finds the points where a line intersects the surface triangles.
  returns False when the intersection count is odd or a divide-by-zero occurs, so the
  caller can perturb the line and retry. the prebuilt range search tree must be passed in'''
divideByZeroProblems = False
try: # to catch divide by zero issues
intersectingPts = []
#intersectingTris = [] # don't need except for debugging
result = orst.rangeQuery(
startPt[shortAxis1]-longEdge, startPt[shortAxis1]+longEdge,
startPt[shortAxis2]-longEdge, startPt[shortAxis2]+longEdge)
#result[x][3] contains tri tuple indices, if all three there, do...
checkResults = []
for resultEntry in result: # put triangle index into new struct, 3 copies
for triIndex in resultEntry[3]: # will be there if whole tri is inside
checkResults.append(triIndex)
checkResults.sort(key=operator.itemgetter(10))
lastPt, ptCount = -1, 0
for point in checkResults:
if False == maxIntersect or len(intersectingPts) < maxIntersect:
if point == lastPt:
ptCount += 1
else:
lastPt, ptCount = point, 1
if ptCount == 3:
triTuple = point
#now check to make sure at least one point of the triangle is on each
#side of the x/y shortaxis1/2 planes
short1BelowCt, short1AboveCt, short2BelowCt, short2AboveCt = \
0, 0, 0, 0
for pointInTri in triTuple[:3]:
if pointInTri[shortAxis1] < startPt[shortAxis1]:
short1BelowCt += 1
elif pointInTri[shortAxis1] > startPt[shortAxis1]:
short1AboveCt += 1
else: # weird equality case, increment both
short1BelowCt += 1
short1AboveCt += 1
if pointInTri[shortAxis2] < startPt[shortAxis2]:
short2BelowCt += 1
elif pointInTri[shortAxis2] > startPt[shortAxis2]:
short2AboveCt += 1
else: # weird equality case, increment both
short2BelowCt += 1
short2AboveCt += 1
#now do check, only do rest if necessary
if short1BelowCt > 0 and short1AboveCt > 0 and \
short2BelowCt > 0 and short2AboveCt > 0:
triPts0 = triTuple[0]
triPts1 = triTuple[1]
triPts2 = triTuple[2]
posPt, maxIt = False, 5000
while False == posPt:
posPt = geometry.linePlaneIntersectionNumeric(
triPts0, triPts1, triPts2, startPt, endPt)
if False == posPt:
triPts0, triPts1, triPts2 = geometry.perturbTriangle(
triPts0, triPts1, triPts2)
maxIt -= 1
if maxIt < 0:
print "perturbed 5000 times", triPts0, triPts1, triPts2,
print startPt, endPt, "giving up"
sys.exit(1)
if posPt is not False:
if geometry.intPointInsideTriTuple(triTuple, posPt):
intersectingPts.append(posPt)
#intersectingTris.append([triPts0, triPts1, triPts2])
#print "inter", len(intersectingPts), len(intersectingTris)
#tstdebug.debugTris([startPt,endPt],
#intersectingTris,intersectingPts,"debug."+
#str(x)+"."+str(y)+"."+str(z)+".line.py")
except ZeroDivisionError: # caused by intPt = triangle vertex
divideByZeroProblems = True
if divideByZeroProblems or len(intersectingPts) % 2 == 1:
return False
else:
return intersectingPts
def buildOrst(
triTuples, shortAxis1, shortAxis2, limitPoints, pointTriList, pointList):
'''helper that builds the ortho range search tree from the triangle points'''
inputData = []
for pointTriNum in xrange(len(pointTriList)):
inputData.append(False) # initialize to empty
for ptIndex, pointTri in enumerate(pointTriList):
if pointTri[0] in limitPoints:
triList = pointTri[2:]
modifiedTriList = []
for thisTri in triList:
modifiedTriList.append(triTuples[thisTri])
#disabled, use tstCheckTris for now
#if pointTri[1] == 13:
# #print triList, modifiedTriList, "something wrong with tst file"
pointXYZ = pointList[pointTri[0]-1]
inputData[ptIndex] = [
pointXYZ[1], pointXYZ[2], pointXYZ[3], modifiedTriList]
newInputData = []
for inputRow in inputData:
if inputRow is not False:
newInputData.append(inputRow)
if len(newInputData) > 0:
orst = rangesearch.orthoRangeSearchTree(
newInputData, shortAxis1, shortAxis2)
return orst
else:
return False
def decideInsideLong(
emptyGrid, triTuples, longAxis, allPoints, pointTriList, pointList,
triList, valueToSet, valueFromSet=False, maxIntersect=False):
  '''helper that casts grid lines along the chosen long axis and marks grid points inside the surface'''
#don't bother figuring out what's inside the ms if already bad grid
lenX = len(emptyGrid)
lenY = len(emptyGrid[0])
lenZ = len(emptyGrid[0][0])
lens = (lenX, lenY, lenZ)
shortAxis1, shortAxis2 = 0, 0
if longAxis == 0:
shortAxis1, shortAxis2 = 1, 2
elif longAxis == 1:
shortAxis1, shortAxis2 = 0, 2
elif longAxis == 2:
shortAxis1, shortAxis2 = 0, 1
longEdge = 1.0000001 * geometry.getLongestEdge(triList, pointList, longAxis)
#build orthogonal range search tree structure now, save in orst
orst = buildOrst(
triTuples, shortAxis1, shortAxis2, allPoints, pointTriList, pointList)
if False != orst:
for short1 in range(0, lens[shortAxis1]):
#print " "
for short2 in range(0, lens[shortAxis2]):
startPt, endPt = [], [] # set these up in next switch
if longAxis == 0:
x, y, z = 0, short1, short2
yCoord = emptyGrid[x][y][z][2]
zCoord = emptyGrid[x][y][z][3]
xCoordStart = emptyGrid[0][y][z][1]
xCoordEnd = emptyGrid[lenX - 1][y][z][1]
startPt = [xCoordStart, yCoord, zCoord]
endPt = [xCoordEnd, yCoord, zCoord]
elif longAxis == 1:
x, y, z = short1, 0, short2
xCoord = emptyGrid[x][y][z][1]
zCoord = emptyGrid[x][y][z][3]
yCoordStart = emptyGrid[x][0][z][2]
yCoordEnd = emptyGrid[x][lenY - 1][z][2]
startPt = [xCoord, yCoordStart, zCoord]
endPt = [xCoord, yCoordEnd, zCoord]
elif longAxis == 2:
x, y, z = short1, short2, 0
xCoord = emptyGrid[x][y][z][1]
yCoord = emptyGrid[x][y][z][2]
zCoordStart = emptyGrid[x][y][0][3]
zCoordEnd = emptyGrid[x][y][lenZ - 1][3]
startPt = [xCoord, yCoord, zCoordStart]
endPt = [xCoord, yCoord, zCoordEnd]
intersectingPts, maxIt, usedStart, usedEnd = False, 5000, startPt, endPt
while False == intersectingPts:
intersectingPts = getIntersectingPts(
usedStart, usedEnd, longEdge, shortAxis1, shortAxis2, orst,
maxIntersect)
usedStart, usedEnd = geometry.perturbLine(
longAxis, shortAxis1, shortAxis2, usedStart, usedEnd, maxIt)
maxIt -= 1
if maxIt < 0:
print "had to perturb line 5000 times..."
print usedStart, usedEnd
sys.exit(1)
if len(intersectingPts) > 0:
#print len(intersectingPts),
#check even-ness... perhaps perturb if odd
if len(intersectingPts) % 2 == 1:
pass
#print "odd number of intersecting points!!"
#perturb starting line, try again
#need to sort based on ascending long-axis int pt
#longInts mean long dimension intercepts
longInts = [xb[longAxis] for xb in intersectingPts] # make quick list
longInts.sort()
#then decide inside/outside ms points, put -2 in grid if inside
lPlace, lInside = 0, False # records which intercepts
# have been seen, and whether currently outside or inside
for longCount in range(0, lens[longAxis]):
x, y, z = -1, -1, -1
if longAxis == 0:
x, y, z = longCount, short1, short2
elif longAxis == 1:
x, y, z = short1, longCount, short2
elif longAxis == 2:
x, y, z = short1, short2, longCount
gridPiece = emptyGrid[x][y][z]
while lPlace < len(longInts) and \
gridPiece[longAxis+1] > longInts[lPlace]: # switch
lPlace += 1
lInside = not lInside
if lInside: # replace the encoding int
if False == valueFromSet or valueFromSet == gridPiece[0]:
newGridPiece = valueToSet, gridPiece[1], gridPiece[2], \
gridPiece[3]
emptyGrid[x][y][z] = newGridPiece
#emptyGrid has been modified, nothing returned...
def decideInside(
emptyGrid, triTuples, allPoints, pointTriList, pointList, triList,
valueToSet, valueFromSet=False, maxIntersect=False):
'''moved this into a function for better readability, etc.
just modifies emptyGrid, doesn't return anything
calls helper function after determining which axis is longer'''
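  #The underlying test is the classic ray-casting parity rule: for each grid line along
  #the chosen long axis the surface intersections are collected and sorted, and a grid
  #point counts as inside exactly when an odd number of intersections precedes it along
  #that axis (see the lInside toggle in decideInsideLong).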
#don't bother figuring out what's inside the ms if already bad grid
lenX = len(emptyGrid)
lenY = len(emptyGrid[0])
lenZ = len(emptyGrid[0][0])
#loop through shortest 2 first is most efficient
if lenZ >= lenX and lenZ >= lenY:
decideInsideLong(
emptyGrid, triTuples, 2, allPoints, pointTriList, pointList,
triList, valueToSet, valueFromSet, maxIntersect)
elif lenY >= lenX and lenY >= lenZ:
decideInsideLong(
emptyGrid, triTuples, 1, allPoints, pointTriList, pointList,
triList, valueToSet, valueFromSet, maxIntersect)
elif lenX >= lenZ and lenX >= lenY:
decideInsideLong(
emptyGrid, triTuples, 0, allPoints, pointTriList, pointList,
triList, valueToSet, valueFromSet, maxIntersect)
#that's the end, emptyGrid has been modified, nothing returned...
def decideInsidePhi(
phiData, triTuples, allPoints, pointTriList, pointList, triList,
valueToSet=1, valueToUnset=0, maxIntersect=False):
'''helper function that puts values into a phimap instead of a grid.
cubic so doesn't matter which axis, just use x.
ToSet is inside surface, ToUnset is outside the surface.
the values in PhiData are destroyed and replaced with values.'''
longEdge = 1.0000001 * geometry.getLongestEdge(triList, pointList, 0)
#build orthogonal range search tree structure now, save in orst
orst = buildOrst(
triTuples, 1, 2, allPoints, pointTriList, pointList)
if False != orst:
for short1 in xrange(0, phiData.gridDimension):
#print " "
for short2 in xrange(0, phiData.gridDimension):
startPt, endPt = [], [] # set these up next
x, y, z = 0, short1, short2
yCoord = phiData.getXYZ(x, y, z)[1]
zCoord = phiData.getXYZ(x, y, z)[2]
xCoordStart = phiData.getXYZ(x, y, z)[0]
xCoordEnd = phiData.getXYZ(phiData.gridDimension - 1, y, z)[0]
startPt = [xCoordStart, yCoord, zCoord]
endPt = [xCoordEnd, yCoord, zCoord]
intersectingPts, maxIt, usedStart, usedEnd = False, 5000, startPt, endPt
while False == intersectingPts:
intersectingPts = getIntersectingPts(
usedStart, usedEnd, longEdge, 1, 2, orst, maxIntersect)
usedStart, usedEnd = geometry.perturbLine(
0, 1, 2, usedStart, usedEnd, maxIt)
maxIt -= 1
if maxIt < 0:
print "had to perturb line 5000 times..."
print usedStart, usedEnd
sys.exit(1)
if len(intersectingPts) > 0:
#need to sort based on ascending long-axis int pt
#longInts mean long dimension intercepts
longInts = [xb[0] for xb in intersectingPts] # make quick array
longInts.sort()
#then decide inside/outside ms points, put -2 in grid if inside
lPlace, lInside = 0, False # records which intercepts
# have been seen, and whether currently outside or inside
#outside is False!
for longCount in xrange(0, phiData.gridDimension):
x, y, z = longCount, short1, short2
gridPiece = phiData.getXYZ(x, y, z)
while lPlace < len(longInts) and \
gridPiece[0] > longInts[lPlace]: # switch
lPlace += 1
lInside = not lInside
if lInside: # replace the encoding int
phiData.setValue(x, y, z, valueToSet)
else:
phiData.setValue(x, y, z, valueToUnset)
else: # no intersecting points
for longCount in xrange(0, phiData.gridDimension):
x, y, z = longCount, short1, short2
phiData.setValue(x, y, z, valueToUnset)
#print phiData.countValues()
#phiData has been modified, no return value
| ryancoleman/traveldistance | src/orstHelper.py | Python | gpl-2.0 | 13,203 |
#! /usr/bin/python
# Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
db = {
"QPtrVector<FX::FXWindow>" : "QPtrVectorOfFXWindow"
, "QMemArray<unsigned char>" : "QMemArrayOfUnsignedChar"
, "QValueList<FX::Pol::knowReferrers::ReferrerEntry>" : "QValueListOfReferrerEntry"
, "FXIPCMsgChunkCodeAlloc<0,false>" : "FXIPCMsgChunkCodeAlloc0False"
, "FXIPCMsgChunkCodeAlloc<4,false>" : "FXIPCMsgChunkCodeAlloc4False"
, "QPtrVector<FX::Generic::BoundFunctorV>" : "QPtrVectorOfBoundFunctorV"
, "FXObjectListOf<FX::FXListItem>" : "FXObjectListOfFXListItem"
, "FXObjectListOf<FX::FXHeaderItem>" : "FXObjectListOfFXHeaderItem"
, "QValueList<FX::FXString>" : "QValueListOfFXString"
, "FXObjectListOf<FX::FXIconItem>" : "FXObjectListOfFXIconItem"
, "FXIPCMsgChunkCodeAlloc<0,true>" : "FXIPCMsgChunkCodeAlloc0True"
, "FXIPCMsgChunkCodeAlloc<2,false>" : "FXIPCMsgChunkCodeAlloc2False"
} | ned14/tnfox | Python/aliases.py | Python | lgpl-2.1 | 1,055 |
from math import *
inp = raw_input()
spl = inp.split()
n = int(spl[0])
m = int(spl[1])
a = int(spl[2])
# integer ceiling division avoids float-precision errors for n, m up to 1e9
i = (n + a - 1) // a
j = (m + a - 1) // a
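# worked example: n = 6, m = 6, a = 4 -> ceil(6/4) * ceil(6/4) = 2 * 2 = 4 flagstones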
print max(1, i * j) | yamstudio/Codeforces | 000/1A - Theatre Square.py | Python | gpl-3.0 | 177 |
import sqlite3
from random import randint
#create a new database called newnum.db if it doesn't already exist
with sqlite3.connect("newnum.db") as connection:
c = connection.cursor()
c.execute("DROP TABLE if exists random_numbers")
c.execute("""CREATE TABLE random_numbers
(rnum INT)
""")
for i in range(0, 100):
#choose a random int from 0 to 100
new_num = randint(0,100)
#insert it into the db
c.execute("INSERT INTO random_numbers VALUES (?)", (new_num,)) | joelstanner/RealPython_book2 | assignment3a.py | Python | mit | 559 |
import json
import warnings
import fabricio
from fabricio import utils
from .base import BaseService, Option, Attribute, ServiceError
class ContainerError(ServiceError):
pass
class ContainerNotFoundError(ContainerError):
pass
class Container(BaseService):
command = Attribute()
stop_timeout = Attribute(default=10)
user = Option(safe=True)
publish = Option()
env = Option(safe=True)
label = Option(safe=True)
volume = Option(safe=True)
link = Option(safe=True)
add_host = Option(name='add-host', safe=True)
network = Option(name='net', safe=True)
restart = Option()
stop_signal = Option(name='stop-signal', safe=True)
@utils.default_property
def info(self):
command = 'docker inspect --type container {container}'
info = fabricio.run(
command.format(container=self),
abort_exception=ContainerNotFoundError,
)
return json.loads(info)[0]
def delete(
self,
delete_image=False,
delete_dangling_volumes=None, # deprecated
**options
):
options = utils.Options(options)
if delete_dangling_volumes is not None: # pragma: no cover
warnings.warn(
'`delete_dangling_volumes` parameter is deprecated '
'and will be removed in v0.6, use `volumes` instead',
RuntimeWarning, stacklevel=2,
)
options.setdefault('volumes', delete_dangling_volumes)
delete_image_callback = delete_image and self.image.get_delete_callback()
options.setdefault('volumes', True) # default option
fabricio.run('docker rm {options} {container}'.format(
container=self,
options=options,
))
if delete_image_callback:
delete_image_callback()
def run(self, tag=None, registry=None, account=None):
self.image[registry:tag:account].run(
command=self.command,
temporary=False,
name=self,
options=self.options,
)
def execute(
self,
command=None,
quiet=True,
use_cache=False,
options=(),
):
if not command:
raise ValueError('Must provide command to execute')
options = utils.Options(options)
options.setdefault('tty', True)
options.setdefault('interactive', True)
exec_command = 'docker exec {options} {container} {command}'
return fabricio.run(
exec_command.format(
container=self,
command=command,
options=options,
),
quiet=quiet,
use_cache=use_cache,
)
def start(self):
command = 'docker start {container}'
fabricio.run(command.format(container=self))
def stop(self, timeout=None):
if timeout is None:
timeout = self.stop_timeout
command = 'docker stop --time {timeout} {container}'
fabricio.run(command.format(container=self, timeout=timeout))
def reload(self, timeout=None):
if timeout is None:
timeout = self.stop_timeout
command = 'docker restart --time {timeout} {container}'
fabricio.run(command.format(container=self, timeout=timeout))
def rename(self, new_name):
command = 'docker rename {container} {new_name}'
fabricio.run(command.format(container=self, new_name=new_name))
self.name = new_name
def signal(self, signal):
command = 'docker kill --signal {signal} {container}'
fabricio.run(command.format(container=self, signal=signal))
@property
def image_id(self):
return self.info['Image']
def update(self, tag=None, registry=None, account=None, force=False):
if not force:
try:
if self.image_id == self.image[registry:tag:account].info['Id']:
self.start() # force starting container
return False
except ContainerNotFoundError:
pass
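        # drop any stale backup container (and its image) left behind by a previous update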
obsolete_container = self.get_backup_version()
try:
obsolete_container.delete(delete_image=True)
except fabricio.Error:
pass
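        # rename the currently running container to '<name>_backup' so revert() can restore it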
try:
backup_container = self.fork()
backup_container.rename(obsolete_container.name)
except fabricio.Error:
pass # current container not found
else:
backup_container.stop()
self.run(tag=tag, registry=registry, account=account)
return True
def revert(self):
backup_container = self.get_backup_version()
try:
backup_container.info
except ContainerNotFoundError:
raise ContainerError('backup container not found')
self.stop()
backup_container.start()
self.delete(delete_image=True)
backup_container.rename(self.name)
def get_backup_version(self):
return self.fork(name='{container}_backup'.format(container=self))
def destroy(self):
self.delete(force=True, delete_image=True)
try:
self.get_backup_version().delete(force=True, delete_image=True)
except fabricio.Error:
pass
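# Rough usage sketch (hypothetical names; the constructor and the `image` attribute
# come from BaseService in .base, which is not shown here):
#
#     app = Container(name='app', image='nginx:stable', options={'publish': '80:80'})
#     app.update(tag='stable')   # re-runs the container only if the image id changed
#     app.revert()               # swap back to the automatically kept '<name>_backup'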
| renskiy/fabricio | fabricio/docker/container.py | Python | mit | 5,302 |
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import json
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.six.moves import input
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
from ansible.utils.color import stringc
from ansible.utils.display import Display
display = Display()
class GalaxyLogin(object):
''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy
self.github_username = None
self.github_password = None
self._validate_certs = not context.CLIARGS['ignore_certs']
if github_token is None:
self.get_credentials()
def get_credentials(self):
display.display(u'\n\n' + "We need your " + stringc("GitHub login", 'bright cyan') +
" to identify you.", screen_only=True)
display.display("This information will " + stringc("not be sent to Galaxy", 'bright cyan') +
", only to " + stringc("api.github.com.", "yellow"), screen_only=True)
display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token", 'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True)
try:
self.github_username = input("GitHub Username: ")
except Exception:
pass
try:
self.github_password = getpass.getpass("Password for %s: " % self.github_username)
except Exception:
pass
if not self.github_username or not self.github_password:
raise AnsibleError("Invalid GitHub credentials. Username and password are required.")
def remove_github_token(self):
'''
If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
retrieve the token after creation, so we are forced to create a new one.
'''
try:
tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True,
validate_certs=self._validate_certs, http_agent=user_agent()))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
for token in tokens:
if token['note'] == 'ansible-galaxy login':
display.vvvvv('removing token: %s' % token['token_last_eight'])
try:
open_url('https://api.github.com/authorizations/%d' % token['id'],
url_username=self.github_username, url_password=self.github_password, method='DELETE',
force_basic_auth=True, validate_certs=self._validate_certs, http_agent=user_agent())
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
def create_github_token(self):
'''
Create a personal authorization token with a note of 'ansible-galaxy login'
'''
self.remove_github_token()
args = json.dumps({"scopes": ["public_repo"], "note": "ansible-galaxy login"})
try:
data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True, data=args,
validate_certs=self._validate_certs, http_agent=user_agent()))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
return data['token']
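# Illustrative flow (a sketch; `galaxy_context` stands for the object normally
# supplied by the `ansible-galaxy login` CLI command):
#
#     login = GalaxyLogin(galaxy_context)    # prompts for GitHub username/password
#     token = login.create_github_token()    # replaces any stale 'ansible-galaxy login' token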
| azaghal/ansible | lib/ansible/galaxy/login.py | Python | gpl-3.0 | 4,922 |
import nebu.cli.main
def test_for_version(monkeypatch, invoker):
version = 'X.Y.Z'
monkeypatch.setattr(nebu.cli.main, '__version__', version)
from nebu.cli.main import cli
args = ['--version']
result = invoker(cli, args)
assert result.exit_code == 0
expected_output = 'Nebuchadnezzar {}\n'.format(version)
expected_output += "The semantic version of Neb could not be read.\n"
expected_output += "Please submit a bug report.\n"
assert result.output == expected_output
def test_old_version(monkeypatch, invoker, requests_mock):
version = '0.0.0'
monkeypatch.setattr(nebu.cli.main, '__version__', version)
content = b'{"releases": {"0.0.0": [], "1.0.0": []}}'
content_size = len(content)
headers = {'Content-Length': str(content_size)}
requests_mock.get(
"https://pypi.org/pypi/nebuchadnezzar/json",
content=content,
headers=headers,
)
from nebu.cli.main import cli
args = ['--version']
result = invoker(cli, args)
assert result.exit_code == 0
import re
expected_output = 'Nebuchadnezzar {}\n'.format(version)
expected_output += "Version available for install.\n"
output_no_version = re.sub(r"Version \w+(\.\w+)* available",
"Version available",
result.output)
assert output_no_version == expected_output
def test_bad_remote_url(requests_mock):
from nebu.cli.main import get_remote_releases
bad_url = "bad_url.!@#$%^&*()_+"
requests_mock.get(
bad_url,
text='Not Found',
status_code=404,
)
path = []
assert get_remote_releases(bad_url, path) == []
def test_bad_remote_path(requests_mock):
from nebu.cli.main import get_remote_releases
url = "https://pypi.org/pypi/nebuchadnezzar/json"
content = b'{"releases": {"0.0.0": [], "1.0.0": []}}'
content_size = len(content)
headers = {'Content-Length': str(content_size)}
requests_mock.get(
"https://pypi.org/pypi/nebuchadnezzar/json",
content=content,
headers=headers,
)
bad_path = ["bad_path.!@#$%^&*()_+"]
assert get_remote_releases(url, bad_path) == []
def test_no_versions_found():
from nebu.cli.main import get_latest_released_version
def empty_version_list():
return []
assert get_latest_released_version(empty_version_list) == ""
def test_help_formatter(invoker):
import textwrap
import click
from nebu.cli.main import HelpSectionsGroup
@click.group(cls=HelpSectionsGroup)
def cli(ctx):
pass
@cli.command(help_section='a')
def a():
pass
@cli.command(help_section='b')
def b():
pass
args = ['--help']
result = invoker(cli, args)
assert result.exit_code == 0
expected_output = textwrap.dedent("""
Usage: cli [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
[a]
a
[b]
b
""")
assert ('\n' + result.output) == expected_output
def test_help_formatter_no_cmd(invoker):
import textwrap
import click
from nebu.cli.main import HelpSectionsGroup
@click.group(cls=HelpSectionsGroup)
def cli(ctx):
pass
args = ['--help']
result = invoker(cli, args)
assert result.exit_code == 0
expected_output = textwrap.dedent("""
Usage: cli [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
""")
assert ('\n' + result.output) == expected_output
| Connexions/nebuchadnezzar | nebu/tests/cli/test_main.py | Python | agpl-3.0 | 3,573 |
"""initial migration
Revision ID: 43e4ff56fbb
Revises: None
Create Date: 2015-09-15 17:37:55.801842
"""
# revision identifiers, used by Alembic.
revision = '43e4ff56fbb'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('articles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.Unicode(length=255), nullable=True),
sa.Column('text', sa.UnicodeText(), nullable=True),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('publish_time', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('articles')
### end Alembic commands ###
| JXNU-ACS/learn-blog | migrations/versions/43e4ff56fbb_initial_migration.py | Python | mit | 865 |
# -*- coding: utf-8 -*-
import re
from .service import Service
from .utils import geocoding_utils
from .utils import geocoding_constants
from .utils import TableGeocodingLock
from ...utils.logger import log
from ...io.managers.source_manager import SourceManager
from ...io.carto import read_carto, to_carto, has_table, delete_table, rename_table, copy_table, create_table_from_query
CARTO_INDEX_KEY = 'cartodb_id'
class Geocoding(Service):
"""Geocoding using CARTO data services.
This requires a CARTO account with an API key that allows for using geocoding services;
(through explicit argument in constructor or via the default credentials).
To prevent having to geocode records that have been previously geocoded, and thus spend quota unnecessarily,
you should always preserve the ``the_geom`` and ``carto_geocode_hash`` columns generated by the
geocoding process. This will happen automatically if your input is a table from CARTO processed in place
(i.e. without a ``table_name`` parameter) or if you save your results in a CARTO table using the ``table_name``
parameter, and only use the resulting table for any further geocoding.
In case you're geocoding local data from a ``DataFrame`` that you plan to re-geocode again, (e.g. because
you're making your work reproducible by saving all the data preparation steps in a notebook),
we advise to save the geocoding results immediately to the same store from where the data is originally taken,
for example:
>>> df = pandas.read_csv('my_data')
>>> geocoded_df = Geocoding().geocode(df, 'address').data
>>> geocoded_df.to_csv('my_data')
As an alternative, you can use the ``cached`` option to store geocoding results in a CARTO table
and reuse them in later geocodings. To do this, you need to use the ``table_name`` parameter with the name
of the table used to cache the results.
If the same dataframe is geocoded repeatedly no credits will be spent, but note there is a time overhead
related to uploading the dataframe to a temporary table for checking for changes.
>>> df = pandas.read_csv('my_data')
>>> geocoded_df = Geocoding().geocode(df, 'address', table_name='my_data', cached=True).data
If you execute the previous code multiple times it will only spend credits on the first geocoding;
later ones will reuse the results stored in the ``my_data`` table. This will require extra processing
time. If the CSV file should ever change, cached results will only be applied to unmodified
records, and new geocoding will be performed only on new or changed records.
"""
def __init__(self, credentials=None):
super(Geocoding, self).__init__(credentials=credentials, quota_service=geocoding_constants.QUOTA_SERVICE)
def geocode(self, source, street,
city=None, state=None, country=None,
status=geocoding_constants.DEFAULT_STATUS,
table_name=None, if_exists='fail',
dry_run=False, cached=None,
null_geom_value=None):
"""Geocode method.
Args:
source (str, pandas.DataFrame, geopandas.GeoDataFrame):
table, SQL query or DataFrame object to be geocoded.
street (str): name of the column containing postal addresses
city (dict, optional): dictionary with either a `column` key
with the name of a column containing the addresses' city names or
a `value` key with a literal city value, e.g. 'New York'.
It also accepts a string, in which case `column` is implied.
state (dict, optional): dictionary with either a `column` key
with the name of a column containing the addresses' state names or
a `value` key with a literal state value, e.g. 'WA'.
It also accepts a string, in which case `column` is implied.
country (dict, optional): dictionary with either a `column` key
with the name of a column containing the addresses' country names or
a `value` key with a literal country value, e.g. 'US'.
It also accepts a string, in which case `column` is implied.
status (dict, optional): dictionary that defines a mapping from geocoding state
attributes ('relevance', 'precision', 'match_types') to column names.
(See https://carto.com/developers/data-services-api/reference/)
Columns will be added to the result data for the requested attributes.
By default a column ``gc_status_rel`` will be created for the geocoding
_relevance_. The special attribute '*' refers to all the status
attributes as a JSON object.
table_name (str, optional): the geocoding results will be placed in a new
CARTO table with this name.
if_exists (str, optional): Behavior for creating new datasets, only applicable
if table_name isn't None;
Options are 'fail', 'replace', or 'append'. Defaults to 'fail'.
cached (bool, optional): Use cache geocoding results, saving the results in a
table. This parameter should be used along with ``table_name``.
dry_run (bool, optional): no actual geocoding will be performed (useful to
check the needed quota)
null_geom_value (Object, optional): value for the `the_geom` column when it's null.
Defaults to None
Returns:
A named-tuple ``(data, metadata)`` containing a ``data`` geopandas.GeoDataFrame
and a ``metadata`` dictionary with global information about
the geocoding process.
The ``data`` contains a ``geometry`` column with point locations for the geocoded addresses
and also a ``carto_geocode_hash`` that, if preserved, can avoid re-geocoding
unchanged data in future calls to geocode.
The ``metadata``, as described in https://carto.com/developers/data-services-api/reference/,
contains the following information:
+-------------+--------+------------------------------------------------------------+
| Name | Type | Description |
+=============+========+============================================================+
| precision | text | precise or interpolated |
+-------------+--------+------------------------------------------------------------+
| relevance | number | 0 to 1, higher being more relevant |
+-------------+--------+------------------------------------------------------------+
| match_types | array | list of match type strings |
| | | point_of_interest, country, state, county, locality, |
| | | district, street, intersection, street_number, postal_code |
+-------------+--------+------------------------------------------------------------+
By default the ``relevance`` is stored in an output column named ``gc_status_rel``. The name of the
column and in general what attributes are added as columns can be configured by using a ``status``
dictionary associating column names to status attribute.
Raises:
ValueError: if the `cached` param is set without `table_name`.
Examples:
Geocode a DataFrame:
>>> df = pandas.DataFrame([['Gran Vía 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address','city'])
>>> geocoded_gdf, metadata = Geocoding().geocode(
... df, street='address', city='city', country={'value': 'Spain'})
>>> geocoded_gdf.head()
Geocode a table from CARTO:
>>> gdf = read_carto('table_name')
>>> geocoded_gdf, metadata = Geocoding().geocode(gdf, street='address')
>>> geocoded_gdf.head()
Geocode a query against a table from CARTO:
>>> gdf = read_carto('SELECT * FROM table_name WHERE value > 1000')
>>> geocoded_gdf, metadata = Geocoding().geocode(gdf, street='address')
>>> geocoded_gdf.head()
Obtain the number of credits needed to geocode a CARTO table:
>>> gdf = read_carto('table_name')
>>> geocoded_gdf, metadata = Geocoding().geocode(gdf, street='address', dry_run=True)
>>> print(metadata['required_quota'])
Filter results by relevance:
>>> df = pandas.DataFrame([['Gran Vía 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address','city'])
>>> geocoded_gdf, metadata = Geocoding().geocode(
... df,
... street='address',
... city='city',
... country={'value': 'Spain'},
... status=['relevance'])
>>> # show rows with relevance greater than 0.7:
>>> print(geocoded_gdf[geocoded_gdf['carto_geocode_relevance'] > 0.7])
"""
self._source_manager = SourceManager(source, self._credentials)
self.columns = self._source_manager.get_column_names()
if cached:
if not table_name:
raise ValueError('There is no "table_name" to cache the data')
return self._cached_geocode(source, table_name, street, city=city, state=state, country=country,
dry_run=dry_run, status=status)
city, state, country = [
geocoding_utils.column_or_value_arg(arg, self.columns) for arg in [city, state, country]
]
input_table_name, is_temporary = self._table_for_geocoding(source, table_name, if_exists, dry_run)
metadata = self._geocode(input_table_name, street, city, state, country, status, dry_run)
if dry_run:
return self.result(data=None, metadata=metadata)
gdf = read_carto(input_table_name, self._credentials, null_geom_value=null_geom_value)
if self._source_manager.is_dataframe() and CARTO_INDEX_KEY in gdf:
del gdf[CARTO_INDEX_KEY]
if is_temporary:
delete_table(input_table_name, self._credentials, log_enabled=False)
result = self.result(data=gdf, metadata=metadata)
log.info('Success! Data geocoded correctly')
return result
def _cached_geocode(self, source, table_name, street, city, state, country, status, dry_run):
"""Geocode a dataframe caching results into a table.
If the same dataframe is geocoded repeatedly no credits will be spent.
But note there is a time overhead related to uploading the dataframe to a
temporary table for checking for changes.
"""
has_cache = has_table(table_name, self._credentials)
cache_columns = []
if has_cache:
cache_source_manager = SourceManager(table_name, self._credentials)
cache_columns = cache_source_manager.get_column_names()
if geocoding_constants.HASH_COLUMN not in cache_columns:
raise ValueError('Cache table {} exists but is not a valid geocode table'.format(table_name))
if geocoding_constants.HASH_COLUMN in self.columns or not has_cache:
return self.geocode(
source, street=street, city=city, state=state, status=status,
country=country, table_name=table_name, dry_run=dry_run, if_exists='replace')
tmp_table_name = self._new_temporary_table_name()
if self._source_manager.is_table():
raise ValueError('cached geocoding cannot be used with tables')
to_carto(source, tmp_table_name, self._credentials, log_enabled=False)
_, status_columns = geocoding_utils.status_assignment_columns(status)
add_columns = [c for c in status_columns if c[0] in cache_columns]
add_columns += [(geocoding_constants.HASH_COLUMN, 'text')]
log.debug("Adding columns %s if needed", ', '.join([c[0] for c in add_columns]))
alter_sql = "ALTER TABLE {tmp_table} {add_columns};".format(
tmp_table=tmp_table_name,
add_columns=','.join([
'ADD COLUMN IF NOT EXISTS {} {}'.format(name, type) for name, type in add_columns]))
self._execute_query(alter_sql)
hcity, hstate, hcountry = [
geocoding_utils.column_or_value_arg(arg, self.columns) for arg in [city, state, country]
]
hash_expr = geocoding_utils.hash_expr(street, hcity, hstate, hcountry, table_prefix=tmp_table_name)
columns_to_update = [c[0] for c in add_columns]
columns_to_update.append('the_geom')
columns_expr = ','.join(["""{c} = {t}.{c} """.format(t=table_name, c=c) for c in columns_to_update])
self._execute_query(
"""
UPDATE {tmp_table}
SET {columns_to_update}
FROM {table} WHERE {hash_expr}={table}.{hash}
""".format(
tmp_table=tmp_table_name,
columns_to_update=columns_expr,
table=table_name,
hash=geocoding_constants.HASH_COLUMN,
hash_expr=hash_expr
))
rename_table(
table_name=tmp_table_name,
new_table_name=table_name,
credentials=self._credentials,
if_exists='replace',
log_enabled=False
)
# TODO: should remove the cartodb_id column from the result
# TODO: refactor to share code with geocode() and call self._geocode() here instead
# actually to keep hashing knowledge encapsulated (AFW) this should be handled by
# _geocode using an additional parameter for an input table
gdf, metadata = self.geocode(table_name, street=street, city=city, status=status,
state=state, country=country, dry_run=dry_run)
return self.result(data=gdf, metadata=metadata)
def _table_for_geocoding(self, source, table_name, if_exists, dry_run):
is_temporary = False
input_table_name = table_name
if self._source_manager.is_table():
if table_name:
copy_table(source, input_table_name, self._credentials, if_exists, log_enabled=False)
else:
input_table_name = source
elif self._source_manager.is_query():
if not input_table_name:
input_table_name = self._new_temporary_table_name()
is_temporary = True
create_table_from_query(source, input_table_name, self._credentials, if_exists, log_enabled=False)
elif self._source_manager.is_dataframe():
if not input_table_name:
input_table_name = self._new_temporary_table_name()
is_temporary = True
to_carto(source, input_table_name, self._credentials, if_exists, log_enabled=False)
return (input_table_name, is_temporary)
# Note that this can be optimized for non in-place cases (table_name is not None), e.g.
# injecting the input query in the geocoding expression,
# receiving geocoding results instead of storing in a table, etc.
# But that would make transition to using AFW harder.
def _geocode(self, table_name, street, city=None, state=None, country=None, status=None, dry_run=False):
# Internal Geocoding implementation.
# Geocode a table's rows not already geocoded in a dataset'
log.debug('table_name = "%s"', table_name)
log.debug('street = "%s"', street)
log.debug('city = "%s"', city)
log.debug('state = "%s"', state)
log.debug('country = "%s"', country)
log.debug('status = "%s"', status)
log.debug('dry_run = "%s"', dry_run)
output = {}
summary = {s: 0 for s in [
'new_geocoded', 'new_nongeocoded',
'changed_geocoded', 'changed_nongeocoded',
'previously_geocoded', 'previously_nongeocoded']}
# TODO: Use a single transaction so that reported changes (posterior - prior queries)
# are only caused by the geocoding process. Note that no rollback should be
# performed once the geocoding update is executed, since
# quota spent by the Dataservices function would not be rolled back;
# hence a Python `with` statement is not used here.
# transaction = connection.begin()
result = self._execute_prior_summary(table_name, street, city, state, country)
if result:
for row in result.get('rows'):
gc_state = row.get('gc_state')
count = row.get('count')
summary[gc_state] = count
geocoding_utils.set_pre_summary_info(summary, output)
aborted = False
if not dry_run:
provider = self.provider()
if provider not in ['google']: # Geocoder providers without server quota (use the client API key)
available_quota = self.available_quota()
if output['required_quota'] > available_quota:
raise Exception('Your CARTO account does not have enough Geocoding quota: {}/{}'.format(
output['required_quota'],
available_quota
))
if output['required_quota'] > 0:
with TableGeocodingLock(self._execute_query, table_name) as locked:
if not locked:
output['error'] = 'The table is already being geocoded'
output['aborted'] = aborted = True
else:
schema = self._schema()
sql, add_columns = geocoding_utils.geocode_query(
table_name, schema, street, city, state, country, status)
add_columns += [(geocoding_constants.HASH_COLUMN, 'text')]
log.debug("Adding columns %s if needed", ', '.join([c[0] for c in add_columns]))
alter_sql = "ALTER TABLE {table} {add_columns};".format(
table=table_name,
add_columns=','.join([
'ADD COLUMN IF NOT EXISTS {} {}'.format(name, type) for name, type in add_columns]))
self._execute_query(alter_sql)
log.debug("Executing query: %s", sql)
result = None
try:
result = self._execute_long_running_query(sql)
except Exception as err:
log.error(err)
msg = str(err)
output['error'] = msg
# FIXME: Python SDK should return proper exceptions
# see: https://github.com/CartoDB/cartoframes/issues/751
match = re.search(
r'Remaining quota:\s+(\d+)\.\s+Estimated cost:\s+(\d+)',
msg, re.MULTILINE | re.IGNORECASE
)
if match:
output['remaining_quota'] = int(match.group(1))
output['estimated_cost'] = int(match.group(2))
aborted = True
# Don't rollback to avoid losing any partial geocodification:
# TODO
# transaction.commit()
if result and not aborted:
# Number of updated rows not available for batch queries
# output['updated_rows'] = result.rowcount
# log.debug('Number of rows updated: %d', output['updated_rows'])
pass
if not aborted:
sql = geocoding_utils.posterior_summary_query(table_name)
log.debug("Executing result summary query: %s", sql)
result = self._execute_query(sql)
geocoding_utils.set_post_summary_info(summary, result, output)
if not aborted:
# TODO
# transaction.commit()
pass
return output # TODO: GeocodeResult object
def _execute_prior_summary(self, dataset_name, street, city, state, country):
sql = geocoding_utils.exists_column_query(dataset_name, geocoding_constants.HASH_COLUMN)
log.debug("Executing check first time query: %s", sql)
result = self._execute_query(sql)
if not result or result.get('total_rows', 0) == 0:
sql = geocoding_utils.first_time_summary_query(dataset_name, street, city, state, country)
log.debug("Executing first time summary query: %s", sql)
else:
sql = geocoding_utils.prior_summary_query(dataset_name, street, city, state, country)
log.debug("Executing summary query: %s", sql)
return self._execute_query(sql)
| CartoDB/cartoframes | cartoframes/data/services/geocoding.py | Python | bsd-3-clause | 21,461 |
#!/usr/bin/env python3
import getopt
import glob
import os
import sys
import diamond.debug as debug
import diamond.schema as schema
def Help():
debug.dprint("Usage: update_options [OPTIONS] ... [FILES]\n" + \
"\n" + \
"Updates flml, and adml files. If FILES is not specified, all .flml, and .adml files in\n" + \
"tests/*/., tests/*/*/., longtests/*/., longtests/*/*/. and examples/*/. will be updated. Options:\n" + \
"\n" + \
"-h Display this help\n" + \
"-v Verbose mode", 0)
return
try:
opts, args = getopt.getopt(sys.argv[1:], "hv")
except getopt.GetoptError:
Help()
sys.exit(-1)
if ("-h", "") in opts:
Help()
sys.exit(0)
if not ("-v", "") in opts:
debug.SetDebugLevel(0)
rootDir = os.path.join(os.path.dirname(__file__), os.path.pardir)
testDir = os.path.join(rootDir, "tests")
longtestDir = os.path.join(rootDir, "longtests")
examplesDir = os.path.join(rootDir, "examples")
extdict = {"flml" : "fluidity_options.rng",
"adml" : "test_advection_diffusion_options.rng"}
# cache parsed schema files
schemadict = {}
for k,v in extdict.items():
schemadict[k] = schema.Schema(os.path.join(rootDir, "schemas", v))
filenames = args
if len(filenames) == 0:
filenames = []
for k,v in extdict.items():
filenames += glob.glob(os.path.join(testDir, "*", "*."+k))
filenames += glob.glob(os.path.join(testDir, "*", "*", "*."+k))
filenames += glob.glob(os.path.join(longtestDir, "*", "*."+k))
filenames += glob.glob(os.path.join(longtestDir, "*", "*", "*."+k))
filenames += glob.glob(os.path.join(examplesDir, "*", "*."+k))
invalidFiles = []
updated = 0
for filename in filenames:
debug.dprint("Processing " + str(filename), 1)
ext = filename.split(".")[-1]
sch = schemadict[ext]
# Read the file and check that either the file is valid, or diamond.schema
# can make the file valid by adding in the missing elements
optionsTree = sch.read(filename)
lost_eles, added_eles, lost_attrs, added_attrs = sch.read_errors()
if len(lost_eles) + len(lost_attrs) > 0 or not optionsTree.valid:
debug.deprint(str(filename) + ": Invalid", 0)
debug.deprint(str(filename) + " errors: " + str((lost_eles, added_eles, lost_attrs, added_attrs)), 1)
invalidFiles.append(filename)
continue
# Write out the updated options file
optionsTree.write(filename)
debug.dprint(str(filename) + ": Updated", 0)
updated += 1
debug.dprint("Summary:", 0)
debug.dprint("Invalid options files:", 0)
for filename in invalidFiles:
debug.dprint(filename, 0)
debug.dprint("Invalid: " + str(len(invalidFiles)), 0)
debug.dprint("Updated: " + str(updated), 0)
| FluidityStokes/fluidity | tools/update_options.py | Python | lgpl-2.1 | 2,714 |
import logging
import copy
from itertools import count
import claripy
from claripy.vsa import ValueSet, RegionAnnotation
from ..storage.memory import SimMemory, AddressWrapper, MemoryStoreRequest, RegionMap
from ..s_errors import SimMemoryError
from ..s_options import KEEP_MEMORY_READS_DISCRETE, AVOID_MULTIVALUED_READS
from .symbolic_memory import SimSymbolicMemory
from ..s_action_object import _raw_ast
l = logging.getLogger("simuvex.plugins.abstract_memory")
WRITE_TARGETS_LIMIT = 2048
READ_TARGETS_LIMIT = 4096
#pylint:disable=unidiomatic-typecheck
invalid_read_ctr = count()
class MemoryRegion(object):
def __init__(self, id, state, is_stack=False, related_function_addr=None, init_memory=True, backer_dict=None, endness=None): #pylint:disable=redefined-builtin,unused-argument
self._endness = endness
self._id = id
self._state = state
self._is_stack = id.startswith('stack_') # TODO: Fix it
self._related_function_addr = related_function_addr
# This is a map from tuple (basicblock_key, stmt_id) to
# AbstractLocation objects
self._alocs = { }
if init_memory:
if backer_dict is None:
self._memory = SimSymbolicMemory(memory_id=id, endness=self._endness, abstract_backer=True)
else:
self._memory = SimSymbolicMemory(memory_backer=backer_dict, memory_id=id, endness=self._endness, abstract_backer=True)
self._memory.set_state(state)
@property
def id(self):
return self._id
@property
def memory(self):
return self._memory
@property
def state(self):
return self._state
@property
def alocs(self):
return self._alocs
@property
def is_stack(self):
return self._is_stack
@property
def related_function_addr(self):
return self._related_function_addr
def get_abstract_locations(self, addr, size):
"""
Get a list of abstract locations that is within the range of [addr, addr + size]
This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad
implementation for now.
:param addr: Starting address of the memory region.
:param size: Size of the memory region, in bytes.
:return: A list of covered AbstractLocation objects, or an empty list if there is none.
"""
ret = [ ]
for aloc in self._alocs.itervalues():
for seg in aloc.segments:
if seg.offset >= addr and seg.offset < addr + size:
ret.append(aloc)
break
return ret
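# Illustrative sketch of the range check above (hypothetical offsets): if this region
# holds alocs with segments at offsets 0x10 (size 4) and 0x20 (size 8), then
# get_abstract_locations(0x0, 0x18) returns only the first aloc, because 0x20 lies
# outside the half-open range [0x0, 0x0 + 0x18).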
def addrs_for_name(self, name):
return self.memory.addrs_for_name(name)
def set_state(self, state):
self._state = state
self._memory.set_state(state)
def copy(self):
r = MemoryRegion(self._id, self.state,
is_stack=self._is_stack,
related_function_addr=self._related_function_addr,
init_memory=False, endness=self._endness)
r._memory = self.memory.copy()
r._alocs = copy.deepcopy(self._alocs)
return r
def store(self, request, bbl_addr, stmt_id, ins_addr):
if ins_addr is not None:
#aloc_id = (bbl_addr, stmt_id)
aloc_id = ins_addr
else:
# It comes from a SimProcedure. We'll use bbl_addr as the aloc_id
aloc_id = bbl_addr
if aloc_id not in self._alocs:
self._alocs[aloc_id] = self.state.se.AbstractLocation(bbl_addr,
stmt_id,
self.id,
region_offset=request.addr,
size=len(request.data) / 8)
return self.memory._store(request)
else:
if self._alocs[aloc_id].update(request.addr, len(request.data) / 8):
return self.memory._store(request)
else:
#return self.memory._store_with_merge(request)
return self.memory._store(request)
def load(self, addr, size, bbl_addr, stmt_idx, ins_addr): #pylint:disable=unused-argument
#if bbl_addr is not None and stmt_id is not None:
return self.memory.load(addr, size, inspect=False)
def _merge_alocs(self, other_region):
"""
Helper function for merging.
"""
merging_occurred = False
for aloc_id, aloc in other_region.alocs.iteritems():
if aloc_id not in self.alocs:
self.alocs[aloc_id] = aloc.copy()
merging_occurred = True
else:
# Update it
merging_occurred |= self.alocs[aloc_id].merge(aloc)
return merging_occurred
def merge(self, others, merge_conditions):
merging_occurred = False
for other_region in others:
merging_occurred |= self._merge_alocs(other_region)
merging_occurred |= self.memory.merge([other_region.memory], merge_conditions)
return merging_occurred
def widen(self, others):
widening_occurred = False
for other_region in others:
widening_occurred |= self._merge_alocs(other_region)
widening_occurred |= self.memory.widen([ other_region.memory ])
return widening_occurred
def __contains__(self, addr):
return addr in self.memory
def was_written_to(self, addr):
return self.memory.was_written_to(addr)
def dbg_print(self, indent=0):
"""
Print out debugging information
"""
print "%sA-locs:" % (" " * indent)
for aloc_id, aloc in self._alocs.items():
print "%s<0x%x> %s" % (" " * (indent + 2), aloc_id, aloc)
print "%sMemory:" % (" " * indent)
self.memory.dbg_print(indent=indent + 2)
class SimAbstractMemory(SimMemory): #pylint:disable=abstract-method
"""
This is an implementation of the abstract store in paper [TODO].
Some differences:
# For stack variables, we map the absolute stack address to each region so
that we can effectively trace stack accesses. When tracing into a new
function, you should call set_stack_address_mapping() to create a new mapping.
When exiting from a function, you should cancel the previous mapping by
calling unset_stack_address_mapping().
Currently this is only used for stack!
"""
def __init__(self, memory_backer=None, memory_id="mem", endness=None):
SimMemory.__init__(self, endness=endness)
self._regions = {}
self._stack_region_map = RegionMap(True)
self._generic_region_map = RegionMap(False)
self._stack_size = None
self._memory_id = memory_id
self.id = self._memory_id
if memory_backer is not None:
for region, backer_dict in memory_backer.items():
self._regions[region] = MemoryRegion(region, self.state,
init_memory=True,
backer_dict=backer_dict,
endness=self.endness)
@property
def regions(self):
return self._regions
def _region_base(self, region):
"""
Get the base address of a memory region.
:param str region: ID of the memory region
:return: Address of the memory region
:rtype: int
"""
if region == 'global':
region_base_addr = 0
elif region.startswith('stack_'):
region_base_addr = self._stack_region_map.absolutize(region, 0)
else:
region_base_addr = self._generic_region_map.absolutize(region, 0)
return region_base_addr
def stack_id(self, function_address):
"""
Return a memory region ID for a function. If the default region ID exists in the region mapping, an integer
will appended to the region name. In this way we can handle recursive function calls, or a function that
appears more than once in the call frame.
This also means that `stack_id()` should only be called when creating a new stack frame for a function. You are
not supposed to call this function every time you want to map a function address to a stack ID.
:param int function_address: Address of the function.
:return: ID of the new memory region.
:rtype; str
"""
region_id = 'stack_0x%x' % function_address
# deduplication
region_ids = self._stack_region_map.region_ids
if region_id not in region_ids:
return region_id
else:
for i in xrange(0, 2000):
new_region_id = region_id + '_%d' % i
if new_region_id not in region_ids:
return new_region_id
raise SimMemoryError('Cannot allocate region ID for function %#08x - recursion too deep' % function_address)
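# Illustrative behaviour (hypothetical address): creating three stack frames for the
# same function at 0x400a00 (e.g. a recursive call chain) yields the region IDs
# 'stack_0x400a00', 'stack_0x400a00_0' and 'stack_0x400a00_1', because each new frame
# skips IDs already present in the stack region map.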
def set_stack_size(self, size):
self._stack_size = size
def set_stack_address_mapping(self, absolute_address, region_id, related_function_address=None):
self._stack_region_map.map(absolute_address, region_id, related_function_address=related_function_address)
def unset_stack_address_mapping(self, absolute_address, region_id, function_address): # pylint:disable=unused-argument
self._stack_region_map.unmap_by_address(absolute_address)
def _normalize_address(self, region_id, relative_address, target_region=None):
"""
If this is a stack address, we convert it to a correct region and address
:param region_id: a string indicating which region the address is relative to
:param relative_address: an address that is relative to the region parameter
:param target_region: the ideal target region that address is normalized to. None means picking the best fit.
:return: an AddressWrapper object
"""
if self._stack_region_map.is_empty and self._generic_region_map.is_empty:
# We don't have any mapped region right now
return AddressWrapper(region_id, 0, relative_address, False, None)
# We wanna convert this address to an absolute address first
if region_id.startswith('stack_'):
absolute_address = self._stack_region_map.absolutize(region_id, relative_address)
else:
absolute_address = self._generic_region_map.absolutize(region_id, relative_address)
stack_base = self._stack_region_map.stack_base
if (relative_address <= stack_base and
relative_address > stack_base - self._stack_size) or \
(target_region is not None and target_region.startswith('stack_')):
# The absolute address seems to be in the stack region.
# Map it to stack
new_region_id, new_relative_address, related_function_addr = self._stack_region_map.relativize(
absolute_address,
target_region_id=target_region
)
return AddressWrapper(new_region_id, self._region_base(new_region_id), new_relative_address, True,
related_function_addr
)
else:
new_region_id, new_relative_address, related_function_addr = self._generic_region_map.relativize(
absolute_address,
target_region_id=target_region
)
return AddressWrapper(new_region_id, self._region_base(new_region_id), new_relative_address, False, None)
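# Illustrative sketch (hypothetical mapping): with 'stack_0x400a00' mapped at absolute
# address 0x7fff0000 and the access within the configured stack size, an address such
# as 0x7ffeffd0 is relativized into that stack region and returned as an AddressWrapper
# flagged as on-stack; addresses outside the stack window fall through to the generic
# region map instead.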
def set_state(self, state):
"""
Overriding the SimStatePlugin.set_state() method
:param state: A SimState object
:return: None
"""
self.state = state
for _,v in self._regions.items():
v.set_state(state)
def normalize_address(self, addr, is_write=False, convert_to_valueset=False, target_region=None): #pylint:disable=arguments-differ
"""
Convert a ValueSet object into a list of addresses.
:param addr: A ValueSet object (which describes an address)
:param is_write: Is this address used in a write or not
:param convert_to_valueset: True if you want to have a list of ValueSet instances instead of AddressWrappers,
False otherwise
:param target_region: Which region to normalize the address to. To leave the decision to SimuVEX, set it to None
:return: A list of AddressWrapper or ValueSet objects
"""
if type(addr) in (int, long):
addr = self.state.se.BVV(addr, self.state.arch.bits)
addr_with_regions = self._normalize_address_type(addr)
address_wrappers = [ ]
for region, addr_si in addr_with_regions:
if is_write:
concrete_addrs = addr_si.eval(WRITE_TARGETS_LIMIT)
if len(concrete_addrs) == WRITE_TARGETS_LIMIT:
self.state.log.add_event('mem', message='too many targets to write to. address = %s' % addr_si)
else:
concrete_addrs = addr_si.eval(READ_TARGETS_LIMIT)
if len(concrete_addrs) == READ_TARGETS_LIMIT:
self.state.log.add_event('mem', message='too many targets to read from. address = %s' % addr_si)
for c in concrete_addrs:
aw = self._normalize_address(region, c, target_region=target_region)
address_wrappers.append(aw)
if convert_to_valueset:
return [ i.to_valueset(self.state) for i in address_wrappers ]
else:
return address_wrappers
def _normalize_address_type(self, addr): #pylint:disable=no-self-use
"""
Convert address of different types to a list of mapping between region IDs and offsets (strided intervals).
:param claripy.ast.Base addr: Address to convert
:return: A list of mapping between region IDs and offsets.
:rtype: dict
"""
addr_e = _raw_ast(addr)
if isinstance(addr_e, (claripy.bv.BVV, claripy.vsa.StridedInterval, claripy.vsa.ValueSet)):
raise SimMemoryError('_normalize_address_type() does not take claripy models.')
if isinstance(addr_e, claripy.ast.Base):
if not isinstance(addr_e._model_vsa, ValueSet):
# Convert it to a ValueSet first by annotating it
addr_e = addr_e.annotate(RegionAnnotation('global', 0, addr_e._model_vsa))
return addr_e._model_vsa.items()
else:
raise SimMemoryError('Unsupported address type %s' % type(addr_e))
# FIXME: symbolic_length is also a hack!
def _store(self, req):
address_wrappers = self.normalize_address(req.addr, is_write=True, convert_to_valueset=False)
req.actual_addresses = [ ]
req.fallback_values = [ ]
req.symbolic_sized_values = [ ]
req.conditional_values = [ ]
req.simplified_values = [ ]
req.stored_values = [ ]
for aw in address_wrappers:
r = self._do_store(aw.address, req.data, aw.region, req.endness,
is_stack=aw.is_on_stack, related_function_addr=aw.function_address)
if r.completed:
req.completed = True
req.actual_addresses.append(aw.to_valueset(self.state))
req.constraints.extend(r.constraints)
req.fallback_values.extend(r.fallback_values)
req.symbolic_sized_values.extend(r.symbolic_sized_values)
req.conditional_values.extend(r.conditional_values)
req.simplified_values.extend(r.simplified_values)
req.stored_values.extend(r.stored_values)
# No constraints are generated...
return req
def _do_store(self, addr, data, key, endness, is_stack=False, related_function_addr=None):
if type(key) is not str:
raise Exception('Incorrect type %s of region_key' % type(key))
bbl_addr, stmt_id, ins_addr = self.state.scratch.bbl_addr, self.state.scratch.stmt_idx, self.state.scratch.ins_addr
if key not in self._regions:
self._regions[key] = MemoryRegion(
key,
self.state,
is_stack=is_stack,
related_function_addr=related_function_addr,
endness=self.endness
)
r = MemoryStoreRequest(addr, data=data, endness=endness)
self._regions[key].store(r, bbl_addr, stmt_id, ins_addr)
return r
def _load(self, addr, size, condition=None, fallback=None):
address_wrappers = self.normalize_address(addr, is_write=False)
if isinstance(size, claripy.ast.BV) and isinstance(size._model_vsa, ValueSet):
# raise Exception('Unsupported type %s for size' % type(size._model_vsa))
l.warning('_load(): size %s is a ValueSet. Something is wrong.', size)
if self.state.scratch.ins_addr is not None:
var_name = 'invalid_read_%d_%#x' % (
invalid_read_ctr.next(),
self.state.scratch.ins_addr
)
else:
var_name = 'invalid_read_%d_None' % invalid_read_ctr.next()
return address_wrappers, self.state.se.Unconstrained(var_name, 32), [True]
val = None
if len(address_wrappers) > 1 and AVOID_MULTIVALUED_READS in self.state.options:
val = self.state.se.Unconstrained('unconstrained_read', size * 8)
return address_wrappers, val, [True]
for aw in address_wrappers:
new_val = self._do_load(aw.address, size, aw.region,
is_stack=aw.is_on_stack, related_function_addr=aw.function_address)
if val is None:
if KEEP_MEMORY_READS_DISCRETE in self.state.options:
val = self.state.se.DSIS(to_conv=new_val, max_card=100000)
else:
val = new_val
else:
val = val.union(new_val)
return address_wrappers, val, [True]
def _do_load(self, addr, size, key, is_stack=False, related_function_addr=None):
if type(key) is not str:
raise Exception('Incorrect type %s of region_key' % type(key))
bbl_addr, stmt_id, ins_addr = self.state.scratch.bbl_addr, self.state.scratch.stmt_idx, self.state.scratch.ins_addr
if key not in self._regions:
self._regions[key] = MemoryRegion(key, state=self.state, is_stack=is_stack, related_function_addr=related_function_addr, endness=self.endness)
return self._regions[key].load(addr, size, bbl_addr, stmt_id, ins_addr)
def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
if type(addr) in (int, long):
addr = self.state.se.BVV(addr, self.state.arch.bits)
addr = self._normalize_address_type(addr)
# TODO: For now we are only finding in one region!
for region, si in addr:
si = self.state.se.SI(to_conv=si)
r, s, i = self._regions[region].memory.find(si, what, max_search=max_search,
max_symbolic_bytes=max_symbolic_bytes, default=default,
step=step
)
# Post process r so that it's still a ValueSet variable
region_base_addr = self._region_base(region)
r = self.state.se.ValueSet(r.size(), region, region_base_addr, r._model_vsa)
return r, s, i
def get_segments(self, addr, size):
"""
Get a segmented memory region based on AbstractLocation information available from VSA.
Here are some assumptions to make this method fast:
- The entire memory region [addr, addr + size] is located within the same MemoryRegion
- The address 'addr' has only one concrete value. It cannot be concretized to multiple values.
:param addr: An address
:param size: Size of the memory area in bytes
:return: An ordered list of sizes each segment in the requested memory region
"""
address_wrappers = self.normalize_address(addr, is_write=False)
# assert len(address_wrappers) > 0
aw = address_wrappers[0]
region_id = aw.region
if region_id in self.regions:
region = self.regions[region_id]
alocs = region.get_abstract_locations(aw.address, size)
# Collect all segments and sort them
segments = [ ]
for aloc in alocs:
segments.extend(aloc.segments)
segments = sorted(segments, key=lambda x: x.offset)
# Remove all overlapping segments
processed_segments = [ ]
last_seg = None
for seg in segments:
if last_seg is None:
last_seg = seg
processed_segments.append(seg)
else:
# Are they overlapping?
if seg.offset >= last_seg.offset and seg.offset <= last_seg.offset + size:
continue
processed_segments.append(seg)
# Make it a list of sizes
sizes = [ ]
next_pos = aw.address
for seg in processed_segments:
if seg.offset > next_pos:
gap = seg.offset - next_pos
assert gap > 0
sizes.append(gap)
next_pos += gap
if seg.size + next_pos > aw.address + size:
sizes.append(aw.address + size - next_pos)
next_pos += aw.address + size - next_pos
else:
sizes.append(seg.size)
next_pos += seg.size
if len(sizes) == 0:
return [ size ]
return sizes
else:
# The region doesn't exist. Then there is only one segment!
return [ size ]
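# Illustrative example (hypothetical aloc layout): requesting 16 bytes starting at the
# offset of an aloc whose segments have sizes 4 and 8 with a 2-byte gap between them
# yields [4, 2, 8] - segment sizes interleaved with gap sizes, as computed above.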
def copy(self):
"""
Make a copy of this SimAbstractMemory object
:return:
"""
am = SimAbstractMemory(memory_id=self._memory_id, endness=self.endness)
for region_id, region in self._regions.items():
am._regions[region_id] = region.copy()
am._stack_region_map = self._stack_region_map.copy()
am._generic_region_map = self._generic_region_map.copy()
am._stack_size = self._stack_size
return am
def merge(self, others, merge_conditions):
"""
Merge this guy with another SimAbstractMemory instance
"""
merging_occurred = False
for o in others:
for region_id, region in o._regions.items():
if region_id in self._regions:
merging_occurred |= self._regions[region_id].merge([region], merge_conditions)
else:
merging_occurred = True
self._regions[region_id] = region
return merging_occurred
def widen(self, others):
widening_occurred = False
for o in others:
for region_id, region in o._regions.items():
if region_id in self._regions:
widening_occurred |= self._regions[region_id].widen([ region ])
else:
widening_occurred = True
self._regions[region_id] = region
return widening_occurred
def __contains__(self, dst):
if type(dst) in (int, long):
dst = self.state.se.BVV(dst, self.state.arch.bits)
addrs = self._normalize_address_type(dst)
for region, addr in addrs:
address_wrapper = self._normalize_address(region, addr.min)
return address_wrapper.address in self.regions[address_wrapper.region]
return False
def was_written_to(self, dst):
if type(dst) in (int, long):
dst = self.state.se.BVV(dst, self.state.arch.bits)
addrs = self._normalize_address_type(dst)
for region, addr in addrs:
address_wrapper = self._normalize_address(region, addr.min)
return self.regions[address_wrapper.region].was_written_to(address_wrapper.address)
return False
def dbg_print(self):
"""
Print out debugging information
"""
for region_id, region in self.regions.items():
print "Region [%s]:" % region_id
region.dbg_print(indent=2)
| chubbymaggie/simuvex | simuvex/plugins/abstract_memory.py | Python | bsd-2-clause | 25,033 |
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import hr
| OCA/hr | hr_job_category/models/__init__.py | Python | agpl-3.0 | 82 |